| diff | msg | repo | sha | time |
|---|---|---|---|---|
| stringlengths 41–2.03M | stringlengths 1–1.5k ⌀ | stringlengths 5–40 | stringlengths 40–40 | stringlengths 20–20 |
```diff
--- a/include/rocksdb/status.h
+++ b/include/rocksdb/status.h
@@ ... @@ class Status {
 
   // Copy the specified status.
   Status(const Status& s);
-  void operator=(const Status& s);
+  Status& operator=(const Status& s);
+  Status(Status&& s)
+#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
+      noexcept
+#endif
+      ;
+  Status& operator=(Status&& s)
+#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
+      noexcept
+#endif
+      ;
   bool operator==(const Status& rhs) const;
   bool operator!=(const Status& rhs) const;
 
@@ ... @@ class Status {
 inline Status::Status(const Status& s) : code_(s.code_), subcode_(s.subcode_) {
   state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
 }
-inline void Status::operator=(const Status& s) {
+inline Status& Status::operator=(const Status& s) {
   // The following condition catches both aliasing (when this == &s),
   // and the common case where both s and *this are ok.
-  code_ = s.code_;
-  subcode_ = s.subcode_;
-  if (state_ != s.state_) {
+  if (this != &s) {
+    code_ = s.code_;
+    subcode_ = s.subcode_;
    delete[] state_;
    state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
  }
+  return *this;
+}
+
+inline Status::Status(Status&& s)
+#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
+    noexcept
+#endif
+    : Status() {
+  *this = std::move(s);
+}
+
+inline Status& Status::operator=(Status&& s)
+#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
+    noexcept
+#endif
+{
+  if (this != &s) {
+    code_ = std::move(s.code_);
+    s.code_ = kOk;
+    subcode_ = std::move(s.subcode_);
+    s.subcode_ = kNone;
+    delete[] state_;
+    state_ = nullptr;
+    std::swap(state_, s.state_);
+  }
+  return *this;
+}
 
 inline bool Status::operator==(const Status& rhs) const {
```
|
Merge pull request from yuslepukhin/enable_status_move
|
facebook/rocksdb
|
2ba03196d8fb5015ff29df86d1298b7d0bf8c9b6
|
2015-12-23T02:00:04Z
|
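The RocksDB change above is a classic rule-of-five upgrade: copy assignment now returns `Status&` and guards against self-assignment, and move operations are added behind an `_MSC_VER >= 1900` guard because MSVC only began accepting `noexcept` with Visual Studio 2015. Below is a minimal standalone sketch of the same pattern; `Box` and its members are illustrative stand-ins, not RocksDB types.

```cpp
#include <cstring>
#include <utility>

class Box {
 public:
  Box() = default;
  explicit Box(const char* msg) : state_(Copy(msg)) {}
  ~Box() { delete[] state_; }

  Box(const Box& s) : state_(s.state_ ? Copy(s.state_) : nullptr) {}

  Box& operator=(const Box& s) {
    if (this != &s) {  // catches aliasing, as in the diff above
      delete[] state_;
      state_ = s.state_ ? Copy(s.state_) : nullptr;
    }
    return *this;
  }

  // Move constructor delegates to move assignment via a default-constructed
  // object, mirroring the Status(Status&&) : Status() { *this = std::move(s); }
  // idiom in the diff.
  Box(Box&& s) noexcept : Box() { *this = std::move(s); }

  Box& operator=(Box&& s) noexcept {
    if (this != &s) {
      delete[] state_;
      state_ = nullptr;
      std::swap(state_, s.state_);  // steal the buffer; source is left empty
    }
    return *this;
  }

 private:
  static char* Copy(const char* s) {
    char* p = new char[std::strlen(s) + 1];
    std::strcpy(p, s);
    return p;
  }
  char* state_ = nullptr;
};

int main() {
  Box a("not found");
  Box b = std::move(a);  // a is now empty but still valid and destructible
  a = b;                 // deep copy back
  return 0;
}
```

Marking the move operations `noexcept` matters in practice: containers such as `std::vector` only move (rather than copy) elements during reallocation when the move constructor is non-throwing.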
```diff
new file mode 100755
index 0000000000000..3fd9babb53f34
--- /dev/null
+++ b/tensorflow/tools/ci_build/build_rbe.sh
@@ ... @@
+#!/bin/bash
+
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# Script for helping to record method for building the RBE docker images.
+#
+# The first argument to the script is expected to be the name of the docker file
+# to build. Example:
+#
+# $ ./build_rbe.sh Dockerfile.rbe.ubuntu16.04-manylinux2010
+
+function main() {
+  set -eu
+
+  cd "${0%/*}"
+
+  local DOCKERFILE="$(basename "$1")"
+  if [[ ! -e "$DOCKERFILE" ]]; then
+    echo "$DOCKERFILE does not exist in $PWD" >> /dev/stderr
+    exit 1
+  fi
+
+  local IMAGE_NAME_SUFFIX="${1#Dockerfile.rbe.}"
+  if [[ "$IMAGE_NAME_SUFFIX" == "$DOCKERFILE" ]]; then
+    echo 'File must start with "Dockerfile.rbe."' >> /dev/stderr
+    exit 1
+  fi
+
+  local ARGS=(
+    --config=cloudbuild.yaml
+    --machine-type=n1-highcpu-32
+    --substitutions=_DOCKERFILE="$1",_IMAGE_NAME="nosla-$IMAGE_NAME_SUFFIX"
+    --timeout=1h
+  )
+
+  gcloud --project=tensorflow-testing builds submit "${ARGS[@]}" .
+}
+
+main "$@"
deleted file mode 100755
index cd811de6bdf92..0000000000000
--- a/tensorflow/tools/ci_build/ci_rbe_docker_build.sh
+++ /dev/null
@@ ... @@
-#!/usr/bin/env bash
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-# Build TensorFlow Docker images for remote build
-#
-# Usage:
-#   ci_rbe_docker_build.sh -c  # docker image for cpu build
-#   ci_rbe_docker_build.sh -g  # docker image for gpu build
-
-function main {
-  cpu_build=false
-  gpu_build=false
-  publish=false
-
-  script_dir=$(dirname "$(readlink -f "$0")")
-  cd $script_dir
-
-  set_script_flags $@
-
-  build_tf_image
-
-  if [ "$publish" = true ]; then
-    publish_tf_image
-  fi
-}
-
-
-function set_script_flags {
-  OPTIND=1  # Reset for getopts, just in case.
-  while getopts "cf:ghn" opt; do
-    case "$opt" in
-      c)
-        cpu_build=true
-        ;;
-      g)
-        gpu_build=true
-        ;;
-      h)
-        print_usage
-        ;;
-      p)
-        publish=true
-        ;;
-      *)
-        print_usage "ERROR: unknown option"
-        ;;
-    esac
-  done
-  [[ "$cpu_build" = true ]] || [[ "$gpu_build" = true ]] || print_usage "ERROR: must specify build at least for one build type: cpu or gpu"
-
-}
-
-
-function print_usage {
-  echo "Usage: $(basename $0) -c | -g [options]"
-  echo "  -c  build image for CPU build (base image debian8-clang)"
-  echo "  -g  build image for GPU build (base image nvidia-clang)"
-  echo "  [option] is one of"
-  echo "  -n  not publish the locally-built image to GCR;"
-  echo "      the build process will publish image to GCR by default"
-  echo "  -h  display help messages"
-  if [[ -n $1 ]]; then
-    echo $1
-  fi
-  exit 1
-}
-
-function build_tf_image {
-  if [ "$cpu_build" = true ]; then
-    dockerfile="Dockerfile.rbe.cpu"
-    tf_image="tensorflow-rbe-cpu"
-  else
-    dockerfile="Dockerfile.rbe.gpu"
-    tf_image="tensorflow-rbe-gpu"
-  fi
-
-  docker build -f $dockerfile -t $tf_image .
-}
-
-function publish_tf_image {
-  gcr_tf_image="gcr.io/tensorflow/${tf_image}"
-  docker tag $tf_image $gcr_tf_image
-  gcloud docker -- push $gcr_tf_image
-}
-
-main $@
new file mode 100644
index 0000000000000..77748837dd216
--- /dev/null
+++ b/tensorflow/tools/ci_build/cloudbuild.yaml
@@ ... @@
+steps:
+- name: 'gcr.io/cloud-builders/docker'
+  args: ['build', '-f', '$_DOCKERFILE', '-t', 'gcr.io/$PROJECT_ID/$_IMAGE_NAME', '.']
+substitutions:
+  _DOCKERFILE: ''
+  _IMAGE_NAME: ''
+images:
+- 'gcr.io/$PROJECT_ID/$_IMAGE_NAME'
```
|
Add a simple script and config for building RBE images with Cloud Build.
|
tensorflow/tensorflow
|
25cadc04ba9c5b94865595a0d32bedb733191b5c
|
2020-01-13T23:41:37Z
|
```diff
--- a/xbmc/VideoReferenceClock.cpp
+++ b/xbmc/VideoReferenceClock.cpp
@@ ... @@ bool CVideoReferenceClock::SetupD3D()
   if (m_RefreshRate == 23 || m_RefreshRate == 29 || m_RefreshRate == 59)
     m_RefreshRate++;
 
+  if (g_Windowing.Interlaced())
+  {
+    m_RefreshRate *= 2;
+    CLog::Log(LOGDEBUG, "CVideoReferenceClock: display is interlaced");
+  }
+
   CLog::Log(LOGDEBUG, "CVideoReferenceClock: detected refreshrate: %i hertz, assuming %i hertz", m_PrevRefreshRate, (int)m_RefreshRate);
 }
 
@@ ... @@ bool CVideoReferenceClock::UpdateRefreshrate(bool Forced /*= false*/)
   if (DisplayMode.RefreshRate == 0)
     DisplayMode.RefreshRate = 60;
 
-  if (g_Windowing.Interlaced())
-  {
-    if (DisplayMode.RefreshRate == 23 || DisplayMode.RefreshRate == 29 || DisplayMode.RefreshRate == 59)
-      DisplayMode.RefreshRate = (int)((float)(DisplayMode.RefreshRate + 1) / 1.001f * 2.0f);
-    else
-      DisplayMode.RefreshRate *= 2;
-  }
-
   if (m_PrevRefreshRate != DisplayMode.RefreshRate || m_Width != DisplayMode.Width || m_Height != DisplayMode.Height || Forced)
   {
     m_PrevRefreshRate = DisplayMode.RefreshRate;
```
|
fixed: put interlaced check in the right place
|
xbmc/xbmc
|
8852ef0ad49b29a311ab422f9dc1998bf4a2434f
|
2010-05-30T13:33:52Z
|
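The XBMC fix above moves the interlaced handling out of `UpdateRefreshrate` and into `SetupD3D`, after the NTSC-style readings (23, 29, 59, as reported for 23.976/29.97/59.94 Hz displays) have already been rounded up, so the doubling no longer needs its own fractional-rate special case. A small illustrative helper (not XBMC code) showing the combined normalization:

```cpp
#include <cstdio>

// Displays running NTSC-style fractional rates report 23/29/59; round up to
// the nominal rate. An interlaced mode delivers two fields per frame, so the
// clock should tick at twice the reported frame rate.
int NormalizeRefreshRate(int reported, bool interlaced) {
  if (reported == 23 || reported == 29 || reported == 59)
    ++reported;  // treat 23.976/29.97/59.94 as 24/30/60
  if (interlaced)
    reported *= 2;  // count fields, not frames
  return reported;
}

int main() {
  std::printf("%d\n", NormalizeRefreshRate(29, true));   // 60
  std::printf("%d\n", NormalizeRefreshRate(59, false));  // 60
  return 0;
}
```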
```diff
--- a/tensorflow/core/distributed_runtime/rpc/BUILD
+++ b/tensorflow/core/distributed_runtime/rpc/BUILD
@@ ... @@
 
 load(
     "//tensorflow:tensorflow.bzl",
+    "if_windows",
     "tf_cc_binary",
     "tf_cc_test",
     "tf_cuda_library",
@@ ... @@ cc_library(
     name = "grpc_util",
     srcs = ["grpc_util.cc"],
     hdrs = ["grpc_util.h"],
+    linkopts = if_windows(["-DEFAULTLIB:ws2_32.lib"]),
     deps = [
         "//tensorflow:grpc",
         "//tensorflow:grpc++",
```
|
grpc_util requires ws2_32.lib on Windows.
|
tensorflow/tensorflow
|
dafe5a1400afdb5043fe31e7634fb72a9ebc241a
|
2020-01-15T22:51:06Z
|
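The Bazel change above links `ws2_32.lib` into `grpc_util` only on Windows, since that library provides the Winsock entry points gRPC's socket utilities call; without it the link fails with unresolved externals such as `__imp_WSAStartup`. A minimal sketch of the same dependency at the source level (the `#pragma comment` is the MSVC source-level analogue of the `-DEFAULTLIB` linkopt; on other platforms this compiles to a no-op):

```cpp
#ifdef _WIN32
#include <winsock2.h>
#pragma comment(lib, "ws2_32.lib")  // equivalent of linkopts = ["-DEFAULTLIB:ws2_32.lib"]
#endif

int main() {
#ifdef _WIN32
  // Any call into Winsock creates the link-time dependency on ws2_32.lib.
  WSADATA wsa_data;
  if (WSAStartup(MAKEWORD(2, 2), &wsa_data) != 0) return 1;
  WSACleanup();
#endif
  return 0;
}
```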
new file mode 100644 <nl> index 00000000 . . f660c4fe <nl> mmm / dev / null <nl> ppp b / NOTICE <nl> <nl> + NOTICES AND INFORMATION <nl> + Do Not Translate or Localize <nl> + <nl> + This software incorporates material from third parties . <nl> + Microsoft makes certain open source code available at https : / / 3rdpartysource . microsoft . com , <nl> + or you may send a check or money order for US $ 5 . 00 , including the product name , <nl> + the open source component name , platform , and version number , to : <nl> + <nl> + Source Code Compliance Team <nl> + Microsoft Corporation <nl> + One Microsoft Way <nl> + Redmond , WA 98052 <nl> + USA <nl> + <nl> + Notwithstanding any other terms , you may reverse engineer this software to the extent <nl> + required to debug changes to any libraries licensed under the GNU Lesser General Public License . <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + NuGet . Frameworks 5 . 0 . 0 - Apache - 2 . 0 <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . <nl> + ( c ) Microsoft Corporation . <nl> + <nl> + Apache License <nl> + <nl> + Version 2 . 0 , January 2004 <nl> + <nl> + http : / / www . apache . org / licenses / TERMS AND CONDITIONS FOR USE , REPRODUCTION , AND DISTRIBUTION <nl> + <nl> + 1 . Definitions . <nl> + <nl> + <nl> + <nl> + " License " shall mean the terms and conditions for use , reproduction , and distribution as defined by Sections 1 through 9 of this document . <nl> + <nl> + <nl> + <nl> + " Licensor " shall mean the copyright owner or entity authorized by the copyright owner that is granting the License . <nl> + <nl> + <nl> + <nl> + " Legal Entity " shall mean the union of the acting entity and all other entities that control , are controlled by , or are under common control with that entity . For the purposes of this definition , " control " means ( i ) the power , direct or indirect , to cause the direction or management of such entity , whether by contract or otherwise , or ( ii ) ownership of fifty percent ( 50 % ) or more of the outstanding shares , or ( iii ) beneficial ownership of such entity . <nl> + <nl> + <nl> + <nl> + " You " ( or " Your " ) shall mean an individual or Legal Entity exercising permissions granted by this License . <nl> + <nl> + <nl> + <nl> + " Source " form shall mean the preferred form for making modifications , including but not limited to software source code , documentation source , and configuration files . <nl> + <nl> + <nl> + <nl> + " Object " form shall mean any form resulting from mechanical transformation or translation of a Source form , including but not limited to compiled object code , generated documentation , and conversions to other media types . <nl> + <nl> + <nl> + <nl> + " Work " shall mean the work of authorship , whether in Source or Object form , made available under the License , as indicated by a copyright notice that is included in or attached to the work ( an example is provided in the Appendix below ) . <nl> + <nl> + <nl> + <nl> + " Derivative Works " shall mean any work , whether in Source or Object form , that is based on ( or derived from ) the Work and for which the editorial revisions , annotations , elaborations , or other modifications represent , as a whole , an original work of authorship . For the purposes of this License , Derivative Works shall not include works that remain separable from , or merely link ( or bind by name ) to the interfaces of , the Work and Derivative Works thereof . 
<nl> + <nl> + <nl> + <nl> + " Contribution " shall mean any work of authorship , including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof , that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner . For the purposes of this definition , " submitted " means any form of electronic , verbal , or written communication sent to the Licensor or its representatives , including but not limited to communication on electronic mailing lists , source code control systems , and issue tracking systems that are managed by , or on behalf of , the Licensor for the purpose of discussing and improving the Work , but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as " Not a Contribution . " <nl> + <nl> + <nl> + <nl> + " Contributor " shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work . <nl> + <nl> + 2 . Grant of Copyright License . Subject to the terms and conditions of this License , each Contributor hereby grants to You a perpetual , worldwide , non - exclusive , no - charge , royalty - free , irrevocable copyright license to reproduce , prepare Derivative Works of , publicly display , publicly perform , sublicense , and distribute the Work and such Derivative Works in Source or Object form . <nl> + <nl> + 3 . Grant of Patent License . Subject to the terms and conditions of this License , each Contributor hereby grants to You a perpetual , worldwide , non - exclusive , no - charge , royalty - free , irrevocable ( except as stated in this section ) patent license to make , have made , use , offer to sell , sell , import , and otherwise transfer the Work , where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution ( s ) alone or by combination of their Contribution ( s ) with the Work to which such Contribution ( s ) was submitted . If You institute patent litigation against any entity ( including a cross - claim or counterclaim in a lawsuit ) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement , then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed . <nl> + <nl> + 4 . Redistribution . 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium , with or without modifications , and in Source or Object form , provided that You meet the following conditions : <nl> + <nl> + ( a ) You must give any other recipients of the Work or Derivative Works a copy of this License ; and <nl> + <nl> + ( b ) You must cause any modified files to carry prominent notices stating that You changed the files ; and <nl> + <nl> + ( c ) You must retain , in the Source form of any Derivative Works that You distribute , all copyright , patent , trademark , and attribution notices from the Source form of the Work , excluding those notices that do not pertain to any part of the Derivative Works ; and <nl> + <nl> + ( d ) If the Work includes a " NOTICE " text file as part of its distribution , then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file , excluding those notices that do not pertain to any part of the Derivative Works , in at least one of the following places : within a NOTICE text file distributed as part of the Derivative Works ; within the Source form or documentation , if provided along with the Derivative Works ; or , within a display generated by the Derivative Works , if and wherever such third - party notices normally appear . The contents of the NOTICE file are for informational purposes only and do not modify the License . You may add Your own attribution notices within Derivative Works that You distribute , alongside or as an addendum to the NOTICE text from the Work , provided that such additional attribution notices cannot be construed as modifying the License . <nl> + <nl> + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use , reproduction , or distribution of Your modifications , or for any such Derivative Works as a whole , provided Your use , reproduction , and distribution of the Work otherwise complies with the conditions stated in this License . <nl> + <nl> + 5 . Submission of Contributions . Unless You explicitly state otherwise , any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License , without any additional terms or conditions . Notwithstanding the above , nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions . <nl> + <nl> + 6 . Trademarks . This License does not grant permission to use the trade names , trademarks , service marks , or product names of the Licensor , except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file . <nl> + <nl> + 7 . Disclaimer of Warranty . Unless required by applicable law or agreed to in writing , Licensor provides the Work ( and each Contributor provides its Contributions ) on an " AS IS " BASIS , WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied , including , without limitation , any warranties or conditions of TITLE , NON - INFRINGEMENT , MERCHANTABILITY , or FITNESS FOR A PARTICULAR PURPOSE . You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License . <nl> + <nl> + 8 . Limitation of Liability . 
In no event and under no legal theory , whether in tort ( including negligence ) , contract , or otherwise , unless required by applicable law ( such as deliberate and grossly negligent acts ) or agreed to in writing , shall any Contributor be liable to You for damages , including any direct , indirect , special , incidental , or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work ( including but not limited to damages for loss of goodwill , work stoppage , computer failure or malfunction , or any and all other commercial damages or losses ) , even if such Contributor has been advised of the possibility of such damages . <nl> + <nl> + 9 . Accepting Warranty or Additional Liability . While redistributing the Work or Derivative Works thereof , You may choose to offer , and charge a fee for , acceptance of support , warranty , indemnity , or other liability obligations and / or rights consistent with this License . However , in accepting such obligations , You may act only on Your own behalf and on Your sole responsibility , not on behalf of any other Contributor , and only if You agree to indemnify , defend , and hold each Contributor harmless for any liability incurred by , or claims asserted against , such Contributor by reason of your accepting any such warranty or additional liability . END OF TERMS AND CONDITIONS <nl> + <nl> + APPENDIX : How to apply the Apache License to your work . <nl> + <nl> + To apply the Apache License to your work , attach the following boilerplate notice , with the fields enclosed by brackets " [ ] " replaced with your own identifying information . ( Don ' t include the brackets ! ) The text should be enclosed in the appropriate comment syntax for the file format . We also recommend that a file or class name and description of purpose be included on the same " printed page " as the copyright notice for easier identification within third - party archives . <nl> + <nl> + Copyright [ yyyy ] [ name of copyright owner ] <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + <nl> + you may not use this file except in compliance with the License . <nl> + <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + <nl> + See the License for the specific language governing permissions and <nl> + <nl> + limitations under the License . <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + catchorg / catch2 e1c9d5569dc4135babb9c81891d70a8ba8ed938c - BSL - 1 . 0 <nl> + <nl> + <nl> + Copyright Social Point <nl> + Copyright 2012 Martin Moene . <nl> + Copyright 2015 Martin Moene . <nl> + Copyright 2017 Justin R . Wilson . <nl> + Copyright ( c ) Two Blue Cubes Ltd . <nl> + Copyright 2010 Two Blue Cubes Ltd . <nl> + Copyright 2011 Two Blue Cubes Ltd . <nl> + Copyright 2012 Two Blue Cubes Ltd . <nl> + Copyright 2013 Two Blue Cubes Ltd . <nl> + Copyright 2014 Two Blue Cubes Ltd . <nl> + Copyright 2015 Two Blue Cubes Ltd . <nl> + Copyright 2016 Two Blue Cubes Ltd . <nl> + Copyright 2017 Two Blue Cubes Ltd . <nl> + Copyright 2018 Two Blue Cubes Ltd . 
<nl> + Copyright 2019 Two Blue Cubes Ltd . <nl> + Copyright ( c ) 2012 Two Blue Cubes Ltd . <nl> + Copyright ( c ) 2017 Two Blue Cubes Ltd . <nl> + Copyright ( c ) 2019 Two Blue Cubes Ltd . <nl> + Copyright . txt or https : / / cmake . org / licensing <nl> + Copyright ( c ) 2015 - 2017 RWTH Aachen University , Federal Republic of Germany <nl> + <nl> + Boost Software License - Version 1 . 0 - August 17th , 2003 <nl> + <nl> + Permission is hereby granted , free of charge , to any person or organization <nl> + obtaining a copy of the software and accompanying documentation covered by <nl> + this license ( the " Software " ) to use , reproduce , display , distribute , <nl> + execute , and transmit the Software , and to prepare derivative works of the <nl> + Software , and to permit third - parties to whom the Software is furnished to <nl> + do so , all subject to the following : <nl> + <nl> + The copyright notices in the Software and this entire statement , including <nl> + the above license grant , this restriction and the following disclaimer , <nl> + must be included in all copies of the Software , in whole or in part , and <nl> + all derivative works of the Software , unless such copies or derivative <nl> + works are solely in the form of machine - executable object code generated by <nl> + a source language processor . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE , TITLE AND NON - INFRINGEMENT . IN NO EVENT <nl> + SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE <nl> + FOR ANY DAMAGES OR OTHER LIABILITY , WHETHER IN CONTRACT , TORT OR OTHERWISE , <nl> + ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER <nl> + DEALINGS IN THE SOFTWARE . <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + jbeder / yaml - cpp 9a3624205e8774953ef18f57067b3426c1c5ada6 - MIT <nl> + <nl> + <nl> + ( c ) 2008 Google Inc . <nl> + Copyright 2003 Google Inc . <nl> + Copyright 2005 Google Inc . <nl> + Copyright 2007 Google Inc . <nl> + Copyright 2008 Google Inc . <nl> + Copyright 2009 Google Inc . <nl> + Copyright 2010 Google Inc . <nl> + Copyright 2013 Google Inc . <nl> + Copyright 2003 , Google Inc . <nl> + Copyright 2005 , Google Inc . <nl> + Copyright 2006 , Google Inc . <nl> + Copyright 2007 Neal Norwitz <nl> + Copyright 2007 , Google Inc . <nl> + Copyright 2008 , Google Inc . <nl> + Copyright 2009 Neal Norwitz <nl> + Copyright 2009 , Google Inc . <nl> + Copyright 2010 , Google Inc . <nl> + Copyright 2013 , Google Inc . <nl> + Copyright 2015 , Google Inc . <nl> + Portions Copyright 2007 Google Inc . <nl> + Portions Copyright 2009 Google Inc . <nl> + Copyright ( c ) 2008 - 2015 Jesse Beder . 
<nl> + <nl> + MIT License <nl> + <nl> + Copyright ( c ) < year > < copyright holders > <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy of this software and associated documentation files ( the " Software " ) , to deal in the Software without restriction , including without limitation the rights to use , copy , modify , merge , publish , distribute , sublicense , and / or sell copies of the Software , and to permit persons to whom the Software is furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + Microsoft . NETCore . Platforms 3 . 1 . 0 - MIT <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . <nl> + ( c ) Microsoft Corporation . <nl> + Copyright ( c ) . NET Foundation . <nl> + Copyright ( c ) 2011 , Google Inc . <nl> + ( c ) 1997 - 2005 Sean Eron Anderson . <nl> + Copyright ( c ) 2007 James Newton - King <nl> + Copyright ( c ) 1991 - 2017 Unicode , Inc . <nl> + Copyright ( c ) 2013 - 2017 , Alfred Klomp <nl> + Copyright ( c ) 2015 - 2017 , Wojciech Mula <nl> + Copyright ( c ) 2005 - 2007 , Nick Galbreath <nl> + Portions ( c ) International Organization <nl> + Copyright ( c ) 2015 The Chromium Authors . <nl> + Copyright ( c ) 2004 - 2006 Intel Corporation <nl> + Copyright ( c ) 2016 - 2017 , Matthieu Darbois <nl> + Copyright ( c ) . NET Foundation Contributors <nl> + Copyright ( c ) . NET Foundation and Contributors <nl> + Copyright ( c ) 2011 Novell , Inc ( http : / / www . novell . com ) <nl> + Copyright ( c ) 1995 - 2017 Jean - loup Gailly and Mark Adler <nl> + Copyright ( c ) 2015 Xamarin , Inc ( http : / / www . xamarin . com ) <nl> + Copyright ( c ) 2009 , 2010 , 2013 - 2016 by the Brotli Authors . <nl> + Copyright ( c ) YEAR W3C ( r ) ( MIT , ERCIM , Keio , Beihang ) . Disclaimers THIS WORK IS PROVIDED AS <nl> + <nl> + The MIT License ( MIT ) <nl> + <nl> + Copyright ( c ) . NET Foundation and Contributors <nl> + <nl> + All rights reserved . <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in all <nl> + copies or substantial portions of the Software . 
<nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE <nl> + SOFTWARE . <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + Microsoft . Win32 . Registry 4 . 7 . 0 - MIT <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . <nl> + ( c ) Microsoft Corporation . <nl> + Copyright ( c ) . NET Foundation . <nl> + Copyright ( c ) 2011 , Google Inc . <nl> + ( c ) 1997 - 2005 Sean Eron Anderson . <nl> + Copyright ( c ) 2007 James Newton - King <nl> + Copyright ( c ) 1991 - 2017 Unicode , Inc . <nl> + Copyright ( c ) 2013 - 2017 , Alfred Klomp <nl> + Copyright ( c ) 2015 - 2017 , Wojciech Mula <nl> + Copyright ( c ) 2005 - 2007 , Nick Galbreath <nl> + Portions ( c ) International Organization <nl> + Copyright ( c ) 2015 The Chromium Authors . <nl> + Copyright ( c ) 2004 - 2006 Intel Corporation <nl> + Copyright ( c ) 2016 - 2017 , Matthieu Darbois <nl> + Copyright ( c ) . NET Foundation Contributors <nl> + Copyright ( c ) . NET Foundation and Contributors <nl> + Copyright ( c ) 2011 Novell , Inc ( http : / / www . novell . com ) <nl> + Copyright ( c ) 1995 - 2017 Jean - loup Gailly and Mark Adler <nl> + Copyright ( c ) 2015 Xamarin , Inc ( http : / / www . xamarin . com ) <nl> + Copyright ( c ) 2009 , 2010 , 2013 - 2016 by the Brotli Authors . <nl> + Copyright ( c ) YEAR W3C ( r ) ( MIT , ERCIM , Keio , Beihang ) . Disclaimers THIS WORK IS PROVIDED AS <nl> + <nl> + The MIT License ( MIT ) <nl> + <nl> + Copyright ( c ) . NET Foundation and Contributors <nl> + <nl> + All rights reserved . <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in all <nl> + copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE <nl> + SOFTWARE . <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + Microsoft . Windows . CppWinRT 2 . 0 . 191202 . 6 - MIT <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . 
<nl> + ( c ) Microsoft Corporation . <nl> + Copyright ( c ) Microsoft Corporation . <nl> + <nl> + MIT License <nl> + <nl> + Copyright ( c ) Microsoft Corporation . <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in all <nl> + copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE <nl> + SOFTWARE <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + Microsoft . Windows . CppWinRT 2 . 0 . 191111 . 2 - MIT <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . <nl> + ( c ) Microsoft Corporation . <nl> + Copyright ( c ) Microsoft Corporation . <nl> + <nl> + MIT License <nl> + <nl> + Copyright ( c ) Microsoft Corporation . <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in all <nl> + copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE <nl> + SOFTWARE <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + Microsoft . Windows . ImplementationLibrary 1 . 0 . 191107 . 2 - MIT <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . <nl> + Copyright ( c ) Microsoft . <nl> + ( c ) Microsoft Corporation . <nl> + Copyright ( c ) Microsoft Corporation . <nl> + Copyright ( c ) 2009 - 2014 by the contributors <nl> + <nl> + MIT License <nl> + <nl> + Copyright ( c ) Microsoft Corporation . All rights reserved . 
<nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in all <nl> + copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE <nl> + SOFTWARE <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + NETStandard . Library 2 . 0 . 0 - MIT <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . <nl> + copyright Unmanaged32Bit Required32Bit <nl> + Copyright ( c ) . NET Foundation and Contributors <nl> + <nl> + The MIT License ( MIT ) <nl> + <nl> + Copyright ( c ) . NET Foundation and Contributors <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in all <nl> + copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE <nl> + SOFTWARE . <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + Newtonsoft . Json 9 . 0 . 1 - MIT <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . 
<nl> + Copyright James Newton - King 2008 <nl> + <nl> + MIT License <nl> + <nl> + Copyright ( c ) < year > < copyright holders > <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy of this software and associated documentation files ( the " Software " ) , to deal in the Software without restriction , including without limitation the rights to use , copy , modify , merge , publish , distribute , sublicense , and / or sell copies of the Software , and to permit persons to whom the Software is furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + NUnit 3 . 12 . 0 - MIT <nl> + <nl> + <nl> + <nl> + Copyright ( c ) 2019 Charlie Poole , Rob Prouse <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in <nl> + all copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN <nl> + THE SOFTWARE . <nl> + <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + NUnit3TestAdapter 3 . 15 . 1 - MIT <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . <nl> + Copyright 2008 - 2015 Jb Evain <nl> + Copyright 2008 - 2018 Jb Evain <nl> + Copyright ( c ) 2019 Charlie Poole , Rob Prouse <nl> + Copyright 2011 - 2019 Charlie Poole , 2014 - 2019 Terje Sandstrom <nl> + Copyright ( c ) 2011 - 2019 Charlie Poole , 2014 - 2019 Terje Sandstrom <nl> + https : / / github . com / nunit / docs / wiki / Adapter - Release - Notes Copyright ( c ) 2011 - 2019 Charlie Poole , 2014 - 2019 Terje Sandstrom <nl> + works with Visual Studio 2012 and newer . NUnit Project NUnit3TestAdapter ? 
Copyright 2011 - 2019 Charlie Poole , 2014 - 2019 Terje Sandstrom <nl> + <nl> + Copyright ( c ) 2011 - 2019 Charlie Poole , 2014 - 2019 Terje Sandstrom <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in <nl> + all copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN <nl> + THE SOFTWARE . <nl> + <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + System . Security . AccessControl 4 . 7 . 0 - MIT <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . <nl> + ( c ) Microsoft Corporation . <nl> + Copyright ( c ) . NET Foundation . <nl> + Copyright ( c ) 2011 , Google Inc . <nl> + ( c ) 1997 - 2005 Sean Eron Anderson . <nl> + Copyright ( c ) 2007 James Newton - King <nl> + Copyright ( c ) 1991 - 2017 Unicode , Inc . <nl> + Copyright ( c ) 2013 - 2017 , Alfred Klomp <nl> + Copyright ( c ) 2015 - 2017 , Wojciech Mula <nl> + Copyright ( c ) 2005 - 2007 , Nick Galbreath <nl> + Portions ( c ) International Organization <nl> + Copyright ( c ) 2015 The Chromium Authors . <nl> + Copyright ( c ) 2004 - 2006 Intel Corporation <nl> + Copyright ( c ) 2016 - 2017 , Matthieu Darbois <nl> + Copyright ( c ) . NET Foundation Contributors <nl> + Copyright ( c ) . NET Foundation and Contributors <nl> + Copyright ( c ) 2011 Novell , Inc ( http : / / www . novell . com ) <nl> + Copyright ( c ) 1995 - 2017 Jean - loup Gailly and Mark Adler <nl> + Copyright ( c ) 2015 Xamarin , Inc ( http : / / www . xamarin . com ) <nl> + Copyright ( c ) 2009 , 2010 , 2013 - 2016 by the Brotli Authors . <nl> + Copyright ( c ) YEAR W3C ( r ) ( MIT , ERCIM , Keio , Beihang ) . Disclaimers THIS WORK IS PROVIDED AS <nl> + <nl> + The MIT License ( MIT ) <nl> + <nl> + Copyright ( c ) . NET Foundation and Contributors <nl> + <nl> + All rights reserved . <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in all <nl> + copies or substantial portions of the Software . 
<nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE <nl> + SOFTWARE . <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + System . Security . Principal . Windows 4 . 7 . 0 - MIT <nl> + <nl> + <nl> + ( c ) 2008 VeriSign , Inc . <nl> + ( c ) Microsoft Corporation . <nl> + Copyright ( c ) . NET Foundation . <nl> + Copyright ( c ) 2011 , Google Inc . <nl> + ( c ) 1997 - 2005 Sean Eron Anderson . <nl> + Copyright ( c ) 2007 James Newton - King <nl> + Copyright ( c ) 1991 - 2017 Unicode , Inc . <nl> + Copyright ( c ) 2013 - 2017 , Alfred Klomp <nl> + Copyright ( c ) 2015 - 2017 , Wojciech Mula <nl> + Copyright ( c ) 2005 - 2007 , Nick Galbreath <nl> + Portions ( c ) International Organization <nl> + Copyright ( c ) 2015 The Chromium Authors . <nl> + Copyright ( c ) 2004 - 2006 Intel Corporation <nl> + Copyright ( c ) 2016 - 2017 , Matthieu Darbois <nl> + Copyright ( c ) . NET Foundation Contributors <nl> + Copyright ( c ) . NET Foundation and Contributors <nl> + Copyright ( c ) 2011 Novell , Inc ( http : / / www . novell . com ) <nl> + Copyright ( c ) 1995 - 2017 Jean - loup Gailly and Mark Adler <nl> + Copyright ( c ) 2015 Xamarin , Inc ( http : / / www . xamarin . com ) <nl> + Copyright ( c ) 2009 , 2010 , 2013 - 2016 by the Brotli Authors . <nl> + Copyright ( c ) YEAR W3C ( r ) ( MIT , ERCIM , Keio , Beihang ) . Disclaimers THIS WORK IS PROVIDED AS <nl> + <nl> + The MIT License ( MIT ) <nl> + <nl> + Copyright ( c ) . NET Foundation and Contributors <nl> + <nl> + All rights reserved . <nl> + <nl> + Permission is hereby granted , free of charge , to any person obtaining a copy <nl> + of this software and associated documentation files ( the " Software " ) , to deal <nl> + in the Software without restriction , including without limitation the rights <nl> + to use , copy , modify , merge , publish , distribute , sublicense , and / or sell <nl> + copies of the Software , and to permit persons to whom the Software is <nl> + furnished to do so , subject to the following conditions : <nl> + <nl> + The above copyright notice and this permission notice shall be included in all <nl> + copies or substantial portions of the Software . <nl> + <nl> + THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR <nl> + IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY , <nl> + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL THE <nl> + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER <nl> + LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , <nl> + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE <nl> + SOFTWARE . <nl> + <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl>
|
Apply generated NOTICE file ()
|
microsoft/winget-cli
|
122bb529fe16aa60489679a30cb4b35c49e23532
|
2020-05-13T22:16:22Z
|
```diff
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ ... @@ MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
 GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async)
     : system{system}, renderer{renderer}, is_async{is_async} {
     auto& rasterizer{renderer.Rasterizer()};
-    memory_manager = std::make_unique<Tegra::MemoryManager>(system, rasterizer);
+    memory_manager = std::make_unique<Tegra::MemoryManager>(system);
     dma_pusher = std::make_unique<Tegra::DmaPusher>(*this);
     maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager);
     fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer);
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ ... @@
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
+#include "video_core/gpu.h"
 #include "video_core/memory_manager.h"
-#include "video_core/rasterizer_interface.h"
 
 namespace Tegra {
 
-MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
-    : rasterizer{rasterizer}, system{system} {
+MemoryManager::MemoryManager(Core::System& system) : system{system} {
     std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
     std::fill(page_table.attributes.begin(), page_table.attributes.end(),
               Common::PageType::Unmapped);
@@ ... @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
     const auto cpu_addr = GpuToCpuAddress(gpu_addr);
     ASSERT(cpu_addr);
 
-    rasterizer.FlushAndInvalidateRegion(cache_addr, aligned_size);
+    system.GPU().FlushAndInvalidateRegion(cache_addr, aligned_size);
+
     UnmapRange(gpu_addr, aligned_size);
     ASSERT(system.CurrentProcess()
                ->VMManager()
@@ ... @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::s
     switch (page_table.attributes[page_index]) {
     case Common::PageType::Memory: {
         const u8* src_ptr{page_table.pointers[page_index] + page_offset};
-        rasterizer.FlushRegion(ToCacheAddr(src_ptr), copy_amount);
+        system.GPU().FlushRegion(ToCacheAddr(src_ptr), copy_amount);
         std::memcpy(dest_buffer, src_ptr, copy_amount);
         break;
     }
@@ ... @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const
     switch (page_table.attributes[page_index]) {
     case Common::PageType::Memory: {
         u8* dest_ptr{page_table.pointers[page_index] + page_offset};
-        rasterizer.InvalidateRegion(ToCacheAddr(dest_ptr), copy_amount);
+        system.GPU().InvalidateRegion(ToCacheAddr(dest_ptr), copy_amount);
         std::memcpy(dest_ptr, src_buffer, copy_amount);
         break;
     }
@@ ... @@ void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, const std::
     switch (page_table.attributes[page_index]) {
     case Common::PageType::Memory: {
         const u8* src_ptr{page_table.pointers[page_index] + page_offset};
-        rasterizer.FlushRegion(ToCacheAddr(src_ptr), copy_amount);
+        system.GPU().FlushRegion(ToCacheAddr(src_ptr), copy_amount);
         WriteBlock(dest_addr, src_ptr, copy_amount);
         break;
     }
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ ... @@
 #include "common/common_types.h"
 #include "common/page_table.h"
 
-namespace VideoCore {
-class RasterizerInterface;
-}
-
 namespace Core {
 class System;
 }
@@ ... @@ struct VirtualMemoryArea {
 
 class MemoryManager final {
 public:
-    explicit MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer);
+    explicit MemoryManager(Core::System& system);
     ~MemoryManager();
 
     GPUVAddr AllocateSpace(u64 size, u64 align);
@@ ... @@ class MemoryManager final {
 
     Common::PageTable page_table{page_bits};
     VMAMap vma_map;
-    VideoCore::RasterizerInterface& rasterizer;
 
     Core::System& system;
 };
```
|
Merge pull request from bunnei/gpu-mem-interface
|
yuzu-emu/yuzu
|
6536cc97410bc20e2bb9c81620c15f353e1ed97f
|
2020-02-09T02:15:27Z
|
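The yuzu refactor above removes `MemoryManager`'s stored `RasterizerInterface` reference and instead reaches the caches through `system.GPU()` at call time, so the memory manager can be constructed before a rasterizer exists and no longer needs the rasterizer header. A minimal sketch of that inversion with illustrative types (not yuzu's):

```cpp
#include <cstdio>

struct Gpu {
  void FlushRegion(unsigned long addr, unsigned long size) {
    std::printf("flush %lu bytes at %#lx\n", size, addr);
  }
};

struct System {
  Gpu& GPU() { return gpu; }
  Gpu gpu;
};

class MemoryManager {
 public:
  // After the change: only the System is required at construction time.
  explicit MemoryManager(System& system) : system{system} {}

  void ReadBlock(unsigned long src, unsigned long size) {
    // Late binding: resolve the GPU facade (and through it, the rasterizer
    // caches) only when a flush is actually needed.
    system.GPU().FlushRegion(src, size);
  }

 private:
  System& system;
};

int main() {
  System system;
  MemoryManager mm{system};  // can now be created before any rasterizer
  mm.ReadBlock(0x1000, 64);
  return 0;
}
```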
```diff
--- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ ... @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
     MergeTreeDataMerger& merger,
     MergeTreeData& data)
 {
-    /// mutex has already been captured. The function is called only from `selectEntryToProcess`.
+    /// mutex has already been acquired. The function is called only from `selectEntryToProcess`.
 
     if (entry.type == LogEntry::MERGE_PARTS || entry.type == LogEntry::GET_PART || entry.type == LogEntry::ATTACH_PART)
     {
```
|
Fixed translation error [#CLICKHOUSE-3].
|
ClickHouse/ClickHouse
|
502f42a5724cc1101cf7550743053e450abfe1d3
|
2017-04-18T19:13:12Z
|
mmm a / src / arguments . cc <nl> ppp b / src / arguments . cc <nl> CommandLineParserPrivate : : CommandLineParserPrivate ( Settings & s ) : <nl> qthack ( true ) ; <nl> addarg ( " outline " , 0 , " Put an outline into the pdf " , new ConstSetter < bool > ( s . outline , true , false ) ) ; <nl> addarg ( " outline - depth " , 0 , " Set the depth of the outline " , new IntSetter ( s . outlineDepth , " level " , 4 ) ) ; <nl> + addarg ( " dump - outline " , 0 , " Dump the outline to a file " , new QStrSetter ( s . dumpOutline , " file " , " " ) ) ; <nl> qthack ( true ) ; <nl> extended ( false ) ; <nl> } <nl> mmm a / src / outline . cc <nl> ppp b / src / outline . cc <nl> void OutlinePrivate : : fillChildAnchors ( OutlineItem * item , QHash < QString , QWebEle <nl> if ( i - > anchor . isEmpty ( ) ) continue ; <nl> anchors [ i - > anchor ] = i - > element ; <nl> fillChildAnchors ( i , anchors ) ; <nl> - } <nl> + } <nl> } <nl> <nl> + # include < fstream > <nl> # include < iostream > <nl> using namespace std ; <nl> void OutlinePrivate : : outlineChildren ( OutlineItem * item , QPrinter * printer , int level ) { <nl> void OutlinePrivate : : outlineChildren ( OutlineItem * item , QPrinter * printer , int <nl> printer - > beginSectionOutline ( i - > value , i - > anchor ) ; <nl> outlineChildren ( i , printer , level + 1 ) ; <nl> printer - > endSectionOutline ( ) ; <nl> - } <nl> + } <nl> + } <nl> + <nl> + / * dump outline * / <nl> + void OutlinePrivate : : dumpOutlineChildren ( OutlineItem * item , ofstream & dumpfile , int level ) { <nl> + if ( level ) { <nl> + dumpfile < < " Level : " < < level < < " " ; <nl> + dumpfile < < " Page : " < < item - > page < < " " ; <nl> + dumpfile < < " Title : " < < item - > value . toUtf8 ( ) . toPercentEncoding ( ) . data ( ) ; <nl> + / / dumpfile < < item - > anchor . toUtf8 ( ) . toPercentEncoding ( ) < < " " ; <nl> + dumpfile < < " \ n " ; <nl> + } <nl> + if ( level + 1 < = settings . outlineDepth ) { <nl> + foreach ( OutlineItem * i , item - > children ) { <nl> + dumpOutlineChildren ( i , dumpfile , level + 1 ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + void Outline : : dumpOutline ( ) { <nl> + if ( d - > settings . dumpOutline . isEmpty ( ) ) return ; <nl> + char * filename = d - > settings . dumpOutline . toUtf8 ( ) . data ( ) ; <nl> + ofstream dumpfile ( filename , ios : : out ) ; <nl> + dumpfile < < " Pages : " < < pageCount ( ) < < " \ n " ; <nl> + foreach ( OutlineItem * i , d - > documentOutlines ) { <nl> + d - > dumpOutlineChildren ( i , dumpfile , 0 ) ; <nl> + } <nl> + dumpfile . close ( ) ; <nl> } <nl> <nl> <nl> mmm a / src / outline . hh <nl> ppp b / src / outline . hh <nl> public : <nl> void fillAnchors ( int d , QHash < QString , QWebElement > & anchors ) ; <nl> int pageCount ( ) ; <nl> void printOutline ( QPrinter * printer ) ; <nl> + void dumpOutline ( ) ; <nl> private : <nl> OutlinePrivate * d ; <nl> friend class TocPrinter ; <nl> mmm a / src / outline_p . hh <nl> ppp b / src / outline_p . hh <nl> public : <nl> void fillChildAnchors ( OutlineItem * item , QHash < QString , QWebElement > & anchors ) ; <nl> void outlineChildren ( OutlineItem * item , QPrinter * printer , int level ) ; <nl> void buildHFCache ( OutlineItem * i , int level ) ; <nl> + void dumpOutlineChildren ( OutlineItem * item , std : : ofstream & dumpfile , int level ) ; <nl> } ; <nl> <nl> # endif / / __EXTENSIVE_WKHTMLTOPDF_QT_HACK__ <nl> mmm a / src / pageconverter . cc <nl> ppp b / src / pageconverter . 
cc <nl> void PageConverterPrivate : : printPage ( bool ok ) { <nl> } <nl> } <nl> outline - > printOutline ( printer ) ; <nl> + outline - > dumpOutline ( ) ; <nl> painter - > end ( ) ; <nl> # endif <nl> if ( settings . out = = " - " & & lout ! = " / dev / stdout " ) { <nl> mmm a / src / settings . hh <nl> ppp b / src / settings . hh <nl> struct Settings { <nl> bool outline ; <nl> / / ! Maximal depth of the generated outline <nl> int outlineDepth ; <nl> + / / ! dump outline to this filename <nl> + QString dumpOutline ; <nl> / / ! List of input files <nl> QList < QString > in ; <nl> / / ! The file where in to store the output <nl>
|
Add option to write outline information to a file with - - dump - outline FILE
|
wkhtmltopdf/wkhtmltopdf
|
da952f5dfcd1bf257096b5cc20889c4be1f81fd3
|
2010-04-29T07:43:16Z
|
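The patch above adds a recursive dump of the outline tree, one "Level / Page / Title" line per node, stopping at the configured outline depth. A self-contained C++ sketch of that traversal follows; the OutlineItem layout is a simplified stand-in for the real Qt-based structure, and the percent-encoding of titles done by the real code is omitted:

#include <fstream>
#include <string>
#include <vector>

// Simplified stand-in for the Qt-based OutlineItem (requires C++17 for a
// vector of the enclosing, then-incomplete type).
struct OutlineItem {
    int page;
    std::string value;  // section title
    std::vector<OutlineItem> children;
};

void dumpOutlineChildren(const OutlineItem& item, std::ofstream& dumpfile,
                         int level, int maxDepth) {
    if (level) {  // level 0 is the unnamed document root, not printed
        dumpfile << "Level: " << level << " Page: " << item.page
                 << " Title: " << item.value << "\n";
    }
    if (level + 1 <= maxDepth) {  // respect the outline-depth limit
        for (const OutlineItem& child : item.children)
            dumpOutlineChildren(child, dumpfile, level + 1, maxDepth);
    }
}

int main() {
    OutlineItem root{0, "", {}};
    OutlineItem intro{1, "Introduction", {}};
    intro.children.push_back({2, "Scope", {}});
    root.children.push_back(intro);
    root.children.push_back({5, "Usage", {}});

    std::ofstream dumpfile("outline.txt");
    dumpfile << "Pages: " << 6 << "\n";  // header line, as in the patch
    dumpOutlineChildren(root, dumpfile, 0, /*maxDepth=*/4);
}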
mmm a / include / internal / catch_compiler_capabilities . h <nl> ppp b / include / internal / catch_compiler_capabilities . h <nl> <nl> # define CATCH_INTERNAL_CONFIG_NO_WCHAR <nl> # endif / / __DJGPP__ <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / Embarcadero C + + Build <nl> + # if defined ( __BORLANDC__ ) <nl> + # define CATCH_INTERNAL_CONFIG_POLYFILL_ISNAN <nl> + # endif <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / Use of __COUNTER__ is suppressed during code analysis in <nl> <nl> # define CATCH_CONFIG_DISABLE_EXCEPTIONS <nl> # endif <nl> <nl> + # if defined ( CATCH_INTERNAL_CONFIG_POLYFILL_ISNAN ) & & ! defined ( CATCH_CONFIG_NO_POLYFILL_ISNAN ) & & ! defined ( CATCH_CONFIG_POLYFILL_ISNAN ) <nl> + # define CATCH_CONFIG_POLYFILL_ISNAN <nl> + # endif <nl> + <nl> # if ! defined ( CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS ) <nl> # define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS <nl> # define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS <nl> mmm a / include / internal / catch_matchers_floating . cpp <nl> ppp b / include / internal / catch_matchers_floating . cpp <nl> <nl> <nl> # include " catch_matchers_floating . h " <nl> # include " catch_enforce . h " <nl> + # include " catch_polyfills . hpp " <nl> # include " catch_to_string . hpp " <nl> # include " catch_tostring . h " <nl> <nl> template < typename FP > <nl> bool almostEqualUlps ( FP lhs , FP rhs , int maxUlpDiff ) { <nl> / / Comparison with NaN should always be false . <nl> / / This way we can rule it out before getting into the ugly details <nl> - if ( std : : isnan ( lhs ) | | std : : isnan ( rhs ) ) { <nl> + if ( Catch : : isnan ( lhs ) | | Catch : : isnan ( rhs ) ) { <nl> return false ; <nl> } <nl> <nl> new file mode 100644 <nl> index 000000000 . . 68a2c827c <nl> mmm / dev / null <nl> ppp b / include / internal / catch_polyfills . cpp <nl> <nl> + / * <nl> + * Created by Martin on 17 / 11 / 2017 . <nl> + * <nl> + * Distributed under the Boost Software License , Version 1 . 0 . ( See accompanying <nl> + * file LICENSE_1_0 . txt or copy at http : / / www . boost . org / LICENSE_1_0 . txt ) <nl> + * / <nl> + <nl> + # include " catch_polyfills . hpp " <nl> + <nl> + # include < cmath > <nl> + <nl> + namespace Catch { <nl> + <nl> + # if ! defined ( CATCH_CONFIG_POLYFILL_ISNAN ) <nl> + bool isnan ( float f ) { <nl> + return std : : isnan ( f ) ; <nl> + } <nl> + bool isnan ( double d ) { <nl> + return std : : isnan ( d ) ; <nl> + } <nl> + # else <nl> + / / For now we only use this for embarcadero <nl> + bool isnan ( float f ) { <nl> + return std : : _isnan ( f ) ; <nl> + } <nl> + bool isnan ( double d ) { <nl> + return std : : _isnan ( d ) ; <nl> + } <nl> + # endif <nl> + <nl> + } / / end namespace Catch <nl> new file mode 100644 <nl> index 000000000 . . ba4189ef3 <nl> mmm / dev / null <nl> ppp b / include / internal / catch_polyfills . hpp <nl> <nl> + / * <nl> + * Created by Martin on 17 / 11 / 2017 . <nl> + * <nl> + * Distributed under the Boost Software License , Version 1 . 0 . ( See accompanying <nl> + * file LICENSE_1_0 . txt or copy at http : / / www . boost . org / LICENSE_1_0 . 
txt ) <nl> + * / <nl> + # ifndef TWOBLUECUBES_CATCH_POLYFILLS_HPP_INCLUDED <nl> + # define TWOBLUECUBES_CATCH_POLYFILLS_HPP_INCLUDED <nl> + <nl> + namespace Catch { <nl> + bool isnan ( float f ) ; <nl> + bool isnan ( double d ) ; <nl> + } <nl> + <nl> + # endif / / TWOBLUECUBES_CATCH_POLYFILLS_HPP_INCLUDED <nl> mmm a / include / internal / catch_tostring . cpp <nl> ppp b / include / internal / catch_tostring . cpp <nl> <nl> # include " catch_tostring . h " <nl> # include " catch_interfaces_config . h " <nl> # include " catch_context . h " <nl> + # include " catch_polyfills . hpp " <nl> <nl> # include < cmath > <nl> # include < iomanip > <nl> namespace Detail { <nl> <nl> template < typename T > <nl> std : : string fpToString ( T value , int precision ) { <nl> - if ( std : : isnan ( value ) ) { <nl> + if ( Catch : : isnan ( value ) ) { <nl> return " nan " ; <nl> } <nl> <nl> mmm a / projects / CMakeLists . txt <nl> ppp b / projects / CMakeLists . txt <nl> set ( INTERNAL_HEADERS <nl> $ { HEADER_DIR } / internal / catch_option . hpp <nl> $ { HEADER_DIR } / internal / catch_output_redirect . h <nl> $ { HEADER_DIR } / internal / catch_platform . h <nl> + $ { HEADER_DIR } / internal / catch_polyfills . hpp <nl> $ { HEADER_DIR } / internal / catch_preprocessor . hpp <nl> $ { HEADER_DIR } / internal / catch_random_number_generator . h <nl> $ { HEADER_DIR } / internal / catch_reenable_warnings . h <nl> set ( IMPL_SOURCES <nl> $ { HEADER_DIR } / internal / catch_output_redirect . cpp <nl> $ { HEADER_DIR } / internal / catch_registry_hub . cpp <nl> $ { HEADER_DIR } / internal / catch_interfaces_reporter . cpp <nl> + $ { HEADER_DIR } / internal / catch_polyfills . cpp <nl> $ { HEADER_DIR } / internal / catch_random_number_generator . cpp <nl> $ { HEADER_DIR } / internal / catch_reporter_registry . cpp <nl> $ { HEADER_DIR } / internal / catch_result_type . cpp <nl>
|
Add ` std : : isnan ` polyfill , fixing compilation under Embarcadero
|
catchorg/Catch2
|
c6a89f14c2d2134363ac186cc401ce331f158d44
|
2018-11-17T19:52:18Z
|
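The polyfill above follows a common pattern: a thin wrapper whose body is chosen by a config macro, so conforming compilers call std::isnan while deficient ones get a fallback. A minimal sketch, keeping the CATCH_CONFIG_POLYFILL_ISNAN name but substituting the portable x != x NaN test for Embarcadero's std::_isnan so the fallback branch compiles anywhere (the float overload of the real patch is omitted):

#include <cmath>
#include <iostream>

namespace Catch {
#if !defined(CATCH_CONFIG_POLYFILL_ISNAN)
    // Normal path: defer to the standard library.
    bool isnan(double d) { return std::isnan(d); }
#else
    // Fallback path: NaN is the only value that compares unequal to itself.
    bool isnan(double d) { return d != d; }
#endif
}  // namespace Catch

int main() {
    std::cout << Catch::isnan(0.0) << " "             // prints 0
              << Catch::isnan(std::nan("")) << "\n";  // prints 1
}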
mmm a / tensorflow / examples / learn / examples_test . sh <nl> ppp b / tensorflow / examples / learn / examples_test . sh <nl> <nl> # See the License for the specific language governing permissions and <nl> # limitations under the License . <nl> <nl> - # This script exercises the examples of using SkFlow . <nl> + # This script exercises the examples of using TF . Learn . <nl> <nl> DIR = " $ TEST_SRCDIR " <nl> <nl> then <nl> DIR = " $ DIR " / " $ TEST_WORKSPACE " <nl> fi <nl> <nl> - SKFLOW_EXAMPLE_BASE_DIR = $ DIR / tensorflow / examples / learn <nl> + TFLEARN_EXAMPLE_BASE_DIR = $ DIR / tensorflow / examples / learn <nl> <nl> <nl> function test ( ) { <nl> echo " Test " $ 1 " : " <nl> - $ SKFLOW_EXAMPLE_BASE_DIR / $ 1 $ 2 <nl> + $ TFLEARN_EXAMPLE_BASE_DIR / $ 1 $ 2 <nl> if [ $ ? - eq 0 ] <nl> then <nl> echo " Test passed . " <nl> - echo <nl> return 0 <nl> else <nl> echo " Test failed . " <nl> - echo <nl> exit 1 <nl> fi <nl> } <nl>
|
Rename examples test dir to tflearn
|
tensorflow/tensorflow
|
8c2918c3df1a2f9cb61d67b7d287039e30a95c49
|
2016-10-19T00:45:07Z
|
mmm a / etc / evergreen . yml <nl> ppp b / etc / evergreen . yml <nl> buildvariants : <nl> - name : concurrency_sharded_replication_with_balancer <nl> - name : concurrency_simultaneous <nl> - name : concurrency_simultaneous_replication <nl> - distros : <nl> - - rhel72 - zseries - build <nl> - name : ese <nl> - name : failpoints <nl> - name : failpoints_auth <nl>
|
SERVER - 36819 Run concurrency_simultaneous_replication on PowerPC distro .
|
mongodb/mongo
|
1a700a31fd103fe74aec9ff0ad4723f67681f744
|
2018-08-27T15:25:16Z
|
mmm a / port / win / env_win . cc <nl> ppp b / port / win / env_win . cc <nl> class WinEnv : public Env { <nl> } <nl> <nl> virtual uint64_t NowMicros ( ) override { <nl> - using namespace std : : chrono ; <nl> - return duration_cast < microseconds > ( system_clock : : now ( ) . time_since_epoch ( ) ) <nl> - . count ( ) ; <nl> + / / all std : : chrono clocks on windows have the same resolution that is only <nl> + / / On Windows 8 and Windows 2012 Server <nl> + / / GetSystemTimePreciseAsFileTime ( & current_time ) can be used <nl> + LARGE_INTEGER li ; <nl> + QueryPerformanceCounter ( & li ) ; <nl> + / / Convert to nanoseconds first to avoid loss of precision <nl> + / / and divide by frequency <nl> + li . QuadPart * = std : : micro : : den ; <nl> + li . QuadPart / = perf_counter_frequency_ ; <nl> + return li . QuadPart ; <nl> } <nl> <nl> virtual uint64_t NowNanos ( ) override { <nl> mmm a / util / auto_roll_logger . cc <nl> ppp b / util / auto_roll_logger . cc <nl> void AutoRollLogger : : Logv ( const char * format , va_list ap ) { <nl> <nl> void AutoRollLogger : : WriteHeaderInfo ( ) { <nl> mutex_ . AssertHeld ( ) ; <nl> - for ( auto header : headers_ ) { <nl> + for ( auto & header : headers_ ) { <nl> LogInternal ( " % s " , header . c_str ( ) ) ; <nl> } <nl> } <nl> mmm a / util / auto_roll_logger_test . cc <nl> ppp b / util / auto_roll_logger_test . cc <nl> namespace rocksdb { <nl> class AutoRollLoggerTest : public testing : : Test { <nl> public : <nl> static void InitTestDb ( ) { <nl> - string deleteCmd = " rm - rf " + kTestDir ; <nl> + # ifdef OS_WIN <nl> + / / Replace all slashes in the path so windows CompSpec does not <nl> + / / become confused <nl> + std : : string testDir ( kTestDir ) ; <nl> + std : : replace_if ( testDir . begin ( ) , testDir . end ( ) , <nl> + [ ] ( char ch ) { return ch = = ' / ' ; } , <nl> + ' \ \ ' ) ; <nl> + std : : string deleteCmd = " if exist " + testDir + " rd / s / q " + testDir ; <nl> + # else <nl> + std : : string deleteCmd = " rm - rf " + kTestDir ; <nl> + # endif <nl> ASSERT_TRUE ( system ( deleteCmd . c_str ( ) ) = = 0 ) ; <nl> Env : : Default ( ) - > CreateDir ( kTestDir ) ; <nl> } <nl> TEST_F ( AutoRollLoggerTest , InfoLogLevel ) { <nl> <nl> / / Test the logger Header function for roll over logs <nl> / / We expect the new logs creates as roll over to carry the headers specified <nl> - static list < string > GetOldFileNames ( const string & path ) { <nl> + static std : : vector < string > GetOldFileNames ( const string & path ) { <nl> + std : : vector < string > ret ; <nl> + <nl> const string dirname = path . substr ( / * start = * / 0 , path . find_last_of ( " / " ) ) ; <nl> const string fname = path . substr ( path . find_last_of ( " / " ) + 1 ) ; <nl> <nl> - vector < string > children ; <nl> + std : : vector < string > children ; <nl> Env : : Default ( ) - > GetChildren ( dirname , & children ) ; <nl> <nl> / / We know that the old log files are named [ path ] < something > <nl> / / Return all entities that match the pattern <nl> - list < string > ret ; <nl> - for ( auto child : children ) { <nl> + for ( auto & child : children ) { <nl> if ( fname ! = child & & child . find ( fname ) = = 0 ) { <nl> ret . push_back ( dirname + " / " + child ) ; <nl> } <nl> TEST_F ( AutoRollLoggerTest , LogHeaderTest ) { <nl> } <nl> } <nl> <nl> - const string & newfname = logger . TEST_log_fname ( ) . c_str ( ) ; <nl> + const string newfname = logger . 
TEST_log_fname ( ) ; <nl> <nl> / / Log enough data to cause a roll over <nl> int i = 0 ; <nl> TEST_F ( AutoRollLoggerTest , LogHeaderTest ) { <nl> / / Flush the log for the latest file <nl> LogFlush ( & logger ) ; <nl> <nl> - const list < string > oldfiles = GetOldFileNames ( newfname ) ; <nl> + const auto oldfiles = GetOldFileNames ( newfname ) ; <nl> <nl> ASSERT_EQ ( oldfiles . size ( ) , ( size_t ) 2 ) ; <nl> <nl> - for ( auto oldfname : oldfiles ) { <nl> + for ( auto & oldfname : oldfiles ) { <nl> / / verify that the files rolled over <nl> ASSERT_NE ( oldfname , newfname ) ; <nl> / / verify that the old log contains all the header logs <nl>
|
Merge pull request from yuslepukhin / fix_now_microsec_win
|
facebook/rocksdb
|
8279d41972102d4bb1a197fe501682b7ee0b4fdb
|
2015-07-23T18:00:43Z
|
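The NowMicros() rewrite above reads the raw performance counter and converts ticks to microseconds, multiplying by std::micro::den before dividing by the counter frequency so precision is not lost in the integer division. A Windows-only sketch under that assumption, with the frequency cached in a local static instead of the member variable the real WinEnv uses:

#include <windows.h>

#include <cstdint>
#include <cstdio>
#include <ratio>

static std::int64_t PerfCounterFrequency() {
    LARGE_INTEGER freq;
    QueryPerformanceFrequency(&freq);  // fixed at boot, safe to cache
    return freq.QuadPart;
}

std::uint64_t NowMicros() {
    static const std::int64_t frequency = PerfCounterFrequency();
    LARGE_INTEGER li;
    QueryPerformanceCounter(&li);
    // Convert to microseconds before dividing by the tick frequency, so
    // the integer division does not throw away sub-tick precision.
    li.QuadPart *= std::micro::den;
    li.QuadPart /= frequency;
    return static_cast<std::uint64_t>(li.QuadPart);
}

int main() {
    std::printf("%llu\n", static_cast<unsigned long long>(NowMicros()));
}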
mmm a / src / AbstractDiskWriter . cc <nl> ppp b / src / AbstractDiskWriter . cc <nl> void AbstractDiskWriter : : allocate ( off_t offset , uint64_t length ) <nl> # elif HAVE_POSIX_FALLOCATE <nl> int r = posix_fallocate ( fd_ , offset , length ) ; <nl> if ( r ! = 0 ) { <nl> - throw DL_ABORT_EX3 ( errNum , <nl> + throw DL_ABORT_EX3 ( r , <nl> fmt ( " posix_fallocate failed . cause : % s " , <nl> util : : safeStrerror ( r ) . c_str ( ) ) , <nl> error_code : : FILE_IO_ERROR ) ; <nl>
|
Fixed compile error which occurs when system has posix_fallocate ( ) but
|
aria2/aria2
|
829803d4f5945bebca2ad0321d160f873b85949e
|
2010-12-05T14:47:35Z
|
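The small fix above matters because posix_fallocate() is unusual among POSIX calls: it returns the error number directly and does not set errno, so reporting a previously captured errNum printed a stale cause. A minimal POSIX sketch of the corrected handling:

#include <fcntl.h>
#include <unistd.h>

#include <cstdio>
#include <cstring>

int main() {
    int fd = ::open("alloc.bin", O_CREAT | O_WRONLY, 0644);
    if (fd < 0) {
        std::perror("open");  // open() does report failures via errno
        return 1;
    }
    // posix_fallocate() returns 0 on success or an error number directly;
    // it is specified NOT to set errno, so `r` is what strerror() needs.
    int r = ::posix_fallocate(fd, 0, 1 << 20);  // reserve 1 MiB
    if (r != 0)
        std::fprintf(stderr, "posix_fallocate failed. cause: %s\n",
                     std::strerror(r));
    ::close(fd);
    return r == 0 ? 0 : 1;
}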
mmm a / xbmc / PlayListPlayer . cpp <nl> ppp b / xbmc / PlayListPlayer . cpp <nl> bool CPlayListPlayer : : Play ( int iSong , bool bAutoPlay / * = false * / , bool bPlayPr <nl> } <nl> } <nl> <nl> + / / reset the start offset of this item <nl> + if ( item - > m_lStartOffset = = STARTOFFSET_RESUME ) <nl> + item - > m_lStartOffset = 0 ; <nl> + <nl> / / TODO - move the above failure logic and the below success logic <nl> / / to callbacks instead so we don ' t rely on the return value <nl> / / of PlayFile ( ) <nl>
|
reset resume point after playing items via the playlist player , so that when they repeat they start at the beginning . fixes
|
xbmc/xbmc
|
87b242f779ed612c96c291061c6602914df0cc14
|
2012-11-11T02:42:45Z
|
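The guard added above clears a one-shot resume sentinel after it is consumed; without it, every repeat of a playlist item seeks back to the resume point instead of starting at zero. A small C++ sketch of the failure and the fix, with a simplified Item in place of the real CFileItem:

#include <cstdio>

constexpr long STARTOFFSET_RESUME = -1;  // sentinel, mirroring the real code

struct Item {
    long startOffset = 0;
};

void Play(Item& item, long resumePoint) {
    long seekTo = 0;
    if (item.startOffset == STARTOFFSET_RESUME) {
        seekTo = resumePoint;
        item.startOffset = 0;  // the fix: consume the sentinel exactly once
    }
    std::printf("playing from offset %ld\n", seekTo);
}

int main() {
    Item item;
    item.startOffset = STARTOFFSET_RESUME;
    Play(item, 4200);  // first playback resumes at 4200
    Play(item, 4200);  // the repeat now starts at 0 instead of 4200
}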
mmm a / src / init . cpp <nl> ppp b / src / init . cpp <nl> std : : string HelpMessage ( ) <nl> " - gen = 0 " + _ ( " Don ' t generate coins " ) + " \ n " + <nl> " - datadir = < dir > " + _ ( " Specify data directory " ) + " \ n " + <nl> " - dbcache = < n > " + _ ( " Set database cache size in megabytes ( default : 25 ) " ) + " \ n " + <nl> - " - dblogsize = < n > " + _ ( " Set database disk log size in megabytes ( default : 100 ) " ) + " \ n " + <nl> " - timeout = < n > " + _ ( " Specify connection timeout in milliseconds ( default : 5000 ) " ) + " \ n " + <nl> " - proxy = < ip : port > " + _ ( " Connect through socks proxy " ) + " \ n " + <nl> " - socks = < n > " + _ ( " Select the version of socks proxy to use ( 4 - 5 , default : 5 ) " ) + " \ n " + <nl> bool AppInit2 ( ) <nl> return InitError ( msg ) ; <nl> } <nl> <nl> + / / cache size calculations <nl> + size_t nTotalCache = GetArg ( " - dbcache " , 25 ) < < 20 ; <nl> + if ( nTotalCache < ( 1 < < 22 ) ) <nl> + nTotalCache = ( 1 < < 22 ) ; / / total cache cannot be less than 4 MiB <nl> + size_t nBlockTreeDBCache = nTotalCache / 8 ; <nl> + if ( nBlockTreeDBCache > ( 1 < < 21 ) ) <nl> + nBlockTreeDBCache = ( 1 < < 21 ) ; / / block tree db cache shouldn ' t be larger than 2 MiB <nl> + nTotalCache - = nBlockTreeDBCache ; <nl> + size_t nCoinDBCache = nTotalCache / 2 ; / / use half of the remaining cache for coindb cache <nl> + nTotalCache - = nCoinDBCache ; <nl> + nCoinCacheSize = nTotalCache / 300 ; / / coins in memory require around 300 bytes <nl> + <nl> uiInterface . InitMessage ( _ ( " Loading block index . . . " ) ) ; <nl> printf ( " Loading block index . . . \ n " ) ; <nl> nStart = GetTimeMillis ( ) ; <nl> - pblocktree = new CBlockTreeDB ( ) ; <nl> - pcoinsdbview = new CCoinsViewDB ( ) ; <nl> + pblocktree = new CBlockTreeDB ( nBlockTreeDBCache ) ; <nl> + pcoinsdbview = new CCoinsViewDB ( nCoinDBCache ) ; <nl> pcoinsTip = new CCoinsViewCache ( * pcoinsdbview ) ; <nl> <nl> if ( ! LoadBlockIndex ( ) ) <nl> mmm a / src / leveldb . cpp <nl> ppp b / src / leveldb . cpp <nl> <nl> <nl> # include < boost / filesystem . hpp > <nl> <nl> - static leveldb : : Options GetOptions ( ) { <nl> + static leveldb : : Options GetOptions ( size_t nCacheSize ) { <nl> leveldb : : Options options ; <nl> - int nCacheSizeMB = GetArg ( " - dbcache " , 25 ) ; <nl> - options . block_cache = leveldb : : NewLRUCache ( nCacheSizeMB * 1048576 ) ; <nl> + options . block_cache = leveldb : : NewLRUCache ( nCacheSize / 2 ) ; <nl> + options . write_buffer_size = nCacheSize / 4 ; / / up to two write buffers may be held in memory simultaneously <nl> options . filter_policy = leveldb : : NewBloomFilterPolicy ( 10 ) ; <nl> options . compression = leveldb : : kNoCompression ; <nl> return options ; <nl> } <nl> <nl> - CLevelDB : : CLevelDB ( const boost : : filesystem : : path & path , bool fMemory ) { <nl> + CLevelDB : : CLevelDB ( const boost : : filesystem : : path & path , size_t nCacheSize , bool fMemory ) { <nl> penv = NULL ; <nl> readoptions . verify_checksums = true ; <nl> iteroptions . verify_checksums = true ; <nl> iteroptions . fill_cache = false ; <nl> syncoptions . sync = true ; <nl> - options = GetOptions ( ) ; <nl> + options = GetOptions ( nCacheSize ) ; <nl> options . create_if_missing = true ; <nl> if ( fMemory ) { <nl> penv = leveldb : : NewMemEnv ( leveldb : : Env : : Default ( ) ) ; <nl> mmm a / src / leveldb . h <nl> ppp b / src / leveldb . 
h <nl> class CLevelDB <nl> leveldb : : DB * pdb ; <nl> <nl> public : <nl> - CLevelDB ( const boost : : filesystem : : path & path , bool fMemory = false ) ; <nl> + CLevelDB ( const boost : : filesystem : : path & path , size_t nCacheSize , bool fMemory = false ) ; <nl> ~ CLevelDB ( ) ; <nl> <nl> template < typename K , typename V > bool Read ( const K & key , V & value ) { <nl> mmm a / src / main . cpp <nl> ppp b / src / main . cpp <nl> CBlockIndex * pindexBest = NULL ; <nl> set < CBlockIndex * , CBlockIndexWorkComparator > setBlockIndexValid ; / / may contain all CBlockIndex * ' s that have validness > = BLOCK_VALID_TRANSACTIONS , and must contain those who aren ' t failed <nl> int64 nTimeBestReceived = 0 ; <nl> bool fImporting = false ; <nl> + unsigned int nCoinCacheSize = 5000 ; <nl> <nl> CMedianFilter < int > cPeerBlockCounts ( 5 , 0 ) ; / / Amount of blocks that other nodes claim to have <nl> <nl> bool SetBestChain ( CBlockIndex * pindexNew ) <nl> <nl> / / Make sure it ' s successfully written to disk before changing memory structure <nl> bool fIsInitialDownload = IsInitialBlockDownload ( ) ; <nl> - if ( ! fIsInitialDownload | | view . GetCacheSize ( ) > 5000 ) { <nl> + if ( ! fIsInitialDownload | | view . GetCacheSize ( ) > nCoinCacheSize ) { <nl> FlushBlockFile ( ) ; <nl> pblocktree - > Sync ( ) ; <nl> if ( ! view . Flush ( ) ) <nl> mmm a / src / main . h <nl> ppp b / src / main . h <nl> extern CCriticalSection cs_setpwalletRegistered ; <nl> extern std : : set < CWallet * > setpwalletRegistered ; <nl> extern unsigned char pchMessageStart [ 4 ] ; <nl> extern bool fImporting ; <nl> + extern unsigned int nCoinCacheSize ; <nl> <nl> / / Settings <nl> extern int64 nTransactionFee ; <nl> mmm a / src / txdb . cpp <nl> ppp b / src / txdb . cpp <nl> void static BatchWriteHashBestChain ( CLevelDBBatch & batch , const uint256 & hash ) { <nl> batch . Write ( ' B ' , hash ) ; <nl> } <nl> <nl> - CCoinsViewDB : : CCoinsViewDB ( bool fMemory ) : db ( GetDataDir ( ) / " coins " , fMemory ) { <nl> + CCoinsViewDB : : CCoinsViewDB ( size_t nCacheSize , bool fMemory ) : db ( GetDataDir ( ) / " coins " , nCacheSize , fMemory ) { <nl> } <nl> <nl> bool CCoinsViewDB : : GetCoins ( uint256 txid , CCoins & coins ) { <nl> bool CCoinsViewDB : : BatchWrite ( const std : : map < uint256 , CCoins > & mapCoins , CBlockI <nl> return db . WriteBatch ( batch ) ; <nl> } <nl> <nl> - CBlockTreeDB : : CBlockTreeDB ( bool fMemory ) : CLevelDB ( GetDataDir ( ) / " blktree " , fMemory ) { <nl> + CBlockTreeDB : : CBlockTreeDB ( size_t nCacheSize , bool fMemory ) : CLevelDB ( GetDataDir ( ) / " blktree " , nCacheSize , fMemory ) { <nl> } <nl> <nl> bool CBlockTreeDB : : WriteBlockIndex ( const CDiskBlockIndex & blockindex ) <nl> mmm a / src / txdb . h <nl> ppp b / src / txdb . h <nl> class CCoinsViewDB : public CCoinsView <nl> protected : <nl> CLevelDB db ; <nl> public : <nl> - CCoinsViewDB ( bool fMemory = false ) ; <nl> + CCoinsViewDB ( size_t nCacheSize , bool fMemory = false ) ; <nl> <nl> bool GetCoins ( uint256 txid , CCoins & coins ) ; <nl> bool SetCoins ( uint256 txid , const CCoins & coins ) ; <nl> class CCoinsViewDB : public CCoinsView <nl> class CBlockTreeDB : public CLevelDB <nl> { <nl> public : <nl> - CBlockTreeDB ( bool fMemory = false ) ; <nl> + CBlockTreeDB ( size_t nCacheSize , bool fMemory = false ) ; <nl> private : <nl> CBlockTreeDB ( const CBlockTreeDB & ) ; <nl> void operator = ( const CBlockTreeDB & ) ; <nl>
|
Cache size optimizations
|
bitcoin/bitcoin
|
1c83b0a3771bc601fdc75588f2cd45318b19c526
|
2012-11-04T17:06:25Z
|
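The init code above turns a single -dbcache budget into three pools: a block-tree DB cache capped at 2 MiB, half of the remainder for the coin DB, and the rest sizing the in-memory coin cache at roughly 300 bytes per coin. A self-contained sketch of that arithmetic (SplitDbCache is a hypothetical helper for illustration, not a Bitcoin Core function):

#include <cstddef>
#include <cstdio>

struct CacheBudget {
    std::size_t blockTreeDB;        // bytes for the block-tree LevelDB
    std::size_t coinDB;             // bytes for the coin LevelDB
    unsigned int coinCacheEntries;  // in-memory coins kept before a flush
};

CacheBudget SplitDbCache(std::size_t dbcacheMiB) {
    std::size_t total = dbcacheMiB << 20;
    if (total < (1 << 22))
        total = (1 << 22);          // total cache cannot be less than 4 MiB
    std::size_t blockTree = total / 8;
    if (blockTree > (1 << 21))
        blockTree = (1 << 21);      // block tree db capped at 2 MiB
    total -= blockTree;
    std::size_t coinDB = total / 2; // half of the remainder for coindb
    total -= coinDB;
    return {blockTree, coinDB,
            static_cast<unsigned int>(total / 300)};  // ~300 bytes per coin
}

int main() {
    CacheBudget b = SplitDbCache(25);  // the default -dbcache=25
    std::printf("blktree=%zu coindb=%zu coins=%u\n",
                b.blockTreeDB, b.coinDB, b.coinCacheEntries);
}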
mmm a / tensorflow / core / grappler / optimizers / data / BUILD <nl> ppp b / tensorflow / core / grappler / optimizers / data / BUILD <nl> cc_library ( <nl> " : filter_with_random_uniform_fusion " , <nl> " : hoist_random_uniform " , <nl> " : latency_all_edges " , <nl> - " : make_numa_aware " , <nl> " : make_sloppy " , <nl> " : map_and_batch_fusion " , <nl> " : map_and_filter_fusion " , <nl> tf_cc_test ( <nl> ] , <nl> ) <nl> <nl> - cc_library ( <nl> - name = " make_numa_aware " , <nl> - srcs = [ " make_numa_aware . cc " ] , <nl> - hdrs = [ " make_numa_aware . h " ] , <nl> - deps = [ <nl> - " : graph_utils " , <nl> - " : optimizer_base " , <nl> - " @ com_google_absl / / absl / container : flat_hash_set " , <nl> - " / / tensorflow / core / grappler : mutable_graph_view " , <nl> - " / / tensorflow / core / grappler : grappler_item " , <nl> - " / / tensorflow / core / grappler : op_types " , <nl> - " / / tensorflow / core / grappler / clusters : cluster " , <nl> - " / / tensorflow / core / grappler / optimizers : custom_graph_optimizer_registry " , <nl> - ] + tf_protos_all ( ) , <nl> - alwayslink = 1 , <nl> - ) <nl> - <nl> - tf_cc_test ( <nl> - name = " make_numa_aware_test " , <nl> - srcs = [ " make_numa_aware_test . cc " ] , <nl> - deps = [ <nl> - " : graph_test_utils " , <nl> - " : graph_utils " , <nl> - " : make_numa_aware " , <nl> - " / / tensorflow / core : framework " , <nl> - " / / tensorflow / core : test " , <nl> - " / / tensorflow / core : test_main " , <nl> - " / / tensorflow / core : testlib " , <nl> - " / / tensorflow / core / grappler : grappler_item " , <nl> - ] , <nl> - ) <nl> - <nl> cc_library ( <nl> name = " make_sloppy " , <nl> srcs = [ " make_sloppy . cc " ] , <nl> deleted file mode 100644 <nl> index 221f4c252583c . . 0000000000000 <nl> mmm a / tensorflow / core / grappler / optimizers / data / make_numa_aware . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # include " tensorflow / core / grappler / optimizers / data / make_numa_aware . h " <nl> - <nl> - # include " absl / container / flat_hash_set . h " <nl> - # include " tensorflow / core / framework / node_def . pb . h " <nl> - # include " tensorflow / core / grappler / clusters / cluster . h " <nl> - # include " tensorflow / core / grappler / grappler_item . h " <nl> - # include " tensorflow / core / grappler / mutable_graph_view . h " <nl> - # include " tensorflow / core / grappler / op_types . h " <nl> - # include " tensorflow / core / grappler / optimizers / custom_graph_optimizer_registry . h " <nl> - # include " tensorflow / core / grappler / optimizers / data / graph_utils . 
h " <nl> - <nl> - namespace tensorflow { <nl> - namespace grappler { <nl> - namespace { <nl> - <nl> - NodeDef MakeNumaAwareNode ( const NodeDef & node , MutableGraphView * graph ) { <nl> - NodeDef numa_aware_node = node ; <nl> - graph_utils : : SetUniqueGraphNodeName ( " make_numa_aware " , graph - > graph ( ) , <nl> - & numa_aware_node ) ; <nl> - numa_aware_node . set_op ( " ExperimentalNumaMapAndBatchDataset " ) ; <nl> - return numa_aware_node ; <nl> - } <nl> - <nl> - } / / namespace <nl> - <nl> - Status MakeNumaAware : : OptimizeAndCollectStats ( Cluster * cluster , <nl> - const GrapplerItem & item , <nl> - GraphDef * output , <nl> - OptimizationStats * stats ) { <nl> - * output = item . graph ; <nl> - MutableGraphView graph ( output ) ; <nl> - absl : : flat_hash_set < string > nodes_to_delete ; <nl> - <nl> - for ( const NodeDef & node : item . graph . node ( ) ) { <nl> - if ( node . op ( ) ! = " ExperimentalMapAndBatchDataset " ) continue ; <nl> - <nl> - auto * numa_node = graph . AddNode ( MakeNumaAwareNode ( node , & graph ) ) ; <nl> - TF_RETURN_IF_ERROR ( graph . UpdateFanouts ( node . name ( ) , numa_node - > name ( ) ) ) ; <nl> - nodes_to_delete . insert ( node . name ( ) ) ; <nl> - stats - > num_changes + + ; <nl> - } <nl> - TF_RETURN_IF_ERROR ( graph . DeleteNodes ( nodes_to_delete ) ) ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - REGISTER_GRAPH_OPTIMIZER_AS ( MakeNumaAware , " make_numa_aware " ) ; <nl> - <nl> - } / / namespace grappler <nl> - } / / namespace tensorflow <nl> deleted file mode 100644 <nl> index 81dbb31e6d55c . . 0000000000000 <nl> mmm a / tensorflow / core / grappler / optimizers / data / make_numa_aware . h <nl> ppp / dev / null <nl> <nl> - / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAKE_NUMA_AWARE_H_ <nl> - # define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAKE_NUMA_AWARE_H_ <nl> - <nl> - # include " tensorflow / core / grappler / optimizers / data / optimizer_base . 
h " <nl> - <nl> - namespace tensorflow { <nl> - namespace grappler { <nl> - <nl> - class MakeNumaAware : public TFDataOptimizerBase { <nl> - public : <nl> - MakeNumaAware ( ) = default ; <nl> - ~ MakeNumaAware ( ) override = default ; <nl> - <nl> - string name ( ) const override { return " make_numa_aware " ; } <nl> - <nl> - Status Init ( <nl> - const tensorflow : : RewriterConfig_CustomGraphOptimizer * config ) override { <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - Status OptimizeAndCollectStats ( Cluster * cluster , const GrapplerItem & item , <nl> - GraphDef * output , <nl> - OptimizationStats * stats ) override ; <nl> - <nl> - void Feedback ( Cluster * cluster , const GrapplerItem & item , <nl> - const GraphDef & optimize_output , double result ) override { } <nl> - } ; <nl> - <nl> - } / / namespace grappler <nl> - } / / namespace tensorflow <nl> - <nl> - # endif / / TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAKE_NUMA_AWARE_H_ <nl> deleted file mode 100644 <nl> index 4b83fb6ef19f8 . . 0000000000000 <nl> mmm a / tensorflow / core / grappler / optimizers / data / make_numa_aware_test . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # include " tensorflow / core / grappler / optimizers / data / make_numa_aware . h " <nl> - <nl> - # include " tensorflow / core / framework / attr_value_util . h " <nl> - # include " tensorflow / core / framework / function_testlib . h " <nl> - # include " tensorflow / core / framework / tensor_testutil . h " <nl> - # include " tensorflow / core / grappler / grappler_item . h " <nl> - # include " tensorflow / core / grappler / optimizers / data / graph_test_utils . h " <nl> - # include " tensorflow / core / grappler / optimizers / data / graph_utils . h " <nl> - <nl> - # include " tensorflow / core / lib / core / status_test_util . h " <nl> - # include " tensorflow / core / platform / test . h " <nl> - <nl> - namespace tensorflow { <nl> - namespace grappler { <nl> - namespace { <nl> - <nl> - TEST ( MakeNumaAwareTest , ReplaceSimple ) { <nl> - using test : : function : : NDef ; <nl> - GrapplerItem item ; <nl> - item . 
graph = test : : function : : GDef ( <nl> - { <nl> - NDef ( " start " , " Const " , { } , { { " value " , 0 } , { " dtype " , DT_INT32 } } ) , <nl> - NDef ( " stop " , " Const " , { } , { { " value " , 10 } , { " dtype " , DT_INT32 } } ) , <nl> - NDef ( " step " , " Const " , { } , { { " value " , 1 } , { " dtype " , DT_INT32 } } ) , <nl> - NDef ( " range " , " RangeDataset " , { " start " , " stop " , " step " } , { } ) , <nl> - NDef ( " batch_size " , " Const " , { } , { { " value " , 3 } , { " dtype " , DT_INT32 } } ) , <nl> - NDef ( " num_parallel_calls " , " Const " , { } , <nl> - { { " value " , 5 } , { " dtype " , DT_INT32 } } ) , <nl> - NDef ( " drop_remainder " , " Const " , { } , <nl> - { { " value " , 0 } , { " dtype " , DT_BOOL } } ) , <nl> - graph_tests_utils : : MakeMapAndBatchNode ( <nl> - " map_and_batch " , " range " , " batch_size " , " num_parallel_calls " , <nl> - " drop_remainder " ) , <nl> - } , <nl> - / / FunctionLib <nl> - { <nl> - test : : function : : XTimesTwo ( ) , <nl> - } ) ; <nl> - <nl> - MakeNumaAware optimizer ; <nl> - GraphDef output ; <nl> - TF_ASSERT_OK ( optimizer . Optimize ( nullptr , item , & output ) ) ; <nl> - <nl> - EXPECT_FALSE ( graph_utils : : ContainsGraphNodeWithName ( " map_and_batch " , output ) ) ; <nl> - EXPECT_FALSE ( graph_utils : : ContainsNodeWithOp ( " ExperimentalMapAndBatchDataset " , <nl> - output ) ) ; <nl> - EXPECT_TRUE ( graph_utils : : ContainsNodeWithOp ( <nl> - " ExperimentalNumaMapAndBatchDataset " , output ) ) ; <nl> - } <nl> - <nl> - TEST ( MapAndBatchNumaAawareReplacementTest , ReplaceWithExtraChild ) { <nl> - using test : : function : : NDef ; <nl> - GrapplerItem item ; <nl> - item . graph = test : : function : : GDef ( <nl> - { <nl> - NDef ( " start " , " Const " , { } , { { " value " , 0 } , { " dtype " , DT_INT32 } } ) , <nl> - NDef ( " stop " , " Const " , { } , { { " value " , 10 } , { " dtype " , DT_INT32 } } ) , <nl> - NDef ( " step " , " Const " , { } , { { " value " , 1 } , { " dtype " , DT_INT32 } } ) , <nl> - NDef ( " range " , " RangeDataset " , { " start " , " stop " , " step " } , { } ) , <nl> - NDef ( " batch_size " , " Const " , { } , { { " value " , 3 } , { " dtype " , DT_INT32 } } ) , <nl> - NDef ( " num_parallel_calls " , " Const " , { } , <nl> - { { " value " , 5 } , { " dtype " , DT_INT32 } } ) , <nl> - NDef ( " drop_remainder " , " Const " , { } , <nl> - { { " value " , 0 } , { " dtype " , DT_BOOL } } ) , <nl> - graph_tests_utils : : MakeMapAndBatchNode ( <nl> - " map_and_batch " , " range " , " batch_size " , " num_parallel_calls " , <nl> - " drop_remainder " ) , <nl> - NDef ( " cache " , " CacheDataset " , { " map_and_batch " } , { } ) , <nl> - } , <nl> - / / FunctionLib <nl> - { <nl> - test : : function : : XTimesTwo ( ) , <nl> - } ) ; <nl> - <nl> - MakeNumaAware optimizer ; <nl> - GraphDef output ; <nl> - TF_ASSERT_OK ( optimizer . 
Optimize ( nullptr , item , & output ) ) ; <nl> - <nl> - EXPECT_FALSE ( graph_utils : : ContainsGraphNodeWithName ( " map_and_batch " , output ) ) ; <nl> - EXPECT_FALSE ( graph_utils : : ContainsNodeWithOp ( " ExperimentalMapAndBatchDataset " , <nl> - output ) ) ; <nl> - EXPECT_TRUE ( graph_utils : : ContainsNodeWithOp ( <nl> - " ExperimentalNumaMapAndBatchDataset " , output ) ) ; <nl> - EXPECT_TRUE ( graph_utils : : ContainsNodeWithOp ( " CacheDataset " , output ) ) ; <nl> - <nl> - int numa_map_and_batch_component_id = graph_utils : : FindGraphNodeWithOp ( <nl> - " ExperimentalNumaMapAndBatchDataset " , output ) ; <nl> - auto & numa_map_and_batch_component = <nl> - output . node ( numa_map_and_batch_component_id ) ; <nl> - EXPECT_EQ ( numa_map_and_batch_component . input ( 0 ) , " range " ) ; <nl> - <nl> - int cache_id = graph_utils : : FindGraphNodeWithOp ( " CacheDataset " , output ) ; <nl> - auto & cache_node = output . node ( cache_id ) ; <nl> - EXPECT_EQ ( cache_node . input ( 0 ) , numa_map_and_batch_component . name ( ) ) ; <nl> - } <nl> - <nl> - } / / namespace <nl> - } / / namespace grappler <nl> - } / / namespace tensorflow <nl> mmm a / tensorflow / core / grappler / optimizers / data / meta_optimizer . cc <nl> ppp b / tensorflow / core / grappler / optimizers / data / meta_optimizer . cc <nl> Status TFDataMetaOptimizer : : Optimize ( Cluster * cluster , const GrapplerItem & item , <nl> { " noop_elimination " , " shuffle_and_repeat_fusion " , " map_fusion " , <nl> " filter_fusion " , " filter_with_random_uniform_fusion " , <nl> " map_and_filter_fusion " , " hoist_random_uniform " , " map_parallelization " , <nl> - " map_and_batch_fusion " , " map_vectorization " , " make_numa_aware " , <nl> - " latency_all_edges " , " make_sloppy " , " parallel_batch " , " pruning " , <nl> - " function " , " shape " , " arithmetic " , " dependency " } ) { <nl> + " map_and_batch_fusion " , " map_vectorization " , " latency_all_edges " , <nl> + " make_sloppy " , " parallel_batch " , " pruning " , " function " , " shape " , <nl> + " arithmetic " , " dependency " } ) { <nl> TF_RETURN_IF_ERROR ( <nl> ApplyOptimization ( optimization , cluster , & optimized_item ) ) ; <nl> } <nl> mmm a / tensorflow / core / kernels / data / experimental / BUILD <nl> ppp b / tensorflow / core / kernels / data / experimental / BUILD <nl> tf_kernel_library ( <nl> ] , <nl> ) <nl> <nl> - tf_kernel_library ( <nl> - name = " numa_map_and_batch_dataset_op " , <nl> - srcs = [ " numa_map_and_batch_dataset_op . cc " ] , <nl> - deps = [ <nl> - " / / tensorflow / core : array_ops_op_lib " , <nl> - " / / tensorflow / core : core_cpu_internal " , <nl> - " / / tensorflow / core : experimental_dataset_ops_op_lib " , <nl> - " / / tensorflow / core : framework " , <nl> - " / / tensorflow / core : lib " , <nl> - " / / tensorflow / core : lib_internal " , <nl> - " / / tensorflow / core : nn_ops_op_lib " , <nl> - " / / tensorflow / core / kernels : inplace_ops " , <nl> - " / / tensorflow / core / kernels / data : captured_function " , <nl> - " / / tensorflow / core / kernels / data : dataset_utils " , <nl> - " / / tensorflow / core / profiler / lib : traceme " , <nl> - " @ com_google_absl / / absl / memory " , <nl> - ] , <nl> - ) <nl> - <nl> tf_kernel_library ( <nl> name = " parallel_interleave_dataset_op " , <nl> srcs = [ " parallel_interleave_dataset_op . 
cc " ] , <nl> tf_kernel_library ( <nl> " : map_and_batch_dataset_op " , <nl> " : matching_files_dataset_op " , <nl> " : non_serializable_dataset_op " , <nl> - " : numa_map_and_batch_dataset_op " , <nl> " : parallel_interleave_dataset_op " , <nl> " : parse_example_dataset_op " , <nl> " : prefetching_kernels " , <nl> deleted file mode 100644 <nl> index 77d4815ee9607 . . 0000000000000 <nl> mmm a / tensorflow / core / kernels / data / experimental / numa_map_and_batch_dataset_op . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - # define EIGEN_USE_THREADS <nl> - <nl> - # include < atomic > <nl> - # include < utility > <nl> - <nl> - # include " tensorflow / core / common_runtime / function . h " <nl> - # include " tensorflow / core / framework / allocator . h " <nl> - # include " tensorflow / core / framework / dataset . h " <nl> - # include " tensorflow / core / framework / partial_tensor_shape . h " <nl> - # include " tensorflow / core / framework / tensor . h " <nl> - # include " tensorflow / core / kernels / data / captured_function . h " <nl> - # include " tensorflow / core / kernels / data / dataset_utils . h " <nl> - # include " tensorflow / core / kernels / inplace_ops_functor . h " <nl> - # include " tensorflow / core / lib / core / blocking_counter . h " <nl> - # include " tensorflow / core / lib / core / errors . h " <nl> - # include " tensorflow / core / lib / gtl / cleanup . h " <nl> - # include " tensorflow / core / lib / random / random . h " <nl> - # include " tensorflow / core / lib / strings / strcat . h " <nl> - # include " tensorflow / core / platform / cpu_info . h " <nl> - # include " tensorflow / core / platform / numa . h " <nl> - # include " tensorflow / core / platform / tracing . h " <nl> - # include " tensorflow / core / profiler / lib / traceme . h " <nl> - <nl> - namespace tensorflow { <nl> - namespace data { <nl> - namespace { <nl> - <nl> - / / kWindowSize is the fixed constant controlling the number of batch outputs <nl> - / / each NumaWorkerBlock may be processing at a time . This is currently a <nl> - / / constant and not user configurable to enable future performance optimizations <nl> - / / in the implementation . <nl> - const int64 kWindowSize = 10 ; <nl> - <nl> - / / Define a helper for more consistent logging . <nl> - # define WORKER_VLOG ( verbose_level ) \ <nl> - VLOG ( verbose_level ) < < " WorkerThread ( " < < numa_node < < " , " < < thread_num \ <nl> - < < " ) : " <nl> - <nl> - / / See documentation in . . / ops / dataset_ops . cc for a high - level <nl> - / / description of the following op . 
<nl> - <nl> - class NumaMapAndBatchDatasetOp : public UnaryDatasetOpKernel { <nl> - public : <nl> - explicit NumaMapAndBatchDatasetOp ( OpKernelConstruction * ctx ) <nl> - : UnaryDatasetOpKernel ( ctx ) { <nl> - FunctionMetadata : : Params params ; <nl> - params . use_inter_op_parallelism = false ; <nl> - OP_REQUIRES_OK ( ctx , <nl> - FunctionMetadata : : Create ( ctx , " f " , params , & func_metadata_ ) ) ; <nl> - OP_REQUIRES_OK ( ctx , ctx - > GetAttr ( " output_types " , & output_types_ ) ) ; <nl> - OP_REQUIRES_OK ( ctx , ctx - > GetAttr ( " output_shapes " , & output_shapes_ ) ) ; <nl> - / / TODO ( saeta ) : Implement support for preserve_cardinality logic . <nl> - OP_REQUIRES_OK ( <nl> - ctx , ctx - > GetAttr ( " preserve_cardinality " , & preserve_cardinality_ ) ) ; <nl> - } <nl> - <nl> - protected : <nl> - void MakeDataset ( OpKernelContext * ctx , DatasetBase * input , <nl> - DatasetBase * * output ) override { <nl> - int64 batch_size ; <nl> - OP_REQUIRES_OK ( ctx , ParseScalarArgument ( ctx , " batch_size " , & batch_size ) ) ; <nl> - OP_REQUIRES ( <nl> - ctx , batch_size > 0 , <nl> - errors : : InvalidArgument ( " batch_size must be greater than zero . " ) ) ; <nl> - <nl> - int64 num_parallel_calls ; <nl> - OP_REQUIRES_OK ( ctx , ParseScalarArgument ( ctx , " num_parallel_calls " , <nl> - & num_parallel_calls ) ) ; <nl> - OP_REQUIRES ( <nl> - ctx , num_parallel_calls > 0 | | num_parallel_calls = = model : : kAutoTune , <nl> - errors : : InvalidArgument ( <nl> - " num_parallel_calls must be greater than zero . " ) ) ; <nl> - <nl> - bool drop_remainder ; <nl> - OP_REQUIRES_OK ( ctx , <nl> - ParseScalarArgument ( ctx , " drop_remainder " , & drop_remainder ) ) ; <nl> - <nl> - std : : unique_ptr < CapturedFunction > captured_func ; <nl> - OP_REQUIRES_OK ( <nl> - ctx , CapturedFunction : : Create ( ctx , func_metadata_ , " other_arguments " , <nl> - & captured_func ) ) ; <nl> - <nl> - * output = <nl> - new Dataset ( ctx , input , batch_size , num_parallel_calls , drop_remainder , <nl> - output_types_ , output_shapes_ , std : : move ( captured_func ) ) ; <nl> - } <nl> - <nl> - private : <nl> - class Dataset : public DatasetBase { <nl> - public : <nl> - Dataset ( OpKernelContext * ctx , const DatasetBase * input , int64 batch_size , <nl> - int64 num_parallel_calls , bool drop_remainder , <nl> - const DataTypeVector & output_types , <nl> - const std : : vector < PartialTensorShape > & output_shapes , <nl> - std : : unique_ptr < CapturedFunction > captured_func ) <nl> - : DatasetBase ( DatasetContext ( ctx ) ) , <nl> - input_ ( input ) , <nl> - batch_size_ ( batch_size ) , <nl> - num_parallel_calls_ ( num_parallel_calls ) , <nl> - drop_remainder_ ( drop_remainder ) , <nl> - output_types_ ( output_types ) , <nl> - output_shapes_ ( output_shapes ) , <nl> - captured_func_ ( std : : move ( captured_func ) ) { <nl> - input_ - > Ref ( ) ; <nl> - } <nl> - <nl> - ~ Dataset ( ) override { input_ - > Unref ( ) ; } <nl> - <nl> - std : : unique_ptr < IteratorBase > MakeIteratorInternal ( <nl> - const string & prefix ) const override { <nl> - return absl : : make_unique < Iterator > ( <nl> - Iterator : : Params { this , strings : : StrCat ( prefix , " : : NumaMapAndBatch " ) } ) ; <nl> - } <nl> - <nl> - const DataTypeVector & output_dtypes ( ) const override { <nl> - return output_types_ ; <nl> - } <nl> - <nl> - const std : : vector < PartialTensorShape > & output_shapes ( ) const override { <nl> - return output_shapes_ ; <nl> - } <nl> - <nl> - string DebugString ( ) const override { <nl> - return " 
NumaMapAndBatchDatasetOp : : Dataset " ; <nl> - } <nl> - <nl> - / / TODO ( b / 120482302 ) : Note that this is inaccurate until <nl> - / / NumaMapAndBatchMapDataset modified to preserve cardinality . <nl> - int64 Cardinality ( ) const override { <nl> - int64 n = input_ - > Cardinality ( ) ; <nl> - if ( n = = kInfiniteCardinality | | n = = kUnknownCardinality ) { <nl> - return n ; <nl> - } <nl> - return n / batch_size_ + <nl> - ( n % batch_size_ = = 0 | | drop_remainder_ ? 0 : 1 ) ; <nl> - } <nl> - <nl> - protected : <nl> - Status AsGraphDefInternal ( SerializationContext * ctx , <nl> - DatasetGraphDefBuilder * b , <nl> - Node * * output ) const override { <nl> - Node * input_graph_node = nullptr ; <nl> - TF_RETURN_IF_ERROR ( b - > AddInputDataset ( ctx , input_ , & input_graph_node ) ) ; <nl> - Node * batch_size_node ; <nl> - TF_RETURN_IF_ERROR ( b - > AddScalar ( batch_size_ , & batch_size_node ) ) ; <nl> - Node * num_parallel_calls_node ; <nl> - TF_RETURN_IF_ERROR ( <nl> - b - > AddScalar ( num_parallel_calls_ , & num_parallel_calls_node ) ) ; <nl> - Node * drop_remainder_node ; <nl> - TF_RETURN_IF_ERROR ( b - > AddScalar ( drop_remainder_ , & drop_remainder_node ) ) ; <nl> - std : : vector < Node * > other_arguments ; <nl> - DataTypeVector other_arguments_types ; <nl> - TF_RETURN_IF_ERROR ( captured_func_ - > AddToGraph ( ctx , b , & other_arguments , <nl> - & other_arguments_types ) ) ; <nl> - AttrValue f ; <nl> - b - > BuildAttrValue ( captured_func_ - > func ( ) , & f ) ; <nl> - AttrValue other_arguments_types_attr ; <nl> - b - > BuildAttrValue ( other_arguments_types , & other_arguments_types_attr ) ; <nl> - <nl> - TF_RETURN_IF_ERROR ( b - > AddDataset ( <nl> - this , <nl> - { std : : make_pair ( 0 , input_graph_node ) , <nl> - std : : make_pair ( 2 , batch_size_node ) , <nl> - std : : make_pair ( 3 , num_parallel_calls_node ) , <nl> - std : : make_pair ( 4 , drop_remainder_node ) } , / / Single tensor inputs . <nl> - { std : : make_pair ( 1 , other_arguments ) } , / / Tensor list inputs . <nl> - { std : : make_pair ( " f " , f ) , <nl> - std : : make_pair ( " Targuments " , other_arguments_types_attr ) } , / / Attrs <nl> - output ) ) ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - private : <nl> - class Iterator : public DatasetIterator < Dataset > { <nl> - public : <nl> - explicit Iterator ( const Params & params ) <nl> - : DatasetIterator < Dataset > ( params ) , <nl> - mu_ ( std : : make_shared < mutex > ( ) ) , <nl> - autotune_cond_var_ ( std : : make_shared < condition_variable > ( ) ) , <nl> - num_parallel_calls_ ( std : : make_shared < model : : SharedState > ( <nl> - params . dataset - > num_parallel_calls_ , mu_ , autotune_cond_var_ ) ) { <nl> - } <nl> - <nl> - ~ Iterator ( ) override { <nl> - mutex_lock l ( * mu_ ) ; <nl> - cancelled_ = true ; <nl> - VLOG ( 3 ) < < " NumaMapAndBatchIterator : : ~ Iterator : cancelling operations . " ; <nl> - for ( size_t i = 0 ; i < workers_ . size ( ) ; + + i ) { <nl> - workers_ [ i ] - > manager . Cancel ( ) ; <nl> - } <nl> - VLOG ( 3 ) < < " NumaMapAndBatchIterator : : ~ Iterator : waiting for threads to " <nl> - " shut down . 
" ; <nl> - } <nl> - <nl> - Status Initialize ( IteratorContext * ctx ) override { <nl> - mutex_lock l ( * mu_ ) ; <nl> - if ( num_parallel_calls_ - > value = = model : : kAutoTune ) { <nl> - num_parallel_calls_ - > value = ctx - > runner_threadpool_size ( ) ; <nl> - } <nl> - TF_RETURN_IF_ERROR ( <nl> - dataset ( ) - > input_ - > MakeIterator ( ctx , prefix ( ) , & input_impl_ ) ) ; <nl> - TF_RETURN_IF_ERROR ( dataset ( ) - > captured_func_ - > Instantiate ( <nl> - ctx , & instantiated_captured_func_ ) ) ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - Status GetNextInternal ( IteratorContext * ctx , <nl> - std : : vector < Tensor > * out_tensors , <nl> - bool * end_of_sequence ) override { <nl> - auto cleanup = gtl : : MakeCleanup ( <nl> - [ ] { VLOG ( 3 ) < < " GetNextInternal call returning . " ; } ) ; <nl> - NumaWorkerBlock * worker = nullptr ; <nl> - { <nl> - mutex_lock l ( * mu_ ) ; <nl> - VLOG ( 3 ) < < " GetNextInternal call ; current block : " < < cur_block_ ; <nl> - if ( global_end_of_input_ ) { <nl> - * end_of_sequence = true ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - TF_RETURN_IF_ERROR ( EnsureBackgroundThreadsStarted ( ctx ) ) ; <nl> - worker = workers_ [ cur_block_ ] . get ( ) ; <nl> - cur_block_ = ( cur_block_ + 1 ) % workers_ . size ( ) ; <nl> - } <nl> - bool global_end_of_input_local = false ; <nl> - Status s = worker - > manager . GetBatch ( ctx , dataset ( ) - > drop_remainder_ , <nl> - & global_end_of_input_local , <nl> - out_tensors , end_of_sequence ) ; <nl> - if ( global_end_of_input_local ) { <nl> - mutex_lock l ( * mu_ ) ; <nl> - global_end_of_input_ = global_end_of_input_local ; <nl> - } <nl> - return s ; <nl> - } <nl> - <nl> - protected : <nl> - std : : shared_ptr < model : : Node > CreateNode ( <nl> - IteratorContext * ctx , model : : Node : : Args args ) const override { <nl> - return model : : MakeAsyncKnownRatioNode ( <nl> - std : : move ( args ) , dataset ( ) - > batch_size_ , <nl> - { model : : MakeParameter ( " parallelism " , num_parallel_calls_ , / * min = * / 1 , <nl> - / * max = * / ctx - > runner_threadpool_size ( ) ) } ) ; <nl> - } <nl> - <nl> - Status SaveInternal ( IteratorStateWriter * writer ) override { <nl> - mutex_lock l ( * mu_ ) ; <nl> - for ( size_t i = 0 ; i < workers_ . size ( ) ; + + i ) { <nl> - if ( ! workers_ [ i ] - > manager . Quiesce ( ) ) { <nl> - return errors : : Cancelled ( <nl> - " The iterator was deleted before it could reach a " <nl> - " checkpointable state . " ) ; <nl> - } <nl> - } <nl> - <nl> - TF_RETURN_IF_ERROR ( SaveInput ( writer , input_impl_ ) ) ; <nl> - TF_RETURN_IF_ERROR ( <nl> - writer - > WriteScalar ( full_name ( " num_workers " ) , workers_ . size ( ) ) ) ; <nl> - <nl> - for ( size_t i = 0 ; i < workers_ . size ( ) ; + + i ) { <nl> - size_t index = ( cur_block_ + i ) % workers_ . size ( ) ; <nl> - TF_RETURN_IF_ERROR ( workers_ [ index ] - > manager . Save ( writer , this , i ) ) ; <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - Status RestoreInternal ( IteratorContext * ctx , <nl> - IteratorStateReader * reader ) override { <nl> - mutex_lock l ( * mu_ ) ; <nl> - TF_RETURN_IF_ERROR ( RestoreInput ( ctx , reader , input_impl_ ) ) ; <nl> - int64 num_workers = - 1 ; <nl> - TF_RETURN_IF_ERROR ( <nl> - reader - > ReadScalar ( full_name ( " num_workers " ) , & num_workers ) ) ; <nl> - / / Note : num_workers can be 0 if the iterator wasn ' t started when <nl> - / / first checkpointed . 
<nl> - if ( num_workers < 0 ) { <nl> - return errors : : DataLoss ( <nl> - " When restoring from checkpoint , we encountered a data " <nl> - " consistency error : num_workers has an invalid value : " , <nl> - num_workers ) ; <nl> - } <nl> - if ( port : : NUMAEnabled ( ) ) { <nl> - int actual_numa_domains = port : : NUMANumNodes ( ) ; <nl> - if ( actual_numa_domains ! = num_workers & & num_workers > 0 ) { <nl> - LOG ( WARNING ) < < " # NUMA domains mismatch when restoring from " <nl> - " checkpoint : checkpoint has " <nl> - < < num_workers <nl> - < < " NUMA domains , while this host has : " <nl> - < < actual_numa_domains < < " NUMA domains . " ; <nl> - } <nl> - } <nl> - if ( num_workers > 1 & & ! port : : NUMAEnabled ( ) ) { <nl> - LOG ( WARNING ) < < " NUMA is not enabled for this process , but restoring " <nl> - " a checkpoint that assumes " <nl> - < < num_workers < < " NUMA domains . " ; <nl> - } <nl> - workers_ . resize ( num_workers ) ; <nl> - for ( size_t i = 0 ; i < num_workers ; + + i ) { <nl> - workers_ [ i ] = absl : : make_unique < NumaWorkerBlock > ( this ) ; <nl> - TF_RETURN_IF_ERROR ( <nl> - workers_ [ i ] - > manager . Restore ( ctx , reader , this , i ) ) ; <nl> - } <nl> - cur_block_ = 0 ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - private : <nl> - / / NumaBlockManager manages all the state for a set of threads pinned to a <nl> - / / single NUMA domain . <nl> - / / <nl> - / / The methods can be divided into 3 categories based on who should call <nl> - / / them : <nl> - / / <nl> - / / ( 1 ) RunnerThread : WaitForInputSpace , PushInputs , SetEndOfInput . <nl> - / / ( 2 ) WorkerThread : RetrieveInput , GetBatchTensors . <nl> - / / RecordBatchEntryComplete <nl> - / / ( 3 ) Client threads : GetBatch , Cancel , Save , Restore . <nl> - / / <nl> - / / Internally , we manage state in a circular buffer of size ` kWindowSize ` . <nl> - / / There are 3 pointers into the circular buffer , and must maintain the <nl> - / / following order : ( 1 ) next_input_batch_ ( corresponding to the next input <nl> - / / batch to be pulled from the input iterator ) , ( 2 ) next_input_ <nl> - / / ( corresponding to the batch the WorkerThreads should pull from for <nl> - / / their next inputs ) , and ( 3 ) next_output_ corresponding to the next <nl> - / / value to be consumed by the output iterator . <nl> - / / <nl> - / / Methods return errors : : Cancelled if the iteration is cancelled before <nl> - / / completing . <nl> - / / <nl> - / / NumaBlockManager is thread safe . <nl> - class NumaBlockManager { <nl> - public : <nl> - explicit NumaBlockManager ( Iterator * itr ) : itr_ ( itr ) { } <nl> - <nl> - / / WaitForInputSpace blocks until there is space in the circular buffer <nl> - / / to begin processing a new batch of elements . <nl> - / / <nl> - / / Returns true when there is space , false if the Iterator is cancelled . <nl> - bool WaitForInputSpace ( IteratorContext * ctx ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - <nl> - size_t next = ( next_input_batch_ + 1 ) % kWindowSize ; <nl> - DCHECK ( next < kWindowSize ) < < next ; <nl> - <nl> - / / Wait for space in the circular buffer . <nl> - while ( ! cancelled_ & & batches_ [ next ] . state ! = BatchState : : kEmpty ) { <nl> - VLOG ( 3 ) < < " Waiting for input space ; next : " < < next <nl> - < < " , next_output_ : " < < next_output_ <nl> - < < " , next_input_batch_ : " < < next_input_batch_ ; <nl> - itr_ - > RecordStop ( ctx ) ; <nl> - runner_cond_var_ . 
wait ( l ) ; <nl> - itr_ - > RecordStart ( ctx ) ; <nl> - } <nl> - if ( cancelled_ ) { <nl> - VLOG ( 3 ) < < " WaitForInputSpace cancelled . " ; <nl> - return false ; <nl> - } <nl> - <nl> - DCHECK ( batches_ [ next ] . state = = BatchState : : kEmpty ) ; <nl> - <nl> - next_input_batch_ = next ; <nl> - return true ; <nl> - } <nl> - <nl> - / / PushInputs sets the inputs for the next batch as retrieved from the <nl> - / / input iterator . <nl> - void PushInputs ( const Status & status , <nl> - std : : vector < std : : vector < Tensor > > inputs ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - <nl> - DCHECK ( next_input_ < kWindowSize ) < < next_input_ ; <nl> - DCHECK ( batches_ [ next_input_batch_ ] . state = = BatchState : : kEmpty ) ; <nl> - DCHECK ( batches_ [ next_input_batch_ ] . next_input_to_process = = 0 ) <nl> - < < batches_ [ next_input_batch_ ] . next_input_to_process ; <nl> - DCHECK ( batches_ [ next_input_batch_ ] . status . ok ( ) ) <nl> - < < batches_ [ next_input_batch_ ] . status ; <nl> - <nl> - batches_ [ next_input_batch_ ] . inputs . swap ( inputs ) ; <nl> - batches_ [ next_input_batch_ ] . state = BatchState : : kInputsFilled ; <nl> - batches_ [ next_input_batch_ ] . status . Update ( status ) ; <nl> - if ( batches_ [ next_input_batch_ ] . status . ok ( ) ) { <nl> - worker_cond_var_ . notify_all ( ) ; <nl> - } else { <nl> - client_cond_var_ . notify_all ( ) ; <nl> - batches_ [ next_input_batch_ ] . error_index = 0 ; <nl> - } <nl> - } <nl> - <nl> - / / SetEndOfInput records the fact that we have reached the end of the <nl> - / / input iterator , and that we should return end_of_sequence = true when <nl> - / / we have exhaused all buffered batches . <nl> - void SetEndOfInput ( ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - reached_eof_ = true ; <nl> - worker_cond_var_ . notify_all ( ) ; <nl> - client_cond_var_ . notify_all ( ) ; <nl> - } <nl> - <nl> - / / RetrieveInput gets the next input tuple to be mapped by a worker <nl> - / / thread . <nl> - / / <nl> - / / Returns true if an input was retrieved , false if the iterator has <nl> - / / been cancelled . <nl> - bool RetrieveInput ( IteratorContext * ctx , std : : vector < Tensor > * input , <nl> - uint64 * index , size_t * sequence_number ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - <nl> - / / Wait for inputs to be ready . <nl> - while ( ! cancelled_ & & <nl> - batches_ [ next_input_ ] . state ! = BatchState : : kInputsFilled ) { <nl> - itr_ - > RecordStop ( ctx ) ; <nl> - worker_cond_var_ . wait ( l ) ; <nl> - itr_ - > RecordStart ( ctx ) ; <nl> - } <nl> - <nl> - if ( cancelled_ ) { <nl> - return false ; <nl> - } <nl> - <nl> - DCHECK ( batches_ [ next_input_ ] . next_input_to_process < <nl> - batches_ [ next_input_ ] . inputs . size ( ) ) <nl> - < < " next_input_ : " < < next_input_ < < " , next_input_to_process : " <nl> - < < batches_ [ next_input_ ] . next_input_to_process <nl> - < < " , inputs . size ( ) : " < < batches_ [ next_input_ ] . inputs . size ( ) <nl> - < < " , state : " < < static_cast < int32 > ( batches_ [ next_input_ ] . state ) <nl> - < < " , this : " < < this ; <nl> - * index = batches_ [ next_input_ ] . next_input_to_process ; <nl> - * sequence_number = next_input_ ; <nl> - input - > swap ( batches_ [ next_input_ ] <nl> - . inputs [ batches_ [ next_input_ ] . next_input_to_process ] ) ; <nl> - / / Increment pointers . <nl> - batches_ [ next_input_ ] . next_input_to_process + + ; <nl> - <nl> - if ( batches_ [ next_input_ ] . next_input_to_process = = <nl> - batches_ [ next_input_ ] . inputs . 
size ( ) ) { <nl> - batches_ [ next_input_ ] . state = BatchState : : kAllMapsStarted ; <nl> - next_input_ = ( next_input_ + 1 ) % kWindowSize ; <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - / / GetBatchTensors returns a pointer to the output batch tensors for the <nl> - / / worker thread to copy into . <nl> - / / <nl> - / / allocate_output is a function taking a batch size , and a pointer to <nl> - / / the output tuple of Tensors to allocate them . The allocate_output <nl> - / / function is called at most once per output batch . <nl> - std : : vector < Tensor > * GetBatchTensors ( <nl> - size_t sequence_number , <nl> - std : : function < void ( size_t , std : : vector < Tensor > * ) > allocate_output ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - DCHECK ( sequence_number < kWindowSize ) < < sequence_number ; <nl> - DCHECK ( batches_ [ sequence_number ] . state = = BatchState : : kInputsFilled | | <nl> - batches_ [ sequence_number ] . state = = BatchState : : kAllMapsStarted ) <nl> - < < sequence_number ; <nl> - <nl> - if ( batches_ [ sequence_number ] . outputs . empty ( ) ) { <nl> - allocate_output ( batches_ [ sequence_number ] . inputs . size ( ) , <nl> - & batches_ [ sequence_number ] . outputs ) ; <nl> - } <nl> - return & batches_ [ sequence_number ] . outputs ; <nl> - } <nl> - <nl> - / / RecordBatchEntryComplete records an element of the batch has finished <nl> - / / copying into the output tensors . <nl> - void RecordBatchEntryComplete ( size_t sequence_number , uint64 index , <nl> - Status s ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - DCHECK ( sequence_number < kWindowSize ) < < sequence_number ; <nl> - DCHECK ( batches_ [ sequence_number ] . state = = BatchState : : kInputsFilled | | <nl> - batches_ [ sequence_number ] . state = = BatchState : : kAllMapsStarted ) <nl> - < < sequence_number ; <nl> - <nl> - batches_ [ sequence_number ] . num_outputs_complete + + ; <nl> - if ( ! s . ok ( ) & & batches_ [ sequence_number ] . error_index > index ) { <nl> - batches_ [ sequence_number ] . status = s ; <nl> - batches_ [ sequence_number ] . error_index = index ; <nl> - } <nl> - <nl> - if ( batches_ [ sequence_number ] . num_outputs_complete = = <nl> - batches_ [ sequence_number ] . inputs . size ( ) ) { <nl> - DCHECK ( batches_ [ sequence_number ] . state = = <nl> - BatchState : : kAllMapsStarted ) ; <nl> - batches_ [ sequence_number ] . state = BatchState : : kOutputsComplete ; <nl> - batches_ [ sequence_number ] . inputs . clear ( ) ; / / Eagerly save memory . <nl> - batches_ [ sequence_number ] . inputs . shrink_to_fit ( ) ; <nl> - client_cond_var_ . notify_all ( ) ; <nl> - } <nl> - } <nl> - <nl> - / / GetBatch retrieves the next output batch tensors . <nl> - Status GetBatch ( IteratorContext * ctx , bool drop_remainder , <nl> - bool * global_eof , std : : vector < Tensor > * out_tensor , <nl> - bool * end_of_sequence ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - / / Wait until one of 3 conditions occurs : <nl> - / / ( 1 ) we ' re cancelled . <nl> - / / ( 2 ) the state becomes kOutputsComplete <nl> - / / ( 3 ) state is empty & & reached_eof . <nl> - while ( ! cancelled_ & & <nl> - batches_ [ next_output_ ] . state ! = BatchState : : kOutputsComplete & & <nl> - ! ( reached_eof_ & & <nl> - batches_ [ next_output_ ] . state = = BatchState : : kEmpty ) ) { <nl> - VLOG ( 3 ) < < " Waiting in GetBatch . " ; <nl> - itr_ - > RecordStop ( ctx ) ; <nl> - client_cond_var_ . 
wait ( l ) ; <nl> - itr_ - > RecordStart ( ctx ) ; <nl> - } <nl> - <nl> - if ( cancelled_ ) { <nl> - return errors : : Cancelled ( <nl> - " Cancelled in NumaMapAndBatch : : GetNext call . " ) ; <nl> - } <nl> - <nl> - if ( reached_eof_ & & <nl> - batches_ [ next_output_ ] . state = = BatchState : : kEmpty ) { <nl> - VLOG ( 4 ) < < " GetBatch returning end of sequence . " ; <nl> - * end_of_sequence = true ; <nl> - * global_eof = true ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - VLOG ( 3 ) < < " Returning output index : " < < next_output_ <nl> - < < " , this : " < < this ; <nl> - <nl> - * end_of_sequence = false ; <nl> - Status s = batches_ [ next_output_ ] . status ; <nl> - if ( s . ok ( ) ) { <nl> - out_tensor - > swap ( batches_ [ next_output_ ] . outputs ) ; <nl> - } <nl> - / / Handle early termination . <nl> - if ( errors : : IsOutOfRange ( s ) ) { <nl> - * global_eof = true ; <nl> - s = Status : : OK ( ) ; <nl> - if ( drop_remainder | | batches_ [ next_output_ ] . error_index = = 0 ) { <nl> - * end_of_sequence = true ; <nl> - } else { <nl> - std : : vector < Tensor > true_outputs ; <nl> - for ( size_t i = 0 ; i < batches_ [ next_output_ ] . outputs . size ( ) ; <nl> - + + i ) { <nl> - TensorShape component_shape ( <nl> - batches_ [ next_output_ ] . outputs [ i ] . shape ( ) ) ; <nl> - component_shape . set_dim ( 0 , batches_ [ next_output_ ] . error_index ) ; <nl> - AllocatorAttributes attr ; <nl> - attr . set_gpu_compatible ( true ) ; <nl> - true_outputs . emplace_back ( <nl> - ctx - > allocator ( attr ) , <nl> - batches_ [ next_output_ ] . outputs [ i ] . dtype ( ) , component_shape ) ; <nl> - TF_RETURN_IF_ERROR ( CopyPartialBatch ( <nl> - & true_outputs . back ( ) , batches_ [ next_output_ ] . outputs [ i ] , <nl> - batches_ [ next_output_ ] . error_index ) ) ; <nl> - } <nl> - out_tensor - > swap ( true_outputs ) ; <nl> - } <nl> - } <nl> - <nl> - batches_ [ next_output_ ] . Reset ( ) ; <nl> - next_output_ = ( next_output_ + 1 ) % kWindowSize ; <nl> - runner_cond_var_ . notify_all ( ) ; <nl> - <nl> - return s ; <nl> - } <nl> - <nl> - void Cancel ( ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - VLOG ( 3 ) < < " Cancelling NUMA block . " ; <nl> - cancelled_ = true ; <nl> - runner_cond_var_ . notify_all ( ) ; <nl> - worker_cond_var_ . notify_all ( ) ; <nl> - client_cond_var_ . notify_all ( ) ; <nl> - } <nl> - <nl> - / / Waits until all the worker threads have completed their work and all <nl> - / / internal state has reached a " safe - point " where we can safely <nl> - / / checkpoint . <nl> - / / <nl> - / / Returns true if completed successfully , false if cancelled while <nl> - / / waiting . <nl> - bool Quiesce ( ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - VLOG ( 3 ) < < " Waiting until the operations have quiesced . " ; <nl> - while ( ! cancelled_ & & ! AllMapOperationsFinished ( ) ) { <nl> - client_cond_var_ . wait ( l ) ; <nl> - } <nl> - if ( cancelled_ ) { <nl> - return false ; <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - Status Save ( IteratorStateWriter * writer , Iterator * itr , size_t index ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - string prefix = itr - > full_name ( strings : : StrCat ( " numa_block_ " , index ) ) ; <nl> - if ( reached_eof_ ) { <nl> - TF_RETURN_IF_ERROR ( writer - > WriteScalar ( <nl> - strings : : StrCat ( prefix , " _end_of_input " ) , " " ) ) ; <nl> - } <nl> - for ( size_t i = 0 ; i < kWindowSize ; + + i ) { <nl> - size_t index = ( next_output_ + i ) % kWindowSize ; <nl> - if ( batches_ [ index ] . 
state = = BatchState : : kEmpty ) { <nl> - break ; <nl> - } <nl> - string batch_prefix = strings : : StrCat ( prefix , " _batch_ " , i ) ; <nl> - TF_RETURN_IF_ERROR ( writer - > WriteScalar ( <nl> - strings : : StrCat ( batch_prefix , " _code " ) , <nl> - static_cast < int64 > ( batches_ [ index ] . status . code ( ) ) ) ) ; <nl> - if ( ! batches_ [ index ] . status . ok ( ) ) { <nl> - TF_RETURN_IF_ERROR ( <nl> - writer - > WriteScalar ( strings : : StrCat ( batch_prefix , " _msg " ) , <nl> - batches_ [ index ] . status . error_message ( ) ) ) ; <nl> - TF_RETURN_IF_ERROR ( writer - > WriteScalar ( <nl> - strings : : StrCat ( batch_prefix , " _error_index " ) , <nl> - batches_ [ index ] . error_index ) ) ; <nl> - } <nl> - <nl> - TF_RETURN_IF_ERROR ( writer - > WriteScalar ( <nl> - strings : : StrCat ( batch_prefix , " _output_size " ) , <nl> - batches_ [ index ] . outputs . size ( ) ) ) ; <nl> - for ( size_t j = 0 ; j < batches_ [ index ] . outputs . size ( ) ; + + j ) { <nl> - string tensor_prefix = <nl> - strings : : StrCat ( batch_prefix , " _output_ " , j ) ; <nl> - if ( ! batches_ [ index ] . status . ok ( ) ) { <nl> - DCHECK ( batches_ [ index ] . error_index > = 0 & & <nl> - batches_ [ index ] . error_index < <nl> - itr_ - > dataset ( ) - > batch_size_ ) ; <nl> - / / If the batch is not full , we only store the first <nl> - / / ` error_index ` values . The rest of the batch tensor might not <nl> - / / be initialized , and accessing that will raise msan errors . <nl> - TF_RETURN_IF_ERROR ( writer - > WriteTensor ( <nl> - tensor_prefix , batches_ [ index ] . outputs [ j ] . Slice ( <nl> - 0 , batches_ [ index ] . error_index ) ) ) ; <nl> - } else { <nl> - TF_RETURN_IF_ERROR ( writer - > WriteTensor ( <nl> - tensor_prefix , batches_ [ index ] . outputs [ j ] ) ) ; <nl> - } <nl> - } <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - Status Restore ( IteratorContext * ctx , IteratorStateReader * reader , <nl> - Iterator * itr , size_t index ) { <nl> - mutex_lock l ( mu_ ) ; <nl> - if ( reached_eof_ ) { <nl> - return errors : : FailedPrecondition ( <nl> - " Already reached the end of the sequence . " ) ; <nl> - } <nl> - string prefix = itr - > full_name ( strings : : StrCat ( " numa_block_ " , index ) ) ; <nl> - reached_eof_ = <nl> - reader - > Contains ( strings : : StrCat ( prefix , " _end_of_input " ) ) ; <nl> - for ( size_t i = 0 ; i < kWindowSize ; + + i ) { <nl> - string batch_prefix = strings : : StrCat ( prefix , " _batch_ " , i ) ; <nl> - if ( ! reader - > Contains ( strings : : StrCat ( batch_prefix , " _code " ) ) ) { <nl> - break ; <nl> - } <nl> - Batch batch ; <nl> - batch . state = BatchState : : kOutputsComplete ; <nl> - int64 code_int ; <nl> - TF_RETURN_IF_ERROR ( reader - > ReadScalar ( <nl> - strings : : StrCat ( batch_prefix , " _code " ) , & code_int ) ) ; <nl> - error : : Code code = static_cast < error : : Code > ( code_int ) ; <nl> - if ( code ! = error : : Code : : OK ) { <nl> - string error_message ; <nl> - TF_RETURN_IF_ERROR ( reader - > ReadScalar ( <nl> - strings : : StrCat ( batch_prefix , " _msg " ) , & error_message ) ) ; <nl> - batch . 
status = Status ( code , error_message ) ; <nl> - int64 error_index_int = - 1 ; <nl> - TF_RETURN_IF_ERROR ( reader - > ReadScalar ( <nl> - strings : : StrCat ( batch_prefix , " _error_index " ) , <nl> - & error_index_int ) ) ; <nl> - if ( error_index_int < 0 | | <nl> - error_index_int > itr - > dataset ( ) - > batch_size_ ) { <nl> - return errors : : FailedPrecondition ( <nl> - " Error index out of bounds when restoring from checkpoint ; " <nl> - " error index : " , <nl> - error_index_int ) ; <nl> - } <nl> - batch . error_index = static_cast < size_t > ( error_index_int ) ; <nl> - } <nl> - int64 output_size = - 1 ; <nl> - TF_RETURN_IF_ERROR ( reader - > ReadScalar ( <nl> - strings : : StrCat ( batch_prefix , " _output_size " ) , & output_size ) ) ; <nl> - batch . outputs . reserve ( output_size ) ; <nl> - for ( size_t j = 0 ; j < output_size ; + + j ) { <nl> - string tensor_name = strings : : StrCat ( batch_prefix , " _output_ " , j ) ; <nl> - Tensor t ; <nl> - TF_RETURN_IF_ERROR ( reader - > ReadTensor ( tensor_name , & t ) ) ; <nl> - batch . outputs . emplace_back ( std : : move ( t ) ) ; <nl> - } <nl> - batches_ [ i ] = std : : move ( batch ) ; <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - private : <nl> - bool AllMapOperationsFinished ( ) EXCLUSIVE_LOCKS_REQUIRED ( mu_ ) { <nl> - for ( size_t i = 0 ; i < kWindowSize ; + + i ) { <nl> - if ( batches_ [ i ] . state = = BatchState : : kInputsFilled | | <nl> - batches_ [ i ] . state = = BatchState : : kAllMapsStarted ) { <nl> - return false ; <nl> - } <nl> - if ( batches_ [ i ] . state ! = BatchState : : kOutputsComplete & & <nl> - ! reached_eof_ ) { <nl> - return false ; <nl> - } <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - / / Batches begin in the ` kEmpty ` state . Once the RunnerThread has <nl> - / / filled the ` inputs ` to a ` Batch ` , it transitions to the <nl> - / / ` kInputsFilled ` state . At this point , the Worker threads run the map <nl> - / / function and copy the outputs appropriately . Once all worker threads <nl> - / / have started , it transitions to ` kAllMapsStarted ` . After the outputs <nl> - / / are complete , the GetNext call can consume the outputs , and return <nl> - / / the batch to the kEmpty state . <nl> - enum class BatchState { <nl> - kEmpty , <nl> - kInputsFilled , <nl> - kAllMapsStarted , <nl> - kOutputsComplete , <nl> - } ; <nl> - <nl> - / / Batch captures all the state of an output batch as it progresses <nl> - / / through the machinery . Once the RunnerThread fills inputs , it <nl> - / / transitions to ` kInputsFilled ` . At this point , the worker threads can <nl> - / / work on it , incrementing outputs_complete for every element of the <nl> - / / input set that is copied into the output Tensors . Once all the input <nl> - / / tuples have been processed ( i . e . num_outputs_complete = = <nl> - / / inputs . size ( ) ) , it transitions to the ` kOutputsComplete ` stage , where <nl> - / / it is ready to be returned by a ` GetBatch ` call ( called from <nl> - / / ` GetNextInternal ` ) . <nl> - struct Batch { <nl> - BatchState state ; <nl> - / / Aggregates the Status of the input iterator ' s GetNext <nl> - / / calls , in addition to the Status of the map function invocations . <nl> - / / <nl> - / / In the case where multiple non - OK statuses are encountered , we <nl> - / / return the first one encountered . <nl> - Status status ; <nl> - / / In order to return the correct error status , we keep track of the <nl> - / / error_index . 
<nl> - size_t error_index ; <nl> - / / The batch_size input tuples ( or fewer in the case of the last <nl> - / / batch ) . <nl> - / / TODO ( saeta ) : Avoid re - allocating vectors all the time ! <nl> - std : : vector < std : : vector < Tensor > > inputs ; <nl> - std : : vector < Tensor > outputs ; <nl> - size_t next_input_to_process ; <nl> - size_t num_outputs_complete ; <nl> - <nl> - Batch ( ) { Reset ( ) ; } <nl> - <nl> - / / Resets the Batch state ( e . g . after consuming the outputs ) . <nl> - void Reset ( ) { <nl> - state = BatchState : : kEmpty ; <nl> - status = Status : : OK ( ) ; <nl> - inputs . clear ( ) ; <nl> - inputs . shrink_to_fit ( ) ; <nl> - outputs . clear ( ) ; <nl> - outputs . shrink_to_fit ( ) ; <nl> - next_input_to_process = 0 ; <nl> - num_outputs_complete = 0 ; <nl> - error_index = - 1 ; <nl> - } <nl> - } ; <nl> - <nl> - Iterator * itr_ ; / / Not owned . <nl> - mutex mu_ ; <nl> - Batch batches_ [ kWindowSize ] GUARDED_BY ( mu_ ) ; <nl> - size_t next_input_batch_ GUARDED_BY ( mu_ ) = - 1 ; <nl> - size_t next_input_ GUARDED_BY ( mu_ ) = 0 ; <nl> - size_t next_output_ GUARDED_BY ( mu_ ) = 0 ; <nl> - bool cancelled_ GUARDED_BY ( mu_ ) = false ; <nl> - bool reached_eof_ GUARDED_BY ( mu_ ) = false ; <nl> - <nl> - / / The runner thread waits on this condition variable for space to be <nl> - / / available . When the client thread takes a value out of the circular <nl> - / / buffer , it notifies this condition variable that space is now <nl> - / / available . <nl> - condition_variable runner_cond_var_ GUARDED_BY ( mu_ ) ; <nl> - / / The worker threads wait on this condition variable for available <nl> - / / inputs . When the runner thread makes new inputs available , it <nl> - / / notifies this condition variable . <nl> - condition_variable worker_cond_var_ GUARDED_BY ( mu_ ) ; <nl> - / / The client threads wait on this condition variable for available <nl> - / / batched outputs . When worker threads complete a batch , they notify <nl> - / / this condition variable . <nl> - condition_variable client_cond_var_ GUARDED_BY ( mu_ ) ; <nl> - } ; <nl> - / / Mark NumaBlockManager as a friend of Iterator in order to call <nl> - / / protected Iterator methods during checkpointing . <nl> - friend NumaBlockManager ; <nl> - <nl> - struct NumaWorkerBlock { <nl> - NumaBlockManager manager ; <nl> - / / TODO ( saeta ) : Migrate to BackgroundWorker . <nl> - std : : vector < std : : unique_ptr < Thread > > threads ; <nl> - <nl> - explicit NumaWorkerBlock ( Iterator * itr ) : manager ( itr ) { } <nl> - } ; <nl> - <nl> - static void CustomNumaWorkerBlockDeleter ( NumaWorkerBlock * ptr ) { <nl> - ptr - > ~ NumaWorkerBlock ( ) ; <nl> - port : : NUMAFree ( ptr , sizeof ( NumaWorkerBlock ) ) ; <nl> - } <nl> - static void DefaultNumaWorkerBlockDeleter ( NumaWorkerBlock * ptr ) { <nl> - delete ptr ; <nl> - } <nl> - <nl> - static Status CopyPartialBatch ( Tensor * output , const Tensor & value , <nl> - int64 num_elements ) { <nl> - switch ( value . dtype ( ) ) { <nl> - # define HANDLE_TYPE ( type ) \ <nl> - case DataTypeToEnum < type > : : value : { \ <nl> - auto output_t = output - > flat_outer_dims < type > ( ) ; \ <nl> - auto value_t = value . flat_outer_dims < type > ( ) ; \ <nl> - for ( size_t i = 0 ; i < num_elements ; i + + ) { \ <nl> - output_t . template chip < 0 > ( i ) = value_t . 
template chip < 0 > ( i ) ; \ <nl> - } \ <nl> - return Status : : OK ( ) ; \ <nl> - } <nl> - TF_CALL_DATASET_TYPES ( HANDLE_TYPE ) ; <nl> - # undef HANDLE_TYPE <nl> - default : <nl> - return errors : : InvalidArgument ( " Unsupported data type : " , <nl> - DataTypeString ( value . dtype ( ) ) ) ; <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - Status EnsureBackgroundThreadsStarted ( IteratorContext * ctx ) <nl> - EXCLUSIVE_LOCKS_REQUIRED ( * mu_ ) { <nl> - if ( curr_num_parallel_calls_ > = num_parallel_calls_ - > value ) { <nl> - / / All necessary threads have been started . <nl> - curr_num_parallel_calls_ = num_parallel_calls_ - > value ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - VLOG ( 4 ) < < " Starting workers " ; <nl> - bool numa_enabled = port : : NUMAEnabled ( ) ; <nl> - <nl> - if ( ! numa_enabled ) { <nl> - LOG ( INFO ) < < " NUMA not enabled on this host . " ; <nl> - } <nl> - <nl> - int num_numa_nodes = port : : NUMANumNodes ( ) ; <nl> - if ( num_numa_nodes < 1 ) { <nl> - return errors : : Internal ( " The number of NUMA nodes is invalid : " , <nl> - num_numa_nodes ) ; <nl> - } <nl> - <nl> - / / Only resize when empty to support restoring from checkpoints . <nl> - if ( workers_ . empty ( ) ) { <nl> - VLOG ( 3 ) < < " # NUMA Nodes : " < < num_numa_nodes <nl> - < < " , # Parallel Calls : " < < num_parallel_calls_ - > value ; <nl> - workers_ . resize ( num_numa_nodes ) ; <nl> - } else { <nl> - num_numa_nodes = workers_ . size ( ) ; <nl> - } <nl> - <nl> - / / Round up num_parallel_calls , with a minimum of 1 . <nl> - const size_t num_threads_per_block = <nl> - std : : max ( 1LL , ( num_parallel_calls_ - > value + num_numa_nodes - 1 ) / <nl> - num_numa_nodes ) ; <nl> - <nl> - VLOG ( 3 ) < < " Starting " < < num_threads_per_block * num_numa_nodes <nl> - < < " worker threads , with " < < num_threads_per_block <nl> - < < " threads per block . " ; <nl> - <nl> - / / Only allocate new_ctx if required . <nl> - std : : shared_ptr < IteratorContext > new_ctx ; <nl> - <nl> - for ( int i = 0 ; i < num_numa_nodes ; + + i ) { <nl> - if ( ! workers_ [ i ] ) { <nl> - if ( numa_enabled ) { <nl> - / / Allocate in appropriate NUMA domain . <nl> - / / 4k page align . <nl> - void * ptr = port : : NUMAMalloc ( i , sizeof ( NumaWorkerBlock ) , 0 ) ; <nl> - if ( ptr ! = nullptr ) { <nl> - NumaWorkerBlock * block = new ( ptr ) NumaWorkerBlock ( this ) ; <nl> - workers_ [ i ] = <nl> - std : : unique_ptr < NumaWorkerBlock , <nl> - std : : function < void ( NumaWorkerBlock * ) > > ( <nl> - block , CustomNumaWorkerBlockDeleter ) ; <nl> - } else { <nl> - LOG ( ERROR ) < < " Could not NUMA - allocate worker block : " < < i ; <nl> - } <nl> - } <nl> - / / If the NUMA allocation fails , or NUMA is not enabled . <nl> - if ( ! workers_ [ i ] ) { <nl> - workers_ [ i ] = <nl> - std : : unique_ptr < NumaWorkerBlock , <nl> - std : : function < void ( NumaWorkerBlock * ) > > ( <nl> - new NumaWorkerBlock ( this ) , DefaultNumaWorkerBlockDeleter ) ; <nl> - } <nl> - } <nl> - / / Be sure to start threads if num_parallel_calls_ has changed . <nl> - for ( size_t j = workers_ [ i ] - > threads . size ( ) ; <nl> - j < num_threads_per_block ; + + j ) { <nl> - VLOG ( 3 ) < < " Starting worker " < < i < < " , " < < j ; <nl> - if ( ! new_ctx ) { <nl> - new_ctx = std : : make_shared < IteratorContext > ( * ctx ) ; <nl> - } <nl> - workers_ [ i ] - > threads . 
emplace_back ( ctx - > StartThread ( <nl> - strings : : StrCat ( " tf_data_numa_map_and_batch_ " , i , " _ " , j ) , <nl> - [ this , new_ctx , i , j ] ( ) { WorkerThread ( new_ctx , i , j ) ; } ) ) ; <nl> - VLOG ( 3 ) < < " Worker " < < i < < " , " < < j < < " successfully started . " ; <nl> - } <nl> - } <nl> - if ( ! runner_thread_ ) { <nl> - if ( ! new_ctx ) { <nl> - new_ctx = std : : make_shared < IteratorContext > ( * ctx ) ; <nl> - } <nl> - runner_thread_ = <nl> - ctx - > StartThread ( " tf_data_numa_map_and_batch " , <nl> - [ this , new_ctx ] { RunnerThread ( new_ctx ) ; } ) ; <nl> - } <nl> - VLOG ( 3 ) < < " All workers & runner thread started . " ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - void AllocateOutput ( IteratorContext * ctx , size_t batch_size , <nl> - const std : : vector < Tensor > & map_fn_outputs , <nl> - std : : vector < Tensor > * batch_outputs ) { <nl> - DCHECK ( dataset ( ) - > output_dtypes ( ) . size ( ) = = <nl> - dataset ( ) - > output_shapes ( ) . size ( ) ) ; <nl> - DCHECK ( map_fn_outputs . size ( ) = = dataset ( ) - > output_dtypes ( ) . size ( ) ) ; <nl> - for ( size_t i = 0 ; i < dataset ( ) - > output_dtypes ( ) . size ( ) ; + + i ) { <nl> - TensorShape component_shape ( { static_cast < uint32 > ( batch_size ) } ) ; <nl> - component_shape . AppendShape ( map_fn_outputs . at ( i ) . shape ( ) ) ; <nl> - AllocatorAttributes attr ; <nl> - attr . set_gpu_compatible ( true ) ; <nl> - batch_outputs - > emplace_back ( ctx - > allocator ( attr ) , <nl> - map_fn_outputs . at ( i ) . dtype ( ) , <nl> - component_shape ) ; <nl> - } <nl> - } <nl> - <nl> - void RunnerThread ( std : : shared_ptr < IteratorContext > ctx ) <nl> - LOCKS_EXCLUDED ( mu_ ) { <nl> - RecordStart ( ctx . get ( ) ) ; <nl> - auto cleanup = gtl : : MakeCleanup ( [ this , & ctx ] { <nl> - / / Set end of input on all the managers in order to clean up in an <nl> - / / orderly fashion . <nl> - VLOG ( 3 ) < < " Setting End of Input on workers_ [ * ] - > manager " ; <nl> - for ( size_t i = 0 ; i < workers_ . size ( ) ; + + i ) { <nl> - workers_ [ i ] - > manager . SetEndOfInput ( ) ; <nl> - } <nl> - RecordStop ( ctx . get ( ) ) ; <nl> - } ) ; <nl> - <nl> - const size_t num_blocks = workers_ . size ( ) ; <nl> - <nl> - while ( true ) { <nl> - for ( size_t block = 0 ; block < num_blocks ; + + block ) { <nl> - VLOG ( 4 ) < < " RunnerThread waiting for input space in block : " <nl> - < < block ; <nl> - if ( TF_PREDICT_FALSE ( <nl> - ! workers_ [ block ] - > manager . WaitForInputSpace ( ctx . get ( ) ) ) ) { <nl> - VLOG ( 3 ) < < " RunnerThread exiting due to cancellation . " ; <nl> - return ; <nl> - } <nl> - VLOG ( 4 ) < < " RunnerThread has space ; pulling on upstream for block " <nl> - < < block ; <nl> - <nl> - Status s ; <nl> - std : : vector < std : : vector < Tensor > > inputs ; <nl> - bool end_of_sequence = false ; <nl> - for ( size_t i = 0 ; i < dataset ( ) - > batch_size_ ; + + i ) { <nl> - std : : vector < Tensor > tuple ; <nl> - s . Update ( <nl> - input_impl_ - > GetNext ( ctx . get ( ) , & tuple , & end_of_sequence ) ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - break ; <nl> - } <nl> - if ( end_of_sequence ) { <nl> - VLOG ( 4 ) < < " Runner thread encountered end of sequence . " ; <nl> - if ( dataset ( ) - > drop_remainder_ ) { <nl> - return ; <nl> - } <nl> - break ; <nl> - } <nl> - inputs . push_back ( std : : move ( tuple ) ) ; <nl> - } <nl> - <nl> - VLOG ( 4 ) < < " Moving inputs to block " < < block <nl> - < < " , which has size : " < < inputs . size ( ) ; <nl> - if ( ! s . 
ok ( ) | | ! inputs . empty ( ) ) { <nl> - workers_ [ block ] - > manager . PushInputs ( s , std : : move ( inputs ) ) ; <nl> - VLOG ( 4 ) < < " Inputs moved into block " < < block ; <nl> - } <nl> - if ( end_of_sequence ) { <nl> - return ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - <nl> - void WorkerThread ( std : : shared_ptr < IteratorContext > ctx , <nl> - const int numa_node , const int thread_num ) { <nl> - RecordStart ( ctx . get ( ) ) ; <nl> - WORKER_VLOG ( 3 ) < < " started . " ; <nl> - auto stop_cleanup = <nl> - gtl : : MakeCleanup ( [ this , numa_node , thread_num , & ctx ] ( ) { <nl> - RecordStop ( ctx . get ( ) ) ; <nl> - WORKER_VLOG ( 3 ) < < " exiting . " ; <nl> - } ) ; <nl> - <nl> - NumaWorkerBlock * block = workers_ [ numa_node ] . get ( ) ; <nl> - port : : NUMASetThreadNodeAffinity ( numa_node ) ; <nl> - const int num_numa_nodes = port : : NUMANumNodes ( ) ; <nl> - const int minimum_num_parallel_calls = thread_num * num_numa_nodes ; <nl> - <nl> - while ( true ) { <nl> - / / Put threads to sleep based on autotuner . <nl> - { <nl> - mutex_lock l ( * mu_ ) ; <nl> - while ( minimum_num_parallel_calls > = num_parallel_calls_ - > value & & <nl> - ! cancelled_ ) { <nl> - RecordStop ( ctx . get ( ) ) ; <nl> - autotune_cond_var_ - > wait ( l ) ; <nl> - RecordStart ( ctx . get ( ) ) ; <nl> - } <nl> - if ( cancelled_ ) { <nl> - return ; <nl> - } <nl> - } <nl> - <nl> - std : : vector < Tensor > input ; <nl> - uint64 index = 0 ; <nl> - size_t sequence_number = 0 ; <nl> - WORKER_VLOG ( 4 ) < < " retrieving input . " ; <nl> - { <nl> - profiler : : TraceMe activity ( <nl> - " NumaMapAndBatch : : Iterator : : Worker : : RetrieveInput " , <nl> - profiler : : TraceMeLevel : : kInfo ) ; <nl> - if ( ! block - > manager . RetrieveInput ( ctx . get ( ) , & input , & index , <nl> - & sequence_number ) ) { <nl> - return ; <nl> - } <nl> - } <nl> - <nl> - WORKER_VLOG ( 4 ) < < " retrieved input ; index : " < < index <nl> - < < " , sequence_number : " < < sequence_number ; <nl> - <nl> - std : : vector < Tensor > return_values ; <nl> - Status s ; <nl> - { <nl> - profiler : : TraceMe activity ( <nl> - " NumaMapAndBatch : : Iterator : : Worker : : FunctionExecution " , <nl> - profiler : : TraceMeLevel : : kInfo ) ; <nl> - s = instantiated_captured_func_ - > Run ( ctx . get ( ) , std : : move ( input ) , <nl> - & return_values ) ; <nl> - } <nl> - WORKER_VLOG ( 4 ) < < " ran function for index : " < < index <nl> - < < " , sequence_number : " < < sequence_number ; <nl> - <nl> - if ( s . ok ( ) ) { <nl> - std : : vector < Tensor > * output = block - > manager . GetBatchTensors ( <nl> - sequence_number , <nl> - [ this , ctx , & return_values ] ( size_t batch_size , <nl> - std : : vector < Tensor > * output ) { <nl> - AllocateOutput ( ctx . get ( ) , batch_size , return_values , output ) ; <nl> - } ) ; <nl> - WORKER_VLOG ( 4 ) < < " copying tensors to batch output . " ; <nl> - { <nl> - profiler : : TraceMe activity ( <nl> - " NumaMapAndBatch : : Iterator : : Worker : : BatchCopy " , <nl> - profiler : : TraceMeLevel : : kInfo ) ; <nl> - for ( size_t i = 0 ; i < return_values . size ( ) & & s . ok ( ) ; + + i ) { <nl> - Tensor & tensor = return_values . at ( i ) ; <nl> - Tensor * batch = & output - > at ( i ) ; <nl> - if ( tensor . NumElements ( ) ! = <nl> - ( batch - > NumElements ( ) / batch - > dim_size ( 0 ) ) ) { <nl> - s . Update ( errors : : InvalidArgument ( <nl> - " Cannot add tensor to the batch : number of elements does " <nl> - " not match . Shapes are : [ tensor ] : " , <nl> - tensor . shape ( ) . 
DebugString ( ) , <nl> - " , [ batch ] : " , batch - > shape ( ) . DebugString ( ) ) ) ; <nl> - break ; <nl> - } <nl> - s . Update ( batch_util : : CopyElementToSlice ( std : : move ( tensor ) , <nl> - batch , index ) ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - block - > manager . RecordBatchEntryComplete ( sequence_number , index , s ) ; <nl> - WORKER_VLOG ( 4 ) < < " finished index : " < < index <nl> - < < " , sequence_number : " < < sequence_number ; <nl> - } <nl> - } <nl> - <nl> - / / mu_ protects shared internal state and is used to coordinate between <nl> - / / the auto - tuner , client threads , worker threads , and the runner thread . <nl> - const std : : shared_ptr < mutex > mu_ ; <nl> - const std : : shared_ptr < condition_variable > autotune_cond_var_ ; <nl> - / / The maximum number of parallel calls ( can be auto - tuned ) . <nl> - const std : : shared_ptr < model : : SharedState > num_parallel_calls_ ; <nl> - std : : unique_ptr < InstantiatedCapturedFunction > instantiated_captured_func_ ; <nl> - <nl> - / / Caches the last - seen value of num_parallel_calls_ - > value to <nl> - / / short - circuit starting workers . <nl> - int64 curr_num_parallel_calls_ GUARDED_BY ( * mu_ ) = 0 ; <nl> - <nl> - std : : unique_ptr < IteratorBase > input_impl_ ; <nl> - int64 cur_block_ GUARDED_BY ( * mu_ ) = 0 ; <nl> - bool global_end_of_input_ GUARDED_BY ( * mu_ ) = false ; <nl> - bool cancelled_ GUARDED_BY ( * mu_ ) = false ; <nl> - std : : vector < std : : unique_ptr < NumaWorkerBlock , <nl> - std : : function < void ( NumaWorkerBlock * ) > > > <nl> - workers_ ; / / Const after initialization . <nl> - std : : unique_ptr < Thread > runner_thread_ GUARDED_BY ( * mu_ ) ; <nl> - } ; <nl> - <nl> - const DatasetBase * const input_ ; <nl> - const int64 batch_size_ ; <nl> - const int64 num_parallel_calls_ ; <nl> - const bool drop_remainder_ ; <nl> - const DataTypeVector output_types_ ; <nl> - const std : : vector < PartialTensorShape > output_shapes_ ; <nl> - const std : : unique_ptr < CapturedFunction > captured_func_ ; <nl> - } ; <nl> - <nl> - std : : shared_ptr < FunctionMetadata > func_metadata_ = nullptr ; <nl> - DataTypeVector output_types_ ; <nl> - std : : vector < PartialTensorShape > output_shapes_ ; <nl> - bool preserve_cardinality_ ; <nl> - } ; <nl> - <nl> - REGISTER_KERNEL_BUILDER ( <nl> - Name ( " ExperimentalNumaMapAndBatchDataset " ) . Device ( DEVICE_CPU ) , <nl> - NumaMapAndBatchDatasetOp ) ; <nl> - <nl> - } / / namespace <nl> - } / / namespace data <nl> - } / / namespace tensorflow <nl> mmm a / tensorflow / python / data / experimental / kernel_tests / map_and_batch_test . py <nl> ppp b / tensorflow / python / data / experimental / kernel_tests / map_and_batch_test . py <nl> <nl> class MapAndBatchTest ( test_base . DatasetTestBase , parameterized . TestCase ) : <nl> <nl> @ parameterized . 
named_parameters ( <nl> - ( " Default " , None , None , False ) , <nl> - ( " SequentialCalls " , 1 , None , False ) , <nl> - ( " ParallelCalls " , 2 , None , False ) , <nl> - ( " ParallelBatches " , None , 10 , False ) , <nl> - ( " DefaultNUMA " , None , None , True ) , <nl> - ( " SequentialCallsNUMA " , 1 , None , True ) , <nl> - ( " ParallelCallsNUMA " , 2 , None , True ) , <nl> - ( " ParallelBatchesNUMA " , None , 10 , True ) , <nl> + ( " Default " , None , None ) , <nl> + ( " SequentialCalls " , 1 , None ) , <nl> + ( " ParallelCalls " , 2 , None ) , <nl> + ( " ParallelBatches " , None , 10 ) , <nl> ) <nl> - def testMapAndBatch ( self , num_parallel_calls , num_parallel_batches , <nl> - numa_aware ) : <nl> + def testMapAndBatch ( self , num_parallel_calls , num_parallel_batches ) : <nl> " " " Test a dataset that maps a TF function across its input elements . " " " <nl> # The pipeline is TensorSliceDataset - > <nl> # RepeatDataset ( count ) - > MapAndBatchDataset ( square_3 , batch_size ) . <nl> def testMapAndBatch ( self , num_parallel_calls , num_parallel_batches , <nl> def _map_fn ( x , y , z ) : <nl> return math_ops . square ( x ) , math_ops . square ( y ) , math_ops . square ( z ) <nl> <nl> - def dataset_fn ( batch_size , count , numa_aware = numa_aware ) : <nl> + def dataset_fn ( batch_size , count ) : <nl> dataset = dataset_ops . Dataset . from_tensor_slices ( components ) . repeat ( <nl> count ) . apply ( <nl> batching . map_and_batch ( <nl> def dataset_fn ( batch_size , count , numa_aware = numa_aware ) : <nl> batch_size = batch_size , <nl> num_parallel_calls = num_parallel_calls , <nl> num_parallel_batches = num_parallel_batches ) ) <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> return dataset <nl> <nl> # Batch of a finite input , where the batch_size divides the <nl> def dataset_fn ( batch_size , count , numa_aware = numa_aware ) : <nl> self . assertDatasetProduces ( dataset_fn ( 0 , 14 ) , expected_output = [ ] ) <nl> <nl> @ parameterized . named_parameters ( <nl> - ( " Even " , False , False ) , <nl> - ( " Uneven " , True , False ) , <nl> - ( " EvenNUMA " , False , True ) , <nl> - ( " UnevenNUMA " , True , True ) , <nl> + ( " Even " , False ) , <nl> + ( " Uneven " , True ) , <nl> ) <nl> - def testMapAndBatchPartialBatch ( self , drop_remainder , numa_aware ) : <nl> + def testMapAndBatchPartialBatch ( self , drop_remainder ) : <nl> dataset = ( <nl> dataset_ops . Dataset . range ( 10 ) . apply ( <nl> batching . map_and_batch ( <nl> def testMapAndBatchPartialBatch ( self , drop_remainder , numa_aware ) : <nl> batch_size = 4 , <nl> drop_remainder = drop_remainder ) ) ) <nl> <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> - <nl> if drop_remainder : <nl> self . assertEqual ( <nl> [ 4 , 1 ] , dataset_ops . get_legacy_output_shapes ( dataset ) . as_list ( ) ) <nl> def testMapAndBatchPartialBatch ( self , drop_remainder , numa_aware ) : <nl> expected_output . append ( [ [ 64 ] , [ 81 ] ] ) <nl> self . assertDatasetProduces ( dataset , expected_output = expected_output ) <nl> <nl> - @ parameterized . 
named_parameters ( <nl> - ( " Normal " , False ) , <nl> - ( " NUMA " , True ) , <nl> - ) <nl> - def testMapAndBatchYieldsPartialBatch ( self , numa_aware ) : <nl> + def testMapAndBatchYieldsPartialBatch ( self ) : <nl> dataset = ( <nl> dataset_ops . Dataset . range ( 10 ) . apply ( <nl> batching . map_and_batch ( lambda x : array_ops . reshape ( x * x , [ 1 ] ) , 4 ) ) ) <nl> <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> self . assertEqual ( <nl> [ None , 1 ] , dataset_ops . get_legacy_output_shapes ( dataset ) . as_list ( ) ) <nl> expected_output = [ [ [ 0 ] , [ 1 ] , [ 4 ] , [ 9 ] ] , [ [ 16 ] , [ 25 ] , [ 36 ] , [ 49 ] ] , <nl> [ [ 64 ] , [ 81 ] ] ] <nl> self . assertDatasetProduces ( dataset , expected_output = expected_output ) <nl> <nl> - @ parameterized . named_parameters ( <nl> - ( " Normal " , False ) , <nl> - ( " NUMA " , True ) , <nl> - ) <nl> - def testMapAndBatchParallelGetNext ( self , numa_aware ) : <nl> + def testMapAndBatchParallelGetNext ( self ) : <nl> dataset = dataset_ops . Dataset . range ( 50000 ) . apply ( <nl> batching . map_and_batch ( lambda x : x , batch_size = 100 ) ) <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> <nl> if context . executing_eagerly ( ) : <nl> iterator = iter ( dataset ) <nl> def testMapAndBatchParallelGetNext ( self , numa_aware ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> self . evaluate ( [ element ( ) for element in elements ] ) <nl> <nl> - @ parameterized . named_parameters ( <nl> - ( " Normal " , False ) , <nl> - ( " NUMA " , True ) , <nl> - ) <nl> - def testMapAndBatchParallelGetNextDropRemainder ( self , numa_aware ) : <nl> + def testMapAndBatchParallelGetNextDropRemainder ( self ) : <nl> dataset = dataset_ops . Dataset . range ( 49999 ) . apply ( <nl> batching . map_and_batch ( <nl> lambda x : x , batch_size = 100 , drop_remainder = True ) ) <nl> <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> - <nl> if context . executing_eagerly ( ) : <nl> iterator = iter ( dataset ) <nl> get_next = iterator . _next_internal # pylint : disable = protected - access <nl> def testMapAndBatchParallelGetNextDropRemainder ( self , numa_aware ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> self . evaluate ( [ element ( ) for element in elements ] ) <nl> <nl> - @ parameterized . named_parameters ( <nl> - ( " Normal " , False ) , <nl> - ( " NUMA " , True ) , <nl> - ) <nl> - def testMapAndBatchSparse ( self , numa_aware ) : <nl> + def testMapAndBatchSparse ( self ) : <nl> <nl> def _sparse ( i ) : <nl> return sparse_tensor . SparseTensorValue ( <nl> def _sparse ( i ) : <nl> <nl> dataset = dataset_ops . Dataset . range ( 10 ) . apply ( <nl> batching . map_and_batch ( _sparse , 5 ) ) <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> <nl> self . assertDatasetProduces ( <nl> dataset , <nl> def _sparse ( i ) : <nl> dense_shape = [ 5 , 1 ] ) for i in range ( 2 ) <nl> ] ) <nl> <nl> - @ parameterized . 
named_parameters ( <nl> - ( " Normal " , False ) , <nl> - ( " NUMA " , True ) , <nl> - ) <nl> - def testMapAndBatchFails ( self , numa_aware ) : <nl> + def testMapAndBatchFails ( self ) : <nl> " " " Test a dataset that maps a TF function across its input elements . " " " <nl> <nl> with self . assertRaisesRegexp ( errors . InvalidArgumentError , " oops " ) : <nl> def testMapAndBatchFails ( self , numa_aware ) : <nl> array_ops . check_numerics ( <nl> constant_op . constant ( 1 . 0 ) / constant_op . constant ( 0 . 0 ) , " oops " ) ) <nl> dataset = dataset . apply ( batching . map_and_batch ( lambda x : x , 14 ) ) <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> get_next = self . getNext ( dataset ) <nl> self . evaluate ( get_next ( ) ) <nl> <nl> - @ parameterized . named_parameters ( <nl> - ( " Normal " , False ) , <nl> - ( " NUMA " , True ) , <nl> - ) <nl> - def testMapAndBatchShapeMismatch ( self , numa_aware ) : <nl> + def testMapAndBatchShapeMismatch ( self ) : <nl> " " " Test a dataset that maps a TF function across its input elements . " " " <nl> <nl> def generator ( ) : <nl> def generator ( ) : <nl> generator , output_types = dtypes . int32 ) <nl> batch_size = 4 <nl> dataset = dataset . apply ( batching . map_and_batch ( lambda x : x , batch_size ) ) <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> self . assertDatasetProduces ( <nl> dataset , <nl> expected_error = ( errors . InvalidArgumentError , <nl> " number of elements does not match " ) ) <nl> <nl> - @ parameterized . named_parameters ( <nl> - ( " Normal " , False ) , <nl> - ( " NUMA " , True ) , <nl> - ) <nl> - def testMapAndBatchImplicitDispose ( self , numa_aware ) : <nl> + def testMapAndBatchImplicitDispose ( self ) : <nl> # Tests whether a map and batch dataset will be cleaned up correctly when <nl> # the pipeline does not run it until exhaustion . <nl> # The pipeline is TensorSliceDataset - > RepeatDataset ( 1000 ) - > <nl> def _map_fn ( x , y , z ) : <nl> dataset = dataset_ops . Dataset . from_tensor_slices ( components ) . repeat ( <nl> 1000 ) . apply ( batching . map_and_batch ( _map_fn , batch_size = 100 ) ) <nl> dataset = dataset . prefetch ( 5 ) <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> get_next = self . getNext ( dataset ) <nl> for _ in range ( 3 ) : <nl> self . evaluate ( get_next ( ) ) <nl> <nl> @ parameterized . named_parameters ( <nl> - ( " 1 " , 0 , False ) , <nl> - ( " 2 " , 5 , False ) , <nl> - ( " 3 " , 10 , False ) , <nl> - ( " 4 " , 90 , False ) , <nl> - ( " 5 " , 95 , False ) , <nl> - ( " 6 " , 99 , False ) , <nl> - ( " 1NUMA " , 0 , True ) , <nl> - ( " 2NUMA " , 5 , True ) , <nl> - ( " 3NUMA " , 10 , True ) , <nl> - ( " 4NUMA " , 90 , True ) , <nl> - ( " 5NUMA " , 95 , True ) , <nl> - ( " 6NUMA " , 99 , True ) , <nl> + ( " 1 " , 0 ) , <nl> + ( " 2 " , 5 ) , <nl> + ( " 3 " , 10 ) , <nl> + ( " 4 " , 90 ) , <nl> + ( " 5 " , 95 ) , <nl> + ( " 6 " , 99 ) , <nl> ) <nl> - def testMapAndBatchMapError ( self , threshold , numa_aware ) : <nl> + def testMapAndBatchMapError ( self , threshold ) : <nl> <nl> def raising_py_fn ( i ) : <nl> if i > = threshold : <nl> def raising_py_fn ( i ) : <nl> batching . map_and_batch ( <nl> lambda x : script_ops . 
py_func ( raising_py_fn , [ x ] , dtypes . int64 ) , <nl> batch_size = 10 ) ) <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> <nl> get_next = self . getNext ( dataset ) <nl> for i in range ( threshold / / 10 ) : <nl> self . assertAllEqual ( [ i * 10 + j for j in range ( 10 ) ] , <nl> self . evaluate ( get_next ( ) ) ) <nl> - if numa_aware : <nl> - if threshold % 10 ! = 0 : <nl> - self . assertAllEqual ( <nl> - [ threshold / / 10 * 10 + j for j in range ( threshold % 10 ) ] , <nl> - self . evaluate ( get_next ( ) ) ) <nl> - else : <nl> - for i in range ( threshold / / 10 , 10 ) : <nl> - with self . assertRaises ( errors . InvalidArgumentError ) : <nl> - self . evaluate ( get_next ( ) ) <nl> + for i in range ( threshold / / 10 , 10 ) : <nl> + with self . assertRaises ( errors . InvalidArgumentError ) : <nl> + self . evaluate ( get_next ( ) ) <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> self . evaluate ( get_next ( ) ) <nl> <nl> @ parameterized . named_parameters ( <nl> - ( " 1 " , False , dtypes . bool , False ) , <nl> - ( " 2 " , - 42 , dtypes . int8 , False ) , <nl> - ( " 3 " , - 42 , dtypes . int16 , False ) , <nl> - ( " 4 " , - 42 , dtypes . int32 , False ) , <nl> - ( " 5 " , - 42 , dtypes . int64 , False ) , <nl> - ( " 6 " , 42 , dtypes . uint8 , False ) , <nl> - ( " 7 " , 42 , dtypes . uint16 , False ) , <nl> - ( " 8 " , 42 . 0 , dtypes . float16 , False ) , <nl> - ( " 9 " , 42 . 0 , dtypes . float32 , False ) , <nl> - ( " 10 " , 42 . 0 , dtypes . float64 , False ) , <nl> - ( " 11 " , b " hello " , dtypes . string , False ) , <nl> - ( " 1NUMA " , False , dtypes . bool , True ) , <nl> - ( " 2NUMA " , - 42 , dtypes . int8 , True ) , <nl> - ( " 3NUMA " , - 42 , dtypes . int16 , True ) , <nl> - ( " 4NUMA " , - 42 , dtypes . int32 , True ) , <nl> - ( " 5NUMA " , - 42 , dtypes . int64 , True ) , <nl> - ( " 6NUMA " , 42 , dtypes . uint8 , True ) , <nl> - ( " 7NUMA " , 42 , dtypes . uint16 , True ) , <nl> - ( " 8NUMA " , 42 . 0 , dtypes . float16 , True ) , <nl> - ( " 9NUMA " , 42 . 0 , dtypes . float32 , True ) , <nl> - ( " 10NUMA " , 42 . 0 , dtypes . float64 , True ) , <nl> - ( " 11NUMA " , b " hello " , dtypes . string , True ) , <nl> + ( " 1 " , False , dtypes . bool ) , <nl> + ( " 2 " , - 42 , dtypes . int8 ) , <nl> + ( " 3 " , - 42 , dtypes . int16 ) , <nl> + ( " 4 " , - 42 , dtypes . int32 ) , <nl> + ( " 5 " , - 42 , dtypes . int64 ) , <nl> + ( " 6 " , 42 , dtypes . uint8 ) , <nl> + ( " 7 " , 42 , dtypes . uint16 ) , <nl> + ( " 8 " , 42 . 0 , dtypes . float16 ) , <nl> + ( " 9 " , 42 . 0 , dtypes . float32 ) , <nl> + ( " 10 " , 42 . 0 , dtypes . float64 ) , <nl> + ( " 11 " , b " hello " , dtypes . string ) , <nl> ) <nl> - def testMapAndBatchTypes ( self , element , dtype , numa_aware ) : <nl> + def testMapAndBatchTypes ( self , element , dtype ) : <nl> <nl> def gen ( ) : <nl> yield element <nl> def gen ( ) : <nl> dataset = dataset_ops . Dataset . from_generator ( gen , dtype ) . repeat ( 100 ) . apply ( <nl> batching . map_and_batch ( lambda x : x , batch_size = 10 ) ) <nl> <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> - <nl> get_next = self . getNext ( dataset ) <nl> for _ in range ( 10 ) : <nl> self . 
assertAllEqual ( [ element for _ in range ( 10 ) ] , <nl> def testShortCircuitCapturedInput ( self ) : <nl> get_next = self . getNext ( dataset , requires_initialization = True ) <nl> self . assertAllEqual ( [ 42 ] * 10 , self . evaluate ( get_next ( ) ) ) <nl> <nl> - @ parameterized . named_parameters ( <nl> - ( " Normal " , False ) , <nl> - ( " NUMA " , True ) , <nl> - ) <nl> - def testMapAndBatchControlFlow ( self , numa_aware ) : <nl> + def testMapAndBatchControlFlow ( self ) : <nl> <nl> def map_fn ( x ) : <nl> previous_control_flow_v2_value = control_flow_util . ENABLE_CONTROL_FLOW_V2 <nl> def map_fn ( x ) : <nl> <nl> dataset = dataset_ops . Dataset . range ( 100 ) . apply ( <nl> batching . map_and_batch ( map_fn , batch_size = 10 ) ) <nl> - if numa_aware : <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - dataset = dataset . with_options ( options ) <nl> get_next = self . getNext ( dataset ) <nl> for i in range ( 10 ) : <nl> if i < 5 : <nl> mmm a / tensorflow / python / data / experimental / kernel_tests / optimization / BUILD <nl> ppp b / tensorflow / python / data / experimental / kernel_tests / optimization / BUILD <nl> py_test ( <nl> ] , <nl> ) <nl> <nl> - py_test ( <nl> - name = " make_numa_aware_test " , <nl> - srcs = [ " make_numa_aware_test . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - tags = [ <nl> - " no_oss " , <nl> - " no_pip " , <nl> - " no_windows " , <nl> - ] , <nl> - deps = [ <nl> - " / / tensorflow / python : client_testlib " , <nl> - " / / tensorflow / python : errors " , <nl> - " / / tensorflow / python / data / experimental / ops : batching " , <nl> - " / / tensorflow / python / data / experimental / ops : optimization " , <nl> - " / / tensorflow / python / data / kernel_tests : test_base " , <nl> - " / / tensorflow / python / data / ops : dataset_ops " , <nl> - ] , <nl> - ) <nl> - <nl> py_test ( <nl> name = " map_and_batch_fusion_test " , <nl> srcs = [ " map_and_batch_fusion_test . py " ] , <nl> deleted file mode 100644 <nl> index d79ae4387c868 . . 0000000000000 <nl> mmm a / tensorflow / python / data / experimental / kernel_tests / optimization / make_numa_aware_test . py <nl> ppp / dev / null <nl> <nl> - # Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> - # <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - " " " Tests for the ` MakeNumaAware ` optimization . " " " <nl> - from __future__ import absolute_import <nl> - from __future__ import division <nl> - from __future__ import print_function <nl> - <nl> - from tensorflow . python . data . experimental . ops import batching <nl> - from tensorflow . python . data . experimental . ops import optimization <nl> - from tensorflow . python . data . 
kernel_tests import test_base <nl> - from tensorflow . python . data . ops import dataset_ops <nl> - from tensorflow . python . framework import test_util <nl> - from tensorflow . python . platform import test <nl> - <nl> - <nl> - @ test_util . run_all_in_graph_and_eager_modes <nl> - class MakeNumaAwareTest ( test_base . DatasetTestBase ) : <nl> - <nl> - def testMakeNumaAware ( self ) : <nl> - dataset = dataset_ops . Dataset . range ( 10 ) . apply ( <nl> - optimization . assert_next ( [ " NumaMapAndBatch " ] ) ) . apply ( <nl> - batching . map_and_batch ( lambda x : x * x , 10 ) ) <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - options . experimental_optimization . apply_default_optimizations = False <nl> - dataset = dataset . with_options ( options ) <nl> - self . assertDatasetProduces ( <nl> - dataset , expected_output = [ [ x * x for x in range ( 10 ) ] ] ) <nl> - <nl> - <nl> - if __name__ = = " __main__ " : <nl> - test . main ( ) <nl> mmm a / tensorflow / python / data / experimental / kernel_tests / serialization / BUILD <nl> ppp b / tensorflow / python / data / experimental / kernel_tests / serialization / BUILD <nl> py_test ( <nl> ] , <nl> ) <nl> <nl> - py_test ( <nl> - name = " numa_map_and_batch_dataset_serialization_test " , <nl> - size = " medium " , <nl> - srcs = [ " numa_map_and_batch_dataset_serialization_test . py " ] , <nl> - srcs_version = " PY2AND3 " , <nl> - tags = [ <nl> - " no_oss " , # b / 118497483 <nl> - " no_pip " , <nl> - " no_windows " , <nl> - " notap " , <nl> - ] , <nl> - deps = [ <nl> - " : dataset_serialization_test_base " , <nl> - " / / tensorflow / python : client_testlib " , <nl> - " / / tensorflow / python : math_ops " , <nl> - " / / tensorflow / python / data / experimental / ops : batching " , <nl> - " / / tensorflow / python / data / ops : dataset_ops " , <nl> - ] , <nl> - ) <nl> - <nl> py_test ( <nl> name = " map_dataset_serialization_test " , <nl> size = " medium " , <nl> deleted file mode 100644 <nl> index 04aab329cd81c . . 0000000000000 <nl> mmm a / tensorflow / python / data / experimental / kernel_tests / serialization / numa_map_and_batch_dataset_serialization_test . py <nl> ppp / dev / null <nl> <nl> - # Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> - # <nl> - # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - # you may not use this file except in compliance with the License . <nl> - # You may obtain a copy of the License at <nl> - # <nl> - # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - # <nl> - # Unless required by applicable law or agreed to in writing , software <nl> - # distributed under the License is distributed on an " AS IS " BASIS , <nl> - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - # See the License for the specific language governing permissions and <nl> - # limitations under the License . <nl> - # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - " " " Tests for the MapAndBatchDataset serialization . " " " <nl> - from __future__ import absolute_import <nl> - from __future__ import division <nl> - from __future__ import print_function <nl> - <nl> - import math <nl> - <nl> - from tensorflow . python . data . experimental . kernel_tests . serialization import dataset_serialization_test_base <nl> - from tensorflow . python . data . experimental . 
ops import batching <nl> - from tensorflow . python . data . ops import dataset_ops <nl> - from tensorflow . python . ops import math_ops <nl> - from tensorflow . python . platform import test <nl> - <nl> - <nl> - class MapAndBatchDatasetSerializationTest ( <nl> - dataset_serialization_test_base . DatasetSerializationTestBase ) : <nl> - <nl> - def testNumParallelBatches ( self ) : <nl> - range_size = 11 <nl> - num_repeats = 2 <nl> - batch_size = 5 <nl> - total_outputs = range_size * num_repeats <nl> - num_outputs_drop_remainder = total_outputs / / batch_size <nl> - num_outputs_keep_remainder = int ( math . ceil ( total_outputs / batch_size ) ) <nl> - num_parallel_batches = 2 <nl> - <nl> - def build_ds ( range_start , drop_remainder = False ) : <nl> - <nl> - def _map_fn ( x ) : <nl> - return math_ops . square ( x ) <nl> - <nl> - ds = dataset_ops . Dataset . range ( <nl> - range_start , range_start + range_size ) . repeat ( num_repeats ) . apply ( <nl> - batching . map_and_batch ( <nl> - map_func = _map_fn , <nl> - batch_size = batch_size , <nl> - num_parallel_batches = num_parallel_batches , <nl> - drop_remainder = drop_remainder ) ) <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - return ds . with_options ( options ) <nl> - <nl> - self . run_core_tests ( lambda : build_ds ( 10 ) , lambda : build_ds ( 15 ) , <nl> - num_outputs_keep_remainder ) <nl> - self . run_core_tests ( lambda : build_ds ( 10 , True ) , lambda : build_ds ( 15 , True ) , <nl> - num_outputs_drop_remainder ) <nl> - <nl> - def testNumParallelCalls ( self ) : <nl> - range_size = 11 <nl> - num_repeats = 2 <nl> - batch_size = 5 <nl> - total_outputs = range_size * num_repeats <nl> - num_outputs_drop_remainder = total_outputs / / batch_size <nl> - num_outputs_keep_remainder = int ( math . ceil ( total_outputs / batch_size ) ) <nl> - num_parallel_calls = 7 <nl> - <nl> - def build_ds ( range_start , drop_remainder = False ) : <nl> - <nl> - def _map_fn ( x ) : <nl> - return math_ops . square ( x ) <nl> - <nl> - ds = dataset_ops . Dataset . range ( <nl> - range_start , range_start + range_size ) . repeat ( num_repeats ) . apply ( <nl> - batching . map_and_batch ( <nl> - map_func = _map_fn , <nl> - batch_size = batch_size , <nl> - num_parallel_calls = num_parallel_calls , <nl> - drop_remainder = drop_remainder ) ) <nl> - options = dataset_ops . Options ( ) <nl> - options . experimental_numa_aware = True <nl> - return ds . with_options ( options ) <nl> - <nl> - self . run_core_tests ( lambda : build_ds ( 10 ) , lambda : build_ds ( 15 ) , <nl> - num_outputs_keep_remainder ) <nl> - self . run_core_tests ( lambda : build_ds ( 10 , True ) , lambda : build_ds ( 15 , True ) , <nl> - num_outputs_drop_remainder ) <nl> - <nl> - <nl> - if __name__ = = " __main__ " : <nl> - test . main ( ) <nl> - <nl> mmm a / tensorflow / python / data / ops / dataset_ops . py <nl> ppp b / tensorflow / python / data / ops / dataset_ops . py <nl> class Options ( options_lib . OptionsBase ) : <nl> " Whether the outputs need to be produced in deterministic order . If None , " <nl> " defaults to True . " ) <nl> <nl> - experimental_numa_aware = options_lib . create_option ( <nl> - name = " experimental_numa_aware " , <nl> - ty = bool , <nl> - docstring = <nl> - " Whether to use NUMA - aware operations . If None , defaults to False . " ) <nl> - <nl> experimental_optimization = options_lib . create_option ( <nl> name = " experimental_optimization " , <nl> ty = optimization_options . 
OptimizationOptions , <nl> def _static_optimizations ( self ) : <nl> result = [ ] <nl> result . extend ( self . experimental_optimization . _static_optimizations ( ) ) # pylint : disable = protected - access <nl> <nl> - if self . experimental_numa_aware : <nl> - result . append ( " make_numa_aware " ) <nl> if self . experimental_deterministic is False : <nl> result . append ( " make_sloppy " ) <nl> exp_stats_options = self . experimental_stats <nl> mmm a / tensorflow / tools / api / golden / v1 / tensorflow . data . - options . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . data . - options . pbtxt <nl> tf_class { <nl> name : " experimental_deterministic " <nl> mtype : " < type \ ' property \ ' > " <nl> } <nl> - member { <nl> - name : " experimental_numa_aware " <nl> - mtype : " < type \ ' property \ ' > " <nl> - } <nl> member { <nl> name : " experimental_optimization " <nl> mtype : " < type \ ' property \ ' > " <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . data . - options . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . data . - options . pbtxt <nl> tf_class { <nl> name : " experimental_deterministic " <nl> mtype : " < type \ ' property \ ' > " <nl> } <nl> - member { <nl> - name : " experimental_numa_aware " <nl> - mtype : " < type \ ' property \ ' > " <nl> - } <nl> member { <nl> name : " experimental_optimization " <nl> mtype : " < type \ ' property \ ' > " <nl>
|
[ tf . data ] Removing unused experimental functionality .
|
tensorflow/tensorflow
|
8681fa62e11b81f526be9f3b6bd2a0656c26c784
|
2019-04-30T00:29:46Z
|
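The tensorflow/tensorflow row above deletes the NUMA-aware MapAndBatch kernel. Its NumaBlockManager is, at heart, a fixed circular buffer of kWindowSize batch slots walked by a few pointer indices, with separate condition variables for the runner (producer), the workers, and the client (consumer). Below is a minimal sketch of that bounded-ring handoff, assuming nothing beyond the C++ standard library; Ring, Push, Pop, and SetDone are illustrative names standing in for WaitForInputSpace/PushInputs, RetrieveInput/GetBatch, and SetEndOfInput, not TensorFlow APIs.

#include <array>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

constexpr size_t kWindowSize = 4;  // fixed window, as in the deleted kernel
enum class SlotState { kEmpty, kFilled };

struct Ring {
  std::array<int, kWindowSize> data{};
  std::array<SlotState, kWindowSize> state{};  // all slots start kEmpty
  size_t next_in = 0, next_out = 0;
  bool done = false;
  std::mutex mu;
  std::condition_variable producer_cv, consumer_cv;

  void Push(int v) {
    std::unique_lock<std::mutex> l(mu);
    // Producer blocks until the slot under next_in is free,
    // like WaitForInputSpace waiting on runner_cond_var_.
    producer_cv.wait(l, [&] { return state[next_in] == SlotState::kEmpty; });
    data[next_in] = v;
    state[next_in] = SlotState::kFilled;
    next_in = (next_in + 1) % kWindowSize;
    consumer_cv.notify_all();  // like worker_cond_var_.notify_all()
  }

  bool Pop(int* v) {
    std::unique_lock<std::mutex> l(mu);
    // Consumer blocks until a filled slot or end of input, like GetBatch.
    consumer_cv.wait(l, [&] { return state[next_out] == SlotState::kFilled || done; });
    if (state[next_out] != SlotState::kFilled) return false;  // drained after done
    *v = data[next_out];
    state[next_out] = SlotState::kEmpty;
    next_out = (next_out + 1) % kWindowSize;
    producer_cv.notify_all();  // like runner_cond_var_.notify_all()
    return true;
  }

  void SetDone() {  // mirrors SetEndOfInput()
    std::lock_guard<std::mutex> l(mu);
    done = true;
    consumer_cv.notify_all();
  }
};

int main() {
  Ring ring;
  std::thread producer([&] {
    for (int i = 0; i < 10; ++i) ring.Push(i);
    ring.SetDone();
  });
  int v;
  while (ring.Pop(&v)) std::cout << v << '\n';
  producer.join();
}

The done flag combined with the "filled or done" wait predicate reproduces the deleted kernel's end-of-input behavior: consumers keep draining filled slots after SetDone and only observe end-of-sequence once the slot under next_out is empty.
|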
mmm a / Marlin / stepper . cpp <nl> ppp b / Marlin / stepper . cpp <nl> void Stepper : : set_directions ( ) { <nl> ISR ( TIMER1_COMPA_vect ) { Stepper : : isr ( ) ; } <nl> <nl> void Stepper : : isr ( ) { <nl> + / / Disable Timer0 ISRs and enable global ISR again to capture UART events ( incoming chars ) <nl> + # if ENABLED ( ADVANCE ) | | ENABLED ( LIN_ADVANCE ) <nl> + CBI ( TIMSK0 , OCIE0A ) ; / / estepper ISR <nl> + # endif <nl> + CBI ( TIMSK0 , OCIE0B ) ; / / Temperature ISR <nl> + DISABLE_STEPPER_DRIVER_INTERRUPT ( ) ; <nl> + sei ( ) ; <nl> + <nl> if ( cleaning_buffer_counter ) { <nl> - - cleaning_buffer_counter ; <nl> current_block = NULL ; <nl> void Stepper : : isr ( ) { <nl> if ( ! cleaning_buffer_counter & & ( SD_FINISHED_STEPPERRELEASE ) ) enqueue_and_echo_commands_P ( PSTR ( SD_FINISHED_RELEASECOMMAND ) ) ; <nl> # endif <nl> OCR1A = 200 ; / / Run at max speed - 10 KHz <nl> + / / re - enable ISRs <nl> + # if ENABLED ( ADVANCE ) | | ENABLED ( LIN_ADVANCE ) <nl> + SBI ( TIMSK0 , OCIE0A ) ; <nl> + # endif <nl> + SBI ( TIMSK0 , OCIE0B ) ; <nl> + ENABLE_STEPPER_DRIVER_INTERRUPT ( ) ; <nl> return ; <nl> } <nl> <nl> void Stepper : : isr ( ) { <nl> if ( current_block - > steps [ Z_AXIS ] > 0 ) { <nl> enable_z ( ) ; <nl> OCR1A = 2000 ; / / Run at slow speed - 1 KHz <nl> + # if ENABLED ( ADVANCE ) | | ENABLED ( LIN_ADVANCE ) <nl> + SBI ( TIMSK0 , OCIE0A ) ; <nl> + # endif <nl> + SBI ( TIMSK0 , OCIE0B ) ; <nl> + ENABLE_STEPPER_DRIVER_INTERRUPT ( ) ; <nl> return ; <nl> } <nl> # endif <nl> void Stepper : : isr ( ) { <nl> } <nl> else { <nl> OCR1A = 2000 ; / / Run at slow speed - 1 KHz <nl> + # if ENABLED ( ADVANCE ) | | ENABLED ( LIN_ADVANCE ) <nl> + SBI ( TIMSK0 , OCIE0A ) ; <nl> + # endif <nl> + SBI ( TIMSK0 , OCIE0B ) ; <nl> + ENABLE_STEPPER_DRIVER_INTERRUPT ( ) ; <nl> return ; <nl> } <nl> } <nl> void Stepper : : isr ( ) { <nl> / / Take multiple steps per interrupt ( For high speed moves ) <nl> bool all_steps_done = false ; <nl> for ( int8_t i = 0 ; i < step_loops ; i + + ) { <nl> - # ifndef USBCON <nl> - customizedSerial . checkRx ( ) ; / / Check for serial chars . <nl> - # endif <nl> - <nl> # if ENABLED ( LIN_ADVANCE ) <nl> <nl> counter_E + = current_block - > steps [ E_AXIS ] ; <nl> void Stepper : : isr ( ) { <nl> current_block = NULL ; <nl> planner . discard_current_block ( ) ; <nl> } <nl> + # if ENABLED ( ADVANCE ) | | ENABLED ( LIN_ADVANCE ) <nl> + SBI ( TIMSK0 , OCIE0A ) ; <nl> + # endif <nl> + SBI ( TIMSK0 , OCIE0B ) ; <nl> + ENABLE_STEPPER_DRIVER_INTERRUPT ( ) ; <nl> } <nl> <nl> # if ENABLED ( ADVANCE ) | | ENABLED ( LIN_ADVANCE ) <nl> mmm a / Marlin / temperature . cpp <nl> ppp b / Marlin / temperature . cpp <nl> void Temperature : : set_current_temp_raw ( ) { <nl> ISR ( TIMER0_COMPB_vect ) { Temperature : : isr ( ) ; } <nl> <nl> void Temperature : : isr ( ) { <nl> + / / Allow UART and stepper ISRs <nl> + CBI ( TIMSK0 , OCIE0B ) ; / / Disable Temperature ISR <nl> + sei ( ) ; <nl> <nl> static uint8_t temp_count = 0 ; <nl> static TempState temp_state = StartupDelay ; <nl> void Temperature : : isr ( ) { <nl> if ( ! endstop_monitor_count ) endstop_monitor ( ) ; / / report changes in endstop status <nl> } <nl> # endif <nl> + <nl> + SBI ( TIMSK0 , OCIE0B ) ; / / re - enable Temperature ISR <nl> } <nl>
|
Merge pull request from Sebastianv650/Allow_UART-ISR_inside_Stepper
|
MarlinFirmware/Marlin
|
3f4c02e42f07cddf0120e9966abdee1ba472736e
|
2016-12-02T05:17:42Z
|
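The Marlin record above lets long-running stepper work coexist with serial input by masking only the specific interrupt sources that must not fire, then re-enabling global interrupts. A minimal sketch of that pattern for an AVR target follows; `SBI`/`CBI` are Marlin-style bit helpers defined here for the sketch, and `do_step_work()` stands in for the stepping loop.

```cpp
#include <avr/interrupt.h>
#include <avr/io.h>

#define SBI(reg, bit) ((reg) |= _BV(bit))   // set bit
#define CBI(reg, bit) ((reg) &= ~_BV(bit))  // clear bit

static void do_step_work() { /* long multi-step loop */ }

ISR(TIMER1_COMPA_vect) {
  CBI(TIMSK0, OCIE0B);  // mask the temperature ISR (Timer0 compare B)
  CBI(TIMSK1, OCIE1A);  // mask our own source so we cannot re-enter
  sei();                // global interrupts back on: UART RX keeps flowing

  do_step_work();       // the work that used to starve serial input

  SBI(TIMSK0, OCIE0B);  // restore the masked sources before returning
  SBI(TIMSK1, OCIE1A);
}
```

Masking the ISR's own source before `sei()` is the crucial step: without it, a pending compare match would re-enter the handler and corrupt its state.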
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> SET ( src - cuda THC . cu ) <nl> <nl> CUDA_ADD_LIBRARY ( THC SHARED $ { src } $ { src - cuda } ) <nl> CUDA_ADD_CUBLAS_TO_TARGET ( THC ) <nl> - TARGET_LINK_LIBRARIES ( THC TH ) <nl> + TARGET_LINK_LIBRARIES ( THC TH $ { CUDA_curand_LIBRARY } ) <nl> <nl> INSTALL ( TARGETS THC <nl> RUNTIME DESTINATION " $ { Torch_INSTALL_BIN_SUBDIR } " <nl> mmm a / THCTensorRandom . cu <nl> ppp b / THCTensorRandom . cu <nl> <nl> # include " THCTensorRandom . h " <nl> # include " THCGeneral . h " <nl> <nl> - # include < thrust / random . h > <nl> - # include < thrust / fill . h > <nl> # include < thrust / functional . h > <nl> - # include < thrust / reduce . h > <nl> - # include < thrust / inner_product . h > <nl> - # include < thrust / sequence . h > <nl> + # include < curand . h > <nl> <nl> - / * The initial seed . * / <nl> - __device__ static int initf = 0 ; <nl> - __device__ static unsigned long the_initial_seed = 0 ; <nl> - __device__ static unsigned long step = 0 ; <nl> + / * Generator * / <nl> + static curandGenerator_t gen ; <nl> <nl> - / * Max float precision * / <nl> - / * This is the max block size when drawing from a unique seed * / <nl> - # define MAXBLOCK ( 1 < < 24 ) <nl> + / * Initial seed * / <nl> + static int initf = 0 ; <nl> + static unsigned long initial_seed = 0 ; <nl> <nl> - / * Seeds * / <nl> + / * Random seed ( this must be called once ) * / <nl> __host__ unsigned long THCRandom_seed ( ) <nl> { <nl> - unsigned long s = ( unsigned long ) 1 ; / / TODO : this should be random <nl> + unsigned long s = ( unsigned long ) time ( 0 ) ; <nl> THCRandom_manualSeed ( s ) ; <nl> return s ; <nl> } <nl> <nl> - __host__ void THCRandom_manualSeed ( unsigned long the_seed_ ) <nl> + / * Manually set the seed * / <nl> + __host__ void THCRandom_manualSeed ( unsigned long seed ) <nl> { <nl> - the_initial_seed = the_seed_ ; <nl> + initial_seed = seed ; <nl> + if ( initf = = 1 ) curandDestroyGenerator ( gen ) ; <nl> + curandCreateGenerator ( & gen , CURAND_RNG_PSEUDO_MTGP32 ) ; <nl> + curandSetPseudoRandomGeneratorSeed ( gen , initial_seed ) ; <nl> initf = 1 ; <nl> } <nl> <nl> + / * Get the initial seed * / <nl> __host__ unsigned long THCRandom_initialSeed ( ) <nl> { <nl> - if ( initf = = 0 ) THCRandom_seed ( ) ; <nl> - return the_initial_seed ; <nl> + return initial_seed ; <nl> } <nl> <nl> - __host__ __device__ unsigned long THCRandom_random ( ) <nl> - { <nl> - thrust : : default_random_engine rng ( the_initial_seed ) ; rng . discard ( step + + ) ; <nl> - thrust : : uniform_int_distribution < unsigned long > ufm ( 0 , ( ( ( unsigned long ) 1 ) < < 31 ) - 1 ) ; <nl> - return ufm ( rng ) ; <nl> - } <nl> - <nl> - / * generates a random number on [ 0 , 1 ) - double - interval * / <nl> - __host__ __device__ static double __uniform__ ( ) <nl> - { <nl> - thrust : : default_random_engine rng ( the_initial_seed ) ; rng . 
discard ( step + + ) ; <nl> - thrust : : uniform_real_distribution < double > ufm ( 0 , 1 ) ; <nl> - return ufm ( rng ) ; <nl> - } <nl> - <nl> - __host__ __device__ unsigned long THCRandom_random1 ( long b ) <nl> - { <nl> - / / THArgCheck ( b > 0 , 1 , " upper bound must be strictly positive " ) ; <nl> - return ( THCRandom_random ( ) % b + 1 ) ; <nl> - } <nl> - <nl> - __host__ __device__ unsigned long THCRandom_random2 ( long a , long b ) <nl> - { <nl> - / / THArgCheck ( b > = a , 2 , " upper bound must be larger than lower bound " ) ; <nl> - return ( ( THCRandom_random ( ) % ( b + 1 - a ) ) + a ) ; <nl> - } <nl> - <nl> - __host__ __device__ double THCRandom_uniform ( double a , double b ) <nl> - { <nl> - return ( __uniform__ ( ) * ( b - a ) + a ) ; <nl> - } <nl> - <nl> - __host__ __device__ double THCRandom_normal ( double mean , double stdv ) <nl> - { <nl> - / / THArgCheck ( stdv > 0 , 2 , " standard deviation must be strictly positive " ) ; <nl> - thrust : : default_random_engine rng ( the_initial_seed ) ; rng . discard ( step + + ) ; <nl> - thrust : : random : : experimental : : normal_distribution < double > normal ( mean , stdv ) ; <nl> - return normal ( rng ) ; <nl> - } <nl> - <nl> - __host__ __device__ double THCRandom_exponential ( double lambda ) <nl> - { <nl> - return ( - 1 . / lambda * log ( 1 - __uniform__ ( ) ) ) ; <nl> - } <nl> - <nl> - __host__ __device__ double THCRandom_cauchy ( double median , double sigma ) <nl> - { <nl> - return ( median + sigma * tan ( M_PI * ( __uniform__ ( ) - 0 . 5 ) ) ) ; <nl> - } <nl> - <nl> - __host__ __device__ double THCRandom_logNormal ( double mean , double stdv ) <nl> - { <nl> - / / THArgCheck ( stdv > 0 , 2 , " standard deviation must be strictly positive " ) ; <nl> - double zm = mean * mean ; <nl> - double zs = stdv * stdv ; <nl> - thrust : : default_random_engine rng ( the_initial_seed ) ; rng . discard ( step + + ) ; <nl> - thrust : : random : : experimental : : normal_distribution < double > normal ( log ( zm / sqrt ( zs + zm ) ) , sqrt ( log ( zs / zm + 1 ) ) ) ; <nl> - return exp ( normal ( rng ) ) ; <nl> - } <nl> - <nl> - __host__ __device__ int THCRandom_geometric ( double p ) <nl> - { <nl> - / / THArgCheck ( p > 0 & & p < 1 , 1 , " must be > 0 and < 1 " ) ; <nl> - return ( ( int ) ( log ( 1 - __uniform__ ( ) ) / log ( p ) ) + 1 ) ; <nl> - } <nl> - <nl> - __host__ __device__ int THCRandom_bernoulli ( double p ) <nl> - { <nl> - / / THArgCheck ( p > 0 & & p < 1 , 1 , " must be > 0 and < 1 " ) ; <nl> - return ( __uniform__ ( ) < = p ) ; <nl> - } <nl> - <nl> - struct random_functor <nl> + / * The following functors are use to modify uniform distributions * / <nl> + struct bernoulli_functor <nl> { <nl> - const long step ; <nl> - <nl> - random_functor ( long step_ ) : step ( step_ ) { } <nl> + const double p ; <nl> + bernoulli_functor ( double p_ ) : p ( p_ ) { } <nl> <nl> __host__ __device__ float operator ( ) ( const float & x ) const <nl> { <nl> - thrust : : default_random_engine rng ( the_initial_seed + step ) ; rng . 
discard ( x ) ; <nl> - thrust : : uniform_int_distribution < unsigned long > ufm ( 0 , ( ( ( unsigned long ) 1 ) < < 31 ) - 1 ) ; <nl> - unsigned long r = ufm ( rng ) ; <nl> - return ( float ) ( r % ( ( 1UL < < FLT_MANT_DIG ) + 1 ) ) ; <nl> + return ( float ) ( x < = p ) ; <nl> } <nl> } ; <nl> <nl> - TH_API void THCudaTensor_random ( THCudaTensor * self_ ) { <nl> - THCudaTensor * self = THCudaTensor_newContiguous ( self_ ) ; <nl> - long size = THCudaTensor_nElement ( self ) ; <nl> - thrust : : device_ptr < float > self_data ( THCudaTensor_data ( self ) ) ; <nl> - <nl> - while ( true ) { <nl> - long bsize = size < MAXBLOCK ? size : MAXBLOCK ; <nl> - thrust : : sequence ( self_data , self_data + bsize , 0 ) ; <nl> - thrust : : transform ( self_data , self_data + bsize , self_data , random_functor ( step ) ) ; <nl> - step + = bsize ; <nl> - self_data + = bsize ; <nl> - size - = bsize ; <nl> - if ( bsize = = 0 ) break ; <nl> - } <nl> - <nl> - THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> - } ; <nl> - <nl> - struct random1_functor <nl> + struct geometric_functor <nl> { <nl> - const long b ; <nl> - const long step ; <nl> - <nl> - random1_functor ( long b_ , long step_ ) : b ( b_ ) , step ( step_ ) { } <nl> + const double p ; <nl> + geometric_functor ( double p_ ) : p ( p_ ) { } <nl> <nl> __host__ __device__ float operator ( ) ( const float & x ) const <nl> { <nl> - thrust : : default_random_engine rng ( the_initial_seed + step ) ; rng . discard ( x ) ; <nl> - thrust : : uniform_int_distribution < unsigned long > ufm ( 0 , ( ( ( unsigned long ) 1 ) < < 31 ) - 1 ) ; <nl> - unsigned long r = ufm ( rng ) ; <nl> - return ( float ) ( r % b + 1 ) ; <nl> + return ( float ) ( ( log ( 1 - x ) / log ( p ) ) + 1 ) ; <nl> } <nl> } ; <nl> <nl> - TH_API void THCudaTensor_random1 ( THCudaTensor * self_ , long b ) { <nl> - THCudaTensor * self = THCudaTensor_newContiguous ( self_ ) ; <nl> - long size = THCudaTensor_nElement ( self ) ; <nl> - thrust : : device_ptr < float > self_data ( THCudaTensor_data ( self ) ) ; <nl> - <nl> - while ( true ) { <nl> - long bsize = size < MAXBLOCK ? size : MAXBLOCK ; <nl> - thrust : : sequence ( self_data , self_data + bsize , 0 ) ; <nl> - thrust : : transform ( self_data , self_data + bsize , self_data , random1_functor ( b , step ) ) ; <nl> - step + = bsize ; <nl> - self_data + = bsize ; <nl> - size - = bsize ; <nl> - if ( bsize = = 0 ) break ; <nl> - } <nl> - <nl> - THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> - } ; <nl> - <nl> - struct random2_functor <nl> + struct exponential_functor <nl> { <nl> - const long a , b ; <nl> - const long step ; <nl> - <nl> - random2_functor ( long a_ , long b_ , long step_ ) : a ( a_ ) , b ( b_ ) , step ( step_ ) { } <nl> + const double lambda ; <nl> + exponential_functor ( double lambda_ ) : lambda ( lambda_ ) { } <nl> <nl> __host__ __device__ float operator ( ) ( const float & x ) const <nl> { <nl> - thrust : : default_random_engine rng ( the_initial_seed + step ) ; rng . discard ( x ) ; <nl> - thrust : : uniform_int_distribution < unsigned long > ufm ( 0 , ( ( ( unsigned long ) 1 ) < < 31 ) - 1 ) ; <nl> - unsigned long r = ufm ( rng ) ; <nl> - return ( float ) ( ( r % ( b + 1 - a ) ) + a ) ; <nl> + return ( float ) ( - 1 . 
/ lambda * log ( 1 - x ) ) ; <nl> } <nl> } ; <nl> <nl> - TH_API void THCudaTensor_random2 ( THCudaTensor * self_ , long a , long b ) { <nl> - THCudaTensor * self = THCudaTensor_newContiguous ( self_ ) ; <nl> - long size = THCudaTensor_nElement ( self ) ; <nl> - thrust : : device_ptr < float > self_data ( THCudaTensor_data ( self ) ) ; <nl> - <nl> - while ( true ) { <nl> - long bsize = size < MAXBLOCK ? size : MAXBLOCK ; <nl> - thrust : : sequence ( self_data , self_data + bsize , 0 ) ; <nl> - thrust : : transform ( self_data , self_data + bsize , self_data , random2_functor ( a , b , step ) ) ; <nl> - step + = bsize ; <nl> - self_data + = bsize ; <nl> - size - = bsize ; <nl> - if ( bsize = = 0 ) break ; <nl> - } <nl> - <nl> - THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> - } ; <nl> - <nl> - struct bernoulli_functor <nl> + struct cauchy_functor <nl> { <nl> - const double p ; <nl> - const long step ; <nl> - <nl> - bernoulli_functor ( double p_ , long step_ ) : p ( p_ ) , step ( step_ ) { } <nl> + const double median , sigma ; <nl> + cauchy_functor ( double median_ , double sigma_ ) : median ( median_ ) , sigma ( sigma_ ) { } <nl> <nl> __host__ __device__ float operator ( ) ( const float & x ) const <nl> { <nl> - thrust : : default_random_engine rng ( the_initial_seed + step ) ; rng . discard ( x ) ; <nl> - thrust : : uniform_real_distribution < float > uniform ( 0 , 1 ) ; <nl> - return ( float ) ( uniform ( rng ) < = p ) ; <nl> + return ( float ) ( median + sigma * tan ( M_PI * ( x - 0 . 5 ) ) ) ; <nl> } <nl> } ; <nl> <nl> - TH_API void THCudaTensor_bernoulli ( THCudaTensor * self_ , double p ) { <nl> + TH_API void THCudaTensor_uniform ( THCudaTensor * self_ , double a , double b ) { <nl> THCudaTensor * self = THCudaTensor_newContiguous ( self_ ) ; <nl> long size = THCudaTensor_nElement ( self ) ; <nl> - thrust : : device_ptr < float > self_data ( THCudaTensor_data ( self ) ) ; <nl> - <nl> - while ( true ) { <nl> - long bsize = size < MAXBLOCK ? size : MAXBLOCK ; <nl> - thrust : : sequence ( self_data , self_data + bsize , 0 ) ; <nl> - thrust : : transform ( self_data , self_data + bsize , self_data , bernoulli_functor ( p , step ) ) ; <nl> - step + = bsize ; <nl> - self_data + = bsize ; <nl> - size - = bsize ; <nl> - if ( bsize = = 0 ) break ; <nl> - } <nl> + float * data = THCudaTensor_data ( self ) ; <nl> <nl> - THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> - } ; <nl> - <nl> - struct uniform_functor <nl> - { <nl> - const double a , b ; <nl> - const long step ; <nl> - <nl> - uniform_functor ( double a_ , double b_ , long step_ ) : a ( a_ ) , b ( b_ ) , step ( step_ ) { } <nl> + curandGenerateUniform ( gen , data , size ) ; <nl> <nl> - __host__ __device__ float operator ( ) ( const float & x ) const <nl> - { <nl> - thrust : : default_random_engine rng ( the_initial_seed + step ) ; rng . discard ( x ) ; <nl> - thrust : : uniform_real_distribution < float > uniform ( a , b ) ; <nl> - return uniform ( rng ) ; <nl> + if ( ( a ! = 0 ) | | ( b ! 
= 1 ) ) { <nl> + THCudaTensor_mul ( self , b - a ) ; <nl> + THCudaTensor_add ( self , a ) ; <nl> } <nl> + <nl> + THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> } ; <nl> <nl> - TH_API void THCudaTensor_uniform ( THCudaTensor * self_ , double a , double b ) { <nl> + TH_API void THCudaTensor_bernoulli ( THCudaTensor * self_ , double p ) { <nl> THCudaTensor * self = THCudaTensor_newContiguous ( self_ ) ; <nl> long size = THCudaTensor_nElement ( self ) ; <nl> - thrust : : device_ptr < float > self_data ( THCudaTensor_data ( self ) ) ; <nl> + float * data = THCudaTensor_data ( self ) ; <nl> + thrust : : device_ptr < float > tdata ( data ) ; <nl> <nl> - while ( true ) { <nl> - long bsize = size < MAXBLOCK ? size : MAXBLOCK ; <nl> - thrust : : sequence ( self_data , self_data + bsize , 0 ) ; <nl> - thrust : : transform ( self_data , self_data + bsize , self_data , uniform_functor ( a , b , step ) ) ; <nl> - step + = bsize ; <nl> - self_data + = bsize ; <nl> - size - = bsize ; <nl> - if ( bsize = = 0 ) break ; <nl> - } <nl> + curandGenerateUniform ( gen , data , size ) ; <nl> + <nl> + thrust : : transform ( tdata , tdata + size , tdata , bernoulli_functor ( p ) ) ; <nl> <nl> THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> } ; <nl> <nl> - struct normal_functor <nl> - { <nl> - const double mean , stdv ; <nl> - const long step ; <nl> - <nl> - normal_functor ( double mean_ , double stdv_ , long step_ ) : mean ( mean_ ) , stdv ( stdv_ ) , step ( step_ ) { } <nl> - <nl> - __host__ __device__ <nl> - float operator ( ) ( const float & x ) const <nl> - { <nl> - thrust : : default_random_engine rng ( the_initial_seed + step ) ; rng . discard ( x ) ; <nl> - thrust : : random : : experimental : : normal_distribution < float > normal ( mean , stdv ) ; <nl> - return normal ( rng ) ; <nl> - } <nl> - } ; <nl> - <nl> TH_API void THCudaTensor_normal ( THCudaTensor * self_ , double mean , double stdv ) { <nl> THCudaTensor * self = THCudaTensor_newContiguous ( self_ ) ; <nl> long size = THCudaTensor_nElement ( self ) ; <nl> - thrust : : device_ptr < float > self_data ( THCudaTensor_data ( self ) ) ; <nl> + float * data = THCudaTensor_data ( self ) ; <nl> <nl> - while ( true ) { <nl> - long bsize = size < MAXBLOCK ? size : MAXBLOCK ; <nl> - thrust : : sequence ( self_data , self_data + bsize , 0 ) ; <nl> - thrust : : transform ( self_data , self_data + bsize , self_data , normal_functor ( mean , stdv , step ) ) ; <nl> - step + = bsize ; <nl> - self_data + = bsize ; <nl> - size - = bsize ; <nl> - if ( bsize = = 0 ) break ; <nl> - } <nl> + curandGenerateNormal ( gen , data , size , mean , stdv ) ; <nl> <nl> THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> } ; <nl> <nl> - struct geometric_functor <nl> - { <nl> - const double p ; <nl> - const long step ; <nl> - <nl> - geometric_functor ( double p_ , long step_ ) : p ( p_ ) , step ( step_ ) { } <nl> - <nl> - __host__ __device__ float operator ( ) ( const float & x ) const <nl> - { <nl> - thrust : : default_random_engine rng ( the_initial_seed + step ) ; rng . 
discard ( x ) ; <nl> - thrust : : uniform_real_distribution < float > uniform ( 0 , 1 ) ; <nl> - float u = uniform ( rng ) ; <nl> - return ( float ) ( ( log ( 1 - u ) / log ( p ) ) + 1 ) ; <nl> - } <nl> - } ; <nl> - <nl> - TH_API void THCudaTensor_geometric ( THCudaTensor * self_ , double p ) { <nl> + TH_API void THCudaTensor_logNormal ( THCudaTensor * self_ , double mean , double stdv ) { <nl> THCudaTensor * self = THCudaTensor_newContiguous ( self_ ) ; <nl> long size = THCudaTensor_nElement ( self ) ; <nl> - thrust : : device_ptr < float > self_data ( THCudaTensor_data ( self ) ) ; <nl> + float * data = THCudaTensor_data ( self ) ; <nl> <nl> - while ( true ) { <nl> - long bsize = size < MAXBLOCK ? size : MAXBLOCK ; <nl> - thrust : : sequence ( self_data , self_data + bsize , 0 ) ; <nl> - thrust : : transform ( self_data , self_data + bsize , self_data , geometric_functor ( p , step ) ) ; <nl> - step + = bsize ; <nl> - self_data + = bsize ; <nl> - size - = bsize ; <nl> - if ( bsize = = 0 ) break ; <nl> - } <nl> + curandGenerateLogNormal ( gen , data , size , mean , stdv ) ; <nl> <nl> THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> } ; <nl> <nl> - struct exponential_functor <nl> - { <nl> - const double lambda ; <nl> - const long step ; <nl> - <nl> - exponential_functor ( double lambda_ , long step_ ) : lambda ( lambda_ ) , step ( step_ ) { } <nl> - <nl> - __host__ __device__ float operator ( ) ( const float & x ) const <nl> - { <nl> - thrust : : default_random_engine rng ( the_initial_seed + step ) ; rng . discard ( x ) ; <nl> - thrust : : uniform_real_distribution < float > uniform ( 0 , 1 ) ; <nl> - float u = uniform ( rng ) ; <nl> - return ( float ) ( - 1 . / lambda * log ( 1 - u ) ) ; <nl> - } <nl> - } ; <nl> - <nl> - TH_API void THCudaTensor_exponential ( THCudaTensor * self_ , double lambda ) { <nl> + TH_API void THCudaTensor_geometric ( THCudaTensor * self_ , double p ) { <nl> THCudaTensor * self = THCudaTensor_newContiguous ( self_ ) ; <nl> long size = THCudaTensor_nElement ( self ) ; <nl> - thrust : : device_ptr < float > self_data ( THCudaTensor_data ( self ) ) ; <nl> + float * data = THCudaTensor_data ( self ) ; <nl> + thrust : : device_ptr < float > tdata ( data ) ; <nl> <nl> - while ( true ) { <nl> - long bsize = size < MAXBLOCK ? size : MAXBLOCK ; <nl> - thrust : : sequence ( self_data , self_data + bsize , 0 ) ; <nl> - thrust : : transform ( self_data , self_data + bsize , self_data , exponential_functor ( lambda , step ) ) ; <nl> - step + = bsize ; <nl> - self_data + = bsize ; <nl> - size - = bsize ; <nl> - if ( bsize = = 0 ) break ; <nl> - } <nl> + curandGenerateUniform ( gen , data , size ) ; <nl> + <nl> + thrust : : transform ( tdata , tdata + size , tdata , geometric_functor ( p ) ) ; <nl> <nl> THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> } ; <nl> <nl> - struct cauchy_functor <nl> - { <nl> - const double median , sigma ; <nl> - const long step ; <nl> - <nl> - cauchy_functor ( double median_ , double sigma_ , long step_ ) : median ( median_ ) , sigma ( sigma_ ) , step ( step_ ) { } <nl> - <nl> - __host__ __device__ float operator ( ) ( const float & x ) const <nl> - { <nl> - thrust : : default_random_engine rng ( the_initial_seed + step ) ; rng . discard ( x ) ; <nl> - thrust : : uniform_real_distribution < float > uniform ( 0 , 1 ) ; <nl> - float u = uniform ( rng ) ; <nl> - return ( float ) ( median + sigma * tan ( M_PI * ( u - 0 . 
5 ) ) ) ; <nl> - } <nl> - } ; <nl> - <nl> - TH_API void THCudaTensor_cauchy ( THCudaTensor * self_ , double median , double sigma ) { <nl> + TH_API void THCudaTensor_exponential ( THCudaTensor * self_ , double lambda ) { <nl> THCudaTensor * self = THCudaTensor_newContiguous ( self_ ) ; <nl> long size = THCudaTensor_nElement ( self ) ; <nl> - thrust : : device_ptr < float > self_data ( THCudaTensor_data ( self ) ) ; <nl> + float * data = THCudaTensor_data ( self ) ; <nl> + thrust : : device_ptr < float > tdata ( data ) ; <nl> <nl> - while ( true ) { <nl> - long bsize = size < MAXBLOCK ? size : MAXBLOCK ; <nl> - thrust : : sequence ( self_data , self_data + bsize , 0 ) ; <nl> - thrust : : transform ( self_data , self_data + bsize , self_data , cauchy_functor ( median , sigma , step ) ) ; <nl> - step + = bsize ; <nl> - self_data + = bsize ; <nl> - size - = bsize ; <nl> - if ( bsize = = 0 ) break ; <nl> - } <nl> + curandGenerateUniform ( gen , data , size ) ; <nl> + <nl> + thrust : : transform ( tdata , tdata + size , tdata , exponential_functor ( lambda ) ) ; <nl> <nl> THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> } ; <nl> <nl> - struct logNormal_functor <nl> - { <nl> - const double mean , stdv ; <nl> - const long step ; <nl> - <nl> - logNormal_functor ( double mean_ , double stdv_ , long step_ ) : mean ( mean_ ) , stdv ( stdv_ ) , step ( step_ ) { } <nl> - <nl> - __host__ __device__ float operator ( ) ( const float & x ) const <nl> - { <nl> - double zm = mean * mean ; <nl> - double zs = stdv * stdv ; <nl> - thrust : : default_random_engine rng ( the_initial_seed + step ) ; rng . discard ( x ) ; <nl> - thrust : : random : : experimental : : normal_distribution < double > normal ( log ( zm / sqrt ( zs + zm ) ) , sqrt ( log ( zs / zm + 1 ) ) ) ; <nl> - return exp ( normal ( rng ) ) ; <nl> - } <nl> - } ; <nl> - <nl> - TH_API void THCudaTensor_logNormal ( THCudaTensor * self_ , double mean , double stdv ) { <nl> + TH_API void THCudaTensor_cauchy ( THCudaTensor * self_ , double median , double sigma ) { <nl> THCudaTensor * self = THCudaTensor_newContiguous ( self_ ) ; <nl> long size = THCudaTensor_nElement ( self ) ; <nl> - thrust : : device_ptr < float > self_data ( THCudaTensor_data ( self ) ) ; <nl> + float * data = THCudaTensor_data ( self ) ; <nl> + thrust : : device_ptr < float > tdata ( data ) ; <nl> <nl> - while ( true ) { <nl> - long bsize = size < MAXBLOCK ? size : MAXBLOCK ; <nl> - thrust : : sequence ( self_data , self_data + bsize , 0 ) ; <nl> - thrust : : transform ( self_data , self_data + bsize , self_data , logNormal_functor ( mean , stdv , step ) ) ; <nl> - step + = bsize ; <nl> - self_data + = bsize ; <nl> - size - = bsize ; <nl> - if ( bsize = = 0 ) break ; <nl> - } <nl> + curandGenerateUniform ( gen , data , size ) ; <nl> + <nl> + thrust : : transform ( tdata , tdata + size , tdata , cauchy_functor ( median , sigma ) ) ; <nl> <nl> THCudaTensor_freeCopyTo ( self , self_ ) ; <nl> } ; <nl> mmm a / THCTensorRandom . h <nl> ppp b / THCTensorRandom . 
h <nl> <nl> TH_API unsigned long THCRandom_seed ( ) ; <nl> TH_API void THCRandom_manualSeed ( unsigned long the_seed_ ) ; <nl> TH_API unsigned long THCRandom_initialSeed ( ) ; <nl> - TH_API unsigned long THCRandom_random ( ) ; <nl> - TH_API unsigned long THCRandom_random1 ( long b ) ; <nl> - TH_API unsigned long THCRandom_random2 ( long a , long b ) ; <nl> - TH_API double THCRandom_uniform ( double a , double b ) ; <nl> - TH_API double THCRandom_normal ( double mean , double stdv ) ; <nl> - TH_API double THCRandom_exponential ( double lambda ) ; <nl> - TH_API double THCRandom_cauchy ( double median , double sigma ) ; <nl> - TH_API double THCRandom_logNormal ( double mean , double stdv ) ; <nl> - TH_API int THCRandom_geometric ( double p ) ; <nl> - TH_API int THCRandom_bernoulli ( double p ) ; <nl> <nl> - TH_API void THCudaTensor_random ( THCudaTensor * self ) ; <nl> - TH_API void THCudaTensor_random1 ( THCudaTensor * self , long b ) ; <nl> - TH_API void THCudaTensor_random2 ( THCudaTensor * self , long a , long b ) ; <nl> + / / TH_API void THCudaTensor_random ( THCudaTensor * self ) ; <nl> + / / TH_API void THCudaTensor_random1 ( THCudaTensor * self , long b ) ; <nl> + / / TH_API void THCudaTensor_random2 ( THCudaTensor * self , long a , long b ) ; <nl> TH_API void THCudaTensor_geometric ( THCudaTensor * self , double p ) ; <nl> TH_API void THCudaTensor_bernoulli ( THCudaTensor * self , double p ) ; <nl> TH_API void THCudaTensor_uniform ( THCudaTensor * self , double a , double b ) ; <nl>
|
Complete (and final) revamping of the CUDA rand engine.
|
pytorch/pytorch
|
07f582c35ca24542227b4c1158b02eda78085953
|
2013-07-24T20:44:54Z
|
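The pytorch record above replaces per-element Thrust RNG functors with a single process-wide cuRAND generator that fills whole device buffers in one call. A minimal host-side C++ sketch of that flow, using the real cuRAND/CUDA runtime entry points named in the diff (error checks omitted for brevity):

```cpp
#include <cuda_runtime.h>
#include <curand.h>

int main() {
  // One generator, seeded once, mirroring THCRandom_manualSeed above.
  curandGenerator_t gen;
  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32);
  curandSetPseudoRandomGeneratorSeed(gen, 1234UL);

  const size_t size = 1 << 20;  // even, as curandGenerateNormal requires
  float* data = nullptr;
  cudaMalloc(&data, size * sizeof(float));

  curandGenerateUniform(gen, data, size);           // fills with U(0, 1)
  curandGenerateNormal(gen, data, size, 0.f, 1.f);  // fills with N(mean, stdv)

  cudaFree(data);
  curandDestroyGenerator(gen);
  return 0;
}
```

Distributions without a native generator call (bernoulli, geometric, exponential, cauchy) are derived in the diff by generating uniforms and then applying a `thrust::transform` functor over the same buffer.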
mmm a / lib / Sema / CSSolver . cpp <nl> ppp b / lib / Sema / CSSolver . cpp <nl> void ConstraintSystem : : sortDesignatedTypes ( <nl> <nl> for ( auto * protocol : argInfo . getLiteralProtocols ( ) ) { <nl> auto defaultType = TC . getDefaultType ( protocol , DC ) ; <nl> + / / ExpressibleByNilLiteral does not have a default type . <nl> + if ( ! defaultType ) <nl> + continue ; <nl> auto * nominal = defaultType - > getAnyNominal ( ) ; <nl> for ( size_t i = nextType + 1 ; i < nominalTypes . size ( ) ; + + i ) { <nl> if ( nominal = = nominalTypes [ i ] ) { <nl> new file mode 100644 <nl> index 000000000000 . . 144dc7bc8908 <nl> mmm / dev / null <nl> ppp b / test / Constraints / add_with_nil . swift <nl> <nl> + / / RUN : % target - typecheck - verify - swift - swift - version 5 - solver - enable - operator - designated - types - solver - disable - shrink - disable - constraint - solver - performance - hacks <nl> + <nl> + func test ( _ x : Int ) - > Int { <nl> + return x + nil <nl> + / / expected - error @ - 1 { { cannot convert value of type ' Int ' to expected argument type ' _ . Stride ' } } <nl> + } <nl>
|
Merge remote-tracking branch 'origin/master' into master-next
|
apple/swift
|
a604b678b2ff7ec822ea758ca431d64ddf8227e0
|
2018-11-07T22:30:04Z
|
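The Swift record above adds a null guard: a default-type lookup can legitimately fail (ExpressibleByNilLiteral has no default type), so the result must be tested before dereferencing. A stand-in C++ sketch of the guard; all types here are illustrative stubs, not the real constraint-solver API.

```cpp
#include <vector>

struct Nominal {};
struct Type {
  Nominal* nominal = nullptr;
  explicit operator bool() const { return nominal != nullptr; }
  Nominal* getAnyNominal() const { return nominal; }
};
struct Protocol {};

Type getDefaultType(Protocol*) { return {}; }  // stub: no default type

void rankNominals(const std::vector<Protocol*>& protocols) {
  for (auto* protocol : protocols) {
    Type defaultType = getDefaultType(protocol);
    if (!defaultType)  // previously dereferenced unconditionally -> crash
      continue;
    Nominal* nominal = defaultType.getAnyNominal();
    (void)nominal;  // ... use `nominal` to reorder candidate types ...
  }
}
```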
mmm a / src / app / ui / preview_editor . cpp <nl> ppp b / src / app / ui / preview_editor . cpp <nl> void PreviewEditorWindow : : onCenterClicked ( ) <nl> <nl> void PreviewEditorWindow : : onPlayClicked ( ) <nl> { <nl> + Editor * miniEditor = ( m_docView ? m_docView - > getEditor ( ) : nullptr ) ; <nl> + <nl> if ( m_playButton - > isPlaying ( ) ) { <nl> - Editor * miniEditor = ( m_docView ? m_docView - > getEditor ( ) : NULL ) ; <nl> - if ( miniEditor & & miniEditor - > document ( ) ! = NULL ) <nl> + if ( miniEditor & & miniEditor - > document ( ) ) <nl> m_nextFrameTime = miniEditor - > sprite ( ) - > frameDuration ( miniEditor - > frame ( ) ) ; <nl> else <nl> m_nextFrameTime = - 1 ; <nl> void PreviewEditorWindow : : onPlayClicked ( ) <nl> } <nl> else { <nl> m_playTimer . stop ( ) ; <nl> + <nl> + if ( miniEditor ) <nl> + miniEditor - > setFrame ( m_refFrame ) ; <nl> } <nl> } <nl> <nl>
|
Return to the current frame when we stop the animation in Preview window
|
aseprite/aseprite
|
7724c212e84bd1a7bd08042bb0f03ee0aa85b72b
|
2015-03-11T19:20:19Z
|
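The aseprite record above makes the preview jump back to the frame the user was on when playback stops. A small self-contained sketch of that idea with hypothetical names (the real code hangs off `onPlayClicked` and `m_refFrame`):

```cpp
// Sketch: capture a reference frame when playback starts, restore it on stop.
class PreviewPlayback {
  int m_refFrame = 0;
  bool m_playing = false;

public:
  void togglePlay(int currentFrame) {
    if (!m_playing) {
      m_refFrame = currentFrame;  // remember where the user was
      m_playing = true;           // start the play timer ...
    } else {
      m_playing = false;          // ... stop the play timer
      seek(m_refFrame);           // and return to the reference frame
    }
  }

private:
  void seek(int frame) { /* move the preview editor to `frame` */ }
};
```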
mmm a / scala - package / core / src / main / scala / org / apache / mxnet / javaapi / Context . scala <nl> ppp b / scala - package / core / src / main / scala / org / apache / mxnet / javaapi / Context . scala <nl> import collection . JavaConverters . _ <nl> * @ param deviceTypeName { ' cpu ' , ' gpu ' } String representing the device type <nl> * @ param deviceId The device id of the device , needed for GPU <nl> * / <nl> - class Context ( val context : org . apache . mxnet . Context ) { <nl> + class Context private [ mxnet ] ( val context : org . apache . mxnet . Context ) { <nl> <nl> val deviceTypeid : Int = context . deviceTypeid <nl> <nl> mmm a / scala - package / core / src / main / scala / org / apache / mxnet / javaapi / IO . scala <nl> ppp b / scala - package / core / src / main / scala / org / apache / mxnet / javaapi / IO . scala <nl> <nl> <nl> package org . apache . mxnet . javaapi <nl> <nl> - class DataDesc ( val dataDesc : org . apache . mxnet . DataDesc ) { <nl> + class DataDesc private [ mxnet ] ( val dataDesc : org . apache . mxnet . DataDesc ) { <nl> <nl> def this ( name : String , shape : Shape , dType : DType . DType , layout : String ) = <nl> this ( new org . apache . mxnet . DataDesc ( name , shape , dType , layout ) ) <nl> mmm a / scala - package / core / src / main / scala / org / apache / mxnet / javaapi / NDArray . scala <nl> ppp b / scala - package / core / src / main / scala / org / apache / mxnet / javaapi / NDArray . scala <nl> object NDArray extends NDArrayBase { <nl> * will result in leaking native memory . <nl> * < / b > <nl> * / <nl> - class NDArray ( val nd : org . apache . mxnet . NDArray ) { <nl> + class NDArray private [ mxnet ] ( val nd : org . apache . mxnet . NDArray ) { <nl> <nl> def this ( arr : Array [ Float ] , shape : Shape , ctx : Context ) = { <nl> this ( org . apache . mxnet . NDArray . array ( arr , shape , ctx ) ) <nl> mmm a / scala - package / core / src / main / scala / org / apache / mxnet / javaapi / Shape . scala <nl> ppp b / scala - package / core / src / main / scala / org / apache / mxnet / javaapi / Shape . scala <nl> import collection . JavaConverters . _ <nl> * Shape of [ [ NDArray ] ] or other data <nl> * / <nl> <nl> - class Shape ( val shape : org . apache . mxnet . Shape ) { <nl> + class Shape private [ mxnet ] ( val shape : org . apache . mxnet . Shape ) { <nl> def this ( dims : java . util . List [ java . lang . Integer ] ) <nl> = this ( new org . apache . mxnet . Shape ( dims . asScala . map ( Int . unbox ) ) ) <nl> def this ( dims : Array [ Int ] ) = this ( new org . apache . mxnet . Shape ( dims ) ) <nl> mmm a / scala - package / examples / scripts / infer / objectdetector / run_ssd_example . sh <nl> ppp b / scala - package / examples / scripts / infer / objectdetector / run_ssd_example . sh <nl> <nl> # under the License . <nl> <nl> hw_type = cpu <nl> - if [ [ $ 1 = gpu ] ] <nl> + if [ [ $ 4 = gpu ] ] <nl> then <nl> hw_type = gpu <nl> fi <nl> mmm a / scala - package / examples / src / main / java / org / apache / mxnetexamples / javaapi / infer / objectdetector / README . md <nl> ppp b / scala - package / examples / src / main / java / org / apache / mxnetexamples / javaapi / infer / objectdetector / README . md <nl> <nl> - # Single Shot Multi Object Detection using Scala Inference API <nl> + # Single Shot Multi Object Detection using Java Inference API <nl> <nl> - In this example , you will learn how to use Scala Inference API to run Inference on pre - trained Single Shot Multi Object Detection ( SSD ) MXNet model . 
<nl> + In this example , you will learn how to use Java Inference API to run Inference on pre - trained Single Shot Multi Object Detection ( SSD ) MXNet model . <nl> <nl> The model is trained on the [ Pascal VOC 2012 dataset ] ( http : / / host . robots . ox . ac . uk / pascal / VOC / voc2012 / index . html ) . The network is a SSD model built on Resnet50 as base network to extract image features . The model is trained to detect the following entities ( classes ) : [ ' aeroplane ' , ' bicycle ' , ' bird ' , ' boat ' , ' bottle ' , ' bus ' , ' car ' , ' cat ' , ' chair ' , ' cow ' , ' diningtable ' , ' dog ' , ' horse ' , ' motorbike ' , ' person ' , ' pottedplant ' , ' sheep ' , ' sofa ' , ' train ' , ' tvmonitor ' ] . For more details about the model , you can refer to the [ MXNet SSD example ] ( https : / / github . com / apache / incubator - mxnet / tree / master / example / ssd ) . <nl> <nl> The model is trained on the [ Pascal VOC 2012 dataset ] ( http : / / host . robots . ox . ac . u <nl> <nl> 1 . MXNet <nl> 2 . MXNet Scala Package <nl> - 3 . [ IntelliJ IDE ( or alternative IDE ) project setup ] ( http : / / mxnet . incubator . apache . org / tutorials / scala / mxnet_scala_on_intellij . html ) with the MXNet Scala Package <nl> + 3 . [ IntelliJ IDE ( or alternative IDE ) project setup ] ( http : / / mxnet . incubator . apache . org / tutorials / java / mxnet_java_on_intellij . html ) with the MXNet Scala / Java Package <nl> 4 . wget <nl> <nl> <nl> The model is trained on the [ Pascal VOC 2012 dataset ] ( http : / / host . robots . ox . ac . u <nl> # # # Download Artifacts <nl> # # # # Step 1 <nl> You can download the files using the script ` get_ssd_data . sh ` . It will download and place the model files in a ` model ` folder and the test image files in a ` image ` folder in the current directory . <nl> - From the ` scala - package / examples / scripts / infer / imageclassifier / ` folder run : <nl> + From the ` scala - package / examples / scripts / infer / objectdetector / ` folder run : <nl> <nl> ` ` ` bash <nl> . / get_ssd_data . sh <nl> ` ` ` <nl> <nl> - * * Note * * : You may need to run ` chmod + x get_resnet_data . sh ` before running this script . <nl> - <nl> - Alternatively use the following links to download the Symbol and Params files via your browser : <nl> - - [ resnet50_ssd_model - symbol . json ] ( https : / / s3 . amazonaws . com / model - server / models / resnet50_ssd / resnet50_ssd_model - symbol . json ) <nl> - - [ resnet50_ssd_model - 0000 . params ] ( https : / / s3 . amazonaws . com / model - server / models / resnet50_ssd / resnet50_ssd_model - 0000 . params ) <nl> - - [ synset . txt ] ( https : / / github . com / awslabs / mxnet - model - server / blob / master / examples / ssd / synset . txt ) <nl> + * * Note * * : You may need to run ` chmod + x get_ssd_data . sh ` before running this script . <nl> <nl> In the pre - trained model , the ` input_name ` is ` data ` and shape is ` ( 1 , 3 , 512 , 512 ) ` . <nl> This shape translates to : a batch of ` 1 ` image , the image has color and uses ` 3 ` channels ( RGB ) , and the image has the dimensions of ` 512 ` pixels in height by ` 512 ` pixels in width . <nl> The output shape is ` ( 1 , 6132 , 6 ) ` . As with the input , the ` 1 ` is the number of <nl> <nl> # # # Setup Datapath and Parameters <nl> # # # # Step 2 <nl> - The code ` Line 31 : val baseDir = System . getProperty ( " user . dir " ) ` in the example will automatically searches the work directory you have defined . 
Please put the files in your [ work directory ] ( https : / / stackoverflow . com / questions / 16239130 / java - user - dir - property - what - exactly - does - it - mean ) . < ! - - how do you define the work directory ? - - > <nl> - <nl> - Alternatively , if you would like to use your own path , please change line 31 into your own path <nl> - ` ` ` scala <nl> - val baseDir = < Your Own Path > <nl> - ` ` ` <nl> - <nl> The followings is the parameters defined for this example , you can find more information in the ` class SSDClassifierExample ` . <nl> <nl> | Argument | Comments | <nl> the outputs come from the the input image , with top3 predictions picked . <nl> <nl> <nl> # # Infer API Details <nl> - This example uses ObjectDetector class provided by MXNet ' s scala package Infer APIs . It provides methods to load the images , create NDArray out of Java BufferedImage and run prediction using Classifier and Predictor APIs . <nl> + This example uses ObjectDetector class provided by MXNet ' s Java Infer APIs . It provides methods to load the images , create NDArray out of Java BufferedImage and run prediction using Classifier and Predictor APIs . <nl> <nl> <nl> # # References <nl> This documentation used the model and inference setup guide from the [ MXNet Model Server SSD example ] ( https : / / github . com / awslabs / mxnet - model - server / blob / master / examples / ssd / README . md ) . <nl> - <nl> - <nl> - # # Next Steps <nl> - <nl> - Check out the following related tutorials and examples for the Infer API : <nl> - <nl> - * [ Image Classification with the MXNet Scala Infer API ] ( . . / imageclassifier / README . md ) <nl> mmm a / scala - package / examples / src / main / java / org / apache / mxnetexamples / javaapi / infer / objectdetector / SSDClassifierExample . java <nl> ppp b / scala - package / examples / src / main / java / org / apache / mxnetexamples / javaapi / infer / objectdetector / SSDClassifierExample . java <nl> <nl> import java . io . File ; <nl> <nl> public class SSDClassifierExample { <nl> - @ Option ( name = " - - model - path - prefix " , usage = " input model directory and prefix of the model " ) <nl> - private String modelPathPrefix = " / model / ssd_resnet50_512 " ; <nl> - @ Option ( name = " - - input - image " , usage = " the input image " ) <nl> - private String inputImagePath = " / images / dog . jpg " ; <nl> - @ Option ( name = " - - input - dir " , usage = " the input batch of images directory " ) <nl> - private String inputImageDir = " / images / " ; <nl> - <nl> - final static Logger logger = LoggerFactory . getLogger ( SSDClassifierExample . class ) ; <nl> - <nl> - static List < List < ObjectDetectorOutput > > <nl> - runObjectDetectionSingle ( String modelPathPrefix , String inputImagePath , List < Context > context ) { <nl> - Shape inputShape = new Shape ( new int [ ] { 1 , 3 , 512 , 512 } ) ; <nl> - List < DataDesc > inputDescriptors = new ArrayList < DataDesc > ( ) ; <nl> - inputDescriptors . add ( new DataDesc ( " data " , inputShape , DType . Float32 ( ) , " NCHW " ) ) ; <nl> - BufferedImage img = ObjectDetector . loadImageFromFile ( inputImagePath ) ; <nl> - ObjectDetector objDet = new ObjectDetector ( modelPathPrefix , inputDescriptors , context , 0 ) ; <nl> - return objDet . 
imageObjectDetect ( img , 3 ) ; <nl> - } <nl> - <nl> - static List < List < List < ObjectDetectorOutput > > > <nl> - runObjectDetectionBatch ( String modelPathPrefix , String inputImageDir , List < Context > context ) { <nl> - Shape inputShape = new Shape ( new int [ ] { 1 , 3 , 512 , 512 } ) ; <nl> - List < DataDesc > inputDescriptors = new ArrayList < DataDesc > ( ) ; <nl> - inputDescriptors . add ( new DataDesc ( " data " , inputShape , DType . Float32 ( ) , " NCHW " ) ) ; <nl> - ObjectDetector objDet = new ObjectDetector ( modelPathPrefix , inputDescriptors , context , 0 ) ; <nl> - <nl> - / / Loading batch of images from the directory path <nl> - List < List < String > > batchFiles = generateBatches ( inputImageDir , 20 ) ; <nl> - List < List < List < ObjectDetectorOutput > > > outputList <nl> - = new ArrayList < List < List < ObjectDetectorOutput > > > ( ) ; <nl> - <nl> - for ( List < String > batchFile : batchFiles ) { <nl> - List < BufferedImage > imgList = ObjectDetector . loadInputBatch ( batchFile ) ; <nl> - / / Running inference on batch of images loaded in previous step <nl> - List < List < ObjectDetectorOutput > > tmp <nl> - = objDet . imageBatchObjectDetect ( imgList , 5 ) ; <nl> - outputList . add ( tmp ) ; <nl> - } <nl> - return outputList ; <nl> - } <nl> - <nl> - static List < List < String > > generateBatches ( String inputImageDirPath , int batchSize ) { <nl> - File dir = new File ( inputImageDirPath ) ; <nl> - <nl> - List < List < String > > output = new ArrayList < List < String > > ( ) ; <nl> - List < String > batch = new ArrayList < String > ( ) ; <nl> - for ( File imgFile : dir . listFiles ( ) ) { <nl> - batch . add ( imgFile . getPath ( ) ) ; <nl> - if ( batch . size ( ) = = batchSize ) { <nl> - output . add ( batch ) ; <nl> - batch = new ArrayList < String > ( ) ; <nl> - } <nl> - } <nl> - if ( batch . size ( ) > 0 ) { <nl> - output . add ( batch ) ; <nl> - } <nl> - return output ; <nl> - } <nl> - <nl> - public static void main ( String [ ] args ) { <nl> - SSDClassifierExample inst = new SSDClassifierExample ( ) ; <nl> - CmdLineParser parser = new CmdLineParser ( inst ) ; <nl> - try { <nl> - parser . parseArgument ( args ) ; <nl> - } catch ( Exception e ) { <nl> - logger . error ( e . getMessage ( ) , e ) ; <nl> - parser . printUsage ( System . err ) ; <nl> - System . exit ( 1 ) ; <nl> - } <nl> - <nl> - String mdprefixDir = inst . modelPathPrefix ; <nl> - String imgPath = inst . inputImagePath ; <nl> - String imgDir = inst . inputImageDir ; <nl> - <nl> - if ( ! checkExist ( Arrays . asList ( mdprefixDir + " - symbol . json " , imgDir , imgPath ) ) ) { <nl> - logger . error ( " Model or input image path does not exist " ) ; <nl> - System . exit ( 1 ) ; <nl> - } <nl> - <nl> - List < Context > context = new ArrayList < Context > ( ) ; <nl> - if ( System . getenv ( ) . containsKey ( " SCALA_TEST_ON_GPU " ) & & <nl> - Integer . valueOf ( System . getenv ( " SCALA_TEST_ON_GPU " ) ) = = 1 ) { <nl> - context . add ( Context . gpu ( ) ) ; <nl> - } else { <nl> - context . add ( Context . cpu ( ) ) ; <nl> - } <nl> - <nl> - try { <nl> - Shape inputShape = new Shape ( new int [ ] { 1 , 3 , 512 , 512 } ) ; <nl> - Shape outputShape = new Shape ( new int [ ] { 1 , 6132 , 6 } ) ; <nl> - <nl> - <nl> - int width = inputShape . get ( 2 ) ; <nl> - int height = inputShape . 
get ( 3 ) ; <nl> - String outputStr = " \ n " ; <nl> - <nl> - List < List < ObjectDetectorOutput > > output <nl> - = runObjectDetectionSingle ( mdprefixDir , imgPath , context ) ; <nl> - <nl> - for ( List < ObjectDetectorOutput > ele : output ) { <nl> - for ( ObjectDetectorOutput i : ele ) { <nl> - outputStr + = " Class : " + i . getClassName ( ) + " \ n " ; <nl> - outputStr + = " Probabilties : " + i . getProbability ( ) + " \ n " ; <nl> - <nl> - List < Float > coord = Arrays . asList ( i . getXMin ( ) * width , <nl> - i . getXMax ( ) * height , i . getYMin ( ) * width , i . getYMax ( ) * height ) ; <nl> - StringBuilder sb = new StringBuilder ( ) ; <nl> - for ( float c : coord ) { <nl> - sb . append ( " , " ) . append ( c ) ; <nl> - } <nl> - outputStr + = " Coord : " + sb . substring ( 2 ) + " \ n " ; <nl> - } <nl> - } <nl> - logger . info ( outputStr ) ; <nl> - <nl> - List < List < List < ObjectDetectorOutput > > > outputList = <nl> - runObjectDetectionBatch ( mdprefixDir , imgDir , context ) ; <nl> - <nl> - outputStr = " \ n " ; <nl> - int index = 0 ; <nl> - for ( List < List < ObjectDetectorOutput > > i : outputList ) { <nl> - for ( List < ObjectDetectorOutput > j : i ) { <nl> - outputStr + = " * * * Image " + ( index + 1 ) + " * * * " + " \ n " ; <nl> - for ( ObjectDetectorOutput k : j ) { <nl> - outputStr + = " Class : " + k . getClassName ( ) + " \ n " ; <nl> - outputStr + = " Probabilties : " + k . getProbability ( ) + " \ n " ; <nl> - List < Float > coord = Arrays . asList ( k . getXMin ( ) * width , <nl> - k . getXMax ( ) * height , k . getYMin ( ) * width , k . getYMax ( ) * height ) ; <nl> - <nl> - StringBuilder sb = new StringBuilder ( ) ; <nl> - for ( float c : coord ) { <nl> - sb . append ( " , " ) . append ( c ) ; <nl> - } <nl> - outputStr + = " Coord : " + sb . substring ( 2 ) + " \ n " ; <nl> - } <nl> - index + + ; <nl> - } <nl> - } <nl> - logger . info ( outputStr ) ; <nl> - <nl> - } catch ( Exception e ) { <nl> - logger . error ( e . getMessage ( ) , e ) ; <nl> - parser . printUsage ( System . err ) ; <nl> - System . exit ( 1 ) ; <nl> - } <nl> - System . exit ( 0 ) ; <nl> - } <nl> - <nl> - static Boolean checkExist ( List < String > arr ) { <nl> - Boolean exist = true ; <nl> - for ( String item : arr ) { <nl> - exist = new File ( item ) . exists ( ) & & exist ; <nl> - if ( ! exist ) { <nl> - logger . error ( " Cannot find : " + item ) ; <nl> - } <nl> - } <nl> - return exist ; <nl> - } <nl> + @ Option ( name = " - - model - path - prefix " , usage = " input model directory and prefix of the model " ) <nl> + private String modelPathPrefix = " / model / ssd_resnet50_512 " ; <nl> + @ Option ( name = " - - input - image " , usage = " the input image " ) <nl> + private String inputImagePath = " / images / dog . jpg " ; <nl> + @ Option ( name = " - - input - dir " , usage = " the input batch of images directory " ) <nl> + private String inputImageDir = " / images / " ; <nl> + <nl> + final static Logger logger = LoggerFactory . getLogger ( SSDClassifierExample . class ) ; <nl> + <nl> + static List < List < ObjectDetectorOutput > > <nl> + runObjectDetectionSingle ( String modelPathPrefix , String inputImagePath , List < Context > context ) { <nl> + Shape inputShape = new Shape ( new int [ ] { 1 , 3 , 512 , 512 } ) ; <nl> + List < DataDesc > inputDescriptors = new ArrayList < DataDesc > ( ) ; <nl> + inputDescriptors . add ( new DataDesc ( " data " , inputShape , DType . Float32 ( ) , " NCHW " ) ) ; <nl> + BufferedImage img = ObjectDetector . 
loadImageFromFile ( inputImagePath ) ; <nl> + ObjectDetector objDet = new ObjectDetector ( modelPathPrefix , inputDescriptors , context , 0 ) ; <nl> + return objDet . imageObjectDetect ( img , 3 ) ; <nl> + } <nl> + <nl> + static List < List < List < ObjectDetectorOutput > > > <nl> + runObjectDetectionBatch ( String modelPathPrefix , String inputImageDir , List < Context > context ) { <nl> + Shape inputShape = new Shape ( new int [ ] { 1 , 3 , 512 , 512 } ) ; <nl> + List < DataDesc > inputDescriptors = new ArrayList < DataDesc > ( ) ; <nl> + inputDescriptors . add ( new DataDesc ( " data " , inputShape , DType . Float32 ( ) , " NCHW " ) ) ; <nl> + ObjectDetector objDet = new ObjectDetector ( modelPathPrefix , inputDescriptors , context , 0 ) ; <nl> + <nl> + / / Loading batch of images from the directory path <nl> + List < List < String > > batchFiles = generateBatches ( inputImageDir , 20 ) ; <nl> + List < List < List < ObjectDetectorOutput > > > outputList <nl> + = new ArrayList < List < List < ObjectDetectorOutput > > > ( ) ; <nl> + <nl> + for ( List < String > batchFile : batchFiles ) { <nl> + List < BufferedImage > imgList = ObjectDetector . loadInputBatch ( batchFile ) ; <nl> + / / Running inference on batch of images loaded in previous step <nl> + List < List < ObjectDetectorOutput > > tmp <nl> + = objDet . imageBatchObjectDetect ( imgList , 5 ) ; <nl> + outputList . add ( tmp ) ; <nl> + } <nl> + return outputList ; <nl> + } <nl> + <nl> + static List < List < String > > generateBatches ( String inputImageDirPath , int batchSize ) { <nl> + File dir = new File ( inputImageDirPath ) ; <nl> + <nl> + List < List < String > > output = new ArrayList < List < String > > ( ) ; <nl> + List < String > batch = new ArrayList < String > ( ) ; <nl> + for ( File imgFile : dir . listFiles ( ) ) { <nl> + batch . add ( imgFile . getPath ( ) ) ; <nl> + if ( batch . size ( ) = = batchSize ) { <nl> + output . add ( batch ) ; <nl> + batch = new ArrayList < String > ( ) ; <nl> + } <nl> + } <nl> + if ( batch . size ( ) > 0 ) { <nl> + output . add ( batch ) ; <nl> + } <nl> + return output ; <nl> + } <nl> + <nl> + public static void main ( String [ ] args ) { <nl> + SSDClassifierExample inst = new SSDClassifierExample ( ) ; <nl> + CmdLineParser parser = new CmdLineParser ( inst ) ; <nl> + try { <nl> + parser . parseArgument ( args ) ; <nl> + } catch ( Exception e ) { <nl> + logger . error ( e . getMessage ( ) , e ) ; <nl> + parser . printUsage ( System . err ) ; <nl> + System . exit ( 1 ) ; <nl> + } <nl> + <nl> + String mdprefixDir = inst . modelPathPrefix ; <nl> + String imgPath = inst . inputImagePath ; <nl> + String imgDir = inst . inputImageDir ; <nl> + <nl> + if ( ! checkExist ( Arrays . asList ( mdprefixDir + " - symbol . json " , imgDir , imgPath ) ) ) { <nl> + logger . error ( " Model or input image path does not exist " ) ; <nl> + System . exit ( 1 ) ; <nl> + } <nl> + <nl> + List < Context > context = new ArrayList < Context > ( ) ; <nl> + if ( System . getenv ( ) . containsKey ( " SCALA_TEST_ON_GPU " ) & & <nl> + Integer . valueOf ( System . getenv ( " SCALA_TEST_ON_GPU " ) ) = = 1 ) { <nl> + context . add ( Context . gpu ( ) ) ; <nl> + } else { <nl> + context . add ( Context . cpu ( ) ) ; <nl> + } <nl> + <nl> + try { <nl> + Shape inputShape = new Shape ( new int [ ] { 1 , 3 , 512 , 512 } ) ; <nl> + Shape outputShape = new Shape ( new int [ ] { 1 , 6132 , 6 } ) ; <nl> + <nl> + <nl> + int width = inputShape . get ( 2 ) ; <nl> + int height = inputShape . 
get ( 3 ) ; <nl> + StringBuilder outputStr = new StringBuilder ( ) . append ( " \ n " ) ; <nl> + <nl> + List < List < ObjectDetectorOutput > > output <nl> + = runObjectDetectionSingle ( mdprefixDir , imgPath , context ) ; <nl> + <nl> + for ( List < ObjectDetectorOutput > ele : output ) { <nl> + for ( ObjectDetectorOutput i : ele ) { <nl> + outputStr . append ( " Class : " + i . getClassName ( ) + " \ n " ) ; <nl> + outputStr . append ( " Probabilties : " + i . getProbability ( ) + " \ n " ) ; <nl> + <nl> + List < Float > coord = Arrays . asList ( i . getXMin ( ) * width , <nl> + i . getXMax ( ) * height , i . getYMin ( ) * width , i . getYMax ( ) * height ) ; <nl> + StringBuilder sb = new StringBuilder ( ) ; <nl> + for ( float c : coord ) { <nl> + sb . append ( " , " ) . append ( c ) ; <nl> + } <nl> + outputStr . append ( " Coord : " + sb . substring ( 2 ) + " \ n " ) ; <nl> + } <nl> + } <nl> + logger . info ( outputStr . toString ( ) ) ; <nl> + <nl> + List < List < List < ObjectDetectorOutput > > > outputList = <nl> + runObjectDetectionBatch ( mdprefixDir , imgDir , context ) ; <nl> + <nl> + outputStr = new StringBuilder ( ) . append ( " \ n " ) ; <nl> + int index = 0 ; <nl> + for ( List < List < ObjectDetectorOutput > > i : outputList ) { <nl> + for ( List < ObjectDetectorOutput > j : i ) { <nl> + outputStr . append ( " * * * Image " + ( index + 1 ) + " * * * " + " \ n " ) ; <nl> + for ( ObjectDetectorOutput k : j ) { <nl> + outputStr . append ( " Class : " + k . getClassName ( ) + " \ n " ) ; <nl> + outputStr . append ( " Probabilties : " + k . getProbability ( ) + " \ n " ) ; <nl> + List < Float > coord = Arrays . asList ( k . getXMin ( ) * width , <nl> + k . getXMax ( ) * height , k . getYMin ( ) * width , k . getYMax ( ) * height ) ; <nl> + <nl> + StringBuilder sb = new StringBuilder ( ) ; <nl> + for ( float c : coord ) { <nl> + sb . append ( " , " ) . append ( c ) ; <nl> + } <nl> + outputStr . append ( " Coord : " + sb . substring ( 2 ) + " \ n " ) ; <nl> + } <nl> + index + + ; <nl> + } <nl> + } <nl> + logger . info ( outputStr . toString ( ) ) ; <nl> + <nl> + } catch ( Exception e ) { <nl> + logger . error ( e . getMessage ( ) , e ) ; <nl> + parser . printUsage ( System . err ) ; <nl> + System . exit ( 1 ) ; <nl> + } <nl> + System . exit ( 0 ) ; <nl> + } <nl> + <nl> + static Boolean checkExist ( List < String > arr ) { <nl> + Boolean exist = true ; <nl> + for ( String item : arr ) { <nl> + if ( ! ( new File ( item ) . exists ( ) ) ) { <nl> + logger . error ( " Cannot find : " + item ) ; <nl> + exist = false ; <nl> + } <nl> + } <nl> + return exist ; <nl> + } <nl> } <nl> mmm a / scala - package / examples / src / main / scala / org / apache / mxnetexamples / infer / objectdetector / README . md <nl> ppp b / scala - package / examples / src / main / scala / org / apache / mxnetexamples / infer / objectdetector / README . md <nl> The model is trained on the [ Pascal VOC 2012 dataset ] ( http : / / host . robots . ox . ac . u <nl> # # # Download Artifacts <nl> # # # # Step 1 <nl> You can download the files using the script ` get_ssd_data . sh ` . It will download and place the model files in a ` model ` folder and the test image files in a ` image ` folder in the current directory . <nl> - From the ` scala - package / examples / scripts / infer / imageclassifier / ` folder run : <nl> + From the ` scala - package / examples / scripts / infer / objectdetector / ` folder run : <nl> <nl> ` ` ` bash <nl> . / get_ssd_data . 
sh <nl> ` ` ` <nl> <nl> - * * Note * * : You may need to run ` chmod + x get_resnet_data . sh ` before running this script . <nl> - <nl> - Alternatively use the following links to download the Symbol and Params files via your browser : <nl> - - [ resnet50_ssd_model - symbol . json ] ( https : / / s3 . amazonaws . com / model - server / models / resnet50_ssd / resnet50_ssd_model - symbol . json ) <nl> - - [ resnet50_ssd_model - 0000 . params ] ( https : / / s3 . amazonaws . com / model - server / models / resnet50_ssd / resnet50_ssd_model - 0000 . params ) <nl> - - [ synset . txt ] ( https : / / github . com / awslabs / mxnet - model - server / blob / master / examples / ssd / synset . txt ) <nl> + * * Note * * : You may need to run ` chmod + x get_ssd_data . sh ` before running this script . <nl> <nl> In the pre - trained model , the ` input_name ` is ` data ` and shape is ` ( 1 , 3 , 512 , 512 ) ` . <nl> This shape translates to : a batch of ` 1 ` image , the image has color and uses ` 3 ` channels ( RGB ) , and the image has the dimensions of ` 512 ` pixels in height by ` 512 ` pixels in width . <nl> The output shape is ` ( 1 , 6132 , 6 ) ` . As with the input , the ` 1 ` is the number of <nl> <nl> # # # Setup Datapath and Parameters <nl> # # # # Step 2 <nl> - The code ` Line 31 : val baseDir = System . getProperty ( " user . dir " ) ` in the example will automatically searches the work directory you have defined . Please put the files in your [ work directory ] ( https : / / stackoverflow . com / questions / 16239130 / java - user - dir - property - what - exactly - does - it - mean ) . < ! - - how do you define the work directory ? - - > <nl> - <nl> - Alternatively , if you would like to use your own path , please change line 31 into your own path <nl> - ` ` ` scala <nl> - val baseDir = < Your Own Path > <nl> - ` ` ` <nl> - <nl> The followings is the parameters defined for this example , you can find more information in the ` class SSDClassifierExample ` . <nl> <nl> | Argument | Comments | <nl> mmm a / scala - package / examples / src / main / scala / org / apache / mxnetexamples / infer / objectdetector / SSDClassifierExample . scala <nl> ppp b / scala - package / examples / src / main / scala / org / apache / mxnetexamples / infer / objectdetector / SSDClassifierExample . scala <nl> object SSDClassifierExample { <nl> def checkExist ( arr : Array [ String ] ) : Boolean = { <nl> var exist : Boolean = true <nl> for ( item < - arr ) { <nl> - exist = Files . exists ( Paths . get ( item ) ) & & exist <nl> - if ( ! exist ) { <nl> + if ( ! ( Files . exists ( Paths . get ( item ) ) ) ) { <nl> logger . error ( " Cannot find : " + item ) <nl> + exist = false <nl> } <nl> } <nl> exist <nl> mmm a / scala - package / infer / src / main / scala / org / apache / mxnet / infer / javaapi / ObjectDetector . scala <nl> ppp b / scala - package / infer / src / main / scala / org / apache / mxnet / infer / javaapi / ObjectDetector . scala <nl> import scala . collection . JavaConverters . _ <nl> * Defaults to CPU . <nl> * @ param epoch Model epoch to load ; defaults to 0 <nl> * / <nl> - class ObjectDetector ( val objDetector : org . apache . mxnet . infer . ObjectDetector ) { <nl> + class ObjectDetector private [ mxnet ] ( val objDetector : org . apache . mxnet . infer . ObjectDetector ) { <nl> <nl> def this ( modelPathPrefix : String , inputDescriptors : java . util . List [ DataDesc ] , contexts : <nl> java . util . 
List [ Context ] , epoch : Int ) <nl> mmm a / scala - package / infer / src / main / scala / org / apache / mxnet / infer / javaapi / Predictor . scala <nl> ppp b / scala - package / infer / src / main / scala / org / apache / mxnet / infer / javaapi / Predictor . scala <nl> import scala . collection . JavaConverters . _ <nl> * / <nl> <nl> / / JavaDoc description of class to be updated in https : / / issues . apache . org / jira / browse / MXNET - 1178 <nl> - class Predictor ( val predictor : org . apache . mxnet . infer . Predictor ) { <nl> + class Predictor private [ mxnet ] ( val predictor : org . apache . mxnet . infer . Predictor ) { <nl> def this ( modelPathPrefix : String , inputDescriptors : java . util . List [ DataDesc ] , <nl> contexts : java . util . List [ Context ] , epoch : Int ) <nl> = this { <nl>
|
Addressing PR feedback for merging Java API into master ()
|
apache/incubator-mxnet
|
218a7a93c239dce7a9ce33fc2cc4f58e473e3da6
|
2018-11-15T01:51:25Z
|
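Among the mxnet changes above, the `checkExist` fix is worth isolating: the old fold `exist = found && exist` stopped logging once the flag flipped, while the new loop logs every missing path and only then reports failure. A hedged C++17 sketch of the corrected shape (the originals are Java and Scala; `std::filesystem` is this sketch's assumption):

```cpp
#include <filesystem>
#include <iostream>
#include <string>
#include <vector>

// Test every path, log each missing one, return the aggregate at the end.
static bool checkExist(const std::vector<std::string>& paths) {
  bool exist = true;
  for (const auto& item : paths) {
    if (!std::filesystem::exists(item)) {
      std::cerr << "Cannot find: " << item << '\n';
      exist = false;  // keep scanning so every missing path is reported
    }
  }
  return exist;
}
```

The same record also narrows the Java-facing wrapper constructors to `private[mxnet]` so users cannot wrap raw Scala objects directly, and swaps `String` concatenation in a loop for `StringBuilder`.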
mmm a / doc / base / classes . xml <nl> ppp b / doc / base / classes . xml <nl> This method controls whether the position between two cached points is interpola <nl> < / class > <nl> < class name = " Timer " inherits = " Node " category = " Core " > <nl> < brief_description > <nl> + A simple Timer node . <nl> < / brief_description > <nl> < description > <nl> Timer node . This is a simple node that will emit a timeout callback when the timer runs out . It can optionally be set to loop . <nl> This method controls whether the position between two cached points is interpola <nl> < argument index = " 0 " name = " active " type = " bool " > <nl> < / argument > <nl> < description > <nl> + Set whether the timer is active or not . An inactive timer will be paused until it is activated again . <nl> < / description > <nl> < / method > <nl> < method name = " is_active " qualifiers = " const " > <nl> < return type = " bool " > <nl> < / return > <nl> < description > <nl> + Return if the timer is active or not . <nl> < / description > <nl> < / method > <nl> < method name = " get_time_left " qualifiers = " const " > <nl>
|
Finished Timer Documentation
|
godotengine/godot
|
bf13410626b0085cf5cd11d4542bbcbb5bde1628
|
2016-05-02T15:10:31Z
|
mmm a / platform / windows / os_windows . cpp <nl> ppp b / platform / windows / os_windows . cpp <nl> void OS_Windows : : run ( ) { <nl> if ( ! main_loop ) <nl> return ; <nl> <nl> - / / Process all events before the main initialization so the cursor will get initialized properly <nl> - process_events ( ) ; / / get rid of pending events <nl> - <nl> main_loop - > init ( ) ; <nl> <nl> uint64_t last_ticks = get_ticks_usec ( ) ; <nl> mmm a / platform / x11 / os_x11 . cpp <nl> ppp b / platform / x11 / os_x11 . cpp <nl> void OS_X11 : : process_xevents ( ) { <nl> case EnterNotify : { <nl> if ( main_loop & & ! mouse_mode_grab ) <nl> main_loop - > notification ( MainLoop : : NOTIFICATION_WM_MOUSE_ENTER ) ; <nl> - if ( input ) { <nl> - / / Update mouse position . It is triggered before mouse motion . <nl> - Point2i pos ( event . xmotion . x , event . xmotion . y ) ; <nl> - input - > set_mouse_pos ( pos ) ; <nl> + if ( input ) <nl> input - > set_mouse_in_window ( true ) ; <nl> - } <nl> } break ; <nl> case FocusIn : <nl> minimized = false ; <nl> void OS_X11 : : run ( ) { <nl> if ( ! main_loop ) <nl> return ; <nl> <nl> - / / Process all events before the main initialization so the cursor will get initialized properly <nl> - process_xevents ( ) ; / / get rid of pending events <nl> - <nl> main_loop - > init ( ) ; <nl> <nl> / / uint64_t last_ticks = get_ticks_usec ( ) ; <nl>
|
Merge pull request from godotengine / revert - 8180 - 8145 - Mouse_Position_is_unknown_until_first_mouse_event
|
godotengine/godot
|
b49925caab99fb49b0d50b0f4696d20fd3f4f1d0
|
2017-04-06T16:09:19Z
|
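The revert above removes the process_events() / process_xevents() calls that had been inserted ahead of main_loop->init() on Windows and X11 (originally so the cursor position would be known before the first mouse event) and restores the init-first ordering. A minimal sketch of the two orderings, using hypothetical stand-ins (MainLoop, pending_events) rather than Godot's actual platform layer:

#include <functional>
#include <queue>

// Hypothetical stand-ins for the platform layer in the diff above.
struct MainLoop {
    void init() { /* scene setup; may consult current input state */ }
};

std::queue<std::function<void()>> pending_events;

// Drain the OS event queue, running each pending handler.
void process_events() {
    while (!pending_events.empty()) {
        pending_events.front()();
        pending_events.pop();
    }
}

void run(MainLoop *main_loop) {
    if (!main_loop) return;
    // The reverted change called process_events() here, before init(), so input
    // state such as the cursor position was seeded early; the revert makes
    // init() run first, leaving pending events to the normal frame loop below.
    main_loop->init();
    // frame loop: process_events(); then step the main loop; repeat.
}

The trade-off is visible in the sketch: draining events before init() seeds input state sooner, but it also runs OS event handlers before the main loop has been initialized, which is presumably why the change was backed out.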
mmm a / src / clustering / administration / cluster_config . cc <nl> ppp b / src / clustering / administration / cluster_config . cc <nl> bool convert_auth_key_from_datum ( <nl> ql : : datum_t datum , <nl> auth_key_t * value_out , <nl> std : : string * error_out ) { <nl> - if ( datum - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( datum . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> * value_out = auth_key_t ( ) ; <nl> return true ; <nl> - } else if ( datum - > get_type ( ) = = ql : : datum_t : : R_STR ) { <nl> - if ( ! value_out - > assign_value ( datum - > as_str ( ) . to_std ( ) ) ) { <nl> - if ( datum - > as_str ( ) . size ( ) > static_cast < size_t > ( auth_key_t : : max_length ) ) { <nl> + } else if ( datum . get_type ( ) = = ql : : datum_t : : R_STR ) { <nl> + if ( ! value_out - > assign_value ( datum . as_str ( ) . to_std ( ) ) ) { <nl> + if ( datum . as_str ( ) . size ( ) > static_cast < size_t > ( auth_key_t : : max_length ) ) { <nl> * error_out = strprintf ( " The auth key should be at most % zu bytes long , " <nl> " but your given key is % zu bytes . " , <nl> - static_cast < size_t > ( auth_key_t : : max_length ) , datum - > as_str ( ) . size ( ) ) ; <nl> + static_cast < size_t > ( auth_key_t : : max_length ) , datum . as_str ( ) . size ( ) ) ; <nl> } else { <nl> / * Currently this can ' t happen , because length is the only reason to <nl> invalidate an auth key . This is here for future - proofing . * / <nl> bool convert_auth_key_from_datum ( <nl> " Setting the auth key to { hidden : true } is not allowed . " ; <nl> return false ; <nl> } else { <nl> - * error_out = " Expected a string or null ; got " + datum - > print ( ) ; <nl> + * error_out = " Expected a string or null ; got " + datum . print ( ) ; <nl> return false ; <nl> } <nl> } <nl> mmm a / src / clustering / administration / datum_adapter . cc <nl> ppp b / src / clustering / administration / datum_adapter . cc <nl> bool convert_name_from_datum ( <nl> const std : : string & what , <nl> name_string_t * value_out , <nl> std : : string * error_out ) { <nl> - if ( datum - > get_type ( ) ! = ql : : datum_t : : R_STR ) { <nl> - * error_out = " Expected a " + what + " ; got " + datum - > print ( ) ; <nl> + if ( datum . get_type ( ) ! = ql : : datum_t : : R_STR ) { <nl> + * error_out = " Expected a " + what + " ; got " + datum . print ( ) ; <nl> return false ; <nl> } <nl> - if ( ! value_out - > assign_value ( datum - > as_str ( ) ) ) { <nl> - * error_out = datum - > print ( ) + " is not a valid " + what + " ; " + <nl> + if ( ! value_out - > assign_value ( datum . as_str ( ) ) ) { <nl> + * error_out = datum . print ( ) + " is not a valid " + what + " ; " + <nl> std : : string ( name_string_t : : valid_char_msg ) ; <nl> return false ; <nl> } <nl> bool convert_uuid_from_datum ( <nl> ql : : datum_t datum , <nl> uuid_u * value_out , <nl> std : : string * error_out ) { <nl> - if ( datum - > get_type ( ) ! = ql : : datum_t : : R_STR ) { <nl> - * error_out = " Expected a UUID ; got " + datum - > print ( ) ; <nl> + if ( datum . get_type ( ) ! = ql : : datum_t : : R_STR ) { <nl> + * error_out = " Expected a UUID ; got " + datum . print ( ) ; <nl> return false ; <nl> } <nl> - if ( ! str_to_uuid ( datum - > as_str ( ) . to_std ( ) , value_out ) ) { <nl> - * error_out = " Expected a UUID ; got " + datum - > print ( ) ; <nl> + if ( ! str_to_uuid ( datum . as_str ( ) . to_std ( ) , value_out ) ) { <nl> + * error_out = " Expected a UUID ; got " + datum . 
print ( ) ; <nl> return false ; <nl> } <nl> return true ; <nl> bool convert_uuid_from_datum ( <nl> bool converter_from_datum_object_t : : init ( <nl> ql : : datum_t _datum , <nl> std : : string * error_out ) { <nl> - if ( _datum - > get_type ( ) ! = ql : : datum_t : : R_OBJECT ) { <nl> - * error_out = " Expected an object ; got " + _datum - > print ( ) ; <nl> + if ( _datum . get_type ( ) ! = ql : : datum_t : : R_OBJECT ) { <nl> + * error_out = " Expected an object ; got " + _datum . print ( ) ; <nl> return false ; <nl> } <nl> datum = _datum ; <nl> bool converter_from_datum_object_t : : get ( <nl> ql : : datum_t * value_out , <nl> std : : string * error_out ) { <nl> extra_keys . erase ( datum_string_t ( key ) ) ; <nl> - * value_out = datum - > get_field ( key , ql : : NOTHROW ) ; <nl> + * value_out = datum . get_field ( key , ql : : NOTHROW ) ; <nl> if ( ! value_out - > has ( ) ) { <nl> * error_out = strprintf ( " Expected a field named ` % s ` . " , key ) ; <nl> return false ; <nl> void converter_from_datum_object_t : : get_optional ( <nl> const char * key , <nl> ql : : datum_t * value_out ) { <nl> extra_keys . erase ( datum_string_t ( key ) ) ; <nl> - * value_out = datum - > get_field ( key , ql : : NOTHROW ) ; <nl> + * value_out = datum . get_field ( key , ql : : NOTHROW ) ; <nl> } <nl> <nl> bool converter_from_datum_object_t : : check_no_extra_keys ( std : : string * error_out ) { <nl> mmm a / src / clustering / administration / datum_adapter . hpp <nl> ppp b / src / clustering / administration / datum_adapter . hpp <nl> bool convert_vector_from_datum ( <nl> ql : : datum_t datum , <nl> std : : vector < T > * vector_out , <nl> std : : string * error_out ) { <nl> - if ( datum - > get_type ( ) ! = ql : : datum_t : : R_ARRAY ) { <nl> - * error_out = " Expected an array , got " + datum - > print ( ) ; <nl> + if ( datum . get_type ( ) ! = ql : : datum_t : : R_ARRAY ) { <nl> + * error_out = " Expected an array , got " + datum . print ( ) ; <nl> return false ; <nl> } <nl> - vector_out - > resize ( datum - > arr_size ( ) ) ; <nl> - for ( size_t i = 0 ; i < datum - > arr_size ( ) ; + + i ) { <nl> - if ( ! conv ( datum - > get ( i ) , & ( * vector_out ) [ i ] , error_out ) ) { <nl> + vector_out - > resize ( datum . arr_size ( ) ) ; <nl> + for ( size_t i = 0 ; i < datum . arr_size ( ) ; + + i ) { <nl> + if ( ! conv ( datum . get ( i ) , & ( * vector_out ) [ i ] , error_out ) ) { <nl> return false ; <nl> } <nl> } <nl> bool convert_set_from_datum ( <nl> ql : : datum_t datum , <nl> std : : set < T > * set_out , <nl> std : : string * error_out ) { <nl> - if ( datum - > get_type ( ) ! = ql : : datum_t : : R_ARRAY ) { <nl> - * error_out = " Expected an array , got " + datum - > print ( ) ; <nl> + if ( datum . get_type ( ) ! = ql : : datum_t : : R_ARRAY ) { <nl> + * error_out = " Expected an array , got " + datum . print ( ) ; <nl> return false ; <nl> } <nl> set_out - > clear ( ) ; <nl> - for ( size_t i = 0 ; i < datum - > arr_size ( ) ; + + i ) { <nl> + for ( size_t i = 0 ; i < datum . arr_size ( ) ; + + i ) { <nl> T value ; <nl> - if ( ! conv ( datum - > get ( i ) , & value , error_out ) ) { <nl> + if ( ! conv ( datum . get ( i ) , & value , error_out ) ) { <nl> return false ; <nl> } <nl> auto res = set_out - > insert ( value ) ; <nl> if ( ! allow_duplicates & & ! res . second ) { <nl> - * error_out = datum - > get ( i ) - > print ( ) + " was specified more than once . " ; <nl> + * error_out = datum . get ( i ) . print ( ) + " was specified more than once . 
" ; <nl> return false ; <nl> } <nl> } <nl> mmm a / src / clustering / administration / tables / table_config . cc <nl> ppp b / src / clustering / administration / tables / table_config . cc <nl> bool convert_table_config_shard_from_datum ( <nl> if ( ! converter . get ( " replicas " , & replica_names_datum , error_out ) ) { <nl> return false ; <nl> } <nl> - if ( replica_names_datum - > get_type ( ) ! = ql : : datum_t : : R_ARRAY ) { <nl> + if ( replica_names_datum . get_type ( ) ! = ql : : datum_t : : R_ARRAY ) { <nl> * error_out = " In ` replicas ` : Expected an array , got " + <nl> - replica_names_datum - > print ( ) ; <nl> + replica_names_datum . print ( ) ; <nl> return false ; <nl> } <nl> if ( ! convert_set_from_datum < name_string_t > ( <nl> mmm a / src / extproc / http_job . cc <nl> ppp b / src / extproc / http_job . cc <nl> std : : string url_encode_fields ( CURL * curl_handle , <nl> <nl> std : : map < std : : string , std : : string > translated_fields ; <nl> <nl> - for ( size_t field_idx = 0 ; field_idx < fields - > obj_size ( ) ; + + field_idx ) { <nl> - auto pair = fields - > get_pair ( field_idx ) ; <nl> + for ( size_t field_idx = 0 ; field_idx < fields . obj_size ( ) ; + + field_idx ) { <nl> + auto pair = fields . get_pair ( field_idx ) ; <nl> std : : string val ; <nl> - if ( pair . second - > get_type ( ) = = ql : : datum_t : : R_NUM ) { <nl> + if ( pair . second . get_type ( ) = = ql : : datum_t : : R_NUM ) { <nl> val = strprintf ( " % " PR_RECONSTRUCTABLE_DOUBLE , <nl> - pair . second - > as_num ( ) ) ; <nl> - } else if ( pair . second - > get_type ( ) = = ql : : datum_t : : R_STR ) { <nl> - val = pair . second - > as_str ( ) . to_std ( ) ; <nl> - } else if ( pair . second - > get_type ( ) ! = ql : : datum_t : : R_NULL ) { <nl> + pair . second . as_num ( ) ) ; <nl> + } else if ( pair . second . get_type ( ) = = ql : : datum_t : : R_STR ) { <nl> + val = pair . second . as_str ( ) . to_std ( ) ; <nl> + } else if ( pair . second . get_type ( ) ! = ql : : datum_t : : R_NULL ) { <nl> / / This shouldn ' t happen because we check this in the main process anyway <nl> throw curl_exc_t ( strprintf ( " expected ` params . % s ` to be a NUMBER , STRING , " <nl> " or NULL , but found % s " , <nl> pair . first . to_std ( ) . c_str ( ) , <nl> - pair . second - > get_type_name ( ) . c_str ( ) ) ) ; <nl> + pair . second . get_type_name ( ) . c_str ( ) ) ) ; <nl> } <nl> translated_fields [ pair . first . to_std ( ) ] = val ; <nl> } <nl> mmm a / src / extproc / js_job . cc <nl> ppp b / src / extproc / js_job . cc <nl> ql : : datum_t js_to_datum ( const v8 : : Handle < v8 : : Value > & value , <nl> v8 : : Handle < v8 : : Value > js_from_datum ( const ql : : datum_t & datum , <nl> std : : string * err_out ) { <nl> guarantee ( datum . has ( ) ) ; <nl> - switch ( datum - > get_type ( ) ) { <nl> + switch ( datum . get_type ( ) ) { <nl> case ql : : datum_t : : type_t : : R_BINARY : <nl> / / TODO : In order to support this , we need to link against a static version of <nl> / / V8 , which provides an ArrayBuffer API . <nl> err_out - > assign ( " ` r . binary ` data cannot be used in ` r . js ` . " ) ; <nl> return v8 : : Handle < v8 : : Value > ( ) ; <nl> case ql : : datum_t : : type_t : : R_BOOL : <nl> - if ( datum - > as_bool ( ) ) { <nl> + if ( datum . 
as_bool ( ) ) { <nl> return v8 : : True ( ) ; <nl> } else { <nl> return v8 : : False ( ) ; <nl> v8 : : Handle < v8 : : Value > js_from_datum ( const ql : : datum_t & datum , <nl> case ql : : datum_t : : type_t : : R_NULL : <nl> return v8 : : Null ( ) ; <nl> case ql : : datum_t : : type_t : : R_NUM : <nl> - return v8 : : Number : : New ( datum - > as_num ( ) ) ; <nl> + return v8 : : Number : : New ( datum . as_num ( ) ) ; <nl> case ql : : datum_t : : type_t : : R_STR : <nl> - return v8 : : String : : New ( datum - > as_str ( ) . to_std ( ) . c_str ( ) ) ; <nl> + return v8 : : String : : New ( datum . as_str ( ) . to_std ( ) . c_str ( ) ) ; <nl> case ql : : datum_t : : type_t : : R_ARRAY : { <nl> v8 : : Handle < v8 : : Array > array = v8 : : Array : : New ( ) ; <nl> <nl> v8 : : Handle < v8 : : Value > js_from_datum ( const ql : : datum_t & datum , <nl> return array ; <nl> } <nl> case ql : : datum_t : : type_t : : R_OBJECT : { <nl> - if ( datum - > is_ptype ( ql : : pseudo : : time_string ) ) { <nl> + if ( datum . is_ptype ( ql : : pseudo : : time_string ) ) { <nl> double epoch_time = ql : : pseudo : : time_to_epoch_time ( datum ) ; <nl> v8 : : Handle < v8 : : Value > date = v8 : : Date : : New ( epoch_time * 1000 ) ; <nl> return date ; <nl> mmm a / src / protob / protob . cc <nl> ppp b / src / protob / protob . cc <nl> void query_server_t : : handle ( const http_req_t & req , <nl> / / problems with interruption <nl> ql : : datum_t noreply = static_optarg ( " noreply " , query ) ; <nl> bool response_needed = ! ( noreply . has ( ) & & <nl> - noreply - > get_type ( ) = = ql : : datum_t : : type_t : : R_BOOL & & <nl> - noreply - > as_bool ( ) ) ; <nl> + noreply . get_type ( ) = = ql : : datum_t : : type_t : : R_BOOL & & <nl> + noreply . as_bool ( ) ) ; <nl> <nl> if ( ! response_needed ) { <nl> * result = http_res_t ( HTTP_BAD_REQUEST , " application / text " , <nl> mmm a / src / rdb_protocol / artificial_table / artificial_table . cc <nl> ppp b / src / rdb_protocol / artificial_table / artificial_table . cc <nl> counted_t < ql : : datum_stream_t > artificial_table_t : : read_all ( <nl> std : : sort ( keys . begin ( ) , keys . end ( ) , <nl> [ ] ( const ql : : datum_t & a , <nl> const ql : : datum_t & b ) { <nl> - return a - > compare_lt ( reql_version_t : : LATEST , * b ) ; <nl> + return a . compare_lt ( reql_version_t : : LATEST , b ) ; <nl> } ) ; <nl> break ; <nl> case sorting_t : : DESCENDING : <nl> std : : sort ( keys . begin ( ) , keys . end ( ) , <nl> [ ] ( const ql : : datum_t & a , <nl> const ql : : datum_t & b ) { <nl> - return a - > compare_gt ( reql_version_t : : LATEST , * b ) ; <nl> + return a . compare_gt ( reql_version_t : : LATEST , b ) ; <nl> } ) ; <nl> break ; <nl> default : <nl> ql : : datum_t artificial_table_t : : write_batched_insert ( <nl> throttled_pmap ( inserts . size ( ) , [ & ] ( int i ) { <nl> try { <nl> ql : : datum_t insert_row = inserts [ i ] ; <nl> - ql : : datum_t key = insert_row - > get_field ( <nl> + ql : : datum_t key = insert_row . get_field ( <nl> datum_string_t ( primary_key ) , ql : : NOTHROW ) ; <nl> guarantee ( key . has ( ) , " write_batched_insert ( ) shouldn ' t ever be called with " <nl> " documents that lack a primary key . " ) ; <nl> bool artificial_table_t : : checked_read_row ( <nl> } <nl> # ifndef NDEBUG <nl> if ( row_out - > has ( ) ) { <nl> - ql : : datum_t pval2 = ( * row_out ) - > get_field ( <nl> + ql : : datum_t pval2 = ( * row_out ) . get_field ( <nl> datum_string_t ( get_pkey ( ) ) , ql : : NOTHROW ) ; <nl> rassert ( pval2 . 
has ( ) ) ; <nl> rassert ( pval2 = = pval ) ; <nl> void artificial_table_t : : do_single_update ( <nl> if ( ! checked_read_row ( pval , interruptor , & old_row , & error ) ) { <nl> ql : : datum_object_builder_t builder ; <nl> builder . add_error ( error . c_str ( ) ) ; <nl> - * stats_inout = ( * stats_inout ) - > merge ( <nl> + * stats_inout = ( * stats_inout ) . merge ( <nl> std : : move ( builder ) . to_datum ( ) , ql : : stats_merge , env - > limits ( ) , <nl> conditions_inout ) ; <nl> return ; <nl> void artificial_table_t : : do_single_update ( <nl> ql : : datum_t new_row = function ( old_row ) ; <nl> bool was_changed ; <nl> resp = make_row_replacement_stats ( <nl> - datum_string_t ( primary_key ) , store_key_t ( pval - > print_primary ( ) ) , <nl> + datum_string_t ( primary_key ) , store_key_t ( pval . print_primary ( ) ) , <nl> old_row , new_row , return_changes , & was_changed ) ; <nl> if ( was_changed ) { <nl> - if ( new_row - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( new_row . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> new_row . reset ( ) ; <nl> } <nl> if ( ! backend - > write_row ( pval , new_row , interruptor , & error ) ) { <nl> void artificial_table_t : : do_single_update ( <nl> resp = make_row_replacement_error_stats ( <nl> old_row , return_changes , e . what ( ) ) ; <nl> } <nl> - * stats_inout = ( * stats_inout ) - > merge ( <nl> + * stats_inout = ( * stats_inout ) . merge ( <nl> resp , ql : : stats_merge , env - > limits ( ) , conditions_inout ) ; <nl> } <nl> <nl> mmm a / src / rdb_protocol / artificial_table / in_memory . hpp <nl> ppp b / src / rdb_protocol / artificial_table / in_memory . hpp <nl> class in_memory_artificial_table_backend_t : <nl> on_thread_t thread_switcher ( home_thread ( ) ) ; <nl> keys_out - > clear ( ) ; <nl> for ( auto it = data . begin ( ) ; it ! = data . end ( ) ; + + it ) { <nl> - ql : : datum_t key = it - > second - > get_field ( " id " , ql : : NOTHROW ) ; <nl> + ql : : datum_t key = it - > second . get_field ( " id " , ql : : NOTHROW ) ; <nl> guarantee ( key . has ( ) ) ; <nl> keys_out - > push_back ( key ) ; <nl> } <nl> class in_memory_artificial_table_backend_t : <nl> UNUSED std : : string * error_out ) { <nl> random_delay ( interruptor ) ; <nl> on_thread_t thread_switcher ( home_thread ( ) ) ; <nl> - auto it = data . find ( primary_key - > print_primary ( ) ) ; <nl> + auto it = data . find ( primary_key . print_primary ( ) ) ; <nl> if ( it ! = data . end ( ) ) { <nl> * row_out = it - > second ; <nl> } else { <nl> class in_memory_artificial_table_backend_t : <nl> random_delay ( interruptor ) ; <nl> on_thread_t thread_switcher ( home_thread ( ) ) ; <nl> if ( new_value . has ( ) ) { <nl> - data [ primary_key - > print_primary ( ) ] = new_value ; <nl> + data [ primary_key . print_primary ( ) ] = new_value ; <nl> } else { <nl> - data . erase ( primary_key - > print_primary ( ) ) ; <nl> + data . erase ( primary_key . print_primary ( ) ) ; <nl> } <nl> return true ; <nl> } <nl> mmm a / src / rdb_protocol / batching . cc <nl> ppp b / src / rdb_protocol / batching . cc <nl> batchspec_t batchspec_t : : user ( batch_type_t batch_type , <nl> datum_t max_els_d , min_els_d , max_size_d , max_dur_d ; <nl> datum_t first_scaledown_d ; <nl> if ( conf . 
has ( ) ) { <nl> - min_els_d = conf - > get_field ( " min_els " , NOTHROW ) ; <nl> - max_els_d = conf - > get_field ( " max_els " , NOTHROW ) ; <nl> - max_size_d = conf - > get_field ( " max_size " , NOTHROW ) ; <nl> - first_scaledown_d = conf - > get_field ( " first_scaledown " , NOTHROW ) ; <nl> - max_dur_d = conf - > get_field ( " max_dur " , NOTHROW ) ; <nl> + min_els_d = conf . get_field ( " min_els " , NOTHROW ) ; <nl> + max_els_d = conf . get_field ( " max_els " , NOTHROW ) ; <nl> + max_size_d = conf . get_field ( " max_size " , NOTHROW ) ; <nl> + first_scaledown_d = conf . get_field ( " first_scaledown " , NOTHROW ) ; <nl> + max_dur_d = conf . get_field ( " max_dur " , NOTHROW ) ; <nl> } <nl> int64_t max_els = max_els_d . has ( ) <nl> - ? max_els_d - > as_int ( ) <nl> + ? max_els_d . as_int ( ) <nl> : std : : numeric_limits < decltype ( batchspec_t ( ) . max_els ) > : : max ( ) ; <nl> int64_t min_els = min_els_d . has ( ) <nl> - ? min_els_d - > as_int ( ) <nl> + ? min_els_d . as_int ( ) <nl> : std : : min < int64_t > ( max_els , DEFAULT_MIN_ELS ) ; <nl> - int64_t max_size = max_size_d . has ( ) ? max_size_d - > as_int ( ) : DEFAULT_MAX_SIZE ; <nl> + int64_t max_size = max_size_d . has ( ) ? max_size_d . as_int ( ) : DEFAULT_MAX_SIZE ; <nl> int64_t first_sd = first_scaledown_d . has ( ) <nl> - ? first_scaledown_d - > as_int ( ) <nl> + ? first_scaledown_d . as_int ( ) <nl> : DEFAULT_FIRST_SCALEDOWN ; <nl> - int64_t max_dur = max_dur_d . has ( ) ? max_dur_d - > as_int ( ) : DEFAULT_MAX_DURATION ; <nl> + int64_t max_dur = max_dur_d . has ( ) ? max_dur_d . as_int ( ) : DEFAULT_MAX_DURATION ; <nl> / / Protect the user in case they ' re a dork . Normally we would do rfail and <nl> / / trigger exceptions , but due to NOTHROWs above this may not be safe . <nl> min_els = std : : min < int64_t > ( min_els , max_els ) ; <nl> mmm a / src / rdb_protocol / btree . cc <nl> ppp b / src / rdb_protocol / btree . cc <nl> batched_replace_response_t rdb_replace_and_return_superblock ( <nl> / / Otherwise pass the entry with this key to the function . <nl> old_val = get_data ( kv_location . value_as < rdb_value_t > ( ) , <nl> buf_parent_t ( & kv_location . buf ) ) ; <nl> - guarantee ( old_val - > get_field ( primary_key , ql : : NOTHROW ) . has ( ) ) ; <nl> + guarantee ( old_val . get_field ( primary_key , ql : : NOTHROW ) . has ( ) ) ; <nl> } <nl> guarantee ( old_val . has ( ) ) ; <nl> <nl> batched_replace_response_t rdb_replace_and_return_superblock ( <nl> } <nl> <nl> / * Now that the change has passed validation , write it to disk * / <nl> - if ( new_val - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( new_val . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> kv_location_delete ( & kv_location , * info . key , info . btree - > timestamp , <nl> deletion_context , mod_info_out ) ; <nl> } else { <nl> - r_sanity_check ( new_val - > get_field ( primary_key , ql : : NOTHROW ) . has ( ) ) ; <nl> + r_sanity_check ( new_val . get_field ( primary_key , ql : : NOTHROW ) . has ( ) ) ; <nl> ql : : serialization_result_t res = <nl> kv_location_set ( & kv_location , * info . key , new_val , <nl> info . 
btree - > timestamp , deletion_context , <nl> mod_info_out ) ; <nl> switch ( res ) { <nl> case ql : : serialization_result_t : : ARRAY_TOO_BIG : <nl> - rfail_typed_target ( new_val , " Array too large for disk writes " <nl> + rfail_typed_target ( & new_val , " Array too large for disk writes " <nl> " ( limit 100 , 000 elements ) " ) ; <nl> unreachable ( ) ; <nl> case ql : : serialization_result_t : : SUCCESS : <nl> batched_replace_response_t rdb_replace_and_return_superblock ( <nl> } <nl> <nl> / * Report the changes for sindex and change - feed purposes * / <nl> - if ( old_val - > get_type ( ) ! = ql : : datum_t : : R_NULL ) { <nl> + if ( old_val . get_type ( ) ! = ql : : datum_t : : R_NULL ) { <nl> guarantee ( ! mod_info_out - > deleted . second . empty ( ) ) ; <nl> mod_info_out - > deleted . first = old_val ; <nl> } else { <nl> guarantee ( mod_info_out - > deleted . second . empty ( ) ) ; <nl> } <nl> - if ( new_val - > get_type ( ) ! = ql : : datum_t : : R_NULL ) { <nl> + if ( new_val . get_type ( ) ! = ql : : datum_t : : R_NULL ) { <nl> guarantee ( ! mod_info_out - > added . second . empty ( ) ) ; <nl> mod_info_out - > added . first = new_val ; <nl> } else { <nl> void do_a_replace_from_batched_replace ( <nl> ql : : datum_t res = rdb_replace_and_return_superblock ( <nl> info , & one_replace , & deletion_context , superblock_promise , & mod_report . info , <nl> trace ) ; <nl> - * stats_out = ( * stats_out ) - > merge ( res , ql : : stats_merge , limits , conditions ) ; <nl> + * stats_out = ( * stats_out ) . merge ( res , ql : : stats_merge , limits , conditions ) ; <nl> <nl> / / KSI : What is this for ? are we waiting to get in line to call on_mod_report ? <nl> / / I guess so . <nl> void rdb_set ( const store_key_t & key , <nl> mod_info ) ; <nl> switch ( res ) { <nl> case ql : : serialization_result_t : : ARRAY_TOO_BIG : <nl> - rfail_typed_target ( data , " Array too large for disk writes " <nl> + rfail_typed_target ( & data , " Array too large for disk writes " <nl> " ( limit 100 , 000 elements ) " ) ; <nl> unreachable ( ) ; <nl> case ql : : serialization_result_t : : SUCCESS : <nl> THROWS_ONLY ( interrupted_exc_t ) { <nl> ql : : env_t sindex_env ( job . env - > interruptor , sindex - > func_reql_version ) ; <nl> sindex_val = sindex - > func - > call ( & sindex_env , val ) - > as_datum ( ) ; <nl> if ( sindex - > multi = = sindex_multi_bool_t : : MULTI <nl> - & & sindex_val - > get_type ( ) = = ql : : datum_t : : R_ARRAY ) { <nl> + & & sindex_val . get_type ( ) = = ql : : datum_t : : R_ARRAY ) { <nl> boost : : optional < uint64_t > tag = * ql : : datum_t : : extract_tag ( key ) ; <nl> guarantee ( tag ) ; <nl> - sindex_val = sindex_val - > get ( * tag , ql : : NOTHROW ) ; <nl> - guarantee ( sindex_val ) ; <nl> + sindex_val = sindex_val . get ( * tag , ql : : NOTHROW ) ; <nl> + guarantee ( sindex_val . has ( ) ) ; <nl> } <nl> if ( ! sindex - > range . contains ( sindex - > func_reql_version , sindex_val ) ) { <nl> return done_traversing_t : : NO ; <nl> std : : vector < std : : string > expand_geo_key ( <nl> / / Ignore non - geometry objects in geo indexes . <nl> / / TODO ( daniel ) : This needs to be changed once compound geo index <nl> / / support gets added . <nl> - if ( ! key - > is_ptype ( ql : : pseudo : : geometry_string ) ) { <nl> + if ( ! key . is_ptype ( ql : : pseudo : : geometry_string ) ) { <nl> return std : : vector < std : : string > ( ) ; <nl> } <nl> <nl> void compute_keys ( const store_key_t & primary_key , ql : : datum_t doc , <nl> index_info . mapping . 
compile_wire_func ( ) - > call ( & sindex_env , doc ) - > as_datum ( ) ; <nl> <nl> if ( index_info . multi = = sindex_multi_bool_t : : MULTI <nl> - & & index - > get_type ( ) = = ql : : datum_t : : R_ARRAY ) { <nl> - for ( uint64_t i = 0 ; i < index - > arr_size ( ) ; + + i ) { <nl> - const ql : : datum_t & skey = index - > get ( i , ql : : THROW ) ; <nl> + & & index . get_type ( ) = = ql : : datum_t : : R_ARRAY ) { <nl> + for ( uint64_t i = 0 ; i < index . arr_size ( ) ; + + i ) { <nl> + const ql : : datum_t & skey = index . get ( i , ql : : THROW ) ; <nl> if ( index_info . geo = = sindex_geo_bool_t : : GEO ) { <nl> std : : vector < std : : string > geo_keys = expand_geo_key ( reql_version , <nl> skey , <nl> void compute_keys ( const store_key_t & primary_key , ql : : datum_t doc , <nl> keys_out - > push_back ( store_key_t ( * it ) ) ; <nl> } <nl> } else { <nl> - keys_out - > push_back ( store_key_t ( skey - > print_secondary ( reql_version , <nl> - primary_key , <nl> - i ) ) ) ; <nl> + keys_out - > push_back ( store_key_t ( skey . print_secondary ( reql_version , <nl> + primary_key , <nl> + i ) ) ) ; <nl> } <nl> } <nl> } else { <nl> void compute_keys ( const store_key_t & primary_key , ql : : datum_t doc , <nl> keys_out - > push_back ( store_key_t ( * it ) ) ; <nl> } <nl> } else { <nl> - keys_out - > push_back ( store_key_t ( index - > print_secondary ( reql_version , <nl> - primary_key , <nl> - boost : : none ) ) ) ; <nl> + keys_out - > push_back ( store_key_t ( index . print_secondary ( reql_version , <nl> + primary_key , <nl> + boost : : none ) ) ) ; <nl> } <nl> } <nl> } <nl> void rdb_update_single_sindex ( <nl> <nl> superblock_t * super_block = sindex - > super_block . get ( ) ; <nl> <nl> - if ( modification - > info . deleted . first ) { <nl> + if ( modification - > info . deleted . first . has ( ) ) { <nl> guarantee ( ! modification - > info . deleted . second . empty ( ) ) ; <nl> try { <nl> ql : : datum_t deleted = modification - > info . deleted . first ; <nl> void rdb_update_single_sindex ( <nl> / / This is so we don ' t race against any sindex erase about who is faster <nl> / / ( we with inserting new entries , or the erase with removing them ) . <nl> const bool sindex_is_being_deleted = sindex - > sindex . being_deleted ; <nl> - if ( ! sindex_is_being_deleted & & modification - > info . added . first ) { <nl> + if ( ! sindex_is_being_deleted & & modification - > info . added . first . has ( ) ) { <nl> try { <nl> ql : : datum_t added = modification - > info . added . first ; <nl> <nl> void rdb_update_sindexes ( const store_t : : sindex_access_vector_t & sindexes , <nl> <nl> / * All of the sindex have been updated now it ' s time to actually clear the <nl> * deleted blob if it exists . * / <nl> - if ( modification - > info . deleted . first ) { <nl> + if ( modification - > info . deleted . first . has ( ) ) { <nl> deletion_context - > post_deleter ( ) - > delete_value ( buf_parent_t ( txn ) , <nl> modification - > info . deleted . second . data ( ) ) ; <nl> } <nl> mmm a / src / rdb_protocol / changefeed . cc <nl> ppp b / src / rdb_protocol / changefeed . cc <nl> class point_sub_t : public subscription_t { <nl> nif - > read ( <nl> read_t ( <nl> changefeed_point_stamp_t ( <nl> - * addr , store_key_t ( key - > print_primary ( ) ) ) , <nl> + * addr , store_key_t ( key . 
print_primary ( ) ) ) , <nl> profile_bool_t : : DONT_PROFILE ) , <nl> & read_resp , <nl> order_token_t : : ignore , <nl> class msg_visitor_t : public boost : : static_visitor < void > { <nl> d , default_limits ) ) ; <nl> auto val = change . new_val . has ( ) ? change . new_val : change . old_val ; <nl> r_sanity_check ( val . has ( ) ) ; <nl> - auto pkey_val = val - > get_field ( datum_string_t ( feed - > pkey ) , NOTHROW ) ; <nl> + auto pkey_val = val . get_field ( datum_string_t ( feed - > pkey ) , NOTHROW ) ; <nl> r_sanity_check ( pkey_val . has ( ) ) ; <nl> feed - > on_point_sub ( <nl> pkey_val , <nl> mmm a / src / rdb_protocol / datum . cc <nl> ppp b / src / rdb_protocol / datum . cc <nl> void datum_t : : array_to_str_key ( std : : string * str_out ) const { <nl> datum_t item = get ( i , NOTHROW ) ; <nl> r_sanity_check ( item . has ( ) ) ; <nl> <nl> - switch ( item - > get_type ( ) ) { <nl> - case R_NUM : item - > num_to_str_key ( str_out ) ; break ; <nl> - case R_STR : item - > str_to_str_key ( str_out ) ; break ; <nl> - case R_BINARY : item - > binary_to_str_key ( str_out ) ; break ; <nl> - case R_BOOL : item - > bool_to_str_key ( str_out ) ; break ; <nl> - case R_ARRAY : item - > array_to_str_key ( str_out ) ; break ; <nl> + switch ( item . get_type ( ) ) { <nl> + case R_NUM : item . num_to_str_key ( str_out ) ; break ; <nl> + case R_STR : item . str_to_str_key ( str_out ) ; break ; <nl> + case R_BINARY : item . binary_to_str_key ( str_out ) ; break ; <nl> + case R_BOOL : item . bool_to_str_key ( str_out ) ; break ; <nl> + case R_ARRAY : item . array_to_str_key ( str_out ) ; break ; <nl> case R_OBJECT : <nl> - if ( item - > is_ptype ( ) ) { <nl> - item - > pt_to_str_key ( str_out ) ; <nl> + if ( item . is_ptype ( ) ) { <nl> + item . pt_to_str_key ( str_out ) ; <nl> break ; <nl> } <nl> / / fallthru <nl> case R_NULL : <nl> - item - > type_error ( <nl> + item . type_error ( <nl> strprintf ( " Array keys can only contain numbers , strings , bools , " <nl> " pseudotypes , or arrays ( got % s of type % s ) . " , <nl> - item - > print ( ) . c_str ( ) , item - > get_type_name ( ) . c_str ( ) ) ) ; <nl> + item . print ( ) . c_str ( ) , item . get_type_name ( ) . c_str ( ) ) ) ; <nl> break ; <nl> case UNINITIALIZED : / / fallthru <nl> default : <nl> datum_t datum_t : : drop_literals ( bool * encountered_literal_out ) const { <nl> datum_t val = get_field ( pseudo : : value_key , NOTHROW ) ; <nl> if ( val . has ( ) ) { <nl> bool encountered_literal ; <nl> - val = val - > drop_literals ( & encountered_literal ) ; <nl> + val = val . drop_literals ( & encountered_literal ) ; <nl> / / Nested literals should have been caught on the higher QL levels . <nl> r_sanity_check ( ! encountered_literal ) ; <nl> } <nl> void datum_t : : rcheck_valid_replace ( datum_t old_val , <nl> pkey . to_std ( ) . c_str ( ) , print ( ) . c_str ( ) ) ) ; <nl> if ( old_val . has ( ) ) { <nl> datum_t old_pk = orig_key ; <nl> - if ( old_val - > get_type ( ) ! = R_NULL ) { <nl> - old_pk = old_val - > get_field ( pkey , NOTHROW ) ; <nl> + if ( old_val . get_type ( ) ! = R_NULL ) { <nl> + old_pk = old_val . get_field ( pkey , NOTHROW ) ; <nl> r_sanity_check ( old_pk . has ( ) ) ; <nl> } <nl> if ( old_pk . has ( ) ) { <nl> - rcheck ( * old_pk = = * pk , base_exc_t : : GENERIC , <nl> + rcheck ( old_pk = = pk , base_exc_t : : GENERIC , <nl> strprintf ( " Primary key ` % s ` cannot be changed ( ` % s ` - > ` % s ` ) . " , <nl> - pkey . to_std ( ) . c_str ( ) , old_val - > print ( ) . c_str ( ) , <nl> + pkey . to_std ( ) . 
c_str ( ) , old_val . print ( ) . c_str ( ) , <nl> print ( ) . c_str ( ) ) ) ; <nl> } <nl> } else { <nl> cJSON * datum_t : : as_json_raw ( ) const { <nl> scoped_cJSON_t arr ( cJSON_CreateArray ( ) ) ; <nl> const size_t sz = arr_size ( ) ; <nl> for ( size_t i = 0 ; i < sz ; + + i ) { <nl> - arr . AddItemToArray ( unchecked_get ( i ) - > as_json_raw ( ) ) ; <nl> + arr . AddItemToArray ( unchecked_get ( i ) . as_json_raw ( ) ) ; <nl> } <nl> return arr . release ( ) ; <nl> } break ; <nl> datum_t datum_t : : merge ( const datum_t & rhs ) const { <nl> / / Since nested literal keywords are forbidden , this should be a no - op <nl> / / if ` is_literal = = true ` . <nl> bool encountered_literal ; <nl> - val = val - > drop_literals ( & encountered_literal ) ; <nl> + val = val . drop_literals ( & encountered_literal ) ; <nl> r_sanity_check ( ! encountered_literal | | ! is_literal ) ; <nl> } <nl> if ( val . has ( ) ) { <nl> datum_t stats_merge ( UNUSED const datum_string_t & key , <nl> datum_t r , <nl> const configured_limits_t & limits , <nl> std : : set < std : : string > * conditions ) { <nl> - if ( l - > get_type ( ) = = datum_t : : R_NUM & & r - > get_type ( ) = = datum_t : : R_NUM ) { <nl> - return datum_t ( l - > as_num ( ) + r - > as_num ( ) ) ; <nl> - } else if ( l - > get_type ( ) = = datum_t : : R_ARRAY & & r - > get_type ( ) = = datum_t : : R_ARRAY ) { <nl> - const size_t l_sz = l - > arr_size ( ) ; <nl> - const size_t r_sz = r - > arr_size ( ) ; <nl> + if ( l . get_type ( ) = = datum_t : : R_NUM & & r . get_type ( ) = = datum_t : : R_NUM ) { <nl> + return datum_t ( l . as_num ( ) + r . as_num ( ) ) ; <nl> + } else if ( l . get_type ( ) = = datum_t : : R_ARRAY & & r . get_type ( ) = = datum_t : : R_ARRAY ) { <nl> + const size_t l_sz = l . arr_size ( ) ; <nl> + const size_t r_sz = r . arr_size ( ) ; <nl> if ( l_sz + r_sz > limits . array_size_limit ( ) ) { <nl> conditions - > insert ( strprintf ( " Too many changes , array truncated to % ld . " , limits . array_size_limit ( ) ) ) ; <nl> datum_array_builder_t arr ( limits ) ; <nl> size_t so_far = 0 ; <nl> for ( size_t i = 0 ; i < l_sz & & so_far < limits . array_size_limit ( ) ; + + i , + + so_far ) { <nl> - arr . add ( l - > get ( i ) ) ; <nl> + arr . add ( l . get ( i ) ) ; <nl> } <nl> for ( size_t i = 0 ; i < r_sz & & so_far < limits . array_size_limit ( ) ; + + i , + + so_far ) { <nl> - arr . add ( r - > get ( i ) ) ; <nl> + arr . add ( r . get ( i ) ) ; <nl> } <nl> return std : : move ( arr ) . to_datum ( ) ; <nl> } else { <nl> datum_array_builder_t arr ( limits ) ; <nl> for ( size_t i = 0 ; i < l_sz ; + + i ) { <nl> - arr . add ( l - > get ( i ) ) ; <nl> + arr . add ( l . get ( i ) ) ; <nl> } <nl> for ( size_t i = 0 ; i < r_sz ; + + i ) { <nl> - arr . add ( r - > get ( i ) ) ; <nl> + arr . add ( r . get ( i ) ) ; <nl> } <nl> return std : : move ( arr ) . to_datum ( ) ; <nl> } <nl> datum_t stats_merge ( UNUSED const datum_string_t & key , <nl> <nl> / / Merging a string is left - preferential , which is just a no - op . <nl> rcheck_datum ( <nl> - l - > get_type ( ) = = datum_t : : R_STR & & r - > get_type ( ) = = datum_t : : R_STR , <nl> + l . get_type ( ) = = datum_t : : R_STR & & r . get_type ( ) = = datum_t : : R_STR , <nl> base_exc_t : : GENERIC , <nl> strprintf ( " Cannot merge statistics ` % s ` ( type % s ) and ` % s ` ( type % s ) . " , <nl> - l - > trunc_print ( ) . c_str ( ) , l - > get_type_name ( ) . c_str ( ) , <nl> - r - > trunc_print ( ) . c_str ( ) , r - > get_type_name ( ) . c_str ( ) ) ) ; <nl> + l . trunc_print ( ) . 
c_str ( ) , l . get_type_name ( ) . c_str ( ) , <nl> + r . trunc_print ( ) . c_str ( ) , r . get_type_name ( ) . c_str ( ) ) ) ; <nl> return l ; <nl> } <nl> <nl> void datum_object_builder_t : : add_error ( const char * msg ) { <nl> / / Insert or update the " errors " entry . <nl> { <nl> datum_t * errors_entry = & map [ errors_field ] ; <nl> - double ecount = ( errors_entry - > has ( ) ? ( * errors_entry ) - > as_num ( ) : 0 ) + 1 ; <nl> + double ecount = ( errors_entry - > has ( ) ? ( * errors_entry ) . as_num ( ) : 0 ) + 1 ; <nl> * errors_entry = datum_t ( ecount ) ; <nl> } <nl> <nl> mmm a / src / rdb_protocol / datum . hpp <nl> ppp b / src / rdb_protocol / datum . hpp <nl> class datum_t { <nl> <nl> ~ datum_t ( ) ; <nl> <nl> - / / Interface to mimic counted_t , to ease transition from counted_t < const datum_t > <nl> - / / TODO : Phase these out . <nl> + / / has ( ) checks whether a datum is uninitialized . reset ( ) makes any datum <nl> + / / uninitialized . <nl> bool has ( ) const ; <nl> void reset ( ) ; <nl> - datum_t * operator - > ( ) { return this ; } <nl> - const datum_t * operator - > ( ) const { return this ; } <nl> - datum_t & operator * ( ) { return * this ; } <nl> - const datum_t & operator * ( ) const { return * this ; } <nl> - operator bool ( ) const { return has ( ) ; } <nl> <nl> void write_to_protobuf ( Datum * out , use_json_t use_json ) const ; <nl> <nl> mmm a / src / rdb_protocol / datum_stream . cc <nl> ppp b / src / rdb_protocol / datum_stream . cc <nl> std : : vector < datum_t > rget_response_reader_t : : next_batch ( env_t * env , <nl> for ( ; items_index < items . size ( ) ; + + items_index ) { <nl> if ( sindex . has ( ) ) { <nl> r_sanity_check ( items [ items_index ] . sindex_key . has ( ) ) ; <nl> - if ( * items [ items_index ] . sindex_key ! = * sindex ) { <nl> + if ( items [ items_index ] . sindex_key ! = sindex ) { <nl> break ; / / batch is done <nl> } <nl> } else { <nl> std : : vector < datum_t > rget_response_reader_t : : next_batch ( env_t * env , <nl> / / This is safe because you can ' t have duplicate <nl> / / primary keys , so they will never exceed the <nl> / / array limit . <nl> - sindex - > trunc_print ( ) . c_str ( ) ) ) ; <nl> + sindex . trunc_print ( ) . c_str ( ) ) ) ; <nl> } <nl> if ( items_index > = items . size ( ) ) { <nl> / / If we consumed the whole batch without finding a new sindex , <nl> bool rget_reader_t : : load_items ( env_t * env , const batchspec_t & batchspec ) { <nl> " Truncated key : \ n % s " , <nl> env - > limits ( ) . array_size_limit ( ) , <nl> readgen - > sindex_name ( ) . c_str ( ) , <nl> - items [ items . size ( ) - 1 ] . sindex_key - > trunc_print ( ) . c_str ( ) , <nl> + items [ items . size ( ) - 1 ] . sindex_key . trunc_print ( ) . c_str ( ) , <nl> key_to_debug_str ( items [ items . size ( ) - 1 ] . key ) . c_str ( ) ) ) ; <nl> <nl> items . reserve ( items . size ( ) + new_items . size ( ) ) ; <nl> class sindex_compare_t { <nl> / / v1 . 13 itself . For that , we use the last_key value in the <nl> / / rget_read_response_t . <nl> return reversed ( sorting ) <nl> - ? l . sindex_key - > compare_gt ( reql_version_t : : LATEST , * r . sindex_key ) <nl> - : l . sindex_key - > compare_lt ( reql_version_t : : LATEST , * r . sindex_key ) ; <nl> + ? l . sindex_key . compare_gt ( reql_version_t : : LATEST , r . sindex_key ) <nl> + : l . sindex_key . compare_lt ( reql_version_t : : LATEST , r . 
sindex_key ) ; <nl> } <nl> private : <nl> sorting_t sorting ; <nl> datum_t eager_datum_stream_t : : as_array ( env_t * env ) { <nl> batchspec_t batchspec = batchspec_t : : user ( batch_type_t : : TERMINAL , env ) ; <nl> { <nl> profile : : sampler_t sampler ( " Evaluating stream eagerly . " , env - > trace ) ; <nl> - while ( datum_t d = next ( env , batchspec ) ) { <nl> + datum_t d ; <nl> + while ( d = next ( env , batchspec ) , d . has ( ) ) { <nl> arr . add ( d ) ; <nl> sampler . new_sample ( ) ; <nl> } <nl> datum_t array_datum_stream_t : : next ( env_t * env , const batchspec_t & bs ) { <nl> return ops_to_do ( ) ? datum_stream_t : : next ( env , bs ) : next_arr_el ( ) ; <nl> } <nl> datum_t array_datum_stream_t : : next_arr_el ( ) { <nl> - return index < arr - > arr_size ( ) ? arr - > get ( index + + ) : datum_t ( ) ; <nl> + return index < arr . arr_size ( ) ? arr . get ( index + + ) : datum_t ( ) ; <nl> } <nl> <nl> bool array_datum_stream_t : : is_exhausted ( ) const { <nl> - return index > = arr - > arr_size ( ) ; <nl> + return index > = arr . arr_size ( ) ; <nl> } <nl> bool array_datum_stream_t : : is_cfeed ( ) const { <nl> return false ; <nl> array_datum_stream_t : : next_raw_batch ( env_t * env , const batchspec_t & batchspec ) { <nl> batcher_t batcher = batchspec . to_batcher ( ) ; <nl> <nl> profile : : sampler_t sampler ( " Fetching array elements . " , env - > trace ) ; <nl> - while ( const datum_t d = next_arr_el ( ) ) { <nl> + datum_t d ; <nl> + while ( d = next_arr_el ( ) , d . has ( ) ) { <nl> batcher . note_el ( d ) ; <nl> v . push_back ( std : : move ( d ) ) ; <nl> if ( batcher . should_send_batch ( ) ) { <nl> ordered_distinct_datum_stream_t : : next_raw_batch ( env_t * env , const batchspec_t & b <nl> std : : vector < datum_t > v = source - > next_batch ( env , bs ) ; <nl> if ( v . size ( ) = = 0 ) break ; <nl> for ( auto & & el : v ) { <nl> - if ( ! last_val . has ( ) | | * last_val ! = * el ) { <nl> + if ( ! last_val . has ( ) | | last_val ! = el ) { <nl> last_val = el ; <nl> ret . push_back ( std : : move ( el ) ) ; <nl> } <nl> datum_t union_datum_stream_t : : as_array ( env_t * env ) { <nl> batchspec_t batchspec = batchspec_t : : user ( batch_type_t : : TERMINAL , env ) ; <nl> { <nl> profile : : sampler_t sampler ( " Evaluating stream eagerly . " , env - > trace ) ; <nl> - while ( const datum_t d = next ( env , batchspec ) ) { <nl> + datum_t d ; <nl> + while ( d = next ( env , batchspec ) , d . has ( ) ) { <nl> arr . add ( d ) ; <nl> sampler . new_sample ( ) ; <nl> } <nl> mmm a / src / rdb_protocol / env . cc <nl> ppp b / src / rdb_protocol / env . cc <nl> env_t : : env_t ( signal_t * _interruptor , reql_version_t reql_version ) <nl> profile_bool_t profile_bool_optarg ( const protob_t < Query > & query ) { <nl> rassert ( query . has ( ) ) ; <nl> datum_t profile_arg = static_optarg ( " profile " , query ) ; <nl> - if ( profile_arg . has ( ) & & profile_arg - > get_type ( ) = = datum_t : : type_t : : R_BOOL & & <nl> - profile_arg - > as_bool ( ) ) { <nl> + if ( profile_arg . has ( ) & & profile_arg . get_type ( ) = = datum_t : : type_t : : R_BOOL & & <nl> + profile_arg . as_bool ( ) ) { <nl> return profile_bool_t : : PROFILE ; <nl> } else { <nl> return profile_bool_t : : DONT_PROFILE ; <nl> mmm a / src / rdb_protocol / func . cc <nl> ppp b / src / rdb_protocol / func . cc <nl> bool func_term_t : : is_deterministic ( ) const { <nl> * the object which we check to make sure matches the predicate . 
* / <nl> bool filter_match ( datum_t predicate , datum_t value , <nl> const rcheckable_t * parent ) { <nl> - if ( predicate - > is_ptype ( pseudo : : literal_string ) ) { <nl> - return * predicate - > get_field ( pseudo : : value_key ) = = * value ; <nl> + if ( predicate . is_ptype ( pseudo : : literal_string ) ) { <nl> + return predicate . get_field ( pseudo : : value_key ) = = value ; <nl> } else { <nl> for ( size_t i = 0 ; i < predicate . obj_size ( ) ; + + i ) { <nl> auto pair = predicate . get_pair ( i ) ; <nl> r_sanity_check ( pair . second . has ( ) ) ; <nl> - datum_t elt = value - > get_field ( pair . first , NOTHROW ) ; <nl> + datum_t elt = value . get_field ( pair . first , NOTHROW ) ; <nl> if ( ! elt . has ( ) ) { <nl> rfail_target ( parent , base_exc_t : : NON_EXISTENCE , <nl> " No attribute ` % s ` in object . " , pair . first . to_std ( ) . c_str ( ) ) ; <nl> } else if ( pair . second . get_type ( ) = = datum_t : : R_OBJECT & & <nl> - elt - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + elt . get_type ( ) = = datum_t : : R_OBJECT ) { <nl> if ( ! filter_match ( pair . second , elt , parent ) ) { return false ; } <nl> } else if ( elt ! = pair . second ) { <nl> return false ; <nl> bool filter_match ( datum_t predicate , datum_t value , <nl> <nl> bool reql_func_t : : filter_helper ( env_t * env , datum_t arg ) const { <nl> datum_t d = call ( env , make_vector ( arg ) , NO_FLAGS ) - > as_datum ( ) ; <nl> - if ( d - > get_type ( ) = = datum_t : : R_OBJECT & & <nl> + if ( d . get_type ( ) = = datum_t : : R_OBJECT & & <nl> ( body - > get_src ( ) - > type ( ) = = Term : : MAKE_OBJ | | <nl> body - > get_src ( ) - > type ( ) = = Term : : DATUM ) ) { <nl> return filter_match ( d , arg , this ) ; <nl> } else { <nl> - return d - > as_bool ( ) ; <nl> + return d . as_bool ( ) ; <nl> } <nl> } <nl> <nl> std : : string js_func_t : : print_source ( ) const { <nl> <nl> bool js_func_t : : filter_helper ( env_t * env , datum_t arg ) const { <nl> datum_t d = call ( env , make_vector ( arg ) , NO_FLAGS ) - > as_datum ( ) ; <nl> - return d - > as_bool ( ) ; <nl> + return d . as_bool ( ) ; <nl> } <nl> <nl> bool func_t : : filter_call ( env_t * env , datum_t arg , counted_t < const func_t > default_filter_val ) const { <nl> counted_t < const func_t > new_eq_comparison_func ( datum_t obj , <nl> <nl> counted_t < const func_t > new_page_func ( datum_t method , <nl> const protob_t < const Backtrace > & bt_src ) { <nl> - if ( method - > get_type ( ) ! = datum_t : : R_NULL ) { <nl> - std : : string name = method - > as_str ( ) . to_std ( ) ; <nl> + if ( method . get_type ( ) ! = datum_t : : R_NULL ) { <nl> + std : : string name = method . as_str ( ) . to_std ( ) ; <nl> if ( name = = " link - next " ) { <nl> pb : : dummy_var_t info = pb : : dummy_var_t : : FUNC_PAGE ; <nl> protob_t < Term > twrap = <nl> mmm a / src / rdb_protocol / geo / geojson . cc <nl> ppp b / src / rdb_protocol / geo / geojson . cc <nl> lat_lon_point_t position_to_lat_lon_point ( const datum_t & position ) { <nl> <nl> / / GeoJSON positions are in order longitude , latitude , altitude <nl> double longitude , latitude ; <nl> - longitude = position . get ( 0 ) - > as_num ( ) ; <nl> - latitude = position . get ( 1 ) - > as_num ( ) ; <nl> + longitude = position . get ( 0 ) . as_num ( ) ; <nl> + latitude = position . get ( 1 ) . 
as_num ( ) ; <nl> <nl> return lat_lon_point_t ( latitude , longitude ) ; <nl> } <nl> <nl> lat_lon_point_t extract_lat_lon_point ( const datum_t & geojson ) { <nl> - if ( geojson - > get_field ( " type " ) - > as_str ( ) ! = " Point " ) { <nl> + if ( geojson . get_field ( " type " ) . as_str ( ) ! = " Point " ) { <nl> throw geo_exception_t ( <nl> strprintf ( " Expected geometry of type ` Point ` but found ` % s ` . " , <nl> - geojson - > get_field ( " type " ) - > as_str ( ) . to_std ( ) . c_str ( ) ) ) ; <nl> + geojson . get_field ( " type " ) . as_str ( ) . to_std ( ) . c_str ( ) ) ) ; <nl> } <nl> <nl> - const datum_t & coordinates = geojson - > get_field ( " coordinates " ) ; <nl> + const datum_t & coordinates = geojson . get_field ( " coordinates " ) ; <nl> <nl> return position_to_lat_lon_point ( coordinates ) ; <nl> } <nl> <nl> lat_lon_line_t extract_lat_lon_line ( const ql : : datum_t & geojson ) { <nl> - if ( geojson - > get_field ( " type " ) - > as_str ( ) ! = " LineString " ) { <nl> + if ( geojson . get_field ( " type " ) . as_str ( ) ! = " LineString " ) { <nl> throw geo_exception_t ( <nl> strprintf ( " Expected geometry of type ` LineString ` but found ` % s ` . " , <nl> - geojson - > get_field ( " type " ) - > as_str ( ) . to_std ( ) . c_str ( ) ) ) ; <nl> + geojson . get_field ( " type " ) . as_str ( ) . to_std ( ) . c_str ( ) ) ) ; <nl> } <nl> <nl> - const datum_t & coordinates = geojson - > get_field ( " coordinates " ) ; <nl> + const datum_t & coordinates = geojson . get_field ( " coordinates " ) ; <nl> lat_lon_line_t result ; <nl> result . reserve ( coordinates . arr_size ( ) ) ; <nl> for ( size_t i = 0 ; i < coordinates . arr_size ( ) ; + + i ) { <nl> lat_lon_line_t extract_lat_lon_line ( const ql : : datum_t & geojson ) { <nl> } <nl> <nl> lat_lon_line_t extract_lat_lon_shell ( const ql : : datum_t & geojson ) { <nl> - if ( geojson - > get_field ( " type " ) - > as_str ( ) ! = " Polygon " ) { <nl> + if ( geojson . get_field ( " type " ) . as_str ( ) ! = " Polygon " ) { <nl> throw geo_exception_t ( <nl> strprintf ( " Expected geometry of type ` Polygon ` but found ` % s ` . " , <nl> - geojson - > get_field ( " type " ) - > as_str ( ) . to_std ( ) . c_str ( ) ) ) ; <nl> + geojson . get_field ( " type " ) . as_str ( ) . to_std ( ) . c_str ( ) ) ) ; <nl> } <nl> <nl> - const datum_t & coordinates = geojson - > get_field ( " coordinates " ) ; <nl> + const datum_t & coordinates = geojson . get_field ( " coordinates " ) ; <nl> if ( coordinates . arr_size ( ) < 1 ) { <nl> throw geo_exception_t ( " The polygon is empty . It must have at least " <nl> " an outer shell . " ) ; <nl> scoped_ptr_t < S2Polygon > coordinates_to_s2polygon ( const datum_t & coords ) { <nl> <nl> void ensure_no_crs ( const ql : : datum_t & geojson ) { <nl> const ql : : datum_t & crs_field = <nl> - geojson - > get_field ( " crs " , ql : : throw_bool_t : : NOTHROW ) ; <nl> + geojson . get_field ( " crs " , ql : : throw_bool_t : : NOTHROW ) ; <nl> if ( crs_field . has ( ) ) { <nl> - if ( crs_field - > get_type ( ) ! = ql : : datum_t : : R_NULL ) { <nl> + if ( crs_field . get_type ( ) ! = ql : : datum_t : : R_NULL ) { <nl> throw geo_exception_t ( " Non - default coordinate reference systems " <nl> " are not supported in GeoJSON objects . 
" <nl> " Make sure the ` crs ` field of the geometry is " <nl> void ensure_no_crs ( const ql : : datum_t & geojson ) { <nl> } <nl> <nl> scoped_ptr_t < S2Point > to_s2point ( const ql : : datum_t & geojson ) { <nl> - const datum_string_t & type = geojson - > get_field ( " type " ) - > as_str ( ) ; <nl> - datum_t coordinates = geojson - > get_field ( " coordinates " ) ; <nl> + const datum_string_t & type = geojson . get_field ( " type " ) . as_str ( ) ; <nl> + datum_t coordinates = geojson . get_field ( " coordinates " ) ; <nl> if ( type ! = " Point " ) { <nl> throw geo_exception_t ( <nl> strprintf ( " Expected geometry of type ` Point ` but found ` % s ` . " , <nl> scoped_ptr_t < S2Point > to_s2point ( const ql : : datum_t & geojson ) { <nl> } <nl> <nl> scoped_ptr_t < S2Polyline > to_s2polyline ( const ql : : datum_t & geojson ) { <nl> - const datum_string_t & type = geojson - > get_field ( " type " ) - > as_str ( ) ; <nl> - datum_t coordinates = geojson - > get_field ( " coordinates " ) ; <nl> + const datum_string_t & type = geojson . get_field ( " type " ) . as_str ( ) ; <nl> + datum_t coordinates = geojson . get_field ( " coordinates " ) ; <nl> if ( type ! = " LineString " ) { <nl> throw geo_exception_t ( <nl> strprintf ( " Expected geometry of type ` LineString ` but found ` % s ` . " , <nl> scoped_ptr_t < S2Polyline > to_s2polyline ( const ql : : datum_t & geojson ) { <nl> } <nl> <nl> scoped_ptr_t < S2Polygon > to_s2polygon ( const ql : : datum_t & geojson ) { <nl> - const datum_string_t & type = geojson - > get_field ( " type " ) - > as_str ( ) ; <nl> - datum_t coordinates = geojson - > get_field ( " coordinates " ) ; <nl> + const datum_string_t & type = geojson . get_field ( " type " ) . as_str ( ) ; <nl> + datum_t coordinates = geojson . get_field ( " coordinates " ) ; <nl> if ( type ! = " Polygon " ) { <nl> throw geo_exception_t ( <nl> strprintf ( " Expected geometry of type ` Polygon ` but found ` % s ` . " , <nl> mmm a / src / rdb_protocol / geo / geojson . hpp <nl> ppp b / src / rdb_protocol / geo / geojson . hpp <nl> template < class return_t > <nl> return_t visit_geojson ( <nl> s2_geo_visitor_t < return_t > * visitor , <nl> const ql : : datum_t & geojson ) { <nl> - const datum_string_t & type = geojson - > get_field ( " type " ) - > as_str ( ) ; <nl> - ql : : datum_t coordinates = geojson - > get_field ( " coordinates " ) ; <nl> + const datum_string_t & type = geojson . get_field ( " type " ) . as_str ( ) ; <nl> + ql : : datum_t coordinates = geojson . get_field ( " coordinates " ) ; <nl> <nl> if ( type = = " Point " ) { <nl> scoped_ptr_t < geo : : S2Point > pt = coordinates_to_s2point ( coordinates ) ; <nl> mmm a / src / rdb_protocol / geo / indexing . cc <nl> ppp b / src / rdb_protocol / geo / indexing . cc <nl> std : : vector < std : : string > compute_index_grid_keys ( <nl> const ql : : datum_t & key , int goal_cells ) { <nl> rassert ( key . has ( ) ) ; <nl> <nl> - if ( ! key - > is_ptype ( ql : : pseudo : : geometry_string ) ) { <nl> - throw geo_exception_t ( " Expected geometry but found " + key - > get_type_name ( ) + " . " ) ; <nl> + if ( ! key . is_ptype ( ql : : pseudo : : geometry_string ) ) { <nl> + throw geo_exception_t ( " Expected geometry but found " + key . get_type_name ( ) + " . " ) ; <nl> } <nl> if ( goal_cells < = 0 ) { <nl> throw geo_exception_t ( " goal_cells must be positive ( and should be > = 4 ) . " ) ; <nl> mmm a / src / rdb_protocol / geo_traversal . cc <nl> ppp b / src / rdb_protocol / geo_traversal . 
cc <nl> done_traversing_t geo_intersecting_cb_t : : on_candidate ( <nl> ql : : datum_t sindex_val = <nl> sindex . func - > call ( & sindex_env , val ) - > as_datum ( ) ; <nl> if ( sindex . multi = = sindex_multi_bool_t : : MULTI <nl> - & & sindex_val - > get_type ( ) = = ql : : datum_t : : R_ARRAY ) { <nl> + & & sindex_val . get_type ( ) = = ql : : datum_t : : R_ARRAY ) { <nl> boost : : optional < uint64_t > tag = * ql : : datum_t : : extract_tag ( store_key ) ; <nl> guarantee ( tag ) ; <nl> - sindex_val = sindex_val - > get ( * tag , ql : : NOTHROW ) ; <nl> + sindex_val = sindex_val . get ( * tag , ql : : NOTHROW ) ; <nl> guarantee ( sindex_val . has ( ) ) ; <nl> } <nl> / / TODO ( daniel ) : This is a little inefficient because we re - parse <nl> done_traversing_t geo_intersecting_cb_t : : on_candidate ( <nl> / / This is relevant only for polygons and lines , since those can be <nl> / / encountered multiple times in the index . <nl> if ( already_processed . size ( ) < MAX_PROCESSED_SET_SIZE <nl> - & & sindex_val - > get_field ( " type " ) - > as_str ( ) ! = " Point " ) { <nl> + & & sindex_val . get_field ( " type " ) . as_str ( ) ! = " Point " ) { <nl> already_processed . insert ( primary_key ) ; <nl> } <nl> return done_traversing_t : : NO ; <nl> mmm a / src / rdb_protocol / op . cc <nl> ppp b / src / rdb_protocol / op . cc <nl> argvec_t arg_terms_t : : start_eval ( scope_env_t * env , eval_flags_t flags ) const { <nl> if ( ( * it ) - > get_src ( ) - > type ( ) = = Term : : ARGS ) { <nl> counted_t < val_t > v = ( * it ) - > eval ( env , new_flags ) ; <nl> datum_t d = v - > as_datum ( ) ; <nl> - for ( size_t i = 0 ; i < d - > arr_size ( ) ; + + i ) { <nl> - args . push_back ( make_counted < faux_term_t > ( src , d - > get ( i ) ) ) ; <nl> + for ( size_t i = 0 ; i < d . arr_size ( ) ; + + i ) { <nl> + args . push_back ( make_counted < faux_term_t > ( src , d . get ( i ) ) ) ; <nl> } <nl> } else { <nl> args . push_back ( * it ) ; <nl> mmm a / src / rdb_protocol / pathspec . cc <nl> ppp b / src / rdb_protocol / pathspec . cc <nl> pathspec_t : : pathspec_t ( const std : : map < datum_string_t , pathspec_t > & _map , <nl> pathspec_t : : pathspec_t ( datum_t datum , const term_t * _creator ) <nl> : creator ( _creator ) <nl> { <nl> - if ( datum - > get_type ( ) = = datum_t : : R_STR ) { <nl> + if ( datum . get_type ( ) = = datum_t : : R_STR ) { <nl> type = STR ; <nl> - str = new datum_string_t ( datum - > as_str ( ) ) ; <nl> - } else if ( datum - > get_type ( ) = = datum_t : : R_ARRAY ) { <nl> + str = new datum_string_t ( datum . as_str ( ) ) ; <nl> + } else if ( datum . get_type ( ) = = datum_t : : R_ARRAY ) { <nl> type = VEC ; <nl> vec = new std : : vector < pathspec_t > ; <nl> - for ( size_t i = 0 ; i < datum - > arr_size ( ) ; + + i ) { <nl> - vec - > push_back ( pathspec_t ( datum - > get ( i ) , creator ) ) ; <nl> + for ( size_t i = 0 ; i < datum . arr_size ( ) ; + + i ) { <nl> + vec - > push_back ( pathspec_t ( datum . get ( i ) , creator ) ) ; <nl> } <nl> - } else if ( datum - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + } else if ( datum . 
get_type ( ) = = datum_t : : R_OBJECT ) { <nl> scoped_ptr_t < std : : vector < pathspec_t > > local_vec ( new std : : vector < pathspec_t > ) ; <nl> scoped_ptr_t < std : : map < datum_string_t , pathspec_t > > <nl> local_map ( new std : : map < datum_string_t , pathspec_t > ) ; <nl> pathspec_t : : pathspec_t ( datum_t datum , const term_t * _creator ) <nl> } <nl> } else { <nl> rfail_target ( creator , base_exc_t : : GENERIC , " Invalid path argument ` % s ` . " , <nl> - datum - > print ( ) . c_str ( ) ) ; <nl> + datum . print ( ) . c_str ( ) ) ; <nl> } <nl> <nl> if ( type = = VEC & & vec - > size ( ) = = 1 ) { <nl> void pathspec_t : : init_from ( const pathspec_t & other ) { <nl> datum_t project ( datum_t datum , <nl> const pathspec_t & pathspec , recurse_flag_t recurse , <nl> const configured_limits_t & limits ) { <nl> - if ( datum - > get_type ( ) = = datum_t : : R_ARRAY & & recurse = = RECURSE ) { <nl> + if ( datum . get_type ( ) = = datum_t : : R_ARRAY & & recurse = = RECURSE ) { <nl> datum_array_builder_t res ( limits ) ; <nl> res . reserve ( datum . arr_size ( ) ) ; <nl> for ( size_t i = 0 ; i < datum . arr_size ( ) ; + + i ) { <nl> datum_t project ( datum_t datum , <nl> datum_object_builder_t res ; <nl> if ( pathspec . as_str ( ) ! = NULL ) { <nl> datum_string_t str ( * pathspec . as_str ( ) ) ; <nl> - if ( datum_t val = datum - > get_field ( str , NOTHROW ) ) { <nl> + const datum_t val = datum . get_field ( str , NOTHROW ) ; <nl> + if ( val . has ( ) ) { <nl> res . overwrite ( std : : move ( str ) , val ) ; <nl> } <nl> } else if ( const std : : vector < pathspec_t > * vec = pathspec . as_vec ( ) ) { <nl> datum_t project ( datum_t datum , <nl> } <nl> } else if ( const std : : map < datum_string_t , pathspec_t > * map = pathspec . as_map ( ) ) { <nl> for ( auto it = map - > begin ( ) ; it ! = map - > end ( ) ; + + it ) { <nl> - if ( datum_t val = datum - > get_field ( it - > first , NOTHROW ) ) { <nl> + const datum_t val = datum . get_field ( it - > first , NOTHROW ) ; <nl> + if ( val . has ( ) ) { <nl> try { <nl> datum_t sub_result = <nl> project ( val , it - > second , RECURSE , limits ) ; <nl> void unproject_helper ( datum_object_builder_t * datum , <nl> } <nl> } else if ( const std : : map < datum_string_t , pathspec_t > * map = pathspec . as_map ( ) ) { <nl> for ( auto it = map - > begin ( ) ; it ! = map - > end ( ) ; + + it ) { <nl> - if ( datum_t val = datum - > try_get ( it - > first ) ) { <nl> + const datum_t val = datum - > try_get ( it - > first ) ; <nl> + if ( val . has ( ) ) { <nl> try { <nl> datum_t sub_result = <nl> unproject ( val , it - > second , RECURSE , limits ) ; <nl> void unproject_helper ( datum_object_builder_t * datum , <nl> datum_t unproject ( datum_t datum , <nl> const pathspec_t & pathspec , recurse_flag_t recurse , <nl> const configured_limits_t & limits ) { <nl> - if ( datum - > get_type ( ) = = datum_t : : R_ARRAY & & recurse = = RECURSE ) { <nl> + if ( datum . get_type ( ) = = datum_t : : R_ARRAY & & recurse = = RECURSE ) { <nl> datum_array_builder_t res ( limits ) ; <nl> res . reserve ( datum . arr_size ( ) ) ; <nl> for ( size_t i = 0 ; i < datum . arr_size ( ) ; + + i ) { <nl> bool contains ( datum_t datum , <nl> try { <nl> bool res = true ; <nl> if ( const datum_string_t * str = pathspec . as_str ( ) ) { <nl> - if ( ! ( res & = ( datum - > get_field ( * str , NOTHROW ) . has ( ) & & <nl> - datum - > get_field ( * str ) - > get_type ( ) ! = datum_t : : R_NULL ) ) ) { <nl> + if ( ! ( res & = ( datum . get_field ( * str , NOTHROW ) . 
has ( ) & & <nl> + datum . get_field ( * str ) . get_type ( ) ! = datum_t : : R_NULL ) ) ) { <nl> return res ; <nl> } <nl> } else if ( const std : : vector < pathspec_t > * vec = pathspec . as_vec ( ) ) { <nl> bool contains ( datum_t datum , <nl> } <nl> } else if ( const std : : map < datum_string_t , pathspec_t > * map = pathspec . as_map ( ) ) { <nl> for ( auto it = map - > begin ( ) ; it ! = map - > end ( ) ; + + it ) { <nl> - if ( datum_t val = datum - > get_field ( it - > first , NOTHROW ) ) { <nl> + const datum_t val = datum . get_field ( it - > first , NOTHROW ) ; <nl> + if ( val . has ( ) ) { <nl> if ( ! ( res & = contains ( val , it - > second ) ) ) { <nl> return res ; <nl> } <nl> mmm a / src / rdb_protocol / protocol . cc <nl> ppp b / src / rdb_protocol / protocol . cc <nl> bool datum_range_t : : is_universe ( ) const { <nl> bool datum_range_t : : contains ( reql_version_t reql_version , <nl> ql : : datum_t val ) const { <nl> return ( ! left_bound . has ( ) <nl> - | | left_bound - > compare_lt ( reql_version , * val ) <nl> - | | ( * left_bound = = * val & & left_bound_type = = key_range_t : : closed ) ) <nl> + | | left_bound . compare_lt ( reql_version , val ) <nl> + | | ( left_bound = = val & & left_bound_type = = key_range_t : : closed ) ) <nl> & & ( ! right_bound . has ( ) <nl> - | | right_bound - > compare_gt ( reql_version , * val ) <nl> - | | ( * right_bound = = * val & & right_bound_type = = key_range_t : : closed ) ) ; <nl> + | | right_bound . compare_gt ( reql_version , val ) <nl> + | | ( right_bound = = val & & right_bound_type = = key_range_t : : closed ) ) ; <nl> } <nl> <nl> key_range_t datum_range_t : : to_primary_keyrange ( ) const { <nl> return key_range_t ( <nl> left_bound_type , <nl> left_bound . has ( ) <nl> - ? store_key_t ( left_bound - > print_primary ( ) ) <nl> + ? store_key_t ( left_bound . print_primary ( ) ) <nl> : store_key_t : : min ( ) , <nl> right_bound_type , <nl> right_bound . has ( ) <nl> - ? store_key_t ( right_bound - > print_primary ( ) ) <nl> + ? store_key_t ( right_bound . print_primary ( ) ) <nl> : store_key_t : : max ( ) ) ; <nl> } <nl> <nl> key_range_t datum_range_t : : to_sindex_keyrange ( ) const { <nl> return rdb_protocol : : sindex_key_range ( <nl> left_bound . has ( ) <nl> - ? store_key_t ( left_bound - > truncated_secondary ( ) ) <nl> + ? store_key_t ( left_bound . truncated_secondary ( ) ) <nl> : store_key_t : : min ( ) , <nl> right_bound . has ( ) <nl> - ? store_key_t ( right_bound - > truncated_secondary ( ) ) <nl> + ? store_key_t ( right_bound . truncated_secondary ( ) ) <nl> : store_key_t : : max ( ) ) ; <nl> } <nl> <nl> struct rdb_w_get_region_visitor : public boost : : static_visitor < region_t > { <nl> std : : vector < store_key_t > keys ; <nl> keys . reserve ( bi . inserts . size ( ) ) ; <nl> for ( auto it = bi . inserts . begin ( ) ; it ! = bi . inserts . end ( ) ; + + it ) { <nl> - keys . emplace_back ( ( * it ) - > get_field ( datum_string_t ( bi . pkey ) ) - > print_primary ( ) ) ; <nl> + keys . emplace_back ( ( * it ) . get_field ( datum_string_t ( bi . pkey ) ) . print_primary ( ) ) ; <nl> } <nl> return region_from_keys ( keys ) ; <nl> } <nl> struct rdb_w_shard_visitor_t : public boost : : static_visitor < bool > { <nl> bool operator ( ) ( const batched_insert_t & bi ) const { <nl> std : : vector < ql : : datum_t > shard_inserts ; <nl> for ( auto it = bi . inserts . begin ( ) ; it ! = bi . inserts . end ( ) ; + + it ) { <nl> - store_key_t key ( ( * it ) - > get_field ( datum_string_t ( bi . 
pkey ) ) - > print_primary ( ) ) ; <nl> + store_key_t key ( ( * it ) . get_field ( datum_string_t ( bi . pkey ) ) . print_primary ( ) ) ; <nl> if ( region_contains_key ( * region , key ) ) { <nl> shard_inserts . push_back ( * it ) ; <nl> } <nl> struct rdb_w_unshard_visitor_t : public boost : : static_visitor < void > { <nl> const ql : : datum_t * stats_i = <nl> boost : : get < ql : : datum_t > ( & responses [ i ] . response ) ; <nl> guarantee ( stats_i ! = NULL ) ; <nl> - stats = stats - > merge ( * stats_i , ql : : stats_merge , * limits , & conditions ) ; <nl> + stats = stats . merge ( * stats_i , ql : : stats_merge , * limits , & conditions ) ; <nl> } <nl> ql : : datum_object_builder_t result ( stats ) ; <nl> result . add_warnings ( conditions , * limits ) ; <nl> mmm a / src / rdb_protocol / pseudo_binary . cc <nl> ppp b / src / rdb_protocol / pseudo_binary . cc <nl> datum_string_t decode_base64_ptype ( <nl> datum_string_t res ; <nl> for ( auto it = ptype . begin ( ) ; it ! = ptype . end ( ) ; + + it ) { <nl> if ( it - > first = = datum_t : : reql_type_string ) { <nl> - r_sanity_check ( it - > second - > as_str ( ) = = binary_string ) ; <nl> + r_sanity_check ( it - > second . as_str ( ) = = binary_string ) ; <nl> } else if ( it - > first = = data_key ) { <nl> has_data = true ; <nl> - res = decode_base64 ( it - > second - > as_str ( ) ) ; <nl> + res = decode_base64 ( it - > second . as_str ( ) ) ; <nl> } else { <nl> rfail_datum ( base_exc_t : : GENERIC , <nl> " Invalid binary pseudotype : illegal ` % s ` key . " , <nl> mmm a / src / rdb_protocol / pseudo_geometry . cc <nl> ppp b / src / rdb_protocol / pseudo_geometry . cc <nl> const char * const geometry_string = " GEOMETRY " ; <nl> datum_t geo_sub ( datum_t lhs , <nl> datum_t rhs , <nl> const configured_limits_t & limits ) { <nl> - rcheck_target ( & lhs , base_exc_t : : GENERIC , lhs - > is_ptype ( geometry_string ) , <nl> + rcheck_target ( & lhs , base_exc_t : : GENERIC , lhs . is_ptype ( geometry_string ) , <nl> " Value must be of geometry type . " ) ; <nl> - rcheck_target ( & rhs , base_exc_t : : GENERIC , rhs - > is_ptype ( geometry_string ) , <nl> + rcheck_target ( & rhs , base_exc_t : : GENERIC , rhs . is_ptype ( geometry_string ) , <nl> " Value must be of geometry type . " ) ; <nl> <nl> rcheck_target ( & rhs , base_exc_t : : GENERIC , <nl> - rhs - > get_field ( " coordinates " ) - > arr_size ( ) < = 1 , <nl> + rhs . get_field ( " coordinates " ) . arr_size ( ) < = 1 , <nl> " The second argument to ` sub ` must be a Polygon with only an outer " <nl> " shell . This one has holes . " ) ; <nl> <nl> / / Construct a polygon from lhs with rhs cut out <nl> rcheck_target ( & lhs , base_exc_t : : GENERIC , <nl> - lhs - > get_field ( " type " ) - > as_str ( ) = = " Polygon " , <nl> + lhs . get_field ( " type " ) . as_str ( ) = = " Polygon " , <nl> strprintf ( " The first argument to ` sub ` must be a Polygon . Found ` % s ` . " , <nl> - lhs - > get_field ( " type " ) - > as_str ( ) . to_std ( ) . c_str ( ) ) ) ; <nl> + lhs . get_field ( " type " ) . as_str ( ) . to_std ( ) . c_str ( ) ) ) ; <nl> rcheck_target ( & lhs , base_exc_t : : GENERIC , <nl> - lhs - > get_field ( " coordinates " ) - > arr_size ( ) > = 1 , <nl> + lhs . get_field ( " coordinates " ) . arr_size ( ) > = 1 , <nl> " The first argument to ` sub ` is an empty polygon . It must at least " <nl> " have an outer shell . " ) ; <nl> <nl> mmm a / src / rdb_protocol / pseudo_time . cc <nl> ppp b / src / rdb_protocol / pseudo_time . 
cc <nl> void add_seconds_to_ptime ( ptime_t * t , double raw_sec ) { <nl> } <nl> <nl> time_t time_to_boost ( datum_t d ) { <nl> - double raw_sec = d - > get_field ( epoch_time_key ) - > as_num ( ) ; <nl> + double raw_sec = d . get_field ( epoch_time_key ) . as_num ( ) ; <nl> ptime_t t ( date_t ( 1970 , 1 , 1 ) ) ; <nl> add_seconds_to_ptime ( & t , raw_sec ) ; <nl> <nl> - if ( datum_t tz = d - > get_field ( timezone_key , NOTHROW ) ) { <nl> + const datum_t tz = d . get_field ( timezone_key , NOTHROW ) ; <nl> + if ( tz . has ( ) ) { <nl> boost : : local_time : : time_zone_ptr zone ( <nl> - new boost : : local_time : : posix_time_zone ( sanitize : : tz ( tz - > as_str ( ) . to_std ( ) ) ) ) ; <nl> + new boost : : local_time : : posix_time_zone ( sanitize : : tz ( tz . as_str ( ) . to_std ( ) ) ) ) ; <nl> return time_t ( t , zone ) ; <nl> } else { <nl> return time_t ( t , utc ) ; <nl> std : : string time_to_iso8601 ( datum_t d ) { <nl> year ) ) ; <nl> std : : ostringstream ss ; <nl> ss . exceptions ( std : : ios_base : : failbit ) ; <nl> - if ( datum_t tz = d - > get_field ( timezone_key , NOTHROW ) ) { <nl> + const datum_t tz = d . get_field ( timezone_key , NOTHROW ) ; <nl> + if ( tz . has ( ) ) { <nl> ss . imbue ( tz_format ) ; <nl> } else { <nl> ss . imbue ( no_tz_format ) ; <nl> std : : string time_to_iso8601 ( datum_t d ) { <nl> } <nl> <nl> double time_to_epoch_time ( datum_t d ) { <nl> - return d - > get_field ( epoch_time_key ) - > as_num ( ) ; <nl> + return d . get_field ( epoch_time_key ) . as_num ( ) ; <nl> } <nl> <nl> datum_t time_now ( ) { <nl> int time_cmp ( reql_version_t reql_version , const datum_t & x , const datum_t & y ) { <nl> / / We know that these are both nums , so the reql_version doesn ' t actually affect <nl> / / anything ( between v1_13 and v1_14_is_latest ) . But it ' s safer not to have to <nl> / / prove that , so we take it and pass it anyway . <nl> - return x . get_field ( epoch_time_key ) - > cmp ( reql_version , * y . get_field ( epoch_time_key ) ) ; <nl> + return x . get_field ( epoch_time_key ) . cmp ( reql_version , y . get_field ( epoch_time_key ) ) ; <nl> } <nl> <nl> double sanitize_epoch_sec ( double d ) { <nl> void sanitize_time ( datum_t * time ) { <nl> } <nl> <nl> datum_t time_tz ( datum_t time ) { <nl> - r_sanity_check ( time - > is_ptype ( time_string ) ) ; <nl> - if ( datum_t tz = time - > get_field ( timezone_key , NOTHROW ) ) { <nl> + r_sanity_check ( time . is_ptype ( time_string ) ) ; <nl> + const datum_t tz = time . get_field ( timezone_key , NOTHROW ) ; <nl> + if ( tz . has ( ) ) { <nl> return tz ; <nl> } else { <nl> return datum_t : : null ( ) ; <nl> datum_t time_tz ( datum_t time ) { <nl> } <nl> <nl> datum_t time_in_tz ( datum_t t , datum_t tz ) { <nl> - r_sanity_check ( t - > is_ptype ( time_string ) ) ; <nl> + r_sanity_check ( t . is_ptype ( time_string ) ) ; <nl> datum_object_builder_t t2 ( t ) ; <nl> - std : : string raw_new_tzs = tz - > as_str ( ) . to_std ( ) ; <nl> + std : : string raw_new_tzs = tz . as_str ( ) . to_std ( ) ; <nl> std : : string new_tzs = sanitize : : tz ( raw_new_tzs ) ; <nl> if ( raw_new_tzs = = new_tzs ) { <nl> t2 . overwrite ( timezone_key , tz ) ; <nl> datum_t make_time ( <nl> <nl> datum_t time_add ( datum_t x , datum_t y ) { <nl> datum_t time , duration ; <nl> - if ( x - > is_ptype ( time_string ) ) { <nl> + if ( x . is_ptype ( time_string ) ) { <nl> time = x ; <nl> duration = y ; <nl> } else { <nl> - r_sanity_check ( y - > is_ptype ( time_string ) ) ; <nl> + r_sanity_check ( y . 
is_ptype ( time_string ) ) ; <nl> time = y ; <nl> duration = x ; <nl> } <nl> datum_t time_add ( datum_t x , datum_t y ) { <nl> datum_object_builder_t res ( time ) ; <nl> res . overwrite ( <nl> epoch_time_key , <nl> - datum_t ( time - > get_field ( epoch_time_key ) - > as_num ( ) + <nl> - duration - > as_num ( ) ) ) ; <nl> + datum_t ( time . get_field ( epoch_time_key ) . as_num ( ) + <nl> + duration . as_num ( ) ) ) ; <nl> <nl> return std : : move ( res ) . to_datum ( ) ; <nl> } <nl> <nl> datum_t time_sub ( datum_t time , datum_t time_or_duration ) { <nl> - r_sanity_check ( time - > is_ptype ( time_string ) ) ; <nl> + r_sanity_check ( time . is_ptype ( time_string ) ) ; <nl> <nl> - if ( time_or_duration - > is_ptype ( time_string ) ) { <nl> + if ( time_or_duration . is_ptype ( time_string ) ) { <nl> return datum_t ( sanitize_epoch_sec ( <nl> - time - > get_field ( epoch_time_key ) - > as_num ( ) <nl> - - time_or_duration - > get_field ( epoch_time_key ) - > as_num ( ) ) ) ; <nl> + time . get_field ( epoch_time_key ) . as_num ( ) <nl> + - time_or_duration . get_field ( epoch_time_key ) . as_num ( ) ) ) ; <nl> } else { <nl> datum_object_builder_t res ( time ) ; <nl> res . overwrite ( <nl> epoch_time_key , <nl> - datum_t ( time - > get_field ( epoch_time_key ) - > as_num ( ) - <nl> - time_or_duration - > as_num ( ) ) ) ; <nl> + datum_t ( time . get_field ( epoch_time_key ) . as_num ( ) - <nl> + time_or_duration . as_num ( ) ) ) ; <nl> return std : : move ( res ) . to_datum ( ) ; <nl> } <nl> } <nl> double time_portion ( datum_t time , time_component_t c ) { <nl> case HOURS : return ptime . time_of_day ( ) . hours ( ) ; <nl> case MINUTES : return ptime . time_of_day ( ) . minutes ( ) ; <nl> case SECONDS : { <nl> - double frac = modf ( time - > get_field ( epoch_time_key ) - > as_num ( ) , & frac ) ; <nl> + double frac = modf ( time . get_field ( epoch_time_key ) . as_num ( ) , & frac ) ; <nl> frac = round ( frac * 1000 ) / 1000 ; <nl> return ptime . time_of_day ( ) . seconds ( ) + frac ; <nl> } break ; <nl> void time_to_str_key ( const datum_t & d , std : : string * str_out ) { <nl> / / We need to prepend " P " and append a character less than [ a - zA - Z ] so that <nl> / / different pseudotypes sort correctly . <nl> str_out - > append ( std : : string ( " P " ) + time_string + " : " ) ; <nl> - d . get_field ( epoch_time_key ) - > num_to_str_key ( str_out ) ; <nl> + d . get_field ( epoch_time_key ) . num_to_str_key ( str_out ) ; <nl> } <nl> <nl> } / / namespace pseudo <nl> mmm a / src / rdb_protocol / query_server . cc <nl> ppp b / src / rdb_protocol / query_server . cc <nl> bool rdb_query_server_t : : run_query ( const ql : : protob_t < Query > & query , <nl> <nl> ql : : datum_t noreply = static_optarg ( " noreply " , query ) ; <nl> bool response_needed = ! ( noreply . has ( ) & & <nl> - noreply - > get_type ( ) = = ql : : datum_t : : type_t : : R_BOOL & & <nl> - noreply - > as_bool ( ) ) ; <nl> + noreply . get_type ( ) = = ql : : datum_t : : type_t : : R_BOOL & & <nl> + noreply . as_bool ( ) ) ; <nl> try { <nl> scoped_ops_running_stat_t stat ( & rdb_ctx - > ql_ops_running ) ; <nl> guarantee ( rdb_ctx - > cluster_interface ) ; <nl> mmm a / src / rdb_protocol / rdb_protocol_json . hpp <nl> ppp b / src / rdb_protocol / rdb_protocol_json . hpp <nl> class optional_datum_less_t { <nl> bool operator ( ) ( const ql : : datum_t & a , <nl> const ql : : datum_t & b ) const { <nl> if ( a . has ( ) ) { <nl> - return b . has ( ) & & a - > compare_lt ( reql_version_ , * b ) ; <nl> + return b . 
has ( ) & & a . compare_lt ( reql_version_ , b ) ; <nl> } else { <nl> return b . has ( ) ; <nl> } <nl> mmm a / src / rdb_protocol / real_table . cc <nl> ppp b / src / rdb_protocol / real_table . cc <nl> const std : : string & real_table_t : : get_pkey ( ) { <nl> <nl> ql : : datum_t real_table_t : : read_row ( ql : : env_t * env , <nl> ql : : datum_t pval , bool use_outdated ) { <nl> - read_t read ( point_read_t ( store_key_t ( pval - > print_primary ( ) ) ) , env - > profile ( ) ) ; <nl> + read_t read ( point_read_t ( store_key_t ( pval . print_primary ( ) ) ) , env - > profile ( ) ) ; <nl> read_response_t res ; <nl> read_with_profile ( env , read , & res , use_outdated ) ; <nl> point_read_response_t * p_res = boost : : get < point_read_response_t > ( & res . response ) ; <nl> ql : : datum_t real_table_t : : write_batched_replace ( ql : : env_t * env , <nl> std : : vector < store_key_t > store_keys ; <nl> store_keys . reserve ( keys . size ( ) ) ; <nl> for ( auto it = keys . begin ( ) ; it ! = keys . end ( ) ; it + + ) { <nl> - store_keys . push_back ( store_key_t ( ( * it ) - > print_primary ( ) ) ) ; <nl> + store_keys . push_back ( store_key_t ( ( * it ) . print_primary ( ) ) ) ; <nl> } <nl> batched_replace_t write ( std : : move ( store_keys ) , pkey , func , <nl> env - > get_all_optargs ( ) , return_changes ) ; <nl> mmm a / src / rdb_protocol / serialize_datum . cc <nl> ppp b / src / rdb_protocol / serialize_datum . cc <nl> serialization_result_t datum_serialize ( <nl> serialization_result_t res = serialization_result_t : : SUCCESS ; <nl> <nl> r_sanity_check ( datum . has ( ) ) ; <nl> - switch ( datum - > get_type ( ) ) { <nl> + switch ( datum . get_type ( ) ) { <nl> case datum_t : : R_ARRAY : { <nl> res = res | datum_serialize ( wm , datum_serialized_type_t : : BUF_R_ARRAY ) ; <nl> if ( datum . arr_size ( ) > 100000 ) <nl> serialization_result_t datum_serialize ( <nl> } break ; <nl> case datum_t : : R_BINARY : { <nl> datum_serialize ( wm , datum_serialized_type_t : : R_BINARY ) ; <nl> - const datum_string_t & value = datum - > as_binary ( ) ; <nl> + const datum_string_t & value = datum . as_binary ( ) ; <nl> datum_serialize ( wm , value ) ; <nl> } break ; <nl> case datum_t : : R_BOOL : { <nl> res = res | datum_serialize ( wm , datum_serialized_type_t : : R_BOOL ) ; <nl> - bool value = datum - > as_bool ( ) ; <nl> + bool value = datum . as_bool ( ) ; <nl> serialize_universal ( wm , value ) ; <nl> } break ; <nl> case datum_t : : R_NULL : { <nl> res = res | datum_serialize ( wm , datum_serialized_type_t : : R_NULL ) ; <nl> } break ; <nl> case datum_t : : R_NUM : { <nl> - double value = datum - > as_num ( ) ; <nl> + double value = datum . as_num ( ) ; <nl> int64_t i ; <nl> if ( number_as_integer ( value , & i ) ) { <nl> / / We serialize the signed - zero double , - 0 . 0 , with INT_NEGATIVE . <nl> serialization_result_t datum_serialize ( <nl> } break ; <nl> case datum_t : : R_STR : { <nl> res = res | datum_serialize ( wm , datum_serialized_type_t : : R_STR ) ; <nl> - const datum_string_t & value = datum - > as_str ( ) ; <nl> + const datum_string_t & value = datum . as_str ( ) ; <nl> res = res | datum_serialize ( wm , value ) ; <nl> } break ; <nl> case datum_t : : UNINITIALIZED : / / fallthru <nl> mmm a / src / rdb_protocol / shards . cc <nl> ppp b / src / rdb_protocol / shards . cc <nl> void dprint ( const char * s , const T & ) { <nl> template < > <nl> void dprint ( const char * s , const datum_t & t ) { <nl> if ( t . 
has ( ) ) { <nl> - debugf ( " % s - > % s \ n " , s , t - > print ( ) . c_str ( ) ) ; <nl> + debugf ( " % s - > % s \ n " , s , t . print ( ) . c_str ( ) ) ; <nl> } else { <nl> debugf ( " % s - > NULL \ n " , s ) ; <nl> } <nl> class sum_terminal_t : public skip_terminal_t < double > { <nl> const datum_t & el , <nl> double * out , <nl> const acc_func_t & f ) { <nl> - * out + = f ( env , el ) - > as_num ( ) ; <nl> + * out + = f ( env , el ) . as_num ( ) ; <nl> } <nl> virtual datum_t unpack ( double * d ) { <nl> return datum_t ( * d ) ; <nl> class avg_terminal_t : public skip_terminal_t < std : : pair < double , uint64_t > > { <nl> const datum_t & el , <nl> std : : pair < double , uint64_t > * out , <nl> const acc_func_t & f ) { <nl> - out - > first + = f ( env , el ) - > as_num ( ) ; <nl> + out - > first + = f ( env , el ) . as_num ( ) ; <nl> out - > second + = 1 ; <nl> } <nl> virtual datum_t unpack ( <nl> bool datum_lt ( reql_version_t reql_version , <nl> const datum_t & val1 , <nl> const datum_t & val2 ) { <nl> r_sanity_check ( val1 . has ( ) & & val2 . has ( ) ) ; <nl> - return val1 - > compare_lt ( reql_version , * val2 ) ; <nl> + return val1 . compare_lt ( reql_version , val2 ) ; <nl> } <nl> <nl> bool datum_gt ( reql_version_t reql_version , <nl> const datum_t & val1 , <nl> const datum_t & val2 ) { <nl> r_sanity_check ( val1 . has ( ) & & val2 . has ( ) ) ; <nl> - return val1 - > compare_gt ( reql_version , * val2 ) ; <nl> + return val1 . compare_gt ( reql_version , val2 ) ; <nl> } <nl> <nl> class optimizing_terminal_t : public skip_terminal_t < optimizer_t > { <nl> class group_trans_t : public op_t { <nl> } else { <nl> std : : vector < std : : vector < datum_t > > perms ( arr . size ( ) ) ; <nl> for ( size_t i = 0 ; i < arr . size ( ) ; + + i ) { <nl> - if ( arr [ i ] - > get_type ( ) ! = datum_t : : R_ARRAY ) { <nl> + if ( arr [ i ] . get_type ( ) ! = datum_t : : R_ARRAY ) { <nl> perms [ i ] . push_back ( arr [ i ] ) ; <nl> } else { <nl> perms [ i ] . reserve ( arr [ i ] . arr_size ( ) ) ; <nl> class distinct_trans_t : public ungrouped_op_t { <nl> r_sanity_check ( sindex_val . has ( ) ) ; <nl> * it = sindex_val ; <nl> } <nl> - if ( ! last_val . has ( ) | | * * it ! = * last_val ) { <nl> + if ( ! last_val . has ( ) | | * it ! = last_val ) { <nl> std : : swap ( * loc , * it ) ; <nl> last_val = * loc ; <nl> + + loc ; <nl> class zip_trans_t : public ungrouped_op_t { <nl> virtual void lst_transform ( env_t * , datums_t * lst , <nl> const datum_t & ) { <nl> for ( auto it = lst - > begin ( ) ; it ! = lst - > end ( ) ; + + it ) { <nl> - auto left = ( * it ) - > get_field ( " left " , NOTHROW ) ; <nl> - auto right = ( * it ) - > get_field ( " right " , NOTHROW ) ; <nl> + auto left = ( * it ) . get_field ( " left " , NOTHROW ) ; <nl> + auto right = ( * it ) . get_field ( " right " , NOTHROW ) ; <nl> rcheck_datum ( left . has ( ) , base_exc_t : : GENERIC , <nl> " ZIP can only be called on the result of a join . " ) ; <nl> - * it = right . has ( ) ? left - > merge ( right ) : left ; <nl> + * it = right . has ( ) ? left . merge ( right ) : left ; <nl> } <nl> } <nl> } ; <nl> mmm a / src / rdb_protocol / shards . hpp <nl> ppp b / src / rdb_protocol / shards . hpp <nl> class grouped_pair_compare_t { <nl> const std : : pair < datum_t , T > & b ) const { <nl> / / We know the keys are different , this is only used in <nl> / / iterate_ordered_by_version . <nl> - return a . first - > compare_lt ( reql_version , * b . first ) ; <nl> + return a . first . compare_lt ( reql_version , b . 
first ) ; <nl> } <nl> <nl> private : <nl> mmm a / src / rdb_protocol / store . cc <nl> ppp b / src / rdb_protocol / store . cc <nl> struct rdb_write_visitor_t : public boost : : static_visitor < void > { <nl> std : : vector < store_key_t > keys ; <nl> keys . reserve ( bi . inserts . size ( ) ) ; <nl> for ( auto it = bi . inserts . begin ( ) ; it ! = bi . inserts . end ( ) ; + + it ) { <nl> - keys . emplace_back ( ( * it ) - > get_field ( datum_string_t ( bi . pkey ) ) - > print_primary ( ) ) ; <nl> + keys . emplace_back ( ( * it ) . get_field ( datum_string_t ( bi . pkey ) ) . print_primary ( ) ) ; <nl> } <nl> response - > response = <nl> rdb_batched_replace ( <nl> mmm a / src / rdb_protocol / stream_cache . cc <nl> ppp b / src / rdb_protocol / stream_cache . cc <nl> bool stream_cache_t : : serve ( int64_t key , Response * res , signal_t * interruptor ) { <nl> batchspec_t : : user ( batch_type , & env ) ) ; <nl> entry - > has_sent_batch = true ; <nl> for ( auto d = ds . begin ( ) ; d ! = ds . end ( ) ; + + d ) { <nl> - ( * d ) - > write_to_protobuf ( res - > add_response ( ) , entry - > use_json ) ; <nl> + d - > write_to_protobuf ( res - > add_response ( ) , entry - > use_json ) ; <nl> } <nl> if ( trace . has ( ) ) { <nl> - trace - > as_datum ( ) - > write_to_protobuf ( <nl> + trace - > as_datum ( ) . write_to_protobuf ( <nl> res - > mutable_profile ( ) , entry - > use_json ) ; <nl> } <nl> } catch ( const std : : exception & e ) { <nl> mmm a / src / rdb_protocol / table_common . cc <nl> ppp b / src / rdb_protocol / table_common . cc <nl> ql : : datum_t make_row_replacement_stats ( <nl> bool * was_changed_out ) { <nl> guarantee ( old_row . has ( ) ) ; <nl> bool started_empty ; <nl> - if ( old_row - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( old_row . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> started_empty = true ; <nl> - } else if ( old_row - > get_type ( ) = = ql : : datum_t : : R_OBJECT ) { <nl> + } else if ( old_row . get_type ( ) = = ql : : datum_t : : R_OBJECT ) { <nl> started_empty = false ; <nl> # ifndef NDEBUG <nl> - ql : : datum_t old_row_pval = old_row - > get_field ( primary_key_name ) ; <nl> + ql : : datum_t old_row_pval = old_row . get_field ( primary_key_name ) ; <nl> rassert ( old_row_pval . has ( ) ) ; <nl> - rassert ( store_key_t ( old_row_pval - > print_primary ( ) ) = = primary_key_value ) ; <nl> + rassert ( store_key_t ( old_row_pval . print_primary ( ) ) = = primary_key_value ) ; <nl> # endif <nl> } else { <nl> crash ( " old_row is invalid " ) ; <nl> ql : : datum_t make_row_replacement_stats ( <nl> <nl> guarantee ( new_row . has ( ) ) ; <nl> bool ended_empty ; <nl> - if ( new_row - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( new_row . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> ended_empty = true ; <nl> - } else if ( new_row - > get_type ( ) = = ql : : datum_t : : R_OBJECT ) { <nl> + } else if ( new_row . get_type ( ) = = ql : : datum_t : : R_OBJECT ) { <nl> ended_empty = false ; <nl> - new_row - > rcheck_valid_replace ( <nl> + new_row . rcheck_valid_replace ( <nl> old_row , ql : : datum_t ( ) , primary_key_name ) ; <nl> ql : : datum_t new_primary_key_value = <nl> - new_row - > get_field ( primary_key_name , ql : : NOTHROW ) ; <nl> - rcheck_target ( new_row , ql : : base_exc_t : : GENERIC , <nl> + new_row . get_field ( primary_key_name , ql : : NOTHROW ) ; <nl> + rcheck_target ( & new_row , ql : : base_exc_t : : GENERIC , <nl> primary_key_value . 
compare ( <nl> - store_key_t ( new_primary_key_value - > print_primary ( ) ) ) = = 0 , <nl> + store_key_t ( new_primary_key_value . print_primary ( ) ) ) = = 0 , <nl> ( started_empty <nl> ? strprintf ( " Primary key ` % s ` cannot be changed ( null - > % s ) " , <nl> - primary_key_name . to_std ( ) . c_str ( ) , new_row - > print ( ) . c_str ( ) ) <nl> + primary_key_name . to_std ( ) . c_str ( ) , new_row . print ( ) . c_str ( ) ) <nl> : strprintf ( " Primary key ` % s ` cannot be changed ( % s - > % s ) " , <nl> primary_key_name . to_std ( ) . c_str ( ) , <nl> - old_row - > print ( ) . c_str ( ) , new_row - > print ( ) . c_str ( ) ) ) ) ; <nl> + old_row . print ( ) . c_str ( ) , new_row . print ( ) . c_str ( ) ) ) ) ; <nl> } else { <nl> rfail_typed_target ( <nl> - new_row , " Inserted value must be an OBJECT ( got % s ) : \ n % s " , <nl> - new_row - > get_type_name ( ) . c_str ( ) , new_row - > print ( ) . c_str ( ) ) ; <nl> + & new_row , " Inserted value must be an OBJECT ( got % s ) : \ n % s " , <nl> + new_row . get_type_name ( ) . c_str ( ) , new_row . print ( ) . c_str ( ) ) ; <nl> } <nl> <nl> - * was_changed_out = * old_row ! = * new_row ; <nl> + * was_changed_out = ( old_row ! = new_row ) ; <nl> <nl> ql : : datum_object_builder_t resp ; <nl> if ( return_changes = = return_changes_t : : YES ) { <nl> ql : : datum_t resolve_insert_conflict ( <nl> ql : : datum_t old_row , <nl> ql : : datum_t insert_row , <nl> conflict_behavior_t conflict_behavior ) { <nl> - if ( old_row - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( old_row . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> return insert_row ; <nl> } else if ( conflict_behavior = = conflict_behavior_t : : REPLACE ) { <nl> return insert_row ; <nl> } else if ( conflict_behavior = = conflict_behavior_t : : UPDATE ) { <nl> - return old_row - > merge ( insert_row ) ; <nl> + return old_row . merge ( insert_row ) ; <nl> } else { <nl> - rfail_target ( old_row , ql : : base_exc_t : : GENERIC , <nl> + rfail_target ( & old_row , ql : : base_exc_t : : GENERIC , <nl> " Duplicate primary key ` % s ` : \ n % s \ n % s " , <nl> - primary_key . c_str ( ) , old_row - > print ( ) . c_str ( ) , <nl> - insert_row - > print ( ) . c_str ( ) ) ; <nl> + primary_key . c_str ( ) , old_row . print ( ) . c_str ( ) , <nl> + insert_row . print ( ) . c_str ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / src / rdb_protocol / term . cc <nl> ppp b / src / rdb_protocol / term . cc <nl> void run ( protob_t < Query > q , <nl> if ( val - > get_type ( ) . is_convertible ( val_t : : type_t : : DATUM ) ) { <nl> res - > set_type ( Response : : SUCCESS_ATOM ) ; <nl> datum_t d = val - > as_datum ( ) ; <nl> - d - > write_to_protobuf ( res - > add_response ( ) , use_json ) ; <nl> + d . write_to_protobuf ( res - > add_response ( ) , use_json ) ; <nl> if ( trace . has ( ) ) { <nl> - trace - > as_datum ( ) - > write_to_protobuf ( <nl> + trace - > as_datum ( ) . write_to_protobuf ( <nl> res - > mutable_profile ( ) , use_json ) ; <nl> } <nl> } else if ( counted_t < grouped_data_t > gd <nl> void run ( protob_t < Query > q , <nl> datum_t d = to_datum_for_client_serialization ( std : : move ( * gd ) , <nl> env . reql_version ( ) , <nl> env . limits ( ) ) ; <nl> - d - > write_to_protobuf ( res - > add_response ( ) , use_json ) ; <nl> + d . write_to_protobuf ( res - > add_response ( ) , use_json ) ; <nl> if ( env . trace ! = nullptr ) { <nl> - env . trace - > as_datum ( ) - > write_to_protobuf ( <nl> + env . trace - > as_datum ( ) . 
write_to_protobuf ( <nl> res - > mutable_profile ( ) , use_json ) ; <nl> } <nl> } else if ( val - > get_type ( ) . is_convertible ( val_t : : type_t : : SEQUENCE ) ) { <nl> counted_t < datum_stream_t > seq = val - > as_seq ( & env ) ; <nl> - if ( datum_t arr = seq - > as_array ( & env ) ) { <nl> + const datum_t arr = seq - > as_array ( & env ) ; <nl> + if ( arr . has ( ) ) { <nl> res - > set_type ( Response : : SUCCESS_ATOM ) ; <nl> - arr - > write_to_protobuf ( res - > add_response ( ) , use_json ) ; <nl> + arr . write_to_protobuf ( res - > add_response ( ) , use_json ) ; <nl> if ( trace . has ( ) ) { <nl> - trace - > as_datum ( ) - > write_to_protobuf ( <nl> + trace - > as_datum ( ) . write_to_protobuf ( <nl> res - > mutable_profile ( ) , use_json ) ; <nl> } <nl> } else { <nl> mmm a / src / rdb_protocol / terms / arith . cc <nl> ppp b / src / rdb_protocol / terms / arith . cc <nl> class arith_term_t : public op_term_t { <nl> <nl> private : <nl> datum_t add ( datum_t lhs , <nl> - datum_t rhs , <nl> - const configured_limits_t & limits ) const { <nl> - if ( lhs - > is_ptype ( pseudo : : time_string ) | | <nl> - rhs - > is_ptype ( pseudo : : time_string ) ) { <nl> + datum_t rhs , <nl> + const configured_limits_t & limits ) const { <nl> + if ( lhs . is_ptype ( pseudo : : time_string ) | | <nl> + rhs . is_ptype ( pseudo : : time_string ) ) { <nl> return pseudo : : time_add ( lhs , rhs ) ; <nl> - } else if ( lhs - > get_type ( ) = = datum_t : : R_NUM ) { <nl> - rhs - > check_type ( datum_t : : R_NUM ) ; <nl> - return datum_t ( lhs - > as_num ( ) + rhs - > as_num ( ) ) ; <nl> - } else if ( lhs - > get_type ( ) = = datum_t : : R_STR ) { <nl> - rhs - > check_type ( datum_t : : R_STR ) ; <nl> - return datum_t ( concat ( lhs - > as_str ( ) , rhs - > as_str ( ) ) ) ; <nl> - } else if ( lhs - > get_type ( ) = = datum_t : : R_ARRAY ) { <nl> - rhs - > check_type ( datum_t : : R_ARRAY ) ; <nl> + } else if ( lhs . get_type ( ) = = datum_t : : R_NUM ) { <nl> + rhs . check_type ( datum_t : : R_NUM ) ; <nl> + return datum_t ( lhs . as_num ( ) + rhs . as_num ( ) ) ; <nl> + } else if ( lhs . get_type ( ) = = datum_t : : R_STR ) { <nl> + rhs . check_type ( datum_t : : R_STR ) ; <nl> + return datum_t ( concat ( lhs . as_str ( ) , rhs . as_str ( ) ) ) ; <nl> + } else if ( lhs . get_type ( ) = = datum_t : : R_ARRAY ) { <nl> + rhs . check_type ( datum_t : : R_ARRAY ) ; <nl> datum_array_builder_t out ( limits ) ; <nl> - for ( size_t i = 0 ; i < lhs - > arr_size ( ) ; + + i ) { <nl> - out . add ( lhs - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < lhs . arr_size ( ) ; + + i ) { <nl> + out . add ( lhs . get ( i ) ) ; <nl> } <nl> - for ( size_t i = 0 ; i < rhs - > arr_size ( ) ; + + i ) { <nl> - out . add ( rhs - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < rhs . arr_size ( ) ; + + i ) { <nl> + out . add ( rhs . get ( i ) ) ; <nl> } <nl> return std : : move ( out ) . to_datum ( ) ; <nl> } else { <nl> / / If we get here lhs is neither number nor string <nl> / / so we ' ll just error saying we expect a number <nl> - lhs - > check_type ( datum_t : : R_NUM ) ; <nl> + lhs . check_type ( datum_t : : R_NUM ) ; <nl> } <nl> unreachable ( ) ; <nl> } <nl> class arith_term_t : public op_term_t { <nl> datum_t sub ( datum_t lhs , <nl> datum_t rhs , <nl> const configured_limits_t & limits ) const { <nl> - if ( lhs - > is_ptype ( pseudo : : time_string ) ) { <nl> + if ( lhs . 
is_ptype ( pseudo : : time_string ) ) { <nl> return pseudo : : time_sub ( lhs , rhs ) ; <nl> - } else if ( lhs - > is_ptype ( pseudo : : geometry_string ) ) { <nl> + } else if ( lhs . is_ptype ( pseudo : : geometry_string ) ) { <nl> try { <nl> return pseudo : : geo_sub ( lhs , rhs , limits ) ; <nl> } catch ( const geo_exception_t & e ) { <nl> rfail ( base_exc_t : : GENERIC , " % s " , e . what ( ) ) ; <nl> } <nl> } else { <nl> - lhs - > check_type ( datum_t : : R_NUM ) ; <nl> - rhs - > check_type ( datum_t : : R_NUM ) ; <nl> - return datum_t ( lhs - > as_num ( ) - rhs - > as_num ( ) ) ; <nl> + lhs . check_type ( datum_t : : R_NUM ) ; <nl> + rhs . check_type ( datum_t : : R_NUM ) ; <nl> + return datum_t ( lhs . as_num ( ) - rhs . as_num ( ) ) ; <nl> } <nl> } <nl> datum_t mul ( datum_t lhs , <nl> datum_t rhs , <nl> const configured_limits_t & limits ) const { <nl> - if ( lhs - > get_type ( ) = = datum_t : : R_ARRAY | | <nl> - rhs - > get_type ( ) = = datum_t : : R_ARRAY ) { <nl> + if ( lhs . get_type ( ) = = datum_t : : R_ARRAY | | <nl> + rhs . get_type ( ) = = datum_t : : R_ARRAY ) { <nl> datum_t array = <nl> - ( lhs - > get_type ( ) = = datum_t : : R_ARRAY ? lhs : rhs ) ; <nl> + ( lhs . get_type ( ) = = datum_t : : R_ARRAY ? lhs : rhs ) ; <nl> datum_t num = <nl> - ( lhs - > get_type ( ) = = datum_t : : R_ARRAY ? rhs : lhs ) ; <nl> + ( lhs . get_type ( ) = = datum_t : : R_ARRAY ? rhs : lhs ) ; <nl> <nl> datum_array_builder_t out ( limits ) ; <nl> - const int64_t num_copies = num - > as_int ( ) ; <nl> + const int64_t num_copies = num . as_int ( ) ; <nl> rcheck ( num_copies > = 0 , base_exc_t : : GENERIC , <nl> " Cannot multiply an ARRAY by a negative number . " ) ; <nl> <nl> for ( int64_t j = 0 ; j < num_copies ; + + j ) { <nl> - for ( size_t i = 0 ; i < array - > arr_size ( ) ; + + i ) { <nl> - out . add ( array - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < array . arr_size ( ) ; + + i ) { <nl> + out . add ( array . get ( i ) ) ; <nl> } <nl> } <nl> return std : : move ( out ) . to_datum ( ) ; <nl> } else { <nl> - lhs - > check_type ( datum_t : : R_NUM ) ; <nl> - rhs - > check_type ( datum_t : : R_NUM ) ; <nl> - return datum_t ( lhs - > as_num ( ) * rhs - > as_num ( ) ) ; <nl> + lhs . check_type ( datum_t : : R_NUM ) ; <nl> + rhs . check_type ( datum_t : : R_NUM ) ; <nl> + return datum_t ( lhs . as_num ( ) * rhs . as_num ( ) ) ; <nl> } <nl> } <nl> datum_t div ( datum_t lhs , <nl> datum_t rhs , <nl> UNUSED const configured_limits_t & limits ) const { <nl> - lhs - > check_type ( datum_t : : R_NUM ) ; <nl> - rhs - > check_type ( datum_t : : R_NUM ) ; <nl> - rcheck ( rhs - > as_num ( ) ! = 0 , base_exc_t : : GENERIC , " Cannot divide by zero . " ) ; <nl> + lhs . check_type ( datum_t : : R_NUM ) ; <nl> + rhs . check_type ( datum_t : : R_NUM ) ; <nl> + rcheck ( rhs . as_num ( ) ! = 0 , base_exc_t : : GENERIC , " Cannot divide by zero . " ) ; <nl> / / throws on non - finite values <nl> - return datum_t ( lhs - > as_num ( ) / rhs - > as_num ( ) ) ; <nl> + return datum_t ( lhs . as_num ( ) / rhs . as_num ( ) ) ; <nl> } <nl> <nl> const char * namestr ; <nl> mmm a / src / rdb_protocol / terms / arr . cc <nl> ppp b / src / rdb_protocol / terms / arr . cc <nl> class pend_term_t : public op_term_t { <nl> datum_t arr = args - > arg ( env , 0 ) - > as_datum ( ) ; <nl> datum_t new_el = args - > arg ( env , 1 ) - > as_datum ( ) ; <nl> datum_array_builder_t out ( env - > env - > limits ( ) ) ; <nl> - out . reserve ( arr - > arr_size ( ) + 1 ) ; <nl> + out . reserve ( arr . 
arr_size ( ) + 1 ) ; <nl> if ( which_pend = = PRE ) { <nl> / / TODO : this is horrendously inefficient . <nl> out . add ( new_el ) ; <nl> - for ( size_t i = 0 ; i < arr - > arr_size ( ) ; + + i ) { <nl> - out . add ( arr - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < arr . arr_size ( ) ; + + i ) { <nl> + out . add ( arr . get ( i ) ) ; <nl> } <nl> } else { <nl> / / TODO : this is horrendously inefficient . <nl> - for ( size_t i = 0 ; i < arr - > arr_size ( ) ; + + i ) { <nl> - out . add ( arr - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < arr . arr_size ( ) ; + + i ) { <nl> + out . add ( arr . get ( i ) ) ; <nl> } <nl> out . add ( new_el ) ; <nl> } <nl> uint64_t canonicalize ( const term_t * t , int64_t index , size_t size , bool * oob_out <nl> / / needed because nth_term_impl may need to recurse over its contents to deal with <nl> / / e . g . grouped data . <nl> counted_t < val_t > nth_term_direct_impl ( const term_t * term , scope_env_t * env , <nl> - counted_t < val_t > aggregate , counted_t < val_t > index ) { <nl> + counted_t < val_t > aggregate , counted_t < val_t > index ) { <nl> int32_t n = index - > as_int < int32_t > ( ) ; <nl> if ( aggregate - > get_type ( ) . is_convertible ( val_t : : type_t : : DATUM ) ) { <nl> datum_t arr = aggregate - > as_datum ( ) ; <nl> class slice_term_t : public bounded_op_term_t { <nl> bool left_open , int64_t fake_l , <nl> bool right_open , int64_t fake_r ) const { <nl> uint64_t real_l , real_r ; <nl> - if ( canon_helper ( arr - > arr_size ( ) , left_open , fake_l , true , & real_l ) ) { <nl> + if ( canon_helper ( arr . arr_size ( ) , left_open , fake_l , true , & real_l ) ) { <nl> real_l = 0 ; <nl> } <nl> - if ( canon_helper ( arr - > arr_size ( ) , right_open , fake_r , false , & real_r ) ) { <nl> + if ( canon_helper ( arr . arr_size ( ) , right_open , fake_r , false , & real_r ) ) { <nl> return new_val ( datum_t : : empty_array ( ) ) ; <nl> } <nl> <nl> datum_array_builder_t out ( limits ) ; <nl> for ( uint64_t i = real_l ; i < real_r ; + + i ) { <nl> - if ( i > = arr - > arr_size ( ) ) { <nl> + if ( i > = arr . arr_size ( ) ) { <nl> break ; <nl> } <nl> - out . add ( arr - > get ( i ) ) ; <nl> + out . add ( arr . get ( i ) ) ; <nl> } <nl> return new_val ( std : : move ( out ) . to_datum ( ) ) ; <nl> } <nl> class slice_term_t : public bounded_op_term_t { <nl> counted_t < val_t > slice_binary ( datum_t binary , <nl> bool left_open , int64_t fake_l , <nl> bool right_open , int64_t fake_r ) const { <nl> - const datum_string_t & data = binary - > as_binary ( ) ; <nl> + const datum_string_t & data = binary . as_binary ( ) ; <nl> uint64_t real_l , real_r ; <nl> if ( canon_helper ( data . size ( ) , left_open , fake_l , true , & real_l ) ) { <nl> real_l = 0 ; <nl> class slice_term_t : public bounded_op_term_t { <nl> <nl> if ( v - > get_type ( ) . is_convertible ( val_t : : type_t : : DATUM ) ) { <nl> datum_t d = v - > as_datum ( ) ; <nl> - if ( d - > get_type ( ) = = datum_t : : R_ARRAY ) { <nl> + if ( d . get_type ( ) = = datum_t : : R_ARRAY ) { <nl> return slice_array ( d , env - > env - > limits ( ) , left_open , fake_l , <nl> right_open , fake_r ) ; <nl> - } else if ( d - > get_type ( ) = = datum_t : : R_BINARY ) { <nl> + } else if ( d . get_type ( ) = = datum_t : : R_BINARY ) { <nl> return slice_binary ( d , left_open , fake_l , right_open , fake_r ) ; <nl> } else { <nl> rfail_target ( v , base_exc_t : : GENERIC , <nl> " Expected ARRAY or BINARY , but found % s . " , <nl> - d - > get_type_name ( ) . c_str ( ) ) ; <nl> + d . get_type_name ( ) . 
c_str ( ) ) ; <nl> } <nl> } else if ( v - > get_type ( ) . is_convertible ( val_t : : type_t : : SEQUENCE ) ) { <nl> counted_t < table_t > t ; <nl> class set_insert_term_t : public op_term_t { <nl> std : : set < datum_t , optional_datum_less_t > <nl> el_set ( optional_datum_less_t ( env - > env - > reql_version ( ) ) ) ; <nl> datum_array_builder_t out ( env - > env - > limits ( ) ) ; <nl> - for ( size_t i = 0 ; i < arr - > arr_size ( ) ; + + i ) { <nl> - if ( el_set . insert ( arr - > get ( i ) ) . second ) { <nl> - out . add ( arr - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < arr . arr_size ( ) ; + + i ) { <nl> + if ( el_set . insert ( arr . get ( i ) ) . second ) { <nl> + out . add ( arr . get ( i ) ) ; <nl> } <nl> } <nl> if ( ! std_contains ( el_set , new_el ) ) { <nl> class set_union_term_t : public op_term_t { <nl> std : : set < datum_t , optional_datum_less_t > el_set ( <nl> optional_datum_less_t ( env - > env - > reql_version ( ) ) ) ; <nl> datum_array_builder_t out ( env - > env - > limits ( ) ) ; <nl> - for ( size_t i = 0 ; i < arr1 - > arr_size ( ) ; + + i ) { <nl> - if ( el_set . insert ( arr1 - > get ( i ) ) . second ) { <nl> - out . add ( arr1 - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < arr1 . arr_size ( ) ; + + i ) { <nl> + if ( el_set . insert ( arr1 . get ( i ) ) . second ) { <nl> + out . add ( arr1 . get ( i ) ) ; <nl> } <nl> } <nl> - for ( size_t i = 0 ; i < arr2 - > arr_size ( ) ; + + i ) { <nl> - if ( el_set . insert ( arr2 - > get ( i ) ) . second ) { <nl> - out . add ( arr2 - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < arr2 . arr_size ( ) ; + + i ) { <nl> + if ( el_set . insert ( arr2 . get ( i ) ) . second ) { <nl> + out . add ( arr2 . get ( i ) ) ; <nl> } <nl> } <nl> <nl> class set_intersection_term_t : public op_term_t { <nl> std : : set < datum_t , optional_datum_less_t > <nl> el_set ( optional_datum_less_t ( env - > env - > reql_version ( ) ) ) ; <nl> datum_array_builder_t out ( env - > env - > limits ( ) ) ; <nl> - for ( size_t i = 0 ; i < arr1 - > arr_size ( ) ; + + i ) { <nl> - el_set . insert ( arr1 - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < arr1 . arr_size ( ) ; + + i ) { <nl> + el_set . insert ( arr1 . get ( i ) ) ; <nl> } <nl> - for ( size_t i = 0 ; i < arr2 - > arr_size ( ) ; + + i ) { <nl> - if ( std_contains ( el_set , arr2 - > get ( i ) ) ) { <nl> - out . add ( arr2 - > get ( i ) ) ; <nl> - el_set . erase ( arr2 - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < arr2 . arr_size ( ) ; + + i ) { <nl> + if ( std_contains ( el_set , arr2 . get ( i ) ) ) { <nl> + out . add ( arr2 . get ( i ) ) ; <nl> + el_set . erase ( arr2 . get ( i ) ) ; <nl> } <nl> } <nl> <nl> class set_difference_term_t : public op_term_t { <nl> std : : set < datum_t , optional_datum_less_t > <nl> el_set ( optional_datum_less_t ( env - > env - > reql_version ( ) ) ) ; <nl> datum_array_builder_t out ( env - > env - > limits ( ) ) ; <nl> - for ( size_t i = 0 ; i < arr2 - > arr_size ( ) ; + + i ) { <nl> - el_set . insert ( arr2 - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < arr2 . arr_size ( ) ; + + i ) { <nl> + el_set . insert ( arr2 . get ( i ) ) ; <nl> } <nl> - for ( size_t i = 0 ; i < arr1 - > arr_size ( ) ; + + i ) { <nl> - if ( ! std_contains ( el_set , arr1 - > get ( i ) ) ) { <nl> - out . add ( arr1 - > get ( i ) ) ; <nl> - el_set . insert ( arr1 - > get ( i ) ) ; <nl> + for ( size_t i = 0 ; i < arr1 . arr_size ( ) ; + + i ) { <nl> + if ( ! std_contains ( el_set , arr1 . get ( i ) ) ) { <nl> + out . add ( arr1 . get ( i ) ) ; <nl> + el_set . insert ( arr1 . 
get ( i ) ) ; <nl> } <nl> } <nl> <nl> class at_term_t : public op_term_t { <nl> datum_array_builder_t arr ( args - > arg ( env , 0 ) - > as_datum ( ) , env - > env - > limits ( ) ) ; <nl> size_t index ; <nl> if ( index_method_ = = ELEMENTS ) { <nl> - index = canonicalize ( this , args - > arg ( env , 1 ) - > as_datum ( ) - > as_int ( ) , arr . size ( ) ) ; <nl> + index = canonicalize ( this , args - > arg ( env , 1 ) - > as_datum ( ) . as_int ( ) , arr . size ( ) ) ; <nl> } else if ( index_method_ = = SPACES ) { <nl> - index = canonicalize ( this , args - > arg ( env , 1 ) - > as_datum ( ) - > as_int ( ) , arr . size ( ) + 1 ) ; <nl> + index = canonicalize ( this , args - > arg ( env , 1 ) - > as_datum ( ) . as_int ( ) , arr . size ( ) + 1 ) ; <nl> } else { <nl> unreachable ( ) ; <nl> } <nl> class delete_at_term_t : public at_term_t { <nl> array - > erase ( index ) ; <nl> } else { <nl> int end_index = <nl> - canonicalize ( this , args - > arg ( env , 2 ) - > as_datum ( ) - > as_int ( ) , array - > size ( ) ) ; <nl> + canonicalize ( this , args - > arg ( env , 2 ) - > as_datum ( ) . as_int ( ) , array - > size ( ) ) ; <nl> array - > erase_range ( env - > env - > reql_version ( ) , index , end_index ) ; <nl> } <nl> } <nl> class contains_term_t : public op_term_t { <nl> { <nl> profile : : sampler_t sampler ( " Evaluating elements in contains . " , <nl> env - > env - > trace ) ; <nl> - while ( datum_t el = seq - > next ( env - > env , batchspec ) ) { <nl> + datum_t el ; <nl> + while ( el = seq - > next ( env - > env , batchspec ) , el . has ( ) ) { <nl> for ( auto it = required_els . begin ( ) ; it ! = required_els . end ( ) ; + + it ) { <nl> - if ( * * it = = * el ) { <nl> + if ( * it = = el ) { <nl> std : : swap ( * it , required_els . back ( ) ) ; <nl> required_els . pop_back ( ) ; <nl> break ; / / Bag semantics for contains . <nl> class args_term_t : public op_term_t { <nl> eval_flags_t eval_flags ) const { <nl> counted_t < val_t > v0 = args - > arg ( env , 0 , eval_flags ) ; <nl> / / If v0 is not an array , force a type error . <nl> - v0 - > as_datum ( ) - > check_type ( datum_t : : R_ARRAY ) ; <nl> + v0 - > as_datum ( ) . check_type ( datum_t : : R_ARRAY ) ; <nl> return v0 ; <nl> } <nl> private : <nl> mmm a / src / rdb_protocol / terms / datum_terms . cc <nl> ppp b / src / rdb_protocol / terms / datum_terms . cc <nl> class binary_term_t : public op_term_t { <nl> counted_t < val_t > arg = args - > arg ( env , 0 ) ; <nl> datum_t datum_arg = arg - > as_datum ( ) ; <nl> <nl> - if ( datum_arg - > get_type ( ) = = datum_t : : type_t : : R_BINARY ) { <nl> + if ( datum_arg . get_type ( ) = = datum_t : : type_t : : R_BINARY ) { <nl> return arg ; <nl> } <nl> <nl> - const datum_string_t & datum_str = datum_arg - > as_str ( ) ; <nl> + const datum_string_t & datum_str = datum_arg . as_str ( ) ; <nl> return new_val ( datum_t : : binary ( datum_string_t ( datum_str ) ) ) ; <nl> } <nl> virtual const char * name ( ) const { return " binary " ; } <nl> mmm a / src / rdb_protocol / terms / db_table . cc <nl> ppp b / src / rdb_protocol / terms / db_table . cc <nl> std : : map < name_string_t , size_t > get_replica_counts ( counted_t < val_t > arg ) { <nl> r_sanity_check ( arg . has ( ) ) ; <nl> std : : map < name_string_t , size_t > replica_counts ; <nl> datum_t datum = arg - > as_datum ( ) ; <nl> - if ( datum - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + if ( datum . get_type ( ) = = datum_t : : R_OBJECT ) { <nl> for ( size_t i = 0 ; i < datum . 
obj_size ( ) ; + + i ) { <nl> std : : pair < datum_string_t , datum_t > pair = datum . get_pair ( i ) ; <nl> name_string_t name ; <nl> std : : map < name_string_t , size_t > get_replica_counts ( counted_t < val_t > arg ) { <nl> strprintf ( " Integer too large : % " PRIi64 , replicas ) ) ; <nl> replica_counts . insert ( std : : make_pair ( name , replicas2 ) ) ; <nl> } <nl> - } else if ( datum - > get_type ( ) = = datum_t : : R_NUM ) { <nl> + } else if ( datum . get_type ( ) = = datum_t : : R_NUM ) { <nl> size_t replicas = arg - > as_int < size_t > ( ) ; <nl> replica_counts . insert ( std : : make_pair ( <nl> name_string_t : : guarantee_valid ( " default " ) , replicas ) ) ; <nl> } else { <nl> rfail_target ( arg . get ( ) , base_exc_t : : GENERIC , <nl> " Expected type OBJECT or NUMBER but found % s : \ n % s " , <nl> - datum - > get_type_name ( ) . c_str ( ) , datum - > print ( ) . c_str ( ) ) ; <nl> + datum . get_type_name ( ) . c_str ( ) , datum . print ( ) . c_str ( ) ) ; <nl> } <nl> return replica_counts ; <nl> } <nl> class get_all_term_t : public op_term_t { <nl> for ( size_t i = 1 ; i < args - > num_args ( ) ; + + i ) { <nl> datum_t key = args - > arg ( env , i ) - > as_datum ( ) ; <nl> datum_t row = table - > get_row ( env - > env , key ) ; <nl> - if ( row - > get_type ( ) ! = datum_t : : R_NULL ) { <nl> + if ( row . get_type ( ) ! = datum_t : : R_NULL ) { <nl> arr . add ( row ) ; <nl> } <nl> } <nl> mmm a / src / rdb_protocol / terms / error . cc <nl> ppp b / src / rdb_protocol / terms / error . cc <nl> class default_term_t : public op_term_t { <nl> v = args - > arg ( env , 0 ) ; <nl> if ( v - > get_type ( ) . is_convertible ( val_t : : type_t : : DATUM ) ) { <nl> func_arg = v - > as_datum ( ) ; <nl> - if ( func_arg - > get_type ( ) ! = datum_t : : R_NULL ) { <nl> + if ( func_arg . get_type ( ) ! = datum_t : : R_NULL ) { <nl> return v ; <nl> } <nl> } else { <nl> class default_term_t : public op_term_t { <nl> } <nl> } <nl> r_sanity_check ( func_arg . has ( ) ) ; <nl> - r_sanity_check ( func_arg - > get_type ( ) = = datum_t : : R_NULL <nl> - | | func_arg - > get_type ( ) = = datum_t : : R_STR ) ; <nl> + r_sanity_check ( func_arg . get_type ( ) = = datum_t : : R_NULL <nl> + | | func_arg . get_type ( ) = = datum_t : : R_STR ) ; <nl> try { <nl> counted_t < val_t > def = args - > arg ( env , 1 ) ; <nl> if ( def - > get_type ( ) . is_convertible ( val_t : : type_t : : FUNC ) ) { <nl> class default_term_t : public op_term_t { <nl> if ( err . has ( ) ) { <nl> throw * err ; <nl> } else { <nl> - r_sanity_check ( func_arg - > get_type ( ) = = datum_t : : R_NULL ) ; <nl> + r_sanity_check ( func_arg . get_type ( ) = = datum_t : : R_NULL ) ; <nl> return v ; <nl> } <nl> } else { <nl> mmm a / src / rdb_protocol / terms / geo . cc <nl> ppp b / src / rdb_protocol / terms / geo . cc <nl> class point_term_t : public geo_term_t { <nl> / / Accepts either a geometry object of type Point , or an array with two coordinates . <nl> / / We often want to support both . <nl> lat_lon_point_t parse_point_argument ( const datum_t & point_datum ) { <nl> - if ( point_datum - > is_ptype ( pseudo : : geometry_string ) ) { <nl> + if ( point_datum . 
is_ptype ( pseudo : : geometry_string ) ) { <nl> / / The argument is a point ( should be at least , if not this will throw ) <nl> return extract_lat_lon_point ( point_datum ) ; <nl> } else { <nl> / / The argument must be a coordinate pair <nl> - rcheck_target ( & point_datum , base_exc_t : : GENERIC , point_datum - > arr_size ( ) = = 2 , <nl> + rcheck_target ( & point_datum , base_exc_t : : GENERIC , point_datum . arr_size ( ) = = 2 , <nl> strprintf ( " Expected point coordinate pair . " <nl> " Got % zu element array instead of a 2 element one . " , <nl> - point_datum - > arr_size ( ) ) ) ; <nl> - double lat = point_datum - > get ( 0 ) - > as_num ( ) ; <nl> - double lon = point_datum - > get ( 1 ) - > as_num ( ) ; <nl> + point_datum . arr_size ( ) ) ) ; <nl> + double lat = point_datum . get ( 0 ) . as_num ( ) ; <nl> + double lon = point_datum . get ( 1 ) . as_num ( ) ; <nl> return lat_lon_point_t ( lat , lon ) ; <nl> } <nl> } <nl> class includes_term_t : public geo_obj_or_seq_op_term_t { <nl> ellipsoid_spec_t pick_reference_ellipsoid ( scope_env_t * env , args_t * args ) { <nl> counted_t < val_t > geo_system_arg = args - > optarg ( env , " geo_system " ) ; <nl> if ( geo_system_arg . has ( ) ) { <nl> - if ( geo_system_arg - > as_datum ( ) - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + if ( geo_system_arg - > as_datum ( ) . get_type ( ) = = datum_t : : R_OBJECT ) { <nl> / / We expect a reference ellipsoid with parameters ' a ' and ' f ' . <nl> / / ( equator radius and the flattening ) <nl> - double a = geo_system_arg - > as_datum ( ) - > get_field ( " a " ) - > as_num ( ) ; <nl> - double f = geo_system_arg - > as_datum ( ) - > get_field ( " f " ) - > as_num ( ) ; <nl> + double a = geo_system_arg - > as_datum ( ) . get_field ( " a " ) . as_num ( ) ; <nl> + double f = geo_system_arg - > as_datum ( ) . get_field ( " f " ) . as_num ( ) ; <nl> rcheck_target ( geo_system_arg . get ( ) , base_exc_t : : GENERIC , <nl> a > 0 . 0 , " The equator radius ` a ` must be positive . " ) ; <nl> rcheck_target ( geo_system_arg . get ( ) , base_exc_t : : GENERIC , <nl> class distance_term_t : public geo_term_t { <nl> scoped_ptr_t < S2Point > p ; <nl> datum_t g ; <nl> const std : : string g1_type = <nl> - g1_arg - > as_ptype ( pseudo : : geometry_string ) - > get_field ( " type " ) - > as_str ( ) . to_std ( ) ; <nl> + g1_arg - > as_ptype ( pseudo : : geometry_string ) . get_field ( " type " ) . as_str ( ) . to_std ( ) ; <nl> if ( g1_type = = " Point " ) { <nl> p = to_s2point ( g1_arg - > as_ptype ( pseudo : : geometry_string ) ) ; <nl> g = g2_arg - > as_ptype ( pseudo : : geometry_string ) ; <nl> mmm a / src / rdb_protocol / terms / http . cc <nl> ppp b / src / rdb_protocol / terms / http . cc <nl> class http_term_t : public op_term_t { <nl> <nl> void check_url_params ( const datum_t & params , <nl> pb_rcheckable_t * val ) { <nl> - if ( params - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + if ( params . get_type ( ) = = datum_t : : R_OBJECT ) { <nl> for ( size_t i = 0 ; i < params . obj_size ( ) ; + + i ) { <nl> auto pair = params . get_pair ( i ) ; <nl> if ( pair . second . get_type ( ) ! = datum_t : : R_NUM & & <nl> void check_url_params ( const datum_t & params , <nl> } else { <nl> rfail_target ( val , base_exc_t : : GENERIC , <nl> " Expected ` params ` to be an OBJECT , but found % s : \ n % s " , <nl> - params - > get_type_name ( ) . c_str ( ) , <nl> - params - > print ( ) . c_str ( ) ) ; <nl> + params . get_type_name ( ) . c_str ( ) , <nl> + params . print ( ) . 
c_str ( ) ) ; <nl> } <nl> } <nl> <nl> void check_error_result ( const http_result_t & res , <nl> opts . url . c_str ( ) , <nl> res . error . c_str ( ) ) ; <nl> if ( res . header . has ( ) ) { <nl> - error_string . append ( " \ nheader : \ n " + res . header - > print ( ) ) ; <nl> + error_string . append ( " \ nheader : \ n " + res . header . print ( ) ) ; <nl> } <nl> <nl> if ( res . body . has ( ) ) { <nl> - error_string . append ( " \ nbody : \ n " + res . body - > print ( ) ) ; <nl> + error_string . append ( " \ nbody : \ n " + res . body . print ( ) ) ; <nl> } <nl> <nl> / / Any error coming back from the extproc may be due to the fragility of <nl> http_datum_stream_t : : next_page ( env_t * env ) { <nl> / / the end of the stream <nl> more = apply_depaginate ( env , res ) ; <nl> <nl> - if ( res . body - > get_type ( ) = = datum_t : : R_ARRAY ) { <nl> + if ( res . body . get_type ( ) = = datum_t : : R_ARRAY ) { <nl> std : : vector < datum_t > res_arr ; <nl> res_arr . reserve ( res . body . arr_size ( ) ) ; <nl> for ( size_t i = 0 ; i < res . body . arr_size ( ) ; + + i ) { <nl> bool http_datum_stream_t : : apply_depaginate ( env_t * env , const http_result_t & res ) <nl> http_method_to_str ( opts . method ) . c_str ( ) , <nl> opts . url . c_str ( ) , <nl> ex . what ( ) , <nl> - args [ 0 ] - > print ( ) . c_str ( ) ) , <nl> + args [ 0 ] . print ( ) . c_str ( ) ) , <nl> ex . backtrace ( ) ) ; <nl> } <nl> } <nl> <nl> bool http_datum_stream_t : : apply_depage_url ( datum_t new_url ) { <nl> / / NULL url indicates no further depagination <nl> - if ( new_url - > get_type ( ) = = datum_t : : R_NULL ) { <nl> + if ( new_url . get_type ( ) = = datum_t : : R_NULL ) { <nl> return false ; <nl> - } else if ( new_url - > get_type ( ) ! = datum_t : : R_STR ) { <nl> + } else if ( new_url . get_type ( ) ! = datum_t : : R_STR ) { <nl> rfail ( base_exc_t : : GENERIC , <nl> " Expected ` url ` in OBJECT returned by ` page ` to be a " <nl> " STRING or NULL , but found % s . " , <nl> - new_url - > get_type_name ( ) . c_str ( ) ) ; <nl> + new_url . get_type_name ( ) . c_str ( ) ) ; <nl> } <nl> - opts . url . assign ( new_url - > as_str ( ) . to_std ( ) ) ; <nl> + opts . url . assign ( new_url . as_str ( ) . to_std ( ) ) ; <nl> return true ; <nl> } <nl> <nl> void http_datum_stream_t : : apply_depage_params ( datum_t new_params ) { <nl> / / Verify new params and merge with the old ones , new taking precedence <nl> check_url_params ( new_params , this ) ; <nl> - opts . url_params - > merge ( new_params ) ; <nl> + opts . url_params . merge ( new_params ) ; <nl> } <nl> <nl> bool http_datum_stream_t : : handle_depage_result ( datum_t depage ) { <nl> - if ( depage - > get_type ( ) = = datum_t : : R_NULL | | <nl> - depage - > get_type ( ) = = datum_t : : R_STR ) { <nl> + if ( depage . get_type ( ) = = datum_t : : R_NULL | | <nl> + depage . get_type ( ) = = datum_t : : R_STR ) { <nl> return apply_depage_url ( depage ) ; <nl> - } else if ( depage - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> - datum_t new_url = depage - > get_field ( " url " , NOTHROW ) ; <nl> - datum_t new_params = depage - > get_field ( " params " , NOTHROW ) ; <nl> + } else if ( depage . get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + datum_t new_url = depage . get_field ( " url " , NOTHROW ) ; <nl> + datum_t new_params = depage . get_field ( " params " , NOTHROW ) ; <nl> if ( ! new_url . has ( ) & & ! new_params . 
has ( ) ) { <nl> rfail ( base_exc_t : : GENERIC , <nl> " OBJECT returned by ` page ` must contain " <nl> bool http_datum_stream_t : : handle_depage_result ( datum_t depage ) { <nl> } else { <nl> rfail ( base_exc_t : : GENERIC , <nl> " Expected ` page ` to return an OBJECT , but found % s . " , <nl> - depage - > get_type_name ( ) . c_str ( ) ) ; <nl> + depage . get_type_name ( ) . c_str ( ) ) ; <nl> } <nl> <nl> return true ; <nl> void http_term_t : : get_header ( scope_env_t * env , <nl> counted_t < val_t > header = args - > optarg ( env , " header " ) ; <nl> if ( header . has ( ) ) { <nl> datum_t datum_header = header - > as_datum ( ) ; <nl> - if ( datum_header - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + if ( datum_header . get_type ( ) = = datum_t : : R_OBJECT ) { <nl> for ( size_t i = 0 ; i < datum_header . obj_size ( ) ; + + i ) { <nl> auto pair = datum_header . get_pair ( i ) ; <nl> std : : string str ; <nl> - if ( pair . second - > get_type ( ) = = datum_t : : R_STR ) { <nl> + if ( pair . second . get_type ( ) = = datum_t : : R_STR ) { <nl> str = strprintf ( " % s : % s " , pair . first . to_std ( ) . c_str ( ) , <nl> pair . second . as_str ( ) . to_std ( ) . c_str ( ) ) ; <nl> } else if ( pair . second . get_type ( ) ! = datum_t : : R_NULL ) { <nl> void http_term_t : : get_header ( scope_env_t * env , <nl> verify_header_string ( str , header . get ( ) ) ; <nl> header_out - > push_back ( str ) ; <nl> } <nl> - } else if ( datum_header - > get_type ( ) = = datum_t : : R_ARRAY ) { <nl> - for ( size_t i = 0 ; i < datum_header - > arr_size ( ) ; + + i ) { <nl> - datum_t line = datum_header - > get ( i ) ; <nl> - if ( line - > get_type ( ) ! = datum_t : : R_STR ) { <nl> + } else if ( datum_header . get_type ( ) = = datum_t : : R_ARRAY ) { <nl> + for ( size_t i = 0 ; i < datum_header . arr_size ( ) ; + + i ) { <nl> + datum_t line = datum_header . get ( i ) ; <nl> + if ( line . get_type ( ) ! = datum_t : : R_STR ) { <nl> rfail_target ( header . get ( ) , base_exc_t : : GENERIC , <nl> " Expected ` header [ % zu ] ` to be a STRING , but found % s . " , <nl> - i , line - > get_type_name ( ) . c_str ( ) ) ; <nl> + i , line . get_type_name ( ) . c_str ( ) ) ; <nl> } <nl> - std : : string str = line - > as_str ( ) . to_std ( ) ; <nl> + std : : string str = line . as_str ( ) . to_std ( ) ; <nl> verify_header_string ( str , header . get ( ) ) ; <nl> header_out - > push_back ( str ) ; <nl> } <nl> } else { <nl> rfail_target ( header . get ( ) , base_exc_t : : GENERIC , <nl> " Expected ` header ` to be an ARRAY or OBJECT , but found % s . " , <nl> - datum_header - > get_type_name ( ) . c_str ( ) ) ; <nl> + datum_header . get_type_name ( ) . c_str ( ) ) ; <nl> } <nl> } <nl> } <nl> void http_term_t : : get_method ( scope_env_t * env , <nl> std : : string http_term_t : : get_auth_item ( const datum_t & datum , <nl> const std : : string & name , <nl> const pb_rcheckable_t * auth ) { <nl> - datum_t item = datum - > get_field ( datum_string_t ( name ) , NOTHROW ) ; <nl> + datum_t item = datum . get_field ( datum_string_t ( name ) , NOTHROW ) ; <nl> if ( ! item . has ( ) ) { <nl> rfail_target ( auth , base_exc_t : : GENERIC , <nl> " ` auth . % s ` not found in the auth object . " , name . c_str ( ) ) ; <nl> - } else if ( item - > get_type ( ) ! = datum_t : : R_STR ) { <nl> + } else if ( item . get_type ( ) ! = datum_t : : R_STR ) { <nl> rfail_target ( auth , base_exc_t : : GENERIC , <nl> " Expected ` auth . % s ` to be a STRING , but found % s . " , <nl> - name . 
c_str ( ) , item - > get_type_name ( ) . c_str ( ) ) ; <nl> + name . c_str ( ) , item . get_type_name ( ) . c_str ( ) ) ; <nl> } <nl> - return item - > as_str ( ) . to_std ( ) ; <nl> + return item . as_str ( ) . to_std ( ) ; <nl> } <nl> <nl> / / The ` auth ` optarg takes an object consisting of the following fields : <nl> void http_term_t : : get_auth ( scope_env_t * env , <nl> counted_t < val_t > auth = args - > optarg ( env , " auth " ) ; <nl> if ( auth . has ( ) ) { <nl> datum_t datum_auth = auth - > as_datum ( ) ; <nl> - if ( datum_auth - > get_type ( ) ! = datum_t : : R_OBJECT ) { <nl> + if ( datum_auth . get_type ( ) ! = datum_t : : R_OBJECT ) { <nl> rfail_target ( auth . get ( ) , base_exc_t : : GENERIC , <nl> " Expected ` auth ` to be an OBJECT , but found % s . " , <nl> - datum_auth - > get_type_name ( ) . c_str ( ) ) ; <nl> + datum_auth . get_type_name ( ) . c_str ( ) ) ; <nl> } <nl> <nl> / / Default to ' basic ' if no type is specified <nl> std : : string type ; <nl> { <nl> - datum_t type_datum = datum_auth - > get_field ( " type " , NOTHROW ) ; <nl> + datum_t type_datum = datum_auth . get_field ( " type " , NOTHROW ) ; <nl> <nl> if ( type_datum . has ( ) ) { <nl> - if ( type_datum - > get_type ( ) ! = datum_t : : R_STR ) { <nl> + if ( type_datum . get_type ( ) ! = datum_t : : R_STR ) { <nl> rfail_target ( auth . get ( ) , base_exc_t : : GENERIC , <nl> " Expected ` auth . type ` to be a STRING , but found % s . " , <nl> - datum_auth - > get_type_name ( ) . c_str ( ) ) ; <nl> + datum_auth . get_type_name ( ) . c_str ( ) ) ; <nl> } <nl> - type . assign ( type_datum - > as_str ( ) . to_std ( ) ) ; <nl> + type . assign ( type_datum . as_str ( ) . to_std ( ) ) ; <nl> } else { <nl> type . assign ( " basic " ) ; <nl> } <nl> std : : string http_term_t : : print_http_param ( const datum_t & datum , <nl> const char * val_name , <nl> const char * key_name , <nl> const pb_rcheckable_t * val ) { <nl> - if ( datum - > get_type ( ) = = datum_t : : R_NUM ) { <nl> + if ( datum . get_type ( ) = = datum_t : : R_NUM ) { <nl> return strprintf ( " % " PR_RECONSTRUCTABLE_DOUBLE , <nl> - datum - > as_num ( ) ) ; <nl> - } else if ( datum - > get_type ( ) = = datum_t : : R_STR ) { <nl> - return datum - > as_str ( ) . to_std ( ) ; <nl> - } else if ( datum - > get_type ( ) = = datum_t : : R_NULL ) { <nl> + datum . as_num ( ) ) ; <nl> + } else if ( datum . get_type ( ) = = datum_t : : R_STR ) { <nl> + return datum . as_str ( ) . to_std ( ) ; <nl> + } else if ( datum . get_type ( ) = = datum_t : : R_NULL ) { <nl> return std : : string ( ) ; <nl> } <nl> <nl> rfail_target ( val , base_exc_t : : GENERIC , <nl> " Expected ` % s . % s ` to be a NUMBER , STRING or NULL , but found % s . " , <nl> - val_name , key_name , datum - > get_type_name ( ) . c_str ( ) ) ; <nl> + val_name , key_name , datum . get_type_name ( ) . c_str ( ) ) ; <nl> } <nl> <nl> / / The ` data ` optarg is used to pass in the data to be passed in the body of the <nl> void http_term_t : : get_data ( <nl> if ( method = = http_method_t : : PUT | | <nl> method = = http_method_t : : PATCH | | <nl> method = = http_method_t : : DELETE ) { <nl> - if ( datum_data - > get_type ( ) = = datum_t : : R_STR ) { <nl> - data_out - > assign ( datum_data - > as_str ( ) . to_std ( ) ) ; <nl> + if ( datum_data . get_type ( ) = = datum_t : : R_STR ) { <nl> + data_out - > assign ( datum_data . as_str ( ) . 
to_std ( ) ) ; <nl> } else { <nl> / / Set the Content - Type to application / json - this may be overwritten <nl> / / later by the ' header ' optarg <nl> header_out - > push_back ( " Content - Type : application / json " ) ; <nl> - data_out - > assign ( datum_data - > print ( ) ) ; <nl> + data_out - > assign ( datum_data . print ( ) ) ; <nl> } <nl> } else if ( method = = http_method_t : : POST ) { <nl> - if ( datum_data - > get_type ( ) = = datum_t : : R_STR ) { <nl> + if ( datum_data . get_type ( ) = = datum_t : : R_STR ) { <nl> / / Use the put data for this , as we assume the user does any <nl> / / encoding they need when they pass a string <nl> - data_out - > assign ( datum_data - > as_str ( ) . to_std ( ) ) ; <nl> - } else if ( datum_data - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + data_out - > assign ( datum_data . as_str ( ) . to_std ( ) ) ; <nl> + } else if ( datum_data . get_type ( ) = = datum_t : : R_OBJECT ) { <nl> for ( size_t i = 0 ; i < datum_data . obj_size ( ) ; + + i ) { <nl> auto pair = datum_data . get_pair ( i ) ; <nl> std : : string val_str = print_http_param ( pair . second , <nl> void http_term_t : : get_data ( <nl> } else { <nl> rfail_target ( data . get ( ) , base_exc_t : : GENERIC , <nl> " Expected ` data ` to be a STRING or OBJECT , but found % s . " , <nl> - datum_data - > get_type_name ( ) . c_str ( ) ) ; <nl> + datum_data . get_type_name ( ) . c_str ( ) ) ; <nl> } <nl> } else { <nl> rfail_target ( this , base_exc_t : : GENERIC , <nl> mmm a / src / rdb_protocol / terms / js . cc <nl> ppp b / src / rdb_protocol / terms / js . cc <nl> class javascript_term_t : public op_term_t { <nl> } <nl> } <nl> <nl> - std : : string source = args - > arg ( env , 0 ) - > as_datum ( ) - > as_str ( ) . to_std ( ) ; <nl> + std : : string source = args - > arg ( env , 0 ) - > as_datum ( ) . as_str ( ) . to_std ( ) ; <nl> <nl> / / JS runner configuration is limited to setting an execution timeout . <nl> js_runner_t : : req_config_t config ; <nl> mmm a / src / rdb_protocol / terms / obj . cc <nl> ppp b / src / rdb_protocol / terms / obj . cc <nl> class object_term_t : public op_term_t { <nl> strprintf ( " Duplicate key ` % s ` in object . " <nl> " ( got ` % s ` and ` % s ` as values ) " , <nl> key . to_std ( ) . c_str ( ) , <nl> - obj . at ( key ) - > trunc_print ( ) . c_str ( ) , <nl> - keyval - > trunc_print ( ) . c_str ( ) ) ) ; <nl> + obj . at ( key ) . trunc_print ( ) . c_str ( ) , <nl> + keyval . trunc_print ( ) . c_str ( ) ) ) ; <nl> } <nl> return new_val ( std : : move ( obj ) . to_datum ( ) ) ; <nl> } <nl> mmm a / src / rdb_protocol / terms / obj_or_seq . cc <nl> ppp b / src / rdb_protocol / terms / obj_or_seq . cc <nl> counted_t < val_t > obj_or_seq_op_impl_t : : eval_impl_dereferenced ( <nl> d = v0 - > as_datum ( ) ; <nl> } <nl> <nl> - if ( d . has ( ) & & d - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + if ( d . has ( ) & & d . get_type ( ) = = datum_t : : R_OBJECT ) { <nl> return helper ( ) ; <nl> - } else if ( ( d . has ( ) & & d - > get_type ( ) = = datum_t : : R_ARRAY ) | | <nl> + } else if ( ( d . has ( ) & & d . get_type ( ) = = datum_t : : R_ARRAY ) | | <nl> ( ! d . has ( ) <nl> & & v0 - > get_type ( ) . 
is_convertible ( val_t : : type_t : : SEQUENCE ) ) ) { <nl> / / The above if statement is complicated because it produces better <nl> class pluck_term_t : public obj_or_seq_op_term_t { <nl> private : <nl> virtual counted_t < val_t > obj_eval ( scope_env_t * env , args_t * args , counted_t < val_t > v0 ) const { <nl> datum_t obj = v0 - > as_datum ( ) ; <nl> - r_sanity_check ( obj - > get_type ( ) = = datum_t : : R_OBJECT ) ; <nl> + r_sanity_check ( obj . get_type ( ) = = datum_t : : R_OBJECT ) ; <nl> <nl> const size_t n = args - > num_args ( ) ; <nl> std : : vector < datum_t > paths ; <nl> class without_term_t : public obj_or_seq_op_term_t { <nl> private : <nl> virtual counted_t < val_t > obj_eval ( scope_env_t * env , args_t * args , counted_t < val_t > v0 ) const { <nl> datum_t obj = v0 - > as_datum ( ) ; <nl> - r_sanity_check ( obj - > get_type ( ) = = datum_t : : R_OBJECT ) ; <nl> + r_sanity_check ( obj . get_type ( ) = = datum_t : : R_OBJECT ) ; <nl> <nl> std : : vector < datum_t > paths ; <nl> const size_t n = args - > num_args ( ) ; <nl> class merge_term_t : public obj_or_seq_op_term_t { <nl> / / We branch here because compiling functions is expensive , and <nl> / / ` obj_eval ` may be called many many times . <nl> if ( v - > get_type ( ) . is_convertible ( val_t : : type_t : : DATUM ) ) { <nl> - d = d - > merge ( v - > as_datum ( ) ) ; <nl> + d = d . merge ( v - > as_datum ( ) ) ; <nl> } else { <nl> auto f = v - > as_func ( CONSTANT_SHORTCUT ) ; <nl> - d = d - > merge ( f - > call ( env - > env , d , LITERAL_OK ) - > as_datum ( ) ) ; <nl> + d = d . merge ( f - > call ( env - > env , d , LITERAL_OK ) - > as_datum ( ) ) ; <nl> } <nl> } <nl> return new_val ( d ) ; <nl> class has_fields_term_t : public obj_or_seq_op_term_t { <nl> private : <nl> virtual counted_t < val_t > obj_eval ( scope_env_t * env , args_t * args , counted_t < val_t > v0 ) const { <nl> datum_t obj = v0 - > as_datum ( ) ; <nl> - r_sanity_check ( obj - > get_type ( ) = = datum_t : : R_OBJECT ) ; <nl> + r_sanity_check ( obj . get_type ( ) = = datum_t : : R_OBJECT ) ; <nl> <nl> std : : vector < datum_t > paths ; <nl> const size_t n = args - > num_args ( ) ; <nl> class get_field_term_t : public obj_or_seq_op_term_t { <nl> : obj_or_seq_op_term_t ( env , term , SKIP_MAP , argspec_t ( 2 ) ) { } <nl> private : <nl> virtual counted_t < val_t > obj_eval ( scope_env_t * env , args_t * args , counted_t < val_t > v0 ) const { <nl> - return new_val ( v0 - > as_datum ( ) - > get_field ( args - > arg ( env , 1 ) - > as_str ( ) ) ) ; <nl> + return new_val ( v0 - > as_datum ( ) . get_field ( args - > arg ( env , 1 ) - > as_str ( ) ) ) ; <nl> } <nl> virtual const char * name ( ) const { return " get_field " ; } <nl> } ; <nl> class bracket_term_t : public grouped_seq_op_term_t { <nl> datum_t d = v1 - > as_datum ( ) ; <nl> r_sanity_check ( d . has ( ) ) ; <nl> <nl> - switch ( d - > get_type ( ) ) { <nl> + switch ( d . get_type ( ) ) { <nl> case datum_t : : R_NUM : <nl> return nth_term_impl ( this , env , v0 , v1 ) ; <nl> case datum_t : : R_STR : <nl> class bracket_term_t : public grouped_seq_op_term_t { <nl> case datum_t : : R_OBJECT : <nl> case datum_t : : UNINITIALIZED : <nl> default : <nl> - d - > type_error ( strprintf ( " Expected NUMBER or STRING as second argument to ` % s ` but found % s . " , <nl> - name ( ) , d - > get_type_name ( ) . c_str ( ) ) ) ; <nl> + d . type_error ( strprintf ( " Expected NUMBER or STRING as second argument to ` % s ` but found % s . " , <nl> + name ( ) , d . get_type_name ( ) . 
c_str ( ) ) ) ; <nl> unreachable ( ) ; <nl> } <nl> } <nl> mmm a / src / rdb_protocol / terms / pred . cc <nl> ppp b / src / rdb_protocol / terms / pred . cc <nl> class predicate_term_t : public op_term_t { <nl> datum_t lhs = args - > arg ( env , 0 ) - > as_datum ( ) ; <nl> for ( size_t i = 1 ; i < args - > num_args ( ) ; + + i ) { <nl> datum_t rhs = args - > arg ( env , i ) - > as_datum ( ) ; <nl> - if ( ! ( pred ) ( env - > env - > reql_version ( ) , * lhs , * rhs ) ) { <nl> + if ( ! ( pred ) ( env - > env - > reql_version ( ) , lhs , rhs ) ) { <nl> return new_val_bool ( static_cast < bool > ( false ^ invert ) ) ; <nl> } <nl> lhs = rhs ; <nl> mmm a / src / rdb_protocol / terms / random . cc <nl> ppp b / src / rdb_protocol / terms / random . cc <nl> class sample_term_t : public op_term_t { <nl> batchspec_t batchspec = batchspec_t : : user ( batch_type_t : : TERMINAL , env - > env ) ; <nl> { <nl> profile : : sampler_t sampler ( " Sampling elements . " , env - > env - > trace ) ; <nl> - while ( datum_t row = seq - > next ( env - > env , batchspec ) ) { <nl> + datum_t row ; <nl> + while ( row = seq - > next ( env - > env , batchspec ) , row . has ( ) ) { <nl> element_number + + ; <nl> if ( result . size ( ) < num ) { <nl> result . push_back ( row ) ; <nl> mmm a / src / rdb_protocol / terms / seq . cc <nl> ppp b / src / rdb_protocol / terms / seq . cc <nl> class count_term_t : public grouped_seq_op_term_t { <nl> if ( args - > num_args ( ) = = 1 ) { <nl> if ( v0 - > get_type ( ) . is_convertible ( val_t : : type_t : : DATUM ) ) { <nl> datum_t d = v0 - > as_datum ( ) ; <nl> - if ( d - > get_type ( ) = = datum_t : : R_BINARY ) { <nl> + if ( d . get_type ( ) = = datum_t : : R_BINARY ) { <nl> return new_val ( datum_t ( <nl> - safe_to_double ( d - > as_binary ( ) . size ( ) ) ) ) ; <nl> + safe_to_double ( d . as_binary ( ) . size ( ) ) ) ) ; <nl> } <nl> } <nl> return v0 - > as_seq ( env - > env ) <nl> class between_term_t : public bounded_op_term_t { <nl> counted_t < table_t > tbl = args - > arg ( env , 0 ) - > as_table ( ) ; <nl> bool left_open = is_left_open ( env , args ) ; <nl> datum_t lb = args - > arg ( env , 1 ) - > as_datum ( ) ; <nl> - if ( lb - > get_type ( ) = = datum_t : : R_NULL ) { <nl> + if ( lb . get_type ( ) = = datum_t : : R_NULL ) { <nl> lb . reset ( ) ; <nl> } <nl> bool right_open = is_right_open ( env , args ) ; <nl> datum_t rb = args - > arg ( env , 2 ) - > as_datum ( ) ; <nl> - if ( rb - > get_type ( ) = = datum_t : : R_NULL ) { <nl> + if ( rb . get_type ( ) = = datum_t : : R_NULL ) { <nl> rb . reset ( ) ; <nl> } <nl> <nl> if ( lb . has ( ) & & rb . has ( ) ) { <nl> / / This reql_version will always be LATEST , because this function is not <nl> / / deterministic , but whatever . <nl> - if ( lb - > compare_gt ( env - > env - > reql_version ( ) , * rb ) | | <nl> - ( ( left_open | | right_open ) & & * lb = = * rb ) ) { <nl> + if ( lb . compare_gt ( env - > env - > reql_version ( ) , rb ) | | <nl> + ( ( left_open | | right_open ) & & lb = = rb ) ) { <nl> counted_t < datum_stream_t > ds <nl> = make_counted < array_datum_stream_t > ( datum_t : : empty_array ( ) , <nl> backtrace ( ) ) ; <nl> mmm a / src / rdb_protocol / terms / sindex . cc <nl> ppp b / src / rdb_protocol / terms / sindex . 
cc <nl> class sindex_create_term_t : public op_term_t { <nl> virtual counted_t < val_t > eval_impl ( scope_env_t * env , args_t * args , eval_flags_t ) const { <nl> counted_t < table_t > table = args - > arg ( env , 0 ) - > as_table ( ) ; <nl> datum_t name_datum = args - > arg ( env , 1 ) - > as_datum ( ) ; <nl> - std : : string name = name_datum - > as_str ( ) . to_std ( ) ; <nl> + std : : string name = name_datum . as_str ( ) . to_std ( ) ; <nl> rcheck ( name ! = table - > get_pkey ( ) , <nl> base_exc_t : : GENERIC , <nl> strprintf ( " Index name conflict : ` % s ` is the name of the primary key . " , <nl> class sindex_create_term_t : public op_term_t { <nl> counted_t < val_t > v = args - > arg ( env , 2 ) ; <nl> if ( v - > get_type ( ) . is_convertible ( val_t : : type_t : : DATUM ) ) { <nl> datum_t d = v - > as_datum ( ) ; <nl> - if ( d - > get_type ( ) = = datum_t : : R_BINARY ) { <nl> - const char * data = d - > as_binary ( ) . data ( ) ; <nl> - size_t sz = d - > as_binary ( ) . size ( ) ; <nl> + if ( d . get_type ( ) = = datum_t : : R_BINARY ) { <nl> + const char * data = d . as_binary ( ) . data ( ) ; <nl> + size_t sz = d . as_binary ( ) . size ( ) ; <nl> size_t prefix_sz = strlen ( sindex_blob_prefix ) ; <nl> bool bad_prefix = ( sz < prefix_sz ) ; <nl> for ( size_t i = 0 ; ! bad_prefix & & i < prefix_sz ; + + i ) { <nl> class sindex_drop_term_t : public op_term_t { <nl> <nl> virtual counted_t < val_t > eval_impl ( scope_env_t * env , args_t * args , eval_flags_t ) const { <nl> counted_t < table_t > table = args - > arg ( env , 0 ) - > as_table ( ) ; <nl> - std : : string name = args - > arg ( env , 1 ) - > as_datum ( ) - > as_str ( ) . to_std ( ) ; <nl> + std : : string name = args - > arg ( env , 1 ) - > as_datum ( ) . as_str ( ) . to_std ( ) ; <nl> bool success = table - > sindex_drop ( env - > env , name ) ; <nl> if ( success ) { <nl> datum_object_builder_t res ; <nl> int64_t initial_poll_ms = 50 ; <nl> int64_t max_poll_ms = 10000 ; <nl> <nl> bool all_ready ( datum_t statuses ) { <nl> - for ( size_t i = 0 ; i < statuses - > arr_size ( ) ; + + i ) { <nl> - if ( ! statuses - > get ( i ) - > get_field ( " ready " , NOTHROW ) - > as_bool ( ) ) { <nl> + for ( size_t i = 0 ; i < statuses . arr_size ( ) ; + + i ) { <nl> + if ( ! statuses . get ( i ) . get_field ( " ready " , NOTHROW ) . as_bool ( ) ) { <nl> return false ; <nl> } <nl> } <nl> mmm a / src / rdb_protocol / terms / sort . cc <nl> ppp b / src / rdb_protocol / terms / sort . cc <nl> class orderby_term_t : public op_term_t { <nl> return false ! = ( it - > first = = DESC ) ; <nl> } <nl> / / TODO ( 2014 - 08 ) : use datum_t : : cmp instead to be faster <nl> - if ( * lval = = * rval ) { <nl> + if ( lval = = rval ) { <nl> continue ; <nl> } <nl> - return lval - > compare_lt ( env - > reql_version ( ) , * rval ) ! = <nl> + return lval . compare_lt ( env - > reql_version ( ) , rval ) ! = <nl> ( it - > first = = DESC ) ; <nl> } <nl> <nl> class distinct_term_t : public op_term_t { <nl> { <nl> profile : : sampler_t sampler ( " Evaluating elements in distinct . " , <nl> env - > env - > trace ) ; <nl> - while ( datum_t d = s - > next ( env - > env , batchspec ) ) { <nl> + datum_t d ; <nl> + while ( d = s - > next ( env - > env , batchspec ) , d . has ( ) ) { <nl> results . insert ( std : : move ( d ) ) ; <nl> rcheck_array_size ( results , env - > env - > limits ( ) , base_exc_t : : GENERIC ) ; <nl> sampler . new_sample ( ) ; <nl> mmm a / src / rdb_protocol / terms / string . cc <nl> ppp b / src / rdb_protocol / terms / string . 
cc <nl> class split_term_t : public op_term_t { <nl> boost : : optional < std : : string > delim ; <nl> if ( args - > num_args ( ) > 1 ) { <nl> datum_t d = args - > arg ( env , 1 ) - > as_datum ( ) ; <nl> - if ( d - > get_type ( ) ! = datum_t : : R_NULL ) { <nl> - delim = d - > as_str ( ) . to_std ( ) ; <nl> + if ( d . get_type ( ) ! = datum_t : : R_NULL ) { <nl> + delim = d . as_str ( ) . to_std ( ) ; <nl> } <nl> } <nl> <nl> mmm a / src / rdb_protocol / terms / time . cc <nl> ppp b / src / rdb_protocol / terms / time . cc <nl> class during_term_t : public bounded_op_term_t { <nl> datum_t t = args - > arg ( env , 0 ) - > as_ptype ( pseudo : : time_string ) ; <nl> datum_t lb = args - > arg ( env , 1 ) - > as_ptype ( pseudo : : time_string ) ; <nl> datum_t rb = args - > arg ( env , 2 ) - > as_ptype ( pseudo : : time_string ) ; <nl> - int lcmp = pseudo : : time_cmp ( env - > env - > reql_version ( ) , * lb , * t ) ; <nl> - int rcmp = pseudo : : time_cmp ( env - > env - > reql_version ( ) , * t , * rb ) ; <nl> + int lcmp = pseudo : : time_cmp ( env - > env - > reql_version ( ) , lb , t ) ; <nl> + int rcmp = pseudo : : time_cmp ( env - > env - > reql_version ( ) , t , rb ) ; <nl> return new_val_bool ( ! ( lcmp > 0 | | ( lcmp = = 0 & & is_left_open ( env , args ) ) <nl> | | rcmp > 0 | | ( rcmp = = 0 & & is_right_open ( env , args ) ) ) ) ; <nl> } <nl> class time_term_t : public op_term_t { <nl> } <nl> static std : : string parse_tz ( counted_t < val_t > v ) { <nl> datum_t d = v - > as_datum ( ) ; <nl> - return d - > as_str ( ) . to_std ( ) ; <nl> + return d . as_str ( ) . to_std ( ) ; <nl> } <nl> virtual const char * name ( ) const { return " time " ; } <nl> } ; <nl> mmm a / src / rdb_protocol / terms / type_manip . cc <nl> ppp b / src / rdb_protocol / terms / type_manip . cc <nl> class coerce_term_t : public op_term_t { <nl> int start_subtype = 0 ; <nl> if ( opaque_start_type . is_convertible ( val_t : : type_t : : DATUM ) ) { <nl> start_supertype = val_t : : type_t : : DATUM ; <nl> - start_subtype = val - > as_datum ( ) - > get_type ( ) ; <nl> + start_subtype = val - > as_datum ( ) . get_type ( ) ; <nl> } <nl> int start_type = merge_types ( start_supertype , start_subtype ) ; <nl> <nl> class coerce_term_t : public op_term_t { <nl> / / DATUM - > DATUM <nl> if ( supertype ( end_type ) = = val_t : : type_t : : DATUM ) { <nl> if ( start_type = = R_BINARY_TYPE & & end_type = = R_STR_TYPE ) { <nl> - return new_val ( datum_t ( d - > as_binary ( ) ) ) ; <nl> + return new_val ( datum_t ( d . as_binary ( ) ) ) ; <nl> } <nl> if ( start_type = = R_STR_TYPE & & end_type = = R_BINARY_TYPE ) { <nl> - return new_val ( datum_t : : binary ( d - > as_str ( ) ) ) ; <nl> + return new_val ( datum_t : : binary ( d . as_str ( ) ) ) ; <nl> } <nl> <nl> / / DATUM - > STR <nl> if ( end_type = = R_STR_TYPE ) { <nl> - return new_val ( datum_t ( datum_string_t ( d - > print ( ) ) ) ) ; <nl> + return new_val ( datum_t ( datum_string_t ( d . print ( ) ) ) ) ; <nl> } <nl> <nl> / / OBJECT - > ARRAY <nl> class coerce_term_t : public op_term_t { <nl> <nl> / / STR - > NUM <nl> if ( start_type = = R_STR_TYPE & & end_type = = R_NUM_TYPE ) { <nl> - const datum_string_t & s = d - > as_str ( ) ; <nl> + const datum_string_t & s = d . as_str ( ) ; <nl> double dbl ; <nl> char end ; / / Used to ensure that there ' s no trailing garbage . <nl> if ( sscanf ( s . to_std ( ) . 
c_str ( ) , " % lf % c " , & dbl , & end ) = = 1 ) { <nl> class coerce_term_t : public op_term_t { <nl> = batchspec_t : : user ( batch_type_t : : TERMINAL , env - > env ) ; <nl> { <nl> profile : : sampler_t sampler ( " Coercing to object . " , env - > env - > trace ) ; <nl> - while ( auto pair = ds - > next ( env - > env , batchspec ) ) { <nl> - const datum_string_t & key = pair - > get ( 0 ) - > as_str ( ) ; <nl> - datum_t keyval = pair - > get ( 1 ) ; <nl> + datum_t pair ; <nl> + while ( pair = ds - > next ( env - > env , batchspec ) , pair . has ( ) ) { <nl> + const datum_string_t & key = pair . get ( 0 ) . as_str ( ) ; <nl> + datum_t keyval = pair . get ( 1 ) ; <nl> bool b = obj . add ( key , keyval ) ; <nl> rcheck ( ! b , base_exc_t : : GENERIC , <nl> strprintf ( " Duplicate key ` % s ` in coerced object . " <nl> " ( got ` % s ` and ` % s ` as values ) " , <nl> key . to_std ( ) . c_str ( ) , <nl> - obj . at ( key ) - > trunc_print ( ) . c_str ( ) , <nl> - keyval - > trunc_print ( ) . c_str ( ) ) ) ; <nl> + obj . at ( key ) . trunc_print ( ) . c_str ( ) , <nl> + keyval . trunc_print ( ) . c_str ( ) ) ) ; <nl> sampler . new_sample ( ) ; <nl> } <nl> } <nl> class ungroup_term_t : public op_term_t { <nl> int val_type ( counted_t < val_t > v ) { <nl> int t = v - > get_type ( ) . raw_type * MAX_TYPE ; <nl> if ( t = = DATUM_TYPE ) { <nl> - t + = v - > as_datum ( ) - > get_type ( ) ; <nl> + t + = v - > as_datum ( ) . get_type ( ) ; <nl> } else if ( t = = SELECTION_TYPE ) { <nl> if ( v - > sequence ( ) - > is_array ( ) ) { <nl> t + = datum_t : : R_ARRAY ; <nl> class typeof_term_t : public op_term_t { <nl> counted_t < val_t > v = args - > arg ( env , 0 ) ; <nl> if ( v - > get_type ( ) . raw_type = = val_t : : type_t : : DATUM ) { <nl> datum_t d = v - > as_datum ( ) ; <nl> - return new_val ( datum_t ( datum_string_t ( d - > get_type_name ( ) ) ) ) ; <nl> + return new_val ( datum_t ( datum_string_t ( d . get_type_name ( ) ) ) ) ; <nl> } else if ( v - > get_type ( ) . raw_type = = val_t : : type_t : : SEQUENCE <nl> & & v - > as_seq ( env - > env ) - > is_grouped ( ) ) { <nl> return new_val ( datum_t ( " GROUPED_STREAM " ) ) ; <nl> class info_term_t : public op_term_t { <nl> case R_BINARY_TYPE : / / fallthru <nl> b | = info . add ( " count " , <nl> datum_t ( <nl> - safe_to_double ( v - > as_datum ( ) - > as_binary ( ) . size ( ) ) ) ) ; <nl> + safe_to_double ( v - > as_datum ( ) . as_binary ( ) . size ( ) ) ) ) ; <nl> <nl> case R_NULL_TYPE : / / fallthru <nl> case R_BOOL_TYPE : / / fallthru <nl> class info_term_t : public op_term_t { <nl> case R_OBJECT_TYPE : / / fallthru <nl> case DATUM_TYPE : { <nl> b | = info . add ( " value " , <nl> - datum_t ( datum_string_t ( v - > as_datum ( ) - > print ( ) ) ) ) ; <nl> + datum_t ( datum_string_t ( v - > as_datum ( ) . print ( ) ) ) ) ; <nl> } break ; <nl> <nl> default : r_sanity_check ( false ) ; <nl> mmm a / src / rdb_protocol / terms / writes . cc <nl> ppp b / src / rdb_protocol / terms / writes . cc <nl> class insert_term_t : public op_term_t { <nl> std : : vector < std : : string > * generated_keys_out , <nl> size_t * keys_skipped_out , <nl> datum_t * datum_out ) { <nl> - if ( ! ( * datum_out ) - > get_field ( datum_string_t ( tbl - > get_pkey ( ) ) , NOTHROW ) . has ( ) ) { <nl> + if ( ! ( * datum_out ) . get_field ( datum_string_t ( tbl - > get_pkey ( ) ) , NOTHROW ) . 
has ( ) ) { <nl> std : : string key = uuid_to_str ( generate_uuid ( ) ) ; <nl> datum_t keyd ( ( datum_string_t ( key ) ) ) ; <nl> { <nl> class insert_term_t : public op_term_t { <nl> bool conflict = d . add ( datum_string_t ( tbl - > get_pkey ( ) ) , keyd ) ; <nl> r_sanity_check ( ! conflict ) ; <nl> std : : set < std : : string > conditions ; <nl> - * datum_out = ( * datum_out ) - > merge ( std : : move ( d ) . to_datum ( ) , pure_merge , <nl> + * datum_out = ( * datum_out ) . merge ( std : : move ( d ) . to_datum ( ) , pure_merge , <nl> limits , & conditions ) ; <nl> / / we happen to know that pure_merge cannot ever generate warning <nl> / / conditions , because it shouldn ' t ever be run . <nl> class insert_term_t : public op_term_t { <nl> if ( v1 - > get_type ( ) . is_convertible ( val_t : : type_t : : DATUM ) ) { <nl> std : : vector < datum_t > datums ; <nl> datums . push_back ( v1 - > as_datum ( ) ) ; <nl> - if ( datums [ 0 ] - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + if ( datums [ 0 ] . get_type ( ) = = datum_t : : R_OBJECT ) { <nl> try { <nl> maybe_generate_key ( t , env - > env - > limits ( ) , & generated_keys , <nl> & keys_skipped , & datums [ 0 ] ) ; <nl> class insert_term_t : public op_term_t { <nl> datum_t replace_stats = t - > batched_insert ( <nl> env - > env , std : : move ( datums ) , conflict_behavior , <nl> durability_requirement , return_changes ) ; <nl> - stats = stats - > merge ( replace_stats , stats_merge , env - > env - > limits ( ) , & conditions ) ; <nl> + stats = stats . merge ( replace_stats , stats_merge , env - > env - > limits ( ) , & conditions ) ; <nl> done = true ; <nl> } <nl> } <nl> class insert_term_t : public op_term_t { <nl> <nl> datum_t replace_stats = t - > batched_insert ( <nl> env - > env , std : : move ( datums ) , conflict_behavior , durability_requirement , return_changes ) ; <nl> - stats = stats - > merge ( replace_stats , stats_merge , env - > env - > limits ( ) , & conditions ) ; <nl> + stats = stats . merge ( replace_stats , stats_merge , env - > env - > limits ( ) , & conditions ) ; <nl> } <nl> } <nl> <nl> class insert_term_t : public op_term_t { <nl> UNUSED bool b = d . add ( " generated_keys " , <nl> datum_t ( std : : move ( genkeys ) , <nl> env - > env - > limits ( ) ) ) ; <nl> - stats = stats - > merge ( std : : move ( d ) . to_datum ( ) , pure_merge , <nl> + stats = stats . merge ( std : : move ( d ) . to_datum ( ) , pure_merge , <nl> env - > env - > limits ( ) , & conditions ) ; <nl> } <nl> <nl> class replace_term_t : public op_term_t { <nl> datum_t orig_key = v0 - > get_orig_key ( ) ; <nl> if ( ! orig_key . has ( ) ) { <nl> orig_key = <nl> - orig_val - > get_field ( datum_string_t ( tblrow . first - > get_pkey ( ) ) , NOTHROW ) ; <nl> + orig_val . get_field ( datum_string_t ( tblrow . first - > get_pkey ( ) ) , NOTHROW ) ; <nl> r_sanity_check ( orig_key . has ( ) ) ; <nl> } <nl> <nl> class replace_term_t : public op_term_t { <nl> datum_t replace_stats = tblrow . first - > batched_replace ( <nl> env - > env , vals , keys , f , <nl> nondet_ok , durability_requirement , return_changes ) ; <nl> - stats = stats - > merge ( replace_stats , stats_merge , env - > env - > limits ( ) , <nl> + stats = stats . merge ( replace_stats , stats_merge , env - > env - > limits ( ) , <nl> & conditions ) ; <nl> } else { <nl> std : : pair < counted_t < table_t > , counted_t < datum_stream_t > > tblrows <nl> class replace_term_t : public op_term_t { <nl> std : : vector < datum_t > keys ; <nl> keys . reserve ( vals . 
size ( ) ) ; <nl> for ( auto it = vals . begin ( ) ; it ! = vals . end ( ) ; + + it ) { <nl> - keys . push_back ( ( * it ) - > get_field ( datum_string_t ( tbl - > get_pkey ( ) ) ) ) ; <nl> + keys . push_back ( ( * it ) . get_field ( datum_string_t ( tbl - > get_pkey ( ) ) ) ) ; <nl> } <nl> datum_t replace_stats = tbl - > batched_replace ( <nl> env - > env , vals , keys , <nl> f , nondet_ok , durability_requirement , return_changes ) ; <nl> - stats = stats - > merge ( replace_stats , stats_merge , env - > env - > limits ( ) , & conditions ) ; <nl> + stats = stats . merge ( replace_stats , stats_merge , env - > env - > limits ( ) , & conditions ) ; <nl> } <nl> } <nl> <nl> class foreach_term_t : public op_term_t { <nl> profile : : sampler_t sampler ( " Evaluating elements in for each . " , <nl> env - > env - > trace ) ; <nl> counted_t < const func_t > f = args - > arg ( env , 1 ) - > as_func ( CONSTANT_SHORTCUT ) ; <nl> - while ( datum_t row = ds - > next ( env - > env , batchspec ) ) { <nl> + datum_t row ; <nl> + while ( row = ds - > next ( env - > env , batchspec ) , row . has ( ) ) { <nl> counted_t < val_t > v = f - > call ( env - > env , row ) ; <nl> try { <nl> datum_t d = v - > as_datum ( ) ; <nl> - if ( d - > get_type ( ) = = datum_t : : R_OBJECT ) { <nl> - stats = stats - > merge ( d , stats_merge , env - > env - > limits ( ) , <nl> + if ( d . get_type ( ) = = datum_t : : R_OBJECT ) { <nl> + stats = stats . merge ( d , stats_merge , env - > env - > limits ( ) , <nl> & conditions ) ; <nl> } else { <nl> - for ( size_t i = 0 ; i < d - > arr_size ( ) ; + + i ) { <nl> - stats = stats - > merge ( d - > get ( i ) , stats_merge , env - > env - > limits ( ) , <nl> - & conditions ) ; <nl> + for ( size_t i = 0 ; i < d . arr_size ( ) ; + + i ) { <nl> + stats = stats . merge ( d . get ( i ) , stats_merge , env - > env - > limits ( ) , <nl> + & conditions ) ; <nl> } <nl> } <nl> } catch ( const exc_t & e ) { <nl> mmm a / src / rdb_protocol / val . cc <nl> ppp b / src / rdb_protocol / val . cc <nl> datum_t table_t : : batched_replace ( <nl> datum_t new_val ; <nl> try { <nl> new_val = replacement_generator - > call ( env , vals [ i ] ) - > as_datum ( ) ; <nl> - new_val - > rcheck_valid_replace ( vals [ i ] , keys [ i ] , <nl> - datum_string_t ( get_pkey ( ) ) ) ; <nl> + new_val . rcheck_valid_replace ( vals [ i ] , keys [ i ] , <nl> + datum_string_t ( get_pkey ( ) ) ) ; <nl> r_sanity_check ( new_val . has ( ) ) ; <nl> replacement_values . push_back ( new_val ) ; <nl> } catch ( const base_exc_t & e ) { <nl> datum_t table_t : : batched_replace ( <nl> durability_requirement , return_changes ) ; <nl> std : : set < std : : string > conditions ; <nl> datum_t merged <nl> - = std : : move ( stats ) . to_datum ( ) - > merge ( insert_stats , stats_merge , <nl> + = std : : move ( stats ) . to_datum ( ) . merge ( insert_stats , stats_merge , <nl> env - > limits ( ) , & conditions ) ; <nl> datum_object_builder_t result ( merged ) ; <nl> result . add_warnings ( conditions , env - > limits ( ) ) ; <nl> datum_t table_t : : batched_insert ( <nl> for ( auto it = insert_datums . begin ( ) ; it ! = insert_datums . 
end ( ) ; + + it ) { <nl> try { <nl> datum_string_t pkey_w ( get_pkey ( ) ) ; <nl> - ( * it ) - > rcheck_valid_replace ( datum_t ( ) , <nl> - datum_t ( ) , <nl> - pkey_w ) ; <nl> - const ql : : datum_t & keyval = ( * it ) - > get_field ( pkey_w ) ; <nl> - keyval - > print_primary ( ) ; / / does error checking <nl> + it - > rcheck_valid_replace ( datum_t ( ) , <nl> + datum_t ( ) , <nl> + pkey_w ) ; <nl> + const ql : : datum_t & keyval = ( * it ) . get_field ( pkey_w ) ; <nl> + keyval . print_primary ( ) ; / / does error checking <nl> valid_inserts . push_back ( std : : move ( * it ) ) ; <nl> } catch ( const base_exc_t & e ) { <nl> stats . add_error ( e . what ( ) ) ; <nl> datum_t table_t : : batched_insert ( <nl> durability_requirement ) ; <nl> std : : set < std : : string > conditions ; <nl> datum_t merged <nl> - = std : : move ( stats ) . to_datum ( ) - > merge ( insert_stats , stats_merge , <nl> + = std : : move ( stats ) . to_datum ( ) . merge ( insert_stats , stats_merge , <nl> env - > limits ( ) , & conditions ) ; <nl> datum_object_builder_t result ( merged ) ; <nl> result . add_warnings ( conditions , env - > limits ( ) ) ; <nl> counted_t < datum_stream_t > val_t : : as_seq ( env_t * env ) { <nl> } else if ( type . raw_type = = type_t : : TABLE ) { <nl> return table - > as_datum_stream ( env , backtrace ( ) ) ; <nl> } else if ( type . raw_type = = type_t : : DATUM ) { <nl> - return datum ( ) - > as_datum_stream ( backtrace ( ) ) ; <nl> + return datum ( ) . as_datum_stream ( backtrace ( ) ) ; <nl> } <nl> rcheck_literal_type ( type_t : : SEQUENCE ) ; <nl> unreachable ( ) ; <nl> datum_t val_t : : as_ptype ( const std : : string s ) { <nl> try { <nl> datum_t d = as_datum ( ) ; <nl> r_sanity_check ( d . has ( ) ) ; <nl> - d - > rcheck_is_ptype ( s ) ; <nl> + d . rcheck_is_ptype ( s ) ; <nl> return d ; <nl> } catch ( const datum_exc_t & e ) { <nl> rfail ( e . get_type ( ) , " % s " , e . what ( ) ) ; <nl> bool val_t : : as_bool ( ) { <nl> try { <nl> datum_t d = as_datum ( ) ; <nl> r_sanity_check ( d . has ( ) ) ; <nl> - return d - > as_bool ( ) ; <nl> + return d . as_bool ( ) ; <nl> } catch ( const datum_exc_t & e ) { <nl> rfail ( e . get_type ( ) , " % s " , e . what ( ) ) ; <nl> } <nl> double val_t : : as_num ( ) { <nl> try { <nl> datum_t d = as_datum ( ) ; <nl> r_sanity_check ( d . has ( ) ) ; <nl> - return d - > as_num ( ) ; <nl> + return d . as_num ( ) ; <nl> } catch ( const datum_exc_t & e ) { <nl> rfail ( e . get_type ( ) , " % s " , e . what ( ) ) ; <nl> } <nl> int64_t val_t : : as_int ( ) { <nl> try { <nl> datum_t d = as_datum ( ) ; <nl> r_sanity_check ( d . has ( ) ) ; <nl> - return d - > as_int ( ) ; <nl> + return d . as_int ( ) ; <nl> } catch ( const datum_exc_t & e ) { <nl> rfail ( e . get_type ( ) , " % s " , e . what ( ) ) ; <nl> } <nl> datum_string_t val_t : : as_str ( ) { <nl> try { <nl> datum_t d = as_datum ( ) ; <nl> r_sanity_check ( d . has ( ) ) ; <nl> - return d - > as_str ( ) ; <nl> + return d . as_str ( ) ; <nl> } catch ( const datum_exc_t & e ) { <nl> rfail ( e . get_type ( ) , " % s " , e . what ( ) ) ; <nl> } <nl> void val_t : : rcheck_literal_type ( type_t : : raw_type_t expected_raw_type ) const { <nl> <nl> std : : string val_t : : print ( ) const { <nl> if ( get_type ( ) . is_convertible ( type_t : : DATUM ) ) { <nl> - return as_datum ( ) - > print ( ) ; <nl> + return as_datum ( ) . print ( ) ; <nl> } else if ( get_type ( ) . is_convertible ( type_t : : DB ) ) { <nl> return strprintf ( " db ( \ " % s \ " ) " , as_db ( ) - > name . 
c_str ( ) ) ; <nl> } else if ( get_type ( ) . is_convertible ( type_t : : TABLE ) ) { <nl> std : : string val_t : : print ( ) const { <nl> <nl> std : : string val_t : : trunc_print ( ) const { <nl> if ( get_type ( ) . is_convertible ( type_t : : DATUM ) ) { <nl> - return as_datum ( ) - > trunc_print ( ) ; <nl> + return as_datum ( ) . trunc_print ( ) ; <nl> } else { <nl> std : : string s = print ( ) ; <nl> if ( s . size ( ) > datum_t : : trunc_len ) { <nl> mmm a / src / rdb_protocol / var_types . cc <nl> ppp b / src / rdb_protocol / var_types . cc <nl> std : : string var_scope_t : : print ( ) const { <nl> } else if ( implicit_depth = = 1 ) { <nl> ret + = " implicit : " ; <nl> if ( maybe_implicit . has ( ) ) { <nl> - ret + = maybe_implicit - > print ( ) ; <nl> + ret + = maybe_implicit . print ( ) ; <nl> } else { <nl> ret + = " ( not stored ) " ; <nl> } <nl> std : : string var_scope_t : : print ( ) const { <nl> for ( auto it = vars . begin ( ) ; it ! = vars . end ( ) ; + + it ) { <nl> ret + = " , " ; <nl> ret + = strprintf ( " % " PRIi64 " : " , it - > first . value ) ; <nl> - ret + = it - > second - > print ( ) ; <nl> + ret + = it - > second . print ( ) ; <nl> } <nl> ret + = " ] " ; <nl> return ret ; <nl> mmm a / src / unittest / btree_sindex . cc <nl> ppp b / src / unittest / btree_sindex . cc <nl> TPTEST ( BTreeSindex , BtreeStoreAPI ) { <nl> rdb_get ( key , store . get_sindex_slice ( sindex_uuid ) , <nl> sindex_super_block . get ( ) , & response , NULL ) ; <nl> <nl> - ASSERT_EQ ( ql : : datum_t ( 1 . 0 ) , * response . data ) ; <nl> + ASSERT_EQ ( ql : : datum_t ( 1 . 0 ) , response . data ) ; <nl> } <nl> } <nl> <nl> mmm a / src / unittest / geo_indexes . cc <nl> ppp b / src / unittest / geo_indexes . cc <nl> void test_get_intersecting ( const datum_t & query_geometry , <nl> / / 3 . Compare both results <nl> ASSERT_EQ ( intersecting_res . size ( ) , reference_res . size ( ) ) ; <nl> for ( size_t i = 0 ; i < intersecting_res . size ( ) & & i < reference_res . size ( ) ; + + i ) { <nl> - ASSERT_EQ ( * intersecting_res [ i ] , * reference_res [ i ] ) ; <nl> + ASSERT_EQ ( intersecting_res [ i ] , reference_res [ i ] ) ; <nl> } <nl> } <nl> <nl> mmm a / src / unittest / jsproc . cc <nl> ppp b / src / unittest / jsproc . cc <nl> SPAWNER_TEST ( JSProc , LiteralNumber ) { <nl> ql : : datum_t result ; <nl> run_datum_test ( " 9467923 " , & result ) ; <nl> ASSERT_TRUE ( result . has ( ) ) ; <nl> - ASSERT_TRUE ( result - > get_type ( ) = = ql : : datum_t : : R_NUM ) ; <nl> - ASSERT_EQ ( result - > as_int ( ) , 9467923 ) ; <nl> + ASSERT_TRUE ( result . get_type ( ) = = ql : : datum_t : : R_NUM ) ; <nl> + ASSERT_EQ ( result . as_int ( ) , 9467923 ) ; <nl> } <nl> <nl> SPAWNER_TEST ( JSProc , LiteralString ) { <nl> ql : : datum_t result ; <nl> run_datum_test ( " \ " string data \ " " , & result ) ; <nl> ASSERT_TRUE ( result . has ( ) ) ; <nl> - ASSERT_TRUE ( result - > get_type ( ) = = ql : : datum_t : : R_STR ) ; <nl> - ASSERT_EQ ( result - > as_str ( ) , " string data " ) ; <nl> + ASSERT_TRUE ( result . get_type ( ) = = ql : : datum_t : : R_STR ) ; <nl> + ASSERT_EQ ( result . as_str ( ) , " string data " ) ; <nl> } <nl> <nl> SPAWNER_TEST ( JSProc , EvalAndCall ) { <nl> SPAWNER_TEST ( JSProc , EvalAndCall ) { <nl> ASSERT_TRUE ( js_runner . connected ( ) ) ; <nl> <nl> / / Check results <nl> - ql : : datum_t * res_datum = <nl> - boost : : get < ql : : datum_t > ( & result ) ; <nl> + ql : : datum_t * res_datum = boost : : get < ql : : datum_t > ( & result ) ; <nl> ASSERT_TRUE ( res_datum ! 
= NULL ) ; <nl> ASSERT_TRUE ( res_datum - > has ( ) ) ; <nl> - ASSERT_TRUE ( ( * res_datum ) - > get_type ( ) = = ql : : datum_t : : R_NUM ) ; <nl> - ASSERT_EQ ( ( * res_datum ) - > as_int ( ) , 10337 ) ; <nl> + ASSERT_TRUE ( res_datum - > get_type ( ) = = ql : : datum_t : : R_NUM ) ; <nl> + ASSERT_EQ ( res_datum - > as_int ( ) , 10337 ) ; <nl> } <nl> <nl> SPAWNER_TEST ( JSProc , BrokenFunction ) { <nl> mmm a / src / unittest / mock_store . cc <nl> ppp b / src / unittest / mock_store . cc <nl> std : : string mock_parse_read_response ( const read_response_t & rr ) { <nl> = boost : : get < point_read_response_t > ( & rr . response ) ; <nl> guarantee ( prr ! = NULL ) ; <nl> guarantee ( prr - > data . has ( ) ) ; <nl> - if ( prr - > data - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( prr - > data . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> / / Behave like the old dummy_protocol_t . <nl> return " " ; <nl> } <nl> - return prr - > data - > get_field ( " value " ) - > as_str ( ) . to_std ( ) ; <nl> + return prr - > data . get_field ( " value " ) . as_str ( ) . to_std ( ) ; <nl> } <nl> <nl> std : : string mock_lookup ( store_view_t * store , std : : string key ) { <nl> std : : string mock_store_t : : values ( std : : string key ) { <nl> / / Behave like the old dummy_protocol_t . <nl> return " " ; <nl> } <nl> - return it - > second . second - > get_field ( " value " ) - > as_str ( ) . to_std ( ) ; <nl> + return it - > second . second . get_field ( " value " ) . as_str ( ) . to_std ( ) ; <nl> } <nl> <nl> repli_timestamp_t mock_store_t : : timestamps ( std : : string key ) { <nl> mmm a / src / unittest / rdb_backfill . cc <nl> ppp b / src / unittest / rdb_backfill . cc <nl> void run_backfill_test ( size_t value_padding_length , <nl> broadcaster - > get ( ) - > read ( read , & response , & exiter , order_source - > check_in ( " unittest : : ( rdb ) run_partial_backfill_test " ) . with_read_mode ( ) , & non_interruptor ) ; <nl> point_read_response_t get_result = boost : : get < point_read_response_t > ( response . response ) ; <nl> EXPECT_TRUE ( get_result . data . has ( ) ) ; <nl> - EXPECT_EQ ( * generate_document ( value_padding_length , <nl> + EXPECT_EQ ( generate_document ( value_padding_length , <nl> it - > second ) , <nl> - * get_result . data ) ; <nl> + get_result . data ) ; <nl> } <nl> } <nl> <nl> void run_sindex_backfill_test ( std : : pair < io_backender_t * , simple_mailbox_cluster <nl> / / Order doesn ' t matter because groups - > size ( ) is 1 . <nl> auto result_stream = & groups - > begin ( ql : : grouped : : order_doesnt_matter_t ( ) ) - > second ; <nl> ASSERT_EQ ( 1u , result_stream - > size ( ) ) ; <nl> - EXPECT_EQ ( * generate_document ( 0 , it - > second ) , * result_stream - > at ( 0 ) . data ) ; <nl> + EXPECT_EQ ( generate_document ( 0 , it - > second ) , result_stream - > at ( 0 ) . data ) ; <nl> } <nl> } <nl> <nl> mmm a / src / unittest / rdb_btree . cc <nl> ppp b / src / unittest / rdb_btree . cc <nl> void insert_rows ( int start , int finish , store_t * store ) { <nl> std : : string data = strprintf ( " { \ " id \ " : % d , \ " sid \ " : % d } " , i , i * i ) ; <nl> point_write_response_t response ; <nl> <nl> - store_key_t pk ( ql : : datum_t ( static_cast < double > ( i ) ) - > print_primary ( ) ) ; <nl> + store_key_t pk ( ql : : datum_t ( static_cast < double > ( i ) ) . 
print_primary ( ) ) ; <nl> rdb_modification_report_t mod_report ( pk ) ; <nl> rdb_live_deletion_context_t deletion_context ; <nl> rdb_set ( pk , <nl> void _check_keys_are_present ( store_t * store , <nl> rdb_rget_slice ( <nl> store - > get_sindex_slice ( sindex_uuid ) , <nl> rdb_protocol : : sindex_key_range ( <nl> - store_key_t ( ql : : datum_t ( ii ) - > print_primary ( ) ) , <nl> - store_key_t ( ql : : datum_t ( ii ) - > print_primary ( ) ) ) , <nl> + store_key_t ( ql : : datum_t ( ii ) . print_primary ( ) ) , <nl> + store_key_t ( ql : : datum_t ( ii ) . print_primary ( ) ) ) , <nl> sindex_sb . get ( ) , <nl> & dummy_env , / / env_t <nl> ql : : batchspec_t : : user ( ql : : batch_type_t : : NORMAL , <nl> void _check_keys_are_present ( store_t * store , <nl> <nl> std : : string expected_data = strprintf ( " { \ " id \ " : % d , \ " sid \ " : % d } " , i , i * i ) ; <nl> scoped_cJSON_t expected_value ( cJSON_Parse ( expected_data . c_str ( ) ) ) ; <nl> - ASSERT_EQ ( * ql : : to_datum ( expected_value . get ( ) , limits ) , * stream - > front ( ) . data ) ; <nl> + ASSERT_EQ ( ql : : to_datum ( expected_value . get ( ) , limits ) , stream - > front ( ) . data ) ; <nl> } <nl> } <nl> <nl> void _check_keys_are_NOT_present ( store_t * store , <nl> rdb_rget_slice ( <nl> store - > get_sindex_slice ( sindex_uuid ) , <nl> rdb_protocol : : sindex_key_range ( <nl> - store_key_t ( ql : : datum_t ( ii ) - > print_primary ( ) ) , <nl> - store_key_t ( ql : : datum_t ( ii ) - > print_primary ( ) ) ) , <nl> + store_key_t ( ql : : datum_t ( ii ) . print_primary ( ) ) , <nl> + store_key_t ( ql : : datum_t ( ii ) . print_primary ( ) ) ) , <nl> sindex_sb . get ( ) , <nl> & dummy_env , / / env_t <nl> ql : : batchspec_t : : user ( ql : : batch_type_t : : NORMAL , <nl> mmm a / src / unittest / rdb_env . cc <nl> ppp b / src / unittest / rdb_env . cc <nl> void mock_namespace_interface_t : : write_visitor_t : : operator ( ) ( <nl> data - > erase ( * it ) ; <nl> <nl> bool err ; <nl> - if ( new_val - > get_type ( ) = = ql : : datum_t : : R_OBJECT ) { <nl> - data - > insert ( std : : make_pair ( * it , new scoped_cJSON_t ( new_val - > as_json ( ) ) ) ) ; <nl> - if ( old_val - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( new_val . get_type ( ) = = ql : : datum_t : : R_OBJECT ) { <nl> + data - > insert ( std : : make_pair ( * it , new scoped_cJSON_t ( new_val . as_json ( ) ) ) ) ; <nl> + if ( old_val . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> err = resp . add ( " inserted " , ql : : datum_t ( 1 . 0 ) ) ; <nl> } else { <nl> - if ( * old_val = = * new_val ) { <nl> + if ( old_val = = new_val ) { <nl> err = resp . add ( " unchanged " , ql : : datum_t ( 1 . 0 ) ) ; <nl> } else { <nl> err = resp . add ( " replaced " , ql : : datum_t ( 1 . 0 ) ) ; <nl> } <nl> } <nl> - } else if ( new_val - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> - if ( old_val - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + } else if ( new_val . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( old_val . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> err = resp . add ( " skipped " , ql : : datum_t ( 1 . 0 ) ) ; <nl> } else { <nl> err = resp . add ( " deleted " , ql : : datum_t ( 1 . 0 ) ) ; <nl> void mock_namespace_interface_t : : write_visitor_t : : operator ( ) ( <nl> " value being inserted is neither an object nor an empty value " ) ; <nl> } <nl> guarantee ( ! err ) ; <nl> - stats = stats - > merge ( std : : move ( resp ) . to_datum ( ) , ql : : stats_merge , <nl> + stats = stats . 
merge ( std : : move ( resp ) . to_datum ( ) , ql : : stats_merge , <nl> limits , & conditions ) ; <nl> } <nl> ql : : datum_object_builder_t result ( std : : move ( stats ) ) ; <nl> void mock_namespace_interface_t : : write_visitor_t : : operator ( ) ( <nl> ql : : datum_t stats = ql : : datum_t : : empty_object ( ) ; <nl> std : : set < std : : string > conditions ; <nl> for ( auto it = bi . inserts . begin ( ) ; it ! = bi . inserts . end ( ) ; + + it ) { <nl> - store_key_t key ( ( * it ) - > get_field ( datum_string_t ( bi . pkey ) ) - > print_primary ( ) ) ; <nl> + store_key_t key ( ( * it ) . get_field ( datum_string_t ( bi . pkey ) ) . print_primary ( ) ) ; <nl> ql : : datum_object_builder_t resp ; <nl> ql : : datum_t old_val ; <nl> if ( data - > find ( key ) ! = data - > end ( ) ) { <nl> void mock_namespace_interface_t : : write_visitor_t : : operator ( ) ( <nl> data - > erase ( key ) ; <nl> <nl> bool err ; <nl> - if ( new_val - > get_type ( ) = = ql : : datum_t : : R_OBJECT ) { <nl> - data - > insert ( std : : make_pair ( key , new scoped_cJSON_t ( new_val - > as_json ( ) ) ) ) ; <nl> - if ( old_val - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( new_val . get_type ( ) = = ql : : datum_t : : R_OBJECT ) { <nl> + data - > insert ( std : : make_pair ( key , new scoped_cJSON_t ( new_val . as_json ( ) ) ) ) ; <nl> + if ( old_val . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> err = resp . add ( " inserted " , ql : : datum_t ( 1 . 0 ) ) ; <nl> } else { <nl> - if ( * old_val = = * new_val ) { <nl> + if ( old_val = = new_val ) { <nl> err = resp . add ( " unchanged " , ql : : datum_t ( 1 . 0 ) ) ; <nl> } else { <nl> err = resp . add ( " replaced " , ql : : datum_t ( 1 . 0 ) ) ; <nl> } <nl> } <nl> - } else if ( new_val - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> - if ( old_val - > get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + } else if ( new_val . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> + if ( old_val . get_type ( ) = = ql : : datum_t : : R_NULL ) { <nl> err = resp . add ( " skipped " , ql : : datum_t ( 1 . 0 ) ) ; <nl> } else { <nl> err = resp . add ( " deleted " , ql : : datum_t ( 1 . 0 ) ) ; <nl> void mock_namespace_interface_t : : write_visitor_t : : operator ( ) ( <nl> " value being inserted is neither an object nor an empty value " ) ; <nl> } <nl> guarantee ( ! err ) ; <nl> - stats = stats - > merge ( std : : move ( resp ) . to_datum ( ) , ql : : stats_merge , limits , & conditions ) ; <nl> + stats = stats . merge ( std : : move ( resp ) . to_datum ( ) , ql : : stats_merge , limits , & conditions ) ; <nl> } <nl> ql : : datum_object_builder_t result ( stats ) ; <nl> result . add_warnings ( conditions , limits ) ; <nl> mmm a / src / unittest / rdb_protocol . cc <nl> ppp b / src / unittest / rdb_protocol . cc <nl> void run_get_set_test ( namespace_interface_t * nsi , order_source_t * osource ) { <nl> if ( point_read_response_t * maybe_point_read_response = boost : : get < point_read_response_t > ( & response . response ) ) { <nl> ASSERT_TRUE ( maybe_point_read_response - > data . 
has ( ) ) ; <nl> ASSERT_EQ ( ql : : datum_t ( ql : : datum_t : : construct_null_t ( ) ) , <nl> - * maybe_point_read_response - > data ) ; <nl> + maybe_point_read_response - > data ) ; <nl> } else { <nl> ADD_FAILURE ( ) < < " got wrong result back " ; <nl> } <nl> void run_create_drop_sindex_test ( namespace_interface_t * nsi , order_source_t * oso <nl> ql : : configured_limits_t limits ; <nl> ql : : datum_t d <nl> = ql : : to_datum ( cJSON_slow_GetObjectItem ( data - > get ( ) , " id " ) , limits ) ; <nl> - store_key_t pk = store_key_t ( d - > print_primary ( ) ) ; <nl> + store_key_t pk = store_key_t ( d . print_primary ( ) ) ; <nl> ql : : datum_t sindex_key_literal = ql : : datum_t ( 1 . 0 ) ; <nl> <nl> ASSERT_TRUE ( data - > get ( ) ) ; <nl> void run_create_drop_sindex_test ( namespace_interface_t * nsi , order_source_t * oso <nl> auto stream = & streams - > begin ( ql : : grouped : : order_doesnt_matter_t ( ) ) - > second ; <nl> ASSERT_TRUE ( stream ! = NULL ) ; <nl> ASSERT_EQ ( 1u , stream - > size ( ) ) ; <nl> - ASSERT_EQ ( * ql : : to_datum ( data - > get ( ) , limits ) , * stream - > at ( 0 ) . data ) ; <nl> + ASSERT_EQ ( ql : : to_datum ( data - > get ( ) , limits ) , stream - > at ( 0 ) . data ) ; <nl> } else { <nl> ADD_FAILURE ( ) < < " got wrong type of result back " ; <nl> } <nl> void populate_sindex ( namespace_interface_t * nsi , <nl> ql : : configured_limits_t limits ; <nl> ql : : datum_t d <nl> = ql : : to_datum ( cJSON_slow_GetObjectItem ( data - > get ( ) , " id " ) , limits ) ; <nl> - store_key_t pk = store_key_t ( d - > print_primary ( ) ) ; <nl> + store_key_t pk = store_key_t ( d . print_primary ( ) ) ; <nl> <nl> / * Insert a piece of data ( it will be indexed using the secondary <nl> * index ) . * / <nl> void run_sindex_oversized_keys_test ( namespace_interface_t * nsi , order_source_t * <nl> try { <nl> pk = store_key_t ( ql : : to_datum ( <nl> cJSON_slow_GetObjectItem ( data - > get ( ) , " id " ) , <nl> - limits ) - > print_primary ( ) ) ; <nl> + limits ) . print_primary ( ) ) ; <nl> } catch ( const ql : : base_exc_t & ex ) { <nl> ASSERT_TRUE ( id . length ( ) > = rdb_protocol : : MAX_PRIMARY_KEY_SIZE ) ; <nl> continue ; <nl> void run_sindex_missing_attr_test ( namespace_interface_t * nsi , order_source_t * os <nl> new scoped_cJSON_t ( cJSON_Parse ( " { \ " id \ " : 0 } " ) ) ) ; <nl> store_key_t pk = store_key_t ( ql : : to_datum ( <nl> cJSON_slow_GetObjectItem ( data - > get ( ) , " id " ) , <nl> - limits ) - > print_primary ( ) ) ; <nl> + limits ) . print_primary ( ) ) ; <nl> ASSERT_TRUE ( data - > get ( ) ) ; <nl> { <nl> / * Insert a piece of data ( it will be indexed using the secondary <nl>
|
Merge commit ' baa9109cb1c25992443eb1705fa39479f4e9aeac ' into reql_admin
|
rethinkdb/rethinkdb
|
347e7c8368af8066893174f1ab4a41801d812c77
|
2014-09-05T03:29:36Z
|
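The rethinkdb diff above is a mechanical migration of `ql::datum_t` call sites from pointer-style `->` access to plain value access with `.`, and it also rewrites loops of the form `while (datum_t row = seq->next(env, batchspec))` into an explicit `row.has()` test, since a value-semantics handle with no implicit bool conversion can no longer be tested in a declaration condition. Below is a minimal self-contained sketch of that loop rewrite; the types are hypothetical stand-ins, not the real RethinkDB classes.

```cpp
// Hypothetical stand-ins: a copyable handle exposing has() instead of
// operator bool, and a stream whose next() returns an empty handle at the end.
#include <cstddef>
#include <memory>
#include <string>
#include <vector>

class datum_handle_t {
public:
    datum_handle_t() = default;
    explicit datum_handle_t(std::string s)
        : rep_(std::make_shared<std::string>(std::move(s))) {}
    bool has() const { return rep_ != nullptr; }  // explicit emptiness test
    const std::string &as_str() const { return *rep_; }

private:
    std::shared_ptr<std::string> rep_;  // cheap to copy by value
};

class stream_t {
public:
    explicit stream_t(std::vector<std::string> items) : items_(std::move(items)) {}
    datum_handle_t next() {
        if (pos_ == items_.size()) return datum_handle_t();  // empty => exhausted
        return datum_handle_t(items_[pos_++]);
    }

private:
    std::vector<std::string> items_;
    std::size_t pos_ = 0;
};

int main() {
    stream_t s({"a", "b", "c"});
    // Old pattern: while (datum_handle_t d = s.next()) { ... }
    // only works if the handle converts to bool. The rewritten pattern:
    datum_handle_t d;
    while (d = s.next(), d.has()) {
        // process d.as_str()
    }
    return 0;
}
```

The comma-expression form `while (d = s.next(), d.has())` keeps the handle in a single enclosing scope, as in the old pattern, while making the end-of-stream test explicit.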
new file mode 100644 <nl> index 0000000000 . . 93bc988297 <nl> mmm / dev / null <nl> ppp b / code / mathematical - algorithms / reverse_factorial / reverse_factorial . go <nl> <nl> + / * <nl> + * Part of Cosmos by OpenGenus Foundation <nl> + * / <nl> + package main <nl> + <nl> + / * <nl> + Expected output <nl> + 720 is 6 ! <nl> + 120 is 5 ! <nl> + 24 is 4 ! <nl> + 362880 is 9 ! <nl> + 12345 isn ' t a factorial number <nl> + <nl> + * / <nl> + import " fmt " <nl> + <nl> + func reverseFactorial ( target int ) { <nl> + divisor , num : = 2 , target <nl> + <nl> + for 0 = = num % divisor { <nl> + num / = divisor <nl> + divisor + + <nl> + } <nl> + <nl> + if num = = 1 { <nl> + fmt . Printf ( " % d is % d ! \ n " , target , divisor - 1 ) <nl> + return <nl> + } <nl> + fmt . Printf ( " % d isn ' t a factorial number \ n " , target ) <nl> + return <nl> + } <nl> + <nl> + func main ( ) { <nl> + reverseFactorial ( 720 ) <nl> + reverseFactorial ( 120 ) <nl> + reverseFactorial ( 24 ) <nl> + reverseFactorial ( 362880 ) <nl> + reverseFactorial ( 12345 ) <nl> + } <nl> mmm a / code / mathematical - algorithms / sum_of_digits / SumOfDigits . cs <nl> ppp b / code / mathematical - algorithms / sum_of_digits / SumOfDigits . cs <nl> <nl> + / / Part of Cosmos by OpenGenus Foundation <nl> using System ; <nl> <nl> class MainClass { <nl> mmm a / code / mathematical - algorithms / sum_of_digits / Sum_of_digits_function . cpp <nl> ppp b / code / mathematical - algorithms / sum_of_digits / Sum_of_digits_function . cpp <nl> <nl> + / / Part of Cosmos by OpenGenus Foundation <nl> # include < iostream > <nl> using namespace std ; <nl> int sum_of_digits ( int n ) <nl> mmm a / code / mathematical - algorithms / sum_of_digits / Sum_of_digits_function . py <nl> ppp b / code / mathematical - algorithms / sum_of_digits / Sum_of_digits_function . py <nl> <nl> + # Part of Cosmos by OpenGenus Foundation <nl> def sum_of_digits ( number ) : <nl> return sum ( map ( int , str ( number ) ) ) <nl> <nl> mmm a / code / mathematical - algorithms / sum_of_digits / sum_of_digits . go <nl> ppp b / code / mathematical - algorithms / sum_of_digits / sum_of_digits . go <nl> <nl> + / / Part of Cosmos by OpenGenus Foundation <nl> package main <nl> <nl> import ( <nl> mmm a / code / mathematical - algorithms / sum_of_digits / sum_of_digits . rb <nl> ppp b / code / mathematical - algorithms / sum_of_digits / sum_of_digits . rb <nl> <nl> + # # Part of Cosmos by OpenGenus Foundation <nl> def sum_of_digits ( num ) <nl> num = num . abs <nl> sum = 0 <nl> mmm a / code / mathematical - algorithms / sum_of_digits / sum_of_digits_with_recursion . c <nl> ppp b / code / mathematical - algorithms / sum_of_digits / sum_of_digits_with_recursion . c <nl> <nl> + / / Part of Cosmos by OpenGenus Foundation <nl> + # include < stdio . h > <nl> + <nl> / / loop <nl> int sumOfDigits ( int number ) <nl> { <nl> mmm a / code / mathematical - algorithms / sum_of_digits / sumofdigits . c <nl> ppp b / code / mathematical - algorithms / sum_of_digits / sumofdigits . c <nl> <nl> + / / Part of Cosmos by OpenGenus Foundation <nl> # include < stdio . h > <nl> # include < math . h > <nl> <nl>
|
Merge pull request from hwchiu / master
|
OpenGenus/cosmos
|
64edfef0633c53d6cfb78bb69732f351634d56da
|
2017-10-19T06:09:59Z
|
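The Go function added above inverts a factorial by trial division: divide the target by 2, then 3, then 4, and so on while the division is exact; the input was a factorial exactly when the quotient reaches 1, in which case it equals `(divisor - 1)!`. The same algorithm transliterated to C++ (an illustration only, not part of the cosmos repository):

```cpp
// Reverse factorial by trial division, mirroring the Go version above.
// Like the Go original, this assumes target >= 1 (0 would loop forever).
#include <cstdio>

void reverse_factorial(int target) {
    int divisor = 2;
    int num = target;
    while (num % divisor == 0) {  // peel off 2, 3, 4, ... while exact
        num /= divisor;
        ++divisor;
    }
    if (num == 1) {
        std::printf("%d is %d!\n", target, divisor - 1);
    } else {
        std::printf("%d isn't a factorial number\n", target);
    }
}

int main() {
    reverse_factorial(720);     // 720 is 6!
    reverse_factorial(120);     // 120 is 5!
    reverse_factorial(362880);  // 362880 is 9!
    reverse_factorial(12345);   // 12345 isn't a factorial number
    return 0;
}
```

The loop terminates quickly because the divisor grows while the quotient shrinks, so only a handful of divisions are needed even for large inputs.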
mmm a / src / indirectmap . h <nl> ppp b / src / indirectmap . h <nl> <nl> + / / Copyright ( c ) 2016 The Bitcoin Core developers <nl> + / / Distributed under the MIT software license , see the accompanying <nl> + / / file COPYING or http : / / www . opensource . org / licenses / mit - license . php . <nl> + <nl> # ifndef BITCOIN_INDIRECTMAP_H <nl> # define BITCOIN_INDIRECTMAP_H <nl> <nl>
|
prepend license statement to indirectmap
|
bitcoin/bitcoin
|
d3af342276f29d2bd162628eb4b669599633e39e
|
2016-07-27T23:27:07Z
|
mmm a / atom / browser / net / url_request_fetch_job . cc <nl> ppp b / atom / browser / net / url_request_fetch_job . cc <nl> URLRequestFetchJob : : URLRequestFetchJob ( <nl> } <nl> <nl> / / Use | request | ' s headers . <nl> - net : : HttpRequestHeaders headers ; <nl> - if ( request - > GetFullRequestHeaders ( & headers ) ) { <nl> - fetcher_ - > SetExtraRequestHeaders ( headers . ToString ( ) ) ; <nl> - } <nl> + fetcher_ - > SetExtraRequestHeaders ( request - > extra_request_headers ( ) . ToString ( ) ) ; <nl> } <nl> <nl> net : : URLRequestContextGetter * URLRequestFetchJob : : GetRequestContext ( ) { <nl> mmm a / spec / api - protocol - spec . coffee <nl> ppp b / spec / api - protocol - spec . coffee <nl> describe ' protocol module ' , - > <nl> <nl> it ' returns RequestHttpJob should send response ' , ( done ) - > <nl> server = http . createServer ( req , res ) - > <nl> + assert . notEqual req . headers . accept , ' ' <nl> res . writeHead ( 200 , { ' Content - Type ' : ' text / plain ' } ) <nl> res . end ( ' hello ' ) <nl> server . close ( ) <nl>
|
Merge pull request from deepak1556 / fetch_job_headers_patch
|
electron/electron
|
9afb9734988f45d14e7824ba7a9e0580d6e69394
|
2015-07-29T03:44:47Z
|
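The C++ hunk above drops the fallible copy through `GetFullRequestHeaders()` and forwards `request->extra_request_headers()` unconditionally, which is what the new CoffeeScript assertion on a non-empty `Accept` header exercises. A self-contained sketch of the before/after shape, using stand-in types rather than the real Chromium `net::` classes:

```cpp
// Stand-in types (hypothetical, not Chromium's): a request carrying extra
// headers and a fetcher that accepts them as a serialized string.
#include <map>
#include <string>

struct request_headers_t {
    std::map<std::string, std::string> kv;
    std::string to_string() const {
        std::string out;
        for (const auto &p : kv) out += p.first + ": " + p.second + "\r\n";
        return out;
    }
};

struct request_t {
    request_headers_t extra;
    const request_headers_t &extra_request_headers() const { return extra; }
};

struct fetcher_t {
    std::string headers;
    void set_extra_request_headers(const std::string &h) { headers = h; }
};

int main() {
    request_t req;
    req.extra.kv["Accept"] = "*/*";
    fetcher_t fetcher;
    // After the change: whatever was set on the request is always forwarded,
    // instead of only when a full-headers copy happened to succeed.
    fetcher.set_extra_request_headers(req.extra_request_headers().to_string());
    return 0;
}
```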
mmm a / docs / authenticator . md <nl> ppp b / docs / authenticator . md <nl> <nl> # Authenticator <nl> <nl> - Trojan servers can authenticate users according to not only passwords in the config file but also entries in a MySQL ( MariaDB ) database . To turn this functionality on , set ` enabled ` field in the MySQL config to ` true ` and correctly configure the server address , credentials , and etc . If you would like to connect to the database securely , you can to fill the ` cafile ` field indicating the CA file : <nl> + Trojan servers can authenticate users according to not only passwords in the config file but also entries in a MySQL ( MariaDB ) database . To turn this functionality on , set ` enabled ` field in the MySQL config to ` true ` and correctly configure the server address , credentials , and etc . If you would like to connect to the database securely , you can fill the ` ca ` field indicating the MySQL server ' s CA file and optionally fill the ` key ` and ` cert ` fields indicating the client ' s private key and certificate : <nl> <nl> ` ` ` json <nl> " mysql " : { <nl> Trojan servers can authenticate users according to not only passwords in the con <nl> " database " : " trojan " , <nl> " username " : " trojan " , <nl> " password " : " " , <nl> - " cafile " : " " <nl> + " key " : " " , <nl> + " cert " : " " , <nl> + " ca " : " " <nl> } <nl> ` ` ` <nl> <nl> mmm a / docs / config . md <nl> ppp b / docs / config . md <nl> The NAT config is for transparent proxy . You ' ll need to [ setup iptables rules ] ( h <nl> " database " : " trojan " , <nl> " username " : " trojan " , <nl> " password " : " " , <nl> - " cafile " : " " <nl> + " key " : " " , <nl> + " cert " : " " , <nl> + " ca " : " " <nl> } <nl> } <nl> ` ` ` <nl> mmm a / examples / server . json - example <nl> ppp b / examples / server . json - example <nl> <nl> " database " : " trojan " , <nl> " username " : " trojan " , <nl> " password " : " " , <nl> - " cafile " : " " <nl> + " key " : " " , <nl> + " cert " : " " , <nl> + " ca " : " " <nl> } <nl> } <nl> mmm a / src / core / authenticator . cpp <nl> ppp b / src / core / authenticator . cpp <nl> using namespace std ; <nl> Authenticator : : Authenticator ( const Config & config ) { <nl> mysql_init ( & con ) ; <nl> Log : : log_with_date_time ( " connecting to MySQL server " + config . mysql . server_addr + ' : ' + to_string ( config . mysql . server_port ) , Log : : INFO ) ; <nl> - if ( ! config . mysql . cafile . empty ( ) ) { <nl> - mysql_ssl_set ( & con , nullptr , nullptr , config . mysql . cafile . c_str ( ) , nullptr , nullptr ) ; <nl> + if ( ! config . mysql . ca . empty ( ) ) { <nl> + if ( ! config . mysql . key . empty ( ) & & ! config . mysql . cert . empty ( ) ) { <nl> + mysql_ssl_set ( & con , config . mysql . key . c_str ( ) , config . mysql . cert . c_str ( ) , config . mysql . ca . c_str ( ) , nullptr , nullptr ) ; <nl> + } else { <nl> + mysql_ssl_set ( & con , nullptr , nullptr , config . mysql . ca . c_str ( ) , nullptr , nullptr ) ; <nl> + } <nl> } <nl> if ( mysql_real_connect ( & con , config . mysql . server_addr . c_str ( ) , <nl> config . mysql . username . c_str ( ) , <nl> mmm a / src / core / config . cpp <nl> ppp b / src / core / config . cpp <nl> void Config : : populate ( const ptree & tree ) { <nl> mysql . database = tree . get ( " mysql . database " , string ( " trojan " ) ) ; <nl> mysql . username = tree . get ( " mysql . username " , string ( " trojan " ) ) ; <nl> mysql . password = tree . get ( " mysql . 
password " , string ( ) ) ; <nl> - mysql . cafile = tree . get ( " mysql . cafile " , string ( ) ) ; <nl> + mysql . key = tree . get ( " mysql . key " , string ( ) ) ; <nl> + mysql . cert = tree . get ( " mysql . cert " , string ( ) ) ; <nl> + mysql . ca = tree . get ( " mysql . ca " , string ( ) ) ; <nl> } <nl> <nl> bool Config : : sip003 ( ) { <nl> mmm a / src / core / config . h <nl> ppp b / src / core / config . h <nl> class Config { <nl> std : : string database ; <nl> std : : string username ; <nl> std : : string password ; <nl> - std : : string cafile ; <nl> + std : : string key ; <nl> + std : : string cert ; <nl> + std : : string ca ; <nl> } mysql ; <nl> void load ( const std : : string & filename ) ; <nl> void populate ( const std : : string & JSON ) ; <nl> mmm a / tests / LinuxSmokeTest / server . json <nl> ppp b / tests / LinuxSmokeTest / server . json <nl> <nl> " database " : " " , <nl> " username " : " " , <nl> " password " : " " , <nl> - " cafile " : " " <nl> + " key " : " " , <nl> + " cert " : " " , <nl> + " ca " : " " <nl> } <nl> } <nl>
|
Add two-way authentication. ( )
|
trojan-gfw/trojan
|
3d6d545dbd681a589b9d834c92dcd9e7f84b7a33
|
2020-06-09T19:15:47Z
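The trojan change above enables mutual (two-way) TLS toward MySQL: when both `key` and `cert` are configured alongside `ca`, the client presents its own certificate through `mysql_ssl_set`. Below is a minimal standalone C++ sketch of that call sequence. The file paths, host, port, and credentials are placeholders rather than values from the patch, and `mysql_ssl_set` is the classic MySQL C API entry point (newer client libraries prefer `mysql_options`).

```cpp
// Minimal sketch of two-way (mutual) TLS with the MySQL C API, mirroring the
// logic added in the diff above. All paths and connection parameters are
// placeholders for illustration only.
#include <mysql.h>
#include <cstdio>

int main() {
    MYSQL con;
    mysql_init(&con);
    const char* key  = "/etc/trojan/client-key.pem";   // client private key (assumed path)
    const char* cert = "/etc/trojan/client-cert.pem";  // client certificate (assumed path)
    const char* ca   = "/etc/trojan/ca.pem";           // server CA (assumed path)
    // With key+cert the client authenticates itself to the server; with only
    // `ca` the client merely verifies the server (one-way TLS), which is the
    // else-branch of the patch above.
    mysql_ssl_set(&con, key, cert, ca, nullptr, nullptr);
    if (!mysql_real_connect(&con, "127.0.0.1", "trojan", "", "trojan",
                            3306, nullptr, 0)) {
        std::fprintf(stderr, "connect failed: %s\n", mysql_error(&con));
        return 1;
    }
    mysql_close(&con);
    return 0;
}
```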
|
mmm a / src / wasm / baseline / arm / liftoff - assembler - arm . h <nl> ppp b / src / wasm / baseline / arm / liftoff - assembler - arm . h <nl> void LiftoffAssembler : : LoadTaggedPointer ( Register dst , Register src_addr , <nl> offset_imm , LoadType : : kI32Load , pinned ) ; <nl> } <nl> <nl> + void LiftoffAssembler : : StoreTaggedPointer ( Register dst_addr , <nl> + Register offset_reg , <nl> + int32_t offset_imm , <nl> + LiftoffRegister src , <nl> + LiftoffRegList pinned ) { <nl> + STATIC_ASSERT ( kTaggedSize = = kInt32Size ) ; <nl> + Store ( dst_addr , offset_reg , offset_imm , src , StoreType : : kI32Store , pinned ) ; <nl> + } <nl> + <nl> void LiftoffAssembler : : Load ( LiftoffRegister dst , Register src_addr , <nl> Register offset_reg , uint32_t offset_imm , <nl> LoadType type , LiftoffRegList pinned , <nl> mmm a / src / wasm / baseline / arm64 / liftoff - assembler - arm64 . h <nl> ppp b / src / wasm / baseline / arm64 / liftoff - assembler - arm64 . h <nl> void LiftoffAssembler : : LoadTaggedPointer ( Register dst , Register src_addr , <nl> LoadTaggedPointerField ( dst , src_op ) ; <nl> } <nl> <nl> + void LiftoffAssembler : : StoreTaggedPointer ( Register dst_addr , <nl> + Register offset_reg , <nl> + int32_t offset_imm , <nl> + LiftoffRegister src , <nl> + LiftoffRegList pinned ) { <nl> + UseScratchRegisterScope temps ( this ) ; <nl> + MemOperand dst_op = <nl> + liftoff : : GetMemOp ( this , & temps , dst_addr , offset_reg , offset_imm ) ; <nl> + StoreTaggedField ( src . gp ( ) , dst_op ) ; <nl> + } <nl> + <nl> void LiftoffAssembler : : Load ( LiftoffRegister dst , Register src_addr , <nl> Register offset_reg , uint32_t offset_imm , <nl> LoadType type , LiftoffRegList pinned , <nl> mmm a / src / wasm / baseline / ia32 / liftoff - assembler - ia32 . h <nl> ppp b / src / wasm / baseline / ia32 / liftoff - assembler - ia32 . h <nl> void LiftoffAssembler : : LoadTaggedPointer ( Register dst , Register src_addr , <nl> static_cast < uint32_t > ( offset_imm ) , LoadType : : kI32Load , pinned ) ; <nl> } <nl> <nl> + void LiftoffAssembler : : StoreTaggedPointer ( Register dst_addr , <nl> + Register offset_reg , <nl> + int32_t offset_imm , <nl> + LiftoffRegister src , <nl> + LiftoffRegList pinned ) { <nl> + DCHECK_GE ( offset_imm , 0 ) ; <nl> + STATIC_ASSERT ( kTaggedSize = = kInt32Size ) ; <nl> + Store ( dst_addr , offset_reg , offset_imm , src , StoreType : : kI32Store , pinned ) ; <nl> + } <nl> + <nl> void LiftoffAssembler : : Load ( LiftoffRegister dst , Register src_addr , <nl> Register offset_reg , uint32_t offset_imm , <nl> LoadType type , LiftoffRegList pinned , <nl> mmm a / src / wasm / baseline / liftoff - assembler . h <nl> ppp b / src / wasm / baseline / liftoff - assembler . h <nl> class LiftoffAssembler : public TurboAssembler { <nl> inline void LoadTaggedPointer ( Register dst , Register src_addr , <nl> Register offset_reg , int32_t offset_imm , <nl> LiftoffRegList pinned ) ; <nl> + inline void StoreTaggedPointer ( Register dst_addr , Register offset_reg , <nl> + int32_t offset_imm , LiftoffRegister src , <nl> + LiftoffRegList pinned ) ; <nl> inline void Load ( LiftoffRegister dst , Register src_addr , Register offset_reg , <nl> uint32_t offset_imm , LoadType type , LiftoffRegList pinned , <nl> uint32_t * protected_load_pc = nullptr , <nl> mmm a / src / wasm / baseline / liftoff - compiler . cc <nl> ppp b / src / wasm / baseline / liftoff - compiler . 
cc <nl> class LiftoffCompiler { <nl> void GlobalGet ( FullDecoder * decoder , Value * result , <nl> const GlobalIndexImmediate < validate > & imm ) { <nl> const auto * global = & env_ - > module - > globals [ imm . index ] ; <nl> - if ( ! CheckSupportedType ( decoder , kSupportedTypesWithoutRefs , global - > type , <nl> - " global " ) ) { <nl> + if ( ! CheckSupportedType ( decoder , <nl> + FLAG_liftoff_extern_ref <nl> + ? kSupportedTypes <nl> + : kSupportedTypesWithoutRefs , <nl> + global - > type , " global " ) ) { <nl> + return ; <nl> + } <nl> + <nl> + if ( global - > type . is_reference_type ( ) ) { <nl> + if ( global - > mutability & & global - > imported ) { <nl> + unsupported ( decoder , kRefTypes , " imported mutable globals " ) ; <nl> + return ; <nl> + } <nl> + <nl> + LiftoffRegList pinned ; <nl> + Register globals_buffer = <nl> + pinned . set ( __ GetUnusedRegister ( kGpReg , pinned ) ) . gp ( ) ; <nl> + LOAD_TAGGED_PTR_INSTANCE_FIELD ( globals_buffer , TaggedGlobalsBuffer ) ; <nl> + Register value = pinned . set ( __ GetUnusedRegister ( kGpReg , pinned ) ) . gp ( ) ; <nl> + __ LoadTaggedPointer ( value , globals_buffer , no_reg , <nl> + wasm : : ObjectAccess : : ElementOffsetInTaggedFixedArray ( <nl> + imm . global - > offset ) , <nl> + pinned ) ; <nl> + __ PushRegister ( global - > type , LiftoffRegister ( value ) ) ; <nl> return ; <nl> } <nl> LiftoffRegList pinned ; <nl> class LiftoffCompiler { <nl> void GlobalSet ( FullDecoder * decoder , const Value & value , <nl> const GlobalIndexImmediate < validate > & imm ) { <nl> auto * global = & env_ - > module - > globals [ imm . index ] ; <nl> - if ( ! CheckSupportedType ( decoder , kSupportedTypesWithoutRefs , global - > type , <nl> - " global " ) ) <nl> + if ( ! CheckSupportedType ( decoder , <nl> + FLAG_liftoff_extern_ref <nl> + ? kSupportedTypes <nl> + : kSupportedTypesWithoutRefs , <nl> + global - > type , " global " ) ) { <nl> return ; <nl> + } <nl> + <nl> + if ( global - > type . is_reference_type ( ) ) { <nl> + if ( global - > mutability & & global - > imported ) { <nl> + unsupported ( decoder , kRefTypes , " imported mutable globals " ) ; <nl> + return ; <nl> + } <nl> + <nl> + LiftoffRegList pinned ; <nl> + Register globals_buffer = <nl> + pinned . set ( __ GetUnusedRegister ( kGpReg , pinned ) ) . gp ( ) ; <nl> + LOAD_TAGGED_PTR_INSTANCE_FIELD ( globals_buffer , TaggedGlobalsBuffer ) ; <nl> + LiftoffRegister value = pinned . set ( __ PopToRegister ( pinned ) ) ; <nl> + __ StoreTaggedPointer ( globals_buffer , no_reg , <nl> + wasm : : ObjectAccess : : ElementOffsetInTaggedFixedArray ( <nl> + imm . global - > offset ) , <nl> + value , pinned ) ; <nl> + return ; <nl> + } <nl> LiftoffRegList pinned ; <nl> uint32_t offset = 0 ; <nl> Register addr = GetGlobalBaseAndOffset ( global , & pinned , & offset ) ; <nl> mmm a / src / wasm / baseline / mips / liftoff - assembler - mips . h <nl> ppp b / src / wasm / baseline / mips / liftoff - assembler - mips . 
h <nl> void LiftoffAssembler : : LoadTaggedPointer ( Register dst , Register src_addr , <nl> static_cast < uint32_t > ( offset_imm ) , LoadType : : kI32Load , pinned ) ; <nl> } <nl> <nl> + void LiftoffAssembler : : StoreTaggedPointer ( Register dst_addr , <nl> + Register offset_reg , <nl> + int32_t offset_imm , <nl> + LiftoffRegister src , <nl> + LiftoffRegList pinned ) { <nl> + bailout ( kRefTypes , " GlobalSet " ) ; <nl> + } <nl> + <nl> void LiftoffAssembler : : Load ( LiftoffRegister dst , Register src_addr , <nl> Register offset_reg , uint32_t offset_imm , <nl> LoadType type , LiftoffRegList pinned , <nl> mmm a / src / wasm / baseline / mips64 / liftoff - assembler - mips64 . h <nl> ppp b / src / wasm / baseline / mips64 / liftoff - assembler - mips64 . h <nl> void LiftoffAssembler : : LoadTaggedPointer ( Register dst , Register src_addr , <nl> static_cast < uint32_t > ( offset_imm ) , LoadType : : kI64Load , pinned ) ; <nl> } <nl> <nl> + void LiftoffAssembler : : StoreTaggedPointer ( Register dst_addr , <nl> + Register offset_reg , <nl> + int32_t offset_imm , <nl> + LiftoffRegister src , <nl> + LiftoffRegList pinned ) { <nl> + bailout ( kRefTypes , " GlobalSet " ) ; <nl> + } <nl> + <nl> void LiftoffAssembler : : Load ( LiftoffRegister dst , Register src_addr , <nl> Register offset_reg , uint32_t offset_imm , <nl> LoadType type , LiftoffRegList pinned , <nl> mmm a / src / wasm / baseline / ppc / liftoff - assembler - ppc . h <nl> ppp b / src / wasm / baseline / ppc / liftoff - assembler - ppc . h <nl> void LiftoffAssembler : : LoadTaggedPointer ( Register dst , Register src_addr , <nl> bailout ( kUnsupportedArchitecture , " LoadTaggedPointer " ) ; <nl> } <nl> <nl> + void LiftoffAssembler : : StoreTaggedPointer ( Register dst_addr , <nl> + Register offset_reg , <nl> + int32_t offset_imm , <nl> + LiftoffRegister src , <nl> + LiftoffRegList pinned ) { <nl> + bailout ( kRefTypes , " GlobalSet " ) ; <nl> + } <nl> + <nl> void LiftoffAssembler : : Load ( LiftoffRegister dst , Register src_addr , <nl> Register offset_reg , uint32_t offset_imm , <nl> LoadType type , LiftoffRegList pinned , <nl> mmm a / src / wasm / baseline / s390 / liftoff - assembler - s390 . h <nl> ppp b / src / wasm / baseline / s390 / liftoff - assembler - s390 . h <nl> void LiftoffAssembler : : LoadTaggedPointer ( Register dst , Register src_addr , <nl> bailout ( kUnsupportedArchitecture , " LoadTaggedPointer " ) ; <nl> } <nl> <nl> + void LiftoffAssembler : : StoreTaggedPointer ( Register dst_addr , <nl> + Register offset_reg , <nl> + int32_t offset_imm , <nl> + LiftoffRegister src , <nl> + LiftoffRegList pinned ) { <nl> + bailout ( kRefTypes , " GlobalSet " ) ; <nl> + } <nl> + <nl> void LiftoffAssembler : : Load ( LiftoffRegister dst , Register src_addr , <nl> Register offset_reg , uint32_t offset_imm , <nl> LoadType type , LiftoffRegList pinned , <nl> mmm a / src / wasm / baseline / x64 / liftoff - assembler - x64 . h <nl> ppp b / src / wasm / baseline / x64 / liftoff - assembler - x64 . h <nl> void LiftoffAssembler : : LoadTaggedPointer ( Register dst , Register src_addr , <nl> LoadTaggedPointerField ( dst , src_op ) ; <nl> } <nl> <nl> + void LiftoffAssembler : : StoreTaggedPointer ( Register dst_addr , <nl> + Register offset_reg , <nl> + int32_t offset_imm , <nl> + LiftoffRegister src , <nl> + LiftoffRegList pinned ) { <nl> + DCHECK_GE ( offset_imm , 0 ) ; <nl> + if ( emit_debug_code ( ) & & offset_reg ! 
= no_reg ) { <nl> + AssertZeroExtended ( offset_reg ) ; <nl> + } <nl> + Operand dst_op = liftoff : : GetMemOp ( this , dst_addr , offset_reg , <nl> + static_cast < uint32_t > ( offset_imm ) ) ; <nl> + StoreTaggedField ( dst_op , src . gp ( ) ) ; <nl> + } <nl> + <nl> void LiftoffAssembler : : AtomicLoad ( LiftoffRegister dst , Register src_addr , <nl> Register offset_reg , uint32_t offset_imm , <nl> LoadType type , LiftoffRegList pinned ) { <nl> new file mode 100644 <nl> index 00000000000 . . 53fb6ea0aba <nl> mmm / dev / null <nl> ppp b / test / mjsunit / wasm / externref - globals - liftoff . js <nl> <nl> + / / Copyright 2020 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - experimental - wasm - reftypes - - expose - gc - - liftoff <nl> + / / Flags : - - no - wasm - tier - up - - liftoff - extern - ref <nl> + <nl> + load ( " test / mjsunit / wasm / externref - globals . js " ) ; <nl>
|
[wasm][liftoff] Support for most externref globals
|
v8/v8
|
e43ec59b4e95505fbb2e233390e7551891287cd3
|
2020-09-08T13:55:22Z
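`StoreTaggedPointer` is the store-side counterpart of `LoadTaggedPointer`; on 32-bit targets (arm, ia32) the diff simply forwards to the existing i32 store because `kTaggedSize == kInt32Size` there. The following is a toy, V8-independent sketch of that "delegate to the narrower store when widths match" pattern; every name in it is invented for illustration.

```cpp
// Toy illustration (not V8 code): when the tagged representation has the
// same width as i32, a tagged-pointer store can forward to the existing
// i32 store, exactly as the arm/ia32 implementations above do.
#include <cstdint>
#include <cstring>

using TaggedValue = uint32_t;  // stand-in for a 32-bit tagged pointer

void StoreI32(uint8_t* base, int32_t offset, uint32_t value) {
    std::memcpy(base + offset, &value, sizeof(value));  // existing narrow store
}

void StoreTagged(uint8_t* base, int32_t offset, TaggedValue v) {
    static_assert(sizeof(TaggedValue) == sizeof(uint32_t),
                  "tagged width must match i32 for this shortcut");
    StoreI32(base, offset, v);
}

int main() {
    uint8_t buf[16] = {};
    StoreTagged(buf, 4, 0xdeadbeef);
    return 0;
}
```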
|
mmm a / configure . ac <nl> ppp b / configure . ac <nl> <nl> AC_INIT ( [ Google C + + Mocking and Testing Frameworks ] , <nl> - [ 1 . 8 . 0 ] , <nl> + [ 1 . 9 . 0 ] , <nl> [ googlemock @ googlegroups . com ] , <nl> [ googletest ] ) <nl> <nl>
|
version: fix declared version to be in sync with CMakeLists.txt
|
google/googletest
|
c4ef6f3a0509514623c11f13058e86099f6fc11e
|
2018-09-12T22:50:17Z
|
new file mode 100644 <nl> index 0000000000000 . . 0d544ee7725d9 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / autograph / g3doc / reference / _control_flow_tutorial . ipynb <nl> <nl> + { <nl> + " cells " : [ <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " - vLwpT31YOJk " <nl> + } , <nl> + " source " : [ <nl> + " TODO ( b / 138297412 ) : This colab retains some useful code snippets and demonstrations that used to be in the tf . function / AutoGraph customization tutorial , and should be rolled into the existing docs as part of a broader markdown - \ u003ecolab conversion . " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " otIdN1TS8N7S " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " import tensorflow as tf " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " I0xDjO4SHLUD " <nl> + } , <nl> + " source " : [ <nl> + " Define a helper function to demonstrate the kinds of errors you might encounter : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " D25apou9IOXa " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " import traceback \ n " , <nl> + " import contextlib \ n " , <nl> + " \ n " , <nl> + " # Some helper code to demonstrate the kinds of errors you might encounter . \ n " , <nl> + " @ contextlib . contextmanager \ n " , <nl> + " def assert_raises ( error_class ) : \ n " , <nl> + " try : \ n " , <nl> + " yield \ n " , <nl> + " except error_class as e : \ n " , <nl> + " print ( ' Caught expected exception \ \ n { } : ' . format ( error_class ) ) \ n " , <nl> + " traceback . print_exc ( limit = 2 ) \ n " , <nl> + " except Exception as e : \ n " , <nl> + " raise e \ n " , <nl> + " else : \ n " , <nl> + " raise Exception ( ' Expected { } to be raised but no error was raised ! ' . format ( \ n " , <nl> + " error_class ) ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " 5f05Vr_YBUCz " <nl> + } , <nl> + " source " : [ <nl> + " # # Using AutoGraph \ n " , <nl> + " \ n " , <nl> + " The [ autograph ] ( https : / / www . tensorflow . org / guide / function ) library is fully integrated with ` tf . function ` , and it will rewrite conditionals and loops which depend on Tensors to run dynamically in the graph . \ n " , <nl> + " \ n " , <nl> + " ` tf . cond ` and ` tf . while_loop ` continue to work with ` tf . function ` , but code with control flow is often easier to write and understand when written in imperative style . " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " xgKmkrNTZSyz " <nl> + } , <nl> + " source " : [ <nl> + " # # AutoGraph : Conditionals \ n " , <nl> + " \ n " , <nl> + " AutoGraph will convert ` if ` statements into the equivalent ` tf . cond ` calls . \ n " , <nl> + " \ n " , <nl> + " This substitution is made if the condition is a Tensor . Otherwise , the conditional is executed during tracing . 
" <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " 20WlM9T2I9EV " <nl> + } , <nl> + " source " : [ <nl> + " Here is a function that checks if the resulting graph uses ` tf . cond ` : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " E - 7KllizZYsy " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " def test_tf_cond ( f , * args ) : \ n " , <nl> + " g = f . get_concrete_function ( * args ) . graph \ n " , <nl> + " if any ( node . name = = ' cond ' for node in g . as_graph_def ( ) . node ) : \ n " , <nl> + " print ( \ " { } ( { } ) uses tf . cond . \ " . format ( \ n " , <nl> + " f . __name__ , ' , ' . join ( map ( str , args ) ) ) ) \ n " , <nl> + " else : \ n " , <nl> + " print ( \ " { } ( { } ) executes normally . \ " . format ( \ n " , <nl> + " f . __name__ , ' , ' . join ( map ( str , args ) ) ) ) \ n " , <nl> + " \ n " , <nl> + " print ( \ " result : \ " , f ( * args ) . numpy ( ) ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " DlqiutEEJHOe " <nl> + } , <nl> + " source " : [ <nl> + " This substitution is made if the condition is a Tensor . Otherwise , the conditional is executed during tracing . \ n " , <nl> + " \ n " , <nl> + " Passing a python ` True ` executes the conditional normally : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " fCMywOXwJLIQ " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def dropout ( x , training = True ) : \ n " , <nl> + " if training : \ n " , <nl> + " x = tf . nn . dropout ( x , rate = 0 . 5 ) \ n " , <nl> + " return x " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " 68D2RZ17JM8u " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " test_tf_cond ( dropout , tf . ones ( [ 10 ] , dtype = tf . float32 ) , True ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " WEz0QYucJPBa " <nl> + } , <nl> + " source " : [ <nl> + " But passing a tensor replaces the python ` if ` with a ` tf . cond ` : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " o86paGR - Zadi " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " test_tf_cond ( dropout , tf . ones ( [ 10 ] , dtype = tf . float32 ) , tf . constant ( True ) ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " 5xFLfdApZh8q " <nl> + } , <nl> + " source " : [ <nl> + " ` tf . cond ` has a number of subtleties . \ n " , <nl> + " \ n " , <nl> + " it works by tracing both sides of the conditional , and then choosing the appropriate branch at runtime , depending on the condition . Tracing both sides can result in unexpected execution of Python code . 
" <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " VTMoZEVaZiwk " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def f ( x ) : \ n " , <nl> + " if x \ u003e 0 : \ n " , <nl> + " x = x + 1 . \ n " , <nl> + " print ( \ " Tracing ` then ` branch \ " ) \ n " , <nl> + " else : \ n " , <nl> + " x = x - 1 . \ n " , <nl> + " print ( \ " Tracing ` else ` branch \ " ) \ n " , <nl> + " return x " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " HqBVIZWb0Qzn " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " f ( - 1 . 0 ) . numpy ( ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " BIMfbXlW0QdP " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " f ( 1 . 0 ) . numpy ( ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " 2nBnJ42v0Pvq " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " f ( tf . constant ( 1 . 0 ) ) . numpy ( ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " zyzzvtN5Jfpb " <nl> + } , <nl> + " source " : [ <nl> + " It requires that if one branch creates a tensor used downstream , the other branch must also create that tensor . " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " k_dxWHeFZlaQ " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def f ( ) : \ n " , <nl> + " if tf . constant ( True ) : \ n " , <nl> + " x = tf . ones ( [ 3 , 3 ] ) \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " # Throws an error because both branches need to define ` x ` . \ n " , <nl> + " with assert_raises ( ValueError ) : \ n " , <nl> + " f ( ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " wP - LZP6cztnu " <nl> + } , <nl> + " source " : [ <nl> + " If you want to be sure that a particular section of control flow is never converted by autograph , then explicitly convert the object to a python type so an error is raised instead : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " iG_VDavjzrzV " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def f ( x , y ) : \ n " , <nl> + " if bool ( x ) : \ n " , <nl> + " y = y + 1 . \ n " , <nl> + " print ( \ " Tracing ` then ` branch \ " ) \ n " , <nl> + " else : \ n " , <nl> + " y = y - 1 . 
\ n " , <nl> + " print ( \ " Tracing ` else ` branch \ " ) \ n " , <nl> + " return y " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " kQ4CRP9T0rH2 " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " f ( True , 0 ) . numpy ( ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " ww9tCzHy0rkv " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " f ( False , 0 ) . numpy ( ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " ppuV7iug0r7i " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " with assert_raises ( TypeError ) : \ n " , <nl> + " f ( tf . constant ( True ) , 0 . 0 ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " yho4J0a0ZkQS " <nl> + } , <nl> + " source " : [ <nl> + " # # AutoGraph and loops \ n " , <nl> + " \ n " , <nl> + " AutoGraph has a few simple rules for converting loops . \ n " , <nl> + " \ n " , <nl> + " - ` for ` : Convert if the iterable is a tensor \ n " , <nl> + " - ` while ` : Convert if the while condition depends on a tensor \ n " , <nl> + " \ n " , <nl> + " If a loop is converted , it will be dynamically unrolled with ` tf . while_loop ` , or in the special case of a ` for x in tf . data . Dataset ` , transformed into ` tf . data . Dataset . reduce ` . \ n " , <nl> + " \ n " , <nl> + " If a loop is _not_ converted , it will be statically unrolled " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " OyzGNQAuZsky " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " def test_dynamically_unrolled ( f , * args ) : \ n " , <nl> + " g = f . get_concrete_function ( * args ) . graph \ n " , <nl> + " if any ( node . name = = ' while ' for node in g . as_graph_def ( ) . node ) : \ n " , <nl> + " print ( \ " { } ( { } ) uses tf . while_loop . \ " . format ( \ n " , <nl> + " f . __name__ , ' , ' . join ( map ( str , args ) ) ) ) \ n " , <nl> + " elif any ( node . name = = ' ReduceDataset ' for node in g . as_graph_def ( ) . node ) : \ n " , <nl> + " print ( \ " { } ( { } ) uses tf . data . Dataset . reduce . \ " . format ( \ n " , <nl> + " f . __name__ , ' , ' . join ( map ( str , args ) ) ) ) \ n " , <nl> + " else : \ n " , <nl> + " print ( \ " { } ( { } ) gets unrolled . \ " . format ( \ n " , <nl> + " f . __name__ , ' , ' . join ( map ( str , args ) ) ) ) \ n " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " KFO1BSN9JkRP " <nl> + } , <nl> + " source " : [ <nl> + " # # # For loops \ n " , <nl> + " \ n " , <nl> + " Here is a ` tf . 
function ` that demonstrates static unrolling : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " frecgTco_00V " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def for_in_range ( ) : \ n " , <nl> + " x = 0 \ n " , <nl> + " for i in range ( 5 ) : \ n " , <nl> + " x + = i \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " test_dynamically_unrolled ( for_in_range ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " PMdl0azc_5d4 " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def for_in_tfrange ( ) : \ n " , <nl> + " x = tf . constant ( 0 , dtype = tf . int32 ) \ n " , <nl> + " for i in tf . range ( 5 ) : \ n " , <nl> + " x + = i \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " test_dynamically_unrolled ( for_in_tfrange ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " Q7tmncQTZt6_ " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def for_in_tfdataset ( ) : \ n " , <nl> + " x = tf . constant ( 0 , dtype = tf . int64 ) \ n " , <nl> + " for i in tf . data . Dataset . range ( 5 ) : \ n " , <nl> + " x + = i \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " test_dynamically_unrolled ( for_in_tfdataset ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " eyPzDYiJAC8f " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def while_py_cond ( ) : \ n " , <nl> + " x = 5 \ n " , <nl> + " while x \ u003e 0 : \ n " , <nl> + " x - = 1 \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " test_dynamically_unrolled ( while_py_cond ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " l6s7aU - padY5 " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def while_tf_cond ( ) : \ n " , <nl> + " x = tf . constant ( 5 ) \ n " , <nl> + " while x \ u003e 0 : \ n " , <nl> + " x - = 1 \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " test_dynamically_unrolled ( while_tf_cond ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " dSr64Xn6ap - S " <nl> + } , <nl> + " source " : [ <nl> + " If you have a ` break ` or early ` return ` clause that depends on a tensor , the top - level condition or iterable should also be a tensor . \ n " , <nl> + " \ n " , <nl> + " Compare the following examples : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " hG2Fe_OEAwpY " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . 
function \ n " , <nl> + " def while_py_true_py_break ( x ) : \ n " , <nl> + " while True : # py true \ n " , <nl> + " if x = = 0 : # py break \ n " , <nl> + " break \ n " , <nl> + " x - = 1 \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " test_dynamically_unrolled ( while_py_true_py_break , 5 ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " Sr2cn5bY_E_9 " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def buggy_while_py_true_tf_break ( x ) : \ n " , <nl> + " while True : # py true \ n " , <nl> + " if tf . equal ( x , 0 ) : # tf break \ n " , <nl> + " break \ n " , <nl> + " x - = 1 \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " with assert_raises ( TypeError ) : \ n " , <nl> + " test_dynamically_unrolled ( buggy_while_py_true_tf_break , 5 ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " Q - VirD - 5avdZ " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def while_tf_true_tf_break ( x ) : \ n " , <nl> + " while tf . constant ( True ) : # tf true \ n " , <nl> + " if x = = 0 : # py break \ n " , <nl> + " break \ n " , <nl> + " x - = 1 \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " test_dynamically_unrolled ( while_tf_true_tf_break , 5 ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " Upx5J0j8_Ldu " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def buggy_py_for_tf_break ( ) : \ n " , <nl> + " x = 0 \ n " , <nl> + " for i in range ( 5 ) : # py for \ n " , <nl> + " if tf . equal ( i , 3 ) : # tf break \ n " , <nl> + " break \ n " , <nl> + " x + = i \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " with assert_raises ( TypeError ) : \ n " , <nl> + " test_dynamically_unrolled ( buggy_py_for_tf_break ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " GQHbodav_QMt " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def tf_for_py_break ( ) : \ n " , <nl> + " x = 0 \ n " , <nl> + " for i in tf . range ( 5 ) : # tf for \ n " , <nl> + " if i = = 3 : # py break \ n " , <nl> + " break \ n " , <nl> + " x + = i \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " test_dynamically_unrolled ( tf_for_py_break ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " hyksHW9TCukR " <nl> + } , <nl> + " source " : [ <nl> + " In order to accumulate results from a dynamically unrolled loop , you ' ll want to use ` tf . TensorArray ` . 
\ n " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " HJ3Vb3dXfefN " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " batch_size = 2 \ n " , <nl> + " seq_len = 3 \ n " , <nl> + " feature_size = 4 \ n " , <nl> + " \ n " , <nl> + " def rnn_step ( inp , state ) : \ n " , <nl> + " return inp + state \ n " , <nl> + " \ n " , <nl> + " @ tf . function \ n " , <nl> + " def dynamic_rnn ( rnn_step , input_data , initial_state ) : \ n " , <nl> + " # [ batch , time , features ] - \ u003e [ time , batch , features ] \ n " , <nl> + " input_data = tf . transpose ( input_data , [ 1 , 0 , 2 ] ) \ n " , <nl> + " max_seq_len = input_data . shape [ 0 ] \ n " , <nl> + " \ n " , <nl> + " states = tf . TensorArray ( tf . float32 , size = max_seq_len ) \ n " , <nl> + " state = initial_state \ n " , <nl> + " for i in tf . range ( max_seq_len ) : \ n " , <nl> + " state = rnn_step ( input_data [ i ] , state ) \ n " , <nl> + " states = states . write ( i , state ) \ n " , <nl> + " return tf . transpose ( states . stack ( ) , [ 1 , 0 , 2 ] ) \ n " , <nl> + " \ n " , <nl> + " dynamic_rnn ( rnn_step , \ n " , <nl> + " tf . random . uniform ( [ batch_size , seq_len , feature_size ] ) , \ n " , <nl> + " tf . zeros ( [ batch_size , feature_size ] ) ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " 9gmLpHY - bkly " <nl> + } , <nl> + " source " : [ <nl> + " # # # Gotcha ' s \ n " , <nl> + " \ n " , <nl> + " As with ` tf . cond ` , ` tf . while_loop ` also comes with a number of subtleties . \ n " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " FJdfznhhKO7D " <nl> + } , <nl> + " source " : [ <nl> + " # # # # Zero iterations \ n " , <nl> + " \ n " , <nl> + " Since a loop can execute 0 times , all tensors used downstream of the while_loop must be initialized above the loop . \ n " , <nl> + " \ n " , <nl> + " Here is an example of incorrect code : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " CocT5RHwblrQ " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def buggy_loop_var_uninitialized ( ) : \ n " , <nl> + " for i in tf . range ( 3 ) : \ n " , <nl> + " x = i \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " with assert_raises ( ValueError ) : \ n " , <nl> + " buggy_loop_var_uninitialized ( ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " ncr7tRZ1KWh9 " <nl> + } , <nl> + " source " : [ <nl> + " And the correct version : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " Wm7wIKXcCDGf " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def f ( ) : \ n " , <nl> + " x = tf . constant ( 0 ) \ n " , <nl> + " for i in tf . 
range ( 3 ) : \ n " , <nl> + " x = i \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " f ( ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " CM7qXVY0KZHB " <nl> + } , <nl> + " source " : [ <nl> + " # # # # Consistent shapes and types \ n " , <nl> + " \ n " , <nl> + " The shape / dtypes of all loop variables must stay consistent with each iteration . \ n " , <nl> + " \ n " , <nl> + " Here is an incorrect example that attempts to change a tensor ' s type : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " FSftc9cCbpAo " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def buggy_loop_type_changes ( ) : \ n " , <nl> + " x = tf . constant ( 0 , dtype = tf . float32 ) \ n " , <nl> + " for i in tf . range ( 3 ) : # Yields tensors of type tf . int32 . . . \ n " , <nl> + " x = i \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " with assert_raises ( TypeError ) : \ n " , <nl> + " buggy_loop_type_changes ( ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " markdown " , <nl> + " metadata " : { <nl> + " colab_type " : " text " , <nl> + " id " : " M5l90NAHKsUM " <nl> + } , <nl> + " source " : [ <nl> + " Here is an incorrect example that attempts to change a Tensor ' s shape while iterating : " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " kWF189prbuK0 " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def buggy_concat ( ) : \ n " , <nl> + " x = tf . ones ( [ 0 , 10 ] ) \ n " , <nl> + " for i in tf . range ( 5 ) : \ n " , <nl> + " x = tf . concat ( [ x , tf . ones ( [ 1 , 10 ] ) ] , axis = 0 ) \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " with assert_raises ( ValueError ) : \ n " , <nl> + " buggy_concat ( ) " <nl> + ] <nl> + } , <nl> + { <nl> + " cell_type " : " code " , <nl> + " execution_count " : 0 , <nl> + " metadata " : { <nl> + " colab " : { } , <nl> + " colab_type " : " code " , <nl> + " id " : " miYnYcznCHeV " <nl> + } , <nl> + " outputs " : [ ] , <nl> + " source " : [ <nl> + " @ tf . function \ n " , <nl> + " def concat_with_padding ( ) : \ n " , <nl> + " x = tf . zeros ( [ 5 , 10 ] ) \ n " , <nl> + " for i in tf . range ( 5 ) : \ n " , <nl> + " x = tf . concat ( [ x [ : i ] , tf . ones ( [ 1 , 10 ] ) , tf . zeros ( [ 4 - i , 10 ] ) ] , axis = 0 ) \ n " , <nl> + " x . set_shape ( [ 5 , 10 ] ) \ n " , <nl> + " return x \ n " , <nl> + " \ n " , <nl> + " concat_with_padding ( ) \ n " <nl> + ] <nl> + } <nl> + ] , <nl> + " metadata " : { <nl> + " colab " : { <nl> + " collapsed_sections " : [ ] , <nl> + " name " : " performance . ipynb " , <nl> + " private_outputs " : true , <nl> + " provenance " : [ ] , <nl> + " toc_visible " : true <nl> + } , <nl> + " kernelspec " : { <nl> + " display_name " : " Python 3 " , <nl> + " name " : " python3 " <nl> + } <nl> + } , <nl> + " nbformat " : 4 , <nl> + " nbformat_minor " : 0 <nl> + } <nl> mmm a / tensorflow / python / autograph / g3doc / reference / control_flow . md <nl> ppp b / tensorflow / python / autograph / g3doc / reference / control_flow . md <nl> def extra_test ( break_ ) : <nl> break_ , = ag__ . 
for_stmt ( range ( 10 ) , extra_test , . . . , ( break_ , ) ) <nl> ` ` ` <nl> <nl> + Mixing Tensor - dependent ` break ` and Python - dependent loops is disallowed : <nl> + <nl> + ` ` ` <nl> + @ tf . function <nl> + def buggy_while_py_true_tf_break ( x ) : <nl> + while True : # python conditional <nl> + if tf . equal ( x , 0 ) : # tensor break <nl> + break <nl> + x - = 1 <nl> + return x <nl> + <nl> + # Raises OperatorNotAllowedInGraphError : using a ` tf . Tensor ` as a Python ` bool ` is not allowed <nl> + # buggy_while_true_tf_break ( 5 ) <nl> + ` ` ` <nl> + <nl> # # # ` continue ` statements <nl> <nl> Code blocks in which ` continue ` statements are used are rewritten with <nl>
|
Move advanced AutoGraph material from tf.function guide into AutoGraph docs.
|
tensorflow/tensorflow
|
a80d96c6634cd005b3841d462030448f9f551a14
|
2020-04-29T20:01:24Z
|
mmm a / wine <nl> ppp b / wine <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit a1e5640b60439f0df83fc24c8a69629cef2c6c67 <nl> + Subproject commit 02951753970f5b2a0dafe41aee9fe96bc1a24301 <nl>
|
update wine
|
ValveSoftware/Proton
|
6364369ad9267489989925a531e6b4169bb877e4
|
2020-03-05T16:03:11Z
|
mmm a / build . yaml <nl> ppp b / build . yaml <nl> filegroups : <nl> - gpr <nl> filegroups : <nl> - grpc_trace_headers <nl> + - grpc_base_headers <nl> - name : grpc_trace_headers <nl> headers : <nl> - src / core / lib / debug / trace . h <nl> mmm a / src / core / ext / filters / client_channel / lb_policy / grpclb / proto / grpc / lb / v1 / load_balancer . pb . c <nl> ppp b / src / core / ext / filters / client_channel / lb_policy / grpclb / proto / grpc / lb / v1 / load_balancer . pb . c <nl> <nl> - / * <nl> - * <nl> - * Copyright 2017 gRPC authors . <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * <nl> - * / <nl> / * Automatically generated nanopb constant definitions * / <nl> / * Generated by nanopb - 0 . 3 . 7 - dev * / <nl> <nl> const pb_field_t grpc_lb_v1_Server_fields [ 5 ] = { <nl> # if ! defined ( PB_FIELD_32BIT ) <nl> / * If you get an error here , it means that you need to define PB_FIELD_32BIT <nl> * compile - time option . You can do that in pb . h or on compiler command line . <nl> - * <nl> + * <nl> * The reason you need to do this is that some of your messages contain tag <nl> * numbers or field sizes that are larger than what can fit in 8 or 16 bit <nl> * field descriptors . <nl> PB_STATIC_ASSERT ( ( pb_membersize ( grpc_lb_v1_LoadBalanceRequest , initial_request ) <nl> # if ! defined ( PB_FIELD_16BIT ) & & ! defined ( PB_FIELD_32BIT ) <nl> / * If you get an error here , it means that you need to define PB_FIELD_16BIT <nl> * compile - time option . You can do that in pb . h or on compiler command line . <nl> - * <nl> + * <nl> * The reason you need to do this is that some of your messages contain tag <nl> * numbers or field sizes that are larger than what can fit in the default <nl> * 8 bit descriptors . <nl> mmm a / src / core / ext / transport / chttp2 / transport / stream_lists . cc <nl> ppp b / src / core / ext / transport / chttp2 / transport / stream_lists . cc <nl> <nl> * <nl> * / <nl> <nl> + # include " src / core / ext / transport / chttp2 / transport / chttp2_transport . h " <nl> # include " src / core / ext / transport / chttp2 / transport / internal . h " <nl> <nl> # include < grpc / support / log . h > <nl> mmm a / src / core / lib / iomgr / tcp_windows . cc <nl> ppp b / src / core / lib / iomgr / tcp_windows . cc <nl> <nl> # include " src / core / lib / iomgr / sockaddr_utils . h " <nl> # include " src / core / lib / iomgr / socket_windows . h " <nl> # include " src / core / lib / iomgr / tcp_client . h " <nl> + # include " src / core / lib / iomgr / tcp_windows . h " <nl> # include " src / core / lib / iomgr / timer . h " <nl> # include " src / core / lib / slice / slice_internal . h " <nl> <nl> mmm a / src / core / lib / support / cpu_iphone . cc <nl> ppp b / src / core / lib / support / cpu_iphone . cc <nl> <nl> <nl> # include < grpc / support / port_platform . h > <nl> <nl> + # include < grpc / support / cpu . 
h > <nl> + <nl> # ifdef GPR_CPU_IPHONE <nl> <nl> / * Probably 2 instead of 1 , but see comment on gpr_cpu_current_cpu . * / <nl> mmm a / src / core / lib / support / cpu_posix . cc <nl> ppp b / src / core / lib / support / cpu_posix . cc <nl> <nl> # include < string . h > <nl> # include < unistd . h > <nl> <nl> + # include < grpc / support / cpu . h > <nl> # include < grpc / support / log . h > <nl> # include < grpc / support / sync . h > <nl> # include < grpc / support / useful . h > <nl> mmm a / src / core / lib / support / cpu_windows . cc <nl> ppp b / src / core / lib / support / cpu_windows . cc <nl> <nl> # include < grpc / support / port_platform . h > <nl> <nl> # ifdef GPR_WINDOWS <nl> + # include < grpc / support / cpu . h > <nl> # include < grpc / support / log . h > <nl> <nl> unsigned gpr_cpu_num_cores ( void ) { <nl> mmm a / src / core / lib / support / string_util_windows . cc <nl> ppp b / src / core / lib / support / string_util_windows . cc <nl> <nl> # include < strsafe . h > <nl> <nl> # include < grpc / support / alloc . h > <nl> + # include < grpc / support / log_windows . h > <nl> # include < grpc / support / string_util . h > <nl> <nl> # include " src / core / lib / support / string . h " <nl> mmm a / src / core / lib / support / wrap_memcpy . cc <nl> ppp b / src / core / lib / support / wrap_memcpy . cc <nl> <nl> * Enable by setting LDFLAGS = - Wl , - wrap , memcpy when linking . <nl> * / <nl> <nl> + extern " C " { <nl> # ifdef __linux__ <nl> # if defined ( __x86_64__ ) & & ! defined ( GPR_MUSL_LIBC_COMPAT ) <nl> __asm__ ( " . symver memcpy , memcpy @ GLIBC_2 . 2 . 5 " ) ; <nl> void * __wrap_memcpy ( void * destination , const void * source , size_t num ) { <nl> } <nl> # endif <nl> # endif <nl> + } <nl> mmm a / src / core / lib / transport / static_metadata . h <nl> ppp b / src / core / lib / transport / static_metadata . h <nl> <nl> # ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H <nl> # define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H <nl> <nl> + # ifdef __cplusplus <nl> + extern " C " { <nl> + # endif <nl> + <nl> # include " src / core / lib / transport / metadata . h " <nl> <nl> # define GRPC_STATIC_MDSTR_COUNT 100 <nl> extern const uint8_t grpc_static_accept_stream_encoding_metadata [ 4 ] ; <nl> ( GRPC_MAKE_MDELEM ( & grpc_static_mdelem_table \ <nl> [ grpc_static_accept_stream_encoding_metadata [ ( algs ) ] ] , \ <nl> GRPC_MDELEM_STORAGE_STATIC ) ) <nl> + # ifdef __cplusplus <nl> + } <nl> + # endif <nl> # endif / * GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H * / <nl> mmm a / tools / codegen / core / gen_static_metadata . py <nl> ppp b / tools / codegen / core / gen_static_metadata . py <nl> def esc_dict ( line ) : <nl> print > > H , ' # ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H ' <nl> print > > H , ' # define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H ' <nl> print > > H <nl> + print > > H , ' # ifdef __cplusplus ' <nl> + print > > H , ' extern " C " { ' <nl> + print > > H , ' # endif ' <nl> + print > > H <nl> print > > H , ' # include " src / core / lib / transport / metadata . 
h " ' <nl> print > > H <nl> <nl> def f ( i , p = p ) : <nl> <nl> print > > H , ' # define GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS ( algs ) ( GRPC_MAKE_MDELEM ( & grpc_static_mdelem_table [ grpc_static_accept_stream_encoding_metadata [ ( algs ) ] ] , GRPC_MDELEM_STORAGE_STATIC ) ) ' <nl> <nl> + print > > H , ' # ifdef __cplusplus ' <nl> + print > > H , ' } ' <nl> + print > > H , ' # endif ' <nl> + <nl> print > > H , ' # endif / * GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H * / ' <nl> <nl> H . close ( ) <nl> mmm a / tools / run_tests / generated / sources_and_headers . json <nl> ppp b / tools / run_tests / generated / sources_and_headers . json <nl> <nl> { <nl> " deps " : [ <nl> " gpr " , <nl> + " grpc_base_headers " , <nl> " grpc_trace_headers " <nl> ] , <nl> " headers " : [ ] , <nl>
|
some build changes, some unresolved dependencies
|
grpc/grpc
|
547653ebdb3cc16e3c0fb65383322bc7dacc9b90
|
2017-10-02T23:32:27Z
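Part of the grpc cleanup above wraps `static_metadata.h` and `wrap_memcpy.cc` in `extern "C"` so the C symbols keep C linkage during the C-to-C++ build migration. A minimal compilable sketch of the guard pattern follows; `my_c_function` is hypothetical, and in real code the guarded declarations would live in a header shared between C and C++ translation units.

```cpp
// Single-file sketch of the guard pattern the grpc diff applies to
// static_metadata.h: declarations keep C linkage under a C++ compiler.
#ifdef __cplusplus
extern "C" {
#endif

int my_c_function(int x); /* visible with C linkage from both languages */

#ifdef __cplusplus
} /* extern "C" */
#endif

// Definition with matching C linkage (would normally be a separate .c file).
extern "C" int my_c_function(int x) { return x * 2; }

int main() { return my_c_function(21) == 42 ? 0 : 1; }
```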
|
mmm a / SConstruct <nl> ppp b / SConstruct <nl> add_option ( ' variable - parse - mode ' , <nl> type = ' choice ' , default = variable_parse_mode_choices [ 0 ] , <nl> choices = variable_parse_mode_choices ) <nl> <nl> + add_option ( ' modules ' , <nl> + " Comma - separated list of modules to build . Empty means none . Default is all . " , <nl> + 1 , False ) <nl> + <nl> # Setup the command - line variables <nl> def variable_shlex_converter ( val ) : <nl> parse_mode = get_option ( ' variable - parse - mode ' ) <nl> if not use_system_version_of_library ( " boost " ) : <nl> boostSuffix = " - % s . 0 " % get_option ( " internal - boost " ) <nl> <nl> # discover modules , and load the ( python ) module for each module ' s build . py <nl> - mongo_modules = moduleconfig . discover_modules ( ' src / mongo / db / modules ' ) <nl> + mongo_modules = moduleconfig . discover_modules ( ' src / mongo / db / modules ' , get_option ( ' modules ' ) ) <nl> env [ ' MONGO_MODULES ' ] = [ m . name for m in mongo_modules ] <nl> <nl> # mmm check system mmm <nl> mmm a / buildscripts / moduleconfig . py <nl> ppp b / buildscripts / moduleconfig . py <nl> <nl> import inspect <nl> import os <nl> <nl> - def discover_modules ( module_root ) : <nl> + def discover_modules ( module_root , allowed_modules ) : <nl> " " " Scans module_root for subdirectories that look like MongoDB modules . <nl> <nl> Returns a list of imported build . py module objects . <nl> " " " <nl> found_modules = [ ] <nl> <nl> + if allowed_modules is not None : <nl> + allowed_modules = allowed_modules . split ( ' , ' ) <nl> + <nl> if not os . path . isdir ( module_root ) : <nl> return found_modules <nl> <nl> def discover_modules ( module_root ) : <nl> build_py = os . path . join ( root , ' build . py ' ) <nl> module = None <nl> <nl> + if allowed_modules is not None and name not in allowed_modules : <nl> + print " skipping module : % s " % name <nl> + continue <nl> + <nl> if os . path . isfile ( build_py ) : <nl> print " adding module : % s " % name <nl> fp = open ( build_py , " r " ) <nl>
|
SERVER-17897 Add a way to opt out of building modules
|
mongodb/mongo
|
a7fa0b96f58d22af7b074cb6a90d16c7b71a39f2
|
2015-04-09T15:35:56Z
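The new `--modules` option is parsed as a comma-separated allow-list, and `discover_modules` skips any module not on it (an empty string therefore disables all modules, matching the option help text). Here is a small C++ sketch of the same allow-list filter; the module names are invented, and the real logic lives in the Python shown above.

```cpp
// Sketch of the allow-list filtering discover_modules() performs:
// split "a,b,c" and keep only discovered names that appear in the list.
#include <iostream>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <vector>

std::set<std::string> parse_allow_list(const std::string& csv) {
    std::set<std::string> out;
    std::stringstream ss(csv);
    std::string item;
    while (std::getline(ss, item, ',')) out.insert(item);
    return out;
}

int main() {
    std::vector<std::string> discovered = {"enterprise", "rocksdb", "example"};
    // std::nullopt would mean "no --modules given: build everything".
    std::optional<std::string> modules_option = "enterprise,example";
    std::optional<std::set<std::string>> allowed;
    if (modules_option) allowed = parse_allow_list(*modules_option);
    for (const auto& name : discovered) {
        if (allowed && !allowed->count(name)) {
            std::cout << "skipping module: " << name << "\n";
            continue;
        }
        std::cout << "adding module: " << name << "\n";
    }
    return 0;
}
```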
|
mmm a / taichi / runtime / internal_function . h <nl> ppp b / taichi / runtime / internal_function . h <nl> i32 test_list_manager ( Context * context ) { <nl> sizeof ( ListManager ) , 4096 ) ; <nl> list = new ( ptr ) ListManager ( context , 4 , 16 ) ; <nl> for ( int i = 0 ; i < 320 ; i + + ) { <nl> - printf ( " appending % d \ n " , i ) ; <nl> - list - > append ( & i ) ; <nl> + / / printf ( " appending % d \ n " , i ) ; <nl> + auto j = i + 5 ; <nl> + list - > append ( & j ) ; <nl> + } <nl> + for ( int i = 0 ; i < 320 ; i + + ) { <nl> + TC_ASSERT ( * ( i32 * ) list - > get ( i ) = = i + 5 ) ; <nl> } <nl> return 0 ; <nl> } <nl> mmm a / taichi / runtime / runtime . cpp <nl> ppp b / taichi / runtime / runtime . cpp <nl> struct ListManager { <nl> void clear ( ) { <nl> num_elements = 0 ; <nl> } <nl> + <nl> + Ptr get ( i32 i ) { <nl> + return chunks [ i > > log2chunk_num_elements ] + <nl> + element_size * ( i & ( ( 1 < < log2chunk_num_elements ) - 1 ) ) ; <nl> + } <nl> } ; <nl> <nl> struct NodeManager { <nl> i32 linear_thread_idx ( ) { <nl> <nl> void ListManager : : append ( void * data_ptr ) { <nl> auto i = atomic_add_i32 ( & num_elements , 1 ) ; <nl> - printf ( " i % d \ n " , i ) ; <nl> + / / printf ( " i % d \ n " , i ) ; <nl> auto chunk_id = i > > log2chunk_num_elements ; <nl> auto item_id = i & ( ( 1 < < log2chunk_num_elements ) - 1 ) ; <nl> if ( ! chunks [ chunk_id ] ) { <nl> locked_task ( & lock , [ & ] { <nl> / / may have been allocated during lock contention <nl> if ( ! chunks [ chunk_id ] ) { <nl> - printf ( " Allocating chunk % d \ n " , chunk_id ) ; <nl> + / / printf ( " Allocating chunk % d \ n " , chunk_id ) ; <nl> chunks [ chunk_id ] = runtime - > allocate_aligned ( <nl> max_num_elements_per_chunk * element_size , 4096 ) ; <nl> } <nl> mmm a / tests / python / test_internal_func . py <nl> ppp b / tests / python / test_internal_func . py <nl> def test ( ) : <nl> test ( ) <nl> time . sleep ( 0 . 1 ) <nl> <nl> - # @ ti . all_archs <nl> - @ ti . host_arch <nl> + @ ti . all_archs <nl> def test_list_manager ( ) : <nl> @ ti . kernel <nl> def test ( ) : <nl>
|
reproduce test_list_manager failure on cuda
|
taichi-dev/taichi
|
ef81f6a9c5c20460468fd06595a4087d39ec4bbe
|
2020-01-27T05:37:48Z
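`ListManager` stores elements in fixed-size chunks, and the new `get(i)` locates element `i` with a shift for the chunk index and a mask for the offset within the chunk. Below is a self-contained, single-threaded C++ stand-in for that layout; the real taichi runtime adds atomic counters and lock-guarded chunk allocation, which this sketch deliberately omits.

```cpp
// Simplified stand-in for taichi's ListManager: elements live in fixed-size
// chunks, and get(i) uses shift/mask indexing exactly like the patch above.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

struct ChunkedList {
    static constexpr int kLog2ChunkElems = 4;  // 16 elements per chunk
    static constexpr int kChunkElems = 1 << kLog2ChunkElems;
    int element_size;
    std::vector<uint8_t*> chunks;
    int num_elements = 0;

    explicit ChunkedList(int elem_size) : element_size(elem_size) {}
    ~ChunkedList() { for (auto* c : chunks) delete[] c; }

    void append(const void* data) {
        int i = num_elements++;
        int chunk_id = i >> kLog2ChunkElems;
        int item_id = i & (kChunkElems - 1);
        if (chunk_id >= (int)chunks.size())
            chunks.push_back(new uint8_t[kChunkElems * element_size]);
        std::memcpy(chunks[chunk_id] + item_id * element_size, data, element_size);
    }

    uint8_t* get(int i) {  // mirrors ListManager::get in the diff
        return chunks[i >> kLog2ChunkElems] +
               element_size * (i & (kChunkElems - 1));
    }
};

int main() {
    ChunkedList list(sizeof(int32_t));
    for (int i = 0; i < 320; i++) { int32_t j = i + 5; list.append(&j); }
    for (int i = 0; i < 320; i++) assert(*(int32_t*)list.get(i) == i + 5);
    return 0;
}
```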
|
mmm a / cmake / modules / SwiftUtils . cmake <nl> ppp b / cmake / modules / SwiftUtils . cmake <nl> function ( precondition var ) <nl> cmake_parse_arguments ( <nl> PRECONDITION # prefix <nl> " NEGATE " # options <nl> - " " # single - value args <nl> + " MESSAGE " # single - value args <nl> " " # multi - value args <nl> $ { ARGN } ) <nl> <nl> if ( PRECONDITION_NEGATE ) <nl> if ( $ { var } ) <nl> - message ( FATAL_ERROR " Error ! Variable $ { var } is true . " ) <nl> + if ( PRECONDITION_MESSAGE ) <nl> + message ( FATAL_ERROR " Error ! $ { PRECONDITION_MESSAGE } " ) <nl> + else ( ) <nl> + message ( FATAL_ERROR " Error ! Variable $ { var } is true . " ) <nl> + endif ( ) <nl> endif ( ) <nl> else ( ) <nl> if ( NOT $ { var } ) <nl> - message ( FATAL_ERROR " Error ! Variable $ { var } is false or not set . " ) <nl> + if ( PRECONDITION_MESSAGE ) <nl> + message ( FATAL_ERROR " Error ! $ { PRECONDITION_MESSAGE } " ) <nl> + else ( ) <nl> + message ( FATAL_ERROR " Error ! Variable $ { var } is false or not set . " ) <nl> + endif ( ) <nl> endif ( ) <nl> endif ( ) <nl> endfunction ( ) <nl>
|
[cmake] Add an optional message parameter to precondition to allow for more specific precondition messages.
|
apple/swift
|
27f2a8018ea2a9e49a410e6717417a7d665d5cea
|
2016-07-25T19:03:25Z
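The `MESSAGE` argument lets callers replace the generic "Variable ... is false" text with a specific explanation. A C++ analogue of the same optional-message precondition pattern is sketched below; it is purely illustrative, since the real change is CMake.

```cpp
// C++ analogue of the cmake precondition() change: fail with a custom
// message when one is supplied, otherwise with a generic one.
#include <cstdlib>
#include <iostream>
#include <string>

void precondition(bool value, bool negate = false, const std::string& message = "") {
    bool failed = negate ? value : !value;
    if (!failed) return;
    if (!message.empty())
        std::cerr << "Error! " << message << "\n";
    else
        std::cerr << "Error! Variable is "
                  << (negate ? "true" : "false or not set") << ".\n";
    std::exit(1);
}

int main() {
    precondition(true);                                  // passes
    precondition(false, /*negate=*/true);                // passes
    precondition(false, false, "LLVM_DIR must be set");  // exits with custom text
}
```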
|
mmm a / tools / foozzie / v8_foozzie . py <nl> ppp b / tools / foozzie / v8_foozzie . py <nl> <nl> ignition_turbo = [ ' - - ignition - staging ' , ' - - turbo ' , ' - - validate - asm ' ] , <nl> ignition_turbo_opt = [ ' - - ignition - staging ' , ' - - turbo ' , ' - - always - opt ' , <nl> ' - - validate - asm ' ] , <nl> + ignition_turbo_opt_eager = [ <nl> + ' - - ignition - staging ' , ' - - turbo ' , ' - - always - opt ' , ' - - validate - asm ' , <nl> + ' - - no - lazy ' , ' - - no - lazy - inner - functions ' ] , <nl> ) <nl> <nl> # Timeout in seconds for one d8 run . <nl>
|
[foozzie] Add config to run turbo_opt and no-lazy
|
v8/v8
|
70965025ca1d7d4234d7645c48bee1988d2c6067
|
2017-03-03T15:08:44Z
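Each foozzie configuration is just a name mapped to the list of d8 flags used for one run of the differential fuzzer. The sketch below restates that name-to-flag-list table in C++; the actual tool is Python, and the entries merely echo the ones in the diff above.

```cpp
// Sketch of a named flag-configuration table like the foozzie change above:
// each config name maps to the command-line flags for one d8 run.
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
    std::map<std::string, std::vector<std::string>> configs = {
        {"ignition", {"--ignition-staging"}},
        {"ignition_turbo_opt_eager",
         {"--ignition-staging", "--turbo", "--always-opt", "--validate-asm",
          "--no-lazy", "--no-lazy-inner-functions"}},
    };
    for (const auto& flag : configs["ignition_turbo_opt_eager"])
        std::cout << flag << " ";
    std::cout << "\n";
}
```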
|
mmm a / listener . c <nl> ppp b / listener . c <nl> json_t * w_match_results_to_json ( <nl> uint32_t num_matches , <nl> struct watchman_rule_match * matches ) <nl> { <nl> - json_t * file_list = json_array ( ) ; <nl> + json_t * file_list = json_array_of_size ( num_matches ) ; <nl> uint32_t i ; <nl> <nl> for ( i = 0 ; i < num_matches ; i + + ) { <nl> mmm a / query / fieldlist . c <nl> ppp b / query / fieldlist . c <nl> json_t * w_query_results_to_json ( <nl> uint32_t num_results , <nl> struct watchman_rule_match * results ) <nl> { <nl> - json_t * file_list = json_array ( ) ; <nl> + json_t * file_list = json_array_of_size ( num_results ) ; <nl> uint32_t i , f ; <nl> <nl> for ( i = 0 ; i < num_results ; i + + ) { <nl> mmm a / thirdparty / jansson / jansson . h <nl> ppp b / thirdparty / jansson / jansson . h <nl> typedef long json_int_t ; <nl> <nl> json_t * json_object ( void ) ; <nl> json_t * json_array ( void ) ; <nl> + json_t * json_array_of_size ( size_t nelems ) ; <nl> json_t * json_string ( const char * value ) ; <nl> json_t * json_string_nocheck ( const char * value ) ; <nl> json_t * json_integer ( json_int_t value ) ; <nl> mmm a / thirdparty / jansson / value . c <nl> ppp b / thirdparty / jansson / value . c <nl> static json_t * json_object_deep_copy ( json_t * object ) <nl> <nl> / * * * array * * * / <nl> <nl> - json_t * json_array ( void ) <nl> + json_t * json_array_of_size ( size_t nelems ) <nl> { <nl> json_array_t * array = jsonp_malloc ( sizeof ( json_array_t ) ) ; <nl> if ( ! array ) <nl> json_t * json_array ( void ) <nl> json_init ( & array - > json , JSON_ARRAY ) ; <nl> <nl> array - > entries = 0 ; <nl> - array - > size = 8 ; <nl> + array - > size = max ( nelems , 8 ) ; <nl> <nl> array - > table = jsonp_malloc ( array - > size * sizeof ( json_t * ) ) ; <nl> if ( ! array - > table ) { <nl> json_t * json_array ( void ) <nl> return & array - > json ; <nl> } <nl> <nl> + json_t * json_array ( void ) <nl> + { <nl> + return json_array_of_size ( 8 ) ; <nl> + } <nl> + <nl> static void json_delete_array ( json_array_t * array ) <nl> { <nl> size_t i ; <nl>
|
JSON: allow pre-sizing the array table
|
facebook/watchman
|
74cfb4cb7c1961f9c98c019cd34bb6aa8bece66f
|
2013-05-12T01:49:49Z
|
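A minimal usage sketch for the json_array_of_size() API added above, written as C++ calling jansson's C interface. The make_scores() helper and its arguments are hypothetical; json_integer() and json_array_append_new() are existing jansson calls.

    #include <jansson.h>   // patched jansson providing json_array_of_size()

    // Build an array whose final length is known up front, so the backing
    // table is allocated once instead of growing from the default 8 slots.
    json_t *make_scores(const int *scores, size_t n) {
        json_t *arr = json_array_of_size(n);   // still allocates at least 8
        if (!arr) return NULL;
        for (size_t i = 0; i < n; i++) {
            // json_array_append_new() steals the reference to the new value.
            json_array_append_new(arr, json_integer(scores[i]));
        }
        return arr;
    }

This mirrors how the listener.c and fieldlist.c call sites above pass their already-known match counts.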
mmm a / modules / imgproc / doc / motion_analysis_and_object_tracking . rst <nl> ppp b / modules / imgproc / doc / motion_analysis_and_object_tracking . rst <nl> See Also : <nl> : func : ` accumulateSquare ` , <nl> : func : ` accumulateProduct ` , <nl> : func : ` accumulateWeighted ` <nl> + <nl> . . index : : accumulateSquare <nl> <nl> accumulateSquare <nl>
|
Purpose: completed the imgproc chapter
|
opencv/opencv
|
738c5bb4959ea325537766b9c88eff323ee6d3c9
|
2011-04-30T13:52:10Z
|
mmm a / Code / CryEngine / Cry3DEngine / 3dEngineLoad . cpp <nl> ppp b / Code / CryEngine / Cry3DEngine / 3dEngineLoad . cpp <nl> bool C3DEngine : : LoadVisAreas ( std : : vector < struct IStatObj * > * * ppStatObjTable , std <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> void C3DEngine : : UnloadLevel ( ) <nl> { <nl> + LOADING_TIME_PROFILE_SECTION ; <nl> if ( GetRenderer ( ) ) <nl> { <nl> GetRenderer ( ) - > EnableLevelUnloading ( true ) ; <nl> mmm a / Code / CryEngine / Cry3DEngine / SVO / SceneTree . cpp <nl> ppp b / Code / CryEngine / Cry3DEngine / SVO / SceneTree . cpp <nl> struct SVoxRndDataFileHdr <nl> <nl> void CSvoEnv : : ReconstructTree ( bool bMultiPoint ) <nl> { <nl> + LOADING_TIME_PROFILE_SECTION ; <nl> char szFolder [ 256 ] = " " ; <nl> CVoxelSegment : : MakeFolderName ( szFolder ) ; <nl> CVoxelSegment : : m_strRenderDataFileName = szFolder ; <nl> mmm a / Code / CryEngine / Cry3DEngine / SVO / SceneTreeManager . cpp <nl> ppp b / Code / CryEngine / Cry3DEngine / SVO / SceneTreeManager . cpp <nl> extern CSvoEnv * gSvoEnv ; <nl> <nl> void CSvoManager : : CheckAllocateGlobalCloud ( ) <nl> { <nl> + LOADING_TIME_PROFILE_SECTION ; <nl> if ( ! gSvoEnv & & Cry3DEngineBase : : GetCVars ( ) - > e_svoEnabled ) <nl> { <nl> float fMapSize = ( float ) Cry3DEngineBase : : Get3DEngine ( ) - > GetTerrainSize ( ) ; <nl> void CSvoManager : : Release ( ) <nl> <nl> void CSvoManager : : Render ( ) <nl> { <nl> + LOADING_TIME_PROFILE_SECTION ; <nl> if ( GetCVars ( ) - > e_svoTI_Apply & & ( ! m_bLevelLoadingInProgress | | gEnv - > IsEditor ( ) ) & & ! GetCVars ( ) - > e_svoTI_Active ) <nl> { <nl> GetCVars ( ) - > e_svoTI_Active = 1 ; <nl> void CSvoManager : : Render ( ) <nl> <nl> if ( GetCVars ( ) - > e_svoLoadTree ) <nl> { <nl> + LOADING_TIME_PROFILE_SECTION_NAMED ( " SVO Load Tree " ) ; <nl> SAFE_DELETE ( gSvoEnv ) ; <nl> <nl> GetCVars ( ) - > e_svoEnabled = 1 ; <nl> void CSvoManager : : Render ( ) <nl> <nl> if ( GetCVars ( ) - > e_svoEnabled & & GetCVars ( ) - > e_svoRender ) <nl> { <nl> + LOADING_TIME_PROFILE_SECTION_NAMED ( " SVO Render " ) ; <nl> + <nl> CheckAllocateGlobalCloud ( ) ; <nl> <nl> if ( gSvoEnv ) <nl> mmm a / Code / CryEngine / CryEntitySystem / EntitySystem . cpp <nl> ppp b / Code / CryEngine / CryEntitySystem / EntitySystem . cpp <nl> void CEntitySystem : : PurgeHeaps ( ) <nl> <nl> void CEntitySystem : : Reset ( ) <nl> { <nl> + LOADING_TIME_PROFILE_SECTION ; <nl> + <nl> m_pPartitionGrid - > BeginReset ( ) ; <nl> m_pProximityTriggerSystem - > BeginReset ( ) ; <nl> <nl>
|
!B (Sandbox) (HNT-17208) EDITOR: PERFORMANCE: Viewport stays black when loading Cemetery (Approved by samuelk)
|
CRYTEK/CRYENGINE
|
0e916f5f09c54d6e06cc7233ce7e99b815333b51
|
2017-05-15T13:15:19Z
|
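The LOADING_TIME_PROFILE_SECTION macros sprinkled through the diff above are CryEngine's scoped load-time profiler markers. Purely as an illustration, here is the general RAII pattern such section macros typically expand to; the ScopedLoadTimer type and the printf reporting are hypothetical stand-ins, not CryEngine's real implementation.

    #include <chrono>
    #include <cstdio>

    // Hypothetical scope timer: starts on construction and reports when the
    // enclosing scope (the "section") ends, even on early return.
    struct ScopedLoadTimer {
        const char* label;
        std::chrono::steady_clock::time_point start;
        explicit ScopedLoadTimer(const char* l)
            : label(l), start(std::chrono::steady_clock::now()) {}
        ~ScopedLoadTimer() {
            auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                          std::chrono::steady_clock::now() - start).count();
            std::printf("%s: %lld ms\n", label, static_cast<long long>(ms));
        }
    };

    #define LOADING_TIME_PROFILE_SECTION_NAMED(name) ScopedLoadTimer profileScope_(name)

    void UnloadLevelSketch() {
        LOADING_TIME_PROFILE_SECTION_NAMED("SVO Load Tree");
        // ... all work here is attributed to the named section ...
    }

Dropping one macro at the top of a function, as the commit does, is enough to account for the whole call in the load-time report.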
mmm a / tools / eosiocpp . in <nl> ppp b / tools / eosiocpp . in <nl> function generate_abi { <nl> exit 1 <nl> fi <nl> <nl> - context_folder = $ ( realpath $ ( dirname $ 1 ) ) <nl> + context_folder = $ ( cd " $ ( dirname " $ 1 " ) " ; pwd - P ) <nl> <nl> $ { ABIGEN } - extra - arg = - c - extra - arg = - - std = c + + 14 - extra - arg = - - target = wasm32 \ <nl> - extra - arg = - nostdinc - extra - arg = - nostdinc + + - extra - arg = - DABIGEN \ <nl>
|
Merge pull request from EOSIO/remove-realpath-from-eosiocpp
|
EOSIO/eos
|
346c3852b25920e431bb286ca4d83a55aa0e7177
|
2018-03-22T00:39:22Z
|
mmm a / lib / IRGen / IRGen . cpp <nl> ppp b / lib / IRGen / IRGen . cpp <nl> <nl> # include " swift / AST / Diagnostics . h " <nl> # include " swift / AST / IRGenOptions . h " <nl> # include " swift / AST / LinkLibrary . h " <nl> + # include " llvm / Bitcode / BitcodeWriterPass . h " <nl> # include " llvm / IR / LLVMContext . h " <nl> # include " llvm / IR / Module . h " <nl> # include " llvm / PassManager . h " <nl> - # include " llvm / Analysis / Verifier . h " <nl> + # include " llvm / IR / Verifier . h " <nl> # include " llvm / Bitcode / ReaderWriter . h " <nl> # include " llvm / IR / DataLayout . h " <nl> # include " llvm / IR / IRPrintingPasses . h " <nl>
|
more mainline API drift.
|
apple/swift
|
1d9a826420d68e95257de51f0acbf30546f50b66
|
2014-01-13T15:15:58Z
|
Binary files a / android / sdk / libs / armeabi / libweexv8 . so and b / android / sdk / libs / armeabi / libweexv8 . so differ <nl>
|
* [android] fix v8 security problem
|
apache/incubator-weex
|
06dbc38128a6997db9c2e0501ab058ff234b6886
|
2016-08-24T02:46:07Z
|
mmm a / src / json . hpp <nl> ppp b / src / json . hpp <nl> SOFTWARE . <nl> # endif <nl> <nl> / / allow to disable exceptions <nl> - # if __cpp_exceptions & & not defined ( JSON_NOEXCEPTION ) <nl> + # if ( __cpp_exceptions | | defined ( _CPPUNWIND ) ) & & not defined ( JSON_NOEXCEPTION ) <nl> # define JSON_THROW ( exception ) throw exception <nl> # define JSON_TRY try <nl> # define JSON_CATCH ( exception ) catch ( exception ) <nl> mmm a / src / json . hpp . re2c <nl> ppp b / src / json . hpp . re2c <nl> SOFTWARE . <nl> # endif <nl> <nl> / / allow to disable exceptions <nl> - # if __cpp_exceptions & & not defined ( JSON_NOEXCEPTION ) <nl> + # if ( __cpp_exceptions | | defined ( _CPPUNWIND ) ) & & not defined ( JSON_NOEXCEPTION ) <nl> # define JSON_THROW ( exception ) throw exception <nl> # define JSON_TRY try <nl> # define JSON_CATCH ( exception ) catch ( exception ) <nl>
|
:checkered_flag: added check for _CPPUNWIND
|
nlohmann/json
|
e3e6cbecc7a150590636d13b17a0260b5c46d90e
|
2017-03-11T16:59:24Z
|
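The fix above works because MSVC does not define __cpp_exceptions but does set _CPPUNWIND when C++ exceptions are enabled (/EHsc). A self-contained sketch of the resulting macro pattern, with the no-exception fallbacks written the way json.hpp of this era defines them (abort instead of throw, dead-code try/catch); the `not defined` spelling matches the diff:

    #include <cstdlib>   // std::abort for the exception-free fallback

    // _CPPUNWIND plays the role of __cpp_exceptions on MSVC.
    #if (__cpp_exceptions || defined(_CPPUNWIND)) && not defined(JSON_NOEXCEPTION)
        #define JSON_THROW(exception) throw exception
        #define JSON_TRY try
        #define JSON_CATCH(exception) catch (exception)
    #else
        // With exceptions off: abort on "throw", and compile the try/catch
        // bodies as plain always/never-taken conditionals.
        #define JSON_THROW(exception) std::abort()
        #define JSON_TRY if (true)
        #define JSON_CATCH(exception) if (false)
    #endif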
new file mode 100644 <nl> index 000000000000 . . d5e769c71a9c <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers / 28679 - swift - typebase - getcanonicaltype . swift <nl> <nl> + / / This source file is part of the Swift . org open source project <nl> + / / Copyright ( c ) 2014 - 2017 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See https : / / swift . org / LICENSE . txt for license information <nl> + / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + <nl> + / / RUN : not - - crash % target - swift - frontend % s - emit - ir <nl> + { struct A { func b ( UInt = 1 + 1 + 1 as ? Int ) { { { <nl>
|
Merge remote-tracking branch 'origin/master' into master-next
|
apple/swift
|
26034d6e55f439bdb0d035a7ddb5b1c4527c3c15
|
2017-02-04T11:23:10Z
|
mmm a / docs / root / configuration / best_practices / edge . rst <nl> ppp b / docs / root / configuration / best_practices / edge . rst <nl> HTTP proxies should additionally configure : <nl> * : ref : ` HTTP / 2 maximum concurrent streams limit < envoy_api_field_core . Http2ProtocolOptions . max_concurrent_streams > ` to 100 , <nl> * : ref : ` HTTP / 2 initial stream window size limit < envoy_api_field_core . Http2ProtocolOptions . initial_stream_window_size > ` to 64 KiB , <nl> * : ref : ` HTTP / 2 initial connection window size limit < envoy_api_field_core . Http2ProtocolOptions . initial_connection_window_size > ` to 1 MiB . <nl> + * : ref : ` headers_with_underscores_action setting < envoy_api_field_core . HttpProtocolOptions . headers_with_underscores_action > ` to REJECT_REQUEST , to protect upstream services that treat ' _ ' and ' - ' as interchangeable . <nl> <nl> The following is a YAML example of the above recommendation . <nl> <nl> The following is a YAML example of the above recommendation . <nl> use_remote_address : true <nl> common_http_protocol_options : <nl> idle_timeout : 3600s # 1 hour <nl> + headers_with_underscores_action : REJECT_REQUEST <nl> http2_protocol_options : <nl> max_concurrent_streams : 100 <nl> initial_stream_window_size : 65536 # 64 KiB <nl>
|
doc: Update recommended edge settings to reject requests with underscores ()
|
envoyproxy/envoy
|
082f4af1fe045897bd86b17412c98b40da6a658a
|
2020-04-08T17:16:21Z
|
mmm a / Marlin / Configuration . h <nl> ppp b / Marlin / Configuration . h <nl> const bool Z_MAX_ENDSTOP_INVERTING = false ; / / set to true to invert the logic o <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 20 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX 20 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / ConfigurationStore . cpp <nl> ppp b / Marlin / ConfigurationStore . cpp <nl> <nl> * max_acceleration_units_per_sq_second ( x4 ) <nl> * acceleration <nl> * retract_acceleration <nl> - * travel_aceeleration <nl> + * travel_acceleration <nl> * minimumfeedrate <nl> * mintravelfeedrate <nl> * minsegmenttime <nl> <nl> * mesh_num_x <nl> * mesh_num_y <nl> * z_values [ ] [ ] <nl> + * zprobe_zoffset <nl> * <nl> * DELTA : <nl> * endstop_adj ( x3 ) <nl> <nl> * absPreheatHotendTemp <nl> * absPreheatHPBTemp <nl> * absPreheatFanSpeed <nl> - * zprobe_zoffset <nl> * <nl> * PIDTEMP : <nl> * Kp [ 0 ] , Ki [ 0 ] , Kd [ 0 ] , Kc [ 0 ] <nl> void _EEPROM_readData ( int & pos , uint8_t * value , uint8_t size ) { <nl> / / wrong data being written to the variables . <nl> / / ALSO : always make sure the variables in the Store and retrieve sections are in the same order . <nl> <nl> - # define EEPROM_VERSION " V17 " <nl> + # define EEPROM_VERSION " V18 " <nl> <nl> # ifdef EEPROM_SETTINGS <nl> <nl> void Config_StoreSettings ( ) { <nl> <nl> uint8_t mesh_num_x = 3 ; <nl> uint8_t mesh_num_y = 3 ; <nl> - # if defined ( MESH_BED_LEVELING ) <nl> + # ifdef MESH_BED_LEVELING <nl> / / Compile time test that sizeof ( mbl . z_values ) is as expected <nl> typedef char c_assert [ ( sizeof ( mbl . z_values ) = = MESH_NUM_X_POINTS * MESH_NUM_Y_POINTS * sizeof ( dummy ) ) ? 
1 : - 1 ] ; <nl> mesh_num_x = MESH_NUM_X_POINTS ; <nl> void Config_StoreSettings ( ) { <nl> for ( int q = 0 ; q < mesh_num_x * mesh_num_y ; q + + ) { <nl> EEPROM_WRITE_VAR ( i , dummy ) ; <nl> } <nl> - # endif / / MESH_BED_LEVELING <nl> + # endif / / MESH_BED_LEVELING <nl> + <nl> + # ifndef ENABLE_AUTO_BED_LEVELING <nl> + float zprobe_zoffset = 0 ; <nl> + # endif <nl> + EEPROM_WRITE_VAR ( i , zprobe_zoffset ) ; <nl> <nl> # ifdef DELTA <nl> EEPROM_WRITE_VAR ( i , endstop_adj ) ; / / 3 floats <nl> void Config_StoreSettings ( ) { <nl> EEPROM_WRITE_VAR ( i , absPreheatHotendTemp ) ; <nl> EEPROM_WRITE_VAR ( i , absPreheatHPBTemp ) ; <nl> EEPROM_WRITE_VAR ( i , absPreheatFanSpeed ) ; <nl> - EEPROM_WRITE_VAR ( i , zprobe_zoffset ) ; <nl> + <nl> <nl> for ( int e = 0 ; e < 4 ; e + + ) { <nl> <nl> void Config_RetrieveSettings ( ) { <nl> } <nl> # endif / / MESH_BED_LEVELING <nl> <nl> + # ifndef ENABLE_AUTO_BED_LEVELING <nl> + float zprobe_zoffset = 0 ; <nl> + # endif <nl> + EEPROM_READ_VAR ( i , zprobe_zoffset ) ; <nl> + <nl> # ifdef DELTA <nl> EEPROM_READ_VAR ( i , endstop_adj ) ; / / 3 floats <nl> EEPROM_READ_VAR ( i , delta_radius ) ; / / 1 float <nl> void Config_RetrieveSettings ( ) { <nl> EEPROM_READ_VAR ( i , absPreheatHotendTemp ) ; <nl> EEPROM_READ_VAR ( i , absPreheatHPBTemp ) ; <nl> EEPROM_READ_VAR ( i , absPreheatFanSpeed ) ; <nl> - EEPROM_READ_VAR ( i , zprobe_zoffset ) ; <nl> <nl> # ifdef PIDTEMP <nl> for ( int e = 0 ; e < 4 ; e + + ) { / / 4 = max extruders currently supported by Marlin <nl> void Config_ResetDefault ( ) { <nl> max_e_jerk = DEFAULT_EJERK ; <nl> home_offset [ X_AXIS ] = home_offset [ Y_AXIS ] = home_offset [ Z_AXIS ] = 0 ; <nl> <nl> - # if defined ( MESH_BED_LEVELING ) <nl> + # ifdef MESH_BED_LEVELING <nl> mbl . active = 0 ; <nl> - # endif / / MESH_BED_LEVELING <nl> + # endif <nl> + <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + zprobe_zoffset = - Z_PROBE_OFFSET_FROM_EXTRUDER ; <nl> + # endif <nl> <nl> # ifdef DELTA <nl> endstop_adj [ X_AXIS ] = endstop_adj [ Y_AXIS ] = endstop_adj [ Z_AXIS ] = 0 ; <nl> void Config_ResetDefault ( ) { <nl> absPreheatFanSpeed = ABS_PREHEAT_FAN_SPEED ; <nl> # endif <nl> <nl> - # ifdef ENABLE_AUTO_BED_LEVELING <nl> - zprobe_zoffset = - Z_PROBE_OFFSET_FROM_EXTRUDER ; <nl> - # endif <nl> - <nl> # ifdef DOGLCD <nl> lcd_contrast = DEFAULT_LCD_CONTRAST ; <nl> # endif <nl> void Config_PrintSettings ( bool forReplay ) { <nl> } <nl> } <nl> <nl> - # ifdef CUSTOM_M_CODES <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> SERIAL_ECHO_START ; <nl> - if ( ! forReplay ) { <nl> - SERIAL_ECHOLNPGM ( " Z - Probe Offset ( mm ) : " ) ; <nl> - SERIAL_ECHO_START ; <nl> - } <nl> - SERIAL_ECHO ( " M " ) ; <nl> - SERIAL_ECHO ( CUSTOM_M_CODE_SET_Z_PROBE_OFFSET ) ; <nl> - SERIAL_ECHOPAIR ( " Z " , - zprobe_zoffset ) ; <nl> + # ifdef CUSTOM_M_CODES <nl> + if ( ! forReplay ) { <nl> + SERIAL_ECHOLNPGM ( " Z - Probe Offset ( mm ) : " ) ; <nl> + SERIAL_ECHO_START ; <nl> + } <nl> + SERIAL_ECHOPAIR ( " M " , ( unsigned long ) CUSTOM_M_CODE_SET_Z_PROBE_OFFSET ) ; <nl> + SERIAL_ECHOPAIR ( " Z " , - zprobe_zoffset ) ; <nl> + # else <nl> + if ( ! forReplay ) { <nl> + SERIAL_ECHOPAIR ( " Z - Probe Offset ( mm ) : " , - zprobe_zoffset ) ; <nl> + } <nl> + # endif <nl> SERIAL_EOL ; <nl> # endif <nl> } <nl> mmm a / Marlin / Marlin . h <nl> ppp b / Marlin / Marlin . 
h <nl> extern float z_endstop_adj ; <nl> extern float min_pos [ 3 ] ; <nl> extern float max_pos [ 3 ] ; <nl> extern bool axis_known_position [ 3 ] ; <nl> - extern float zprobe_zoffset ; <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + extern float zprobe_zoffset ; <nl> + # endif <nl> extern int fanSpeed ; <nl> # ifdef BARICUDA <nl> extern int ValvePressure ; <nl> mmm a / Marlin / Marlin_main . cpp <nl> ppp b / Marlin / Marlin_main . cpp <nl> <nl> <nl> float homing_feedrate [ ] = HOMING_FEEDRATE ; <nl> # ifdef ENABLE_AUTO_BED_LEVELING <nl> - int xy_travel_speed = XY_TRAVEL_SPEED ; <nl> + int xy_travel_speed = XY_TRAVEL_SPEED ; <nl> + float zprobe_zoffset = - Z_PROBE_OFFSET_FROM_EXTRUDER ; <nl> # endif <nl> int homing_bump_divisor [ ] = HOMING_BUMP_DIVISOR ; <nl> bool axis_relative_modes [ ] = AXIS_RELATIVE_MODES ; <nl> float home_offset [ 3 ] = { 0 , 0 , 0 } ; <nl> float min_pos [ 3 ] = { X_MIN_POS , Y_MIN_POS , Z_MIN_POS } ; <nl> float max_pos [ 3 ] = { X_MAX_POS , Y_MAX_POS , Z_MAX_POS } ; <nl> bool axis_known_position [ 3 ] = { false , false , false } ; <nl> - float zprobe_zoffset ; <nl> <nl> / / Extruder offset <nl> # if EXTRUDERS > 1 <nl> static void set_bed_level_equation_lsq ( double * plane_equation_coefficients ) <nl> current_position [ Y_AXIS ] = corrected_position . y ; <nl> current_position [ Z_AXIS ] = corrected_position . z ; <nl> <nl> - / / put the bed at 0 so we don ' t go below it . <nl> - current_position [ Z_AXIS ] = zprobe_zoffset ; / / in the lsq we reach here after raising the extruder due to the loop structure <nl> - <nl> plan_set_position ( current_position [ X_AXIS ] , current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , current_position [ E_AXIS ] ) ; <nl> } <nl> # endif <nl> static void set_bed_level_equation_3pts ( float z_at_pt_1 , float z_at_pt_2 , float <nl> vector_3 pt1 = vector_3 ( ABL_PROBE_PT_1_X , ABL_PROBE_PT_1_Y , z_at_pt_1 ) ; <nl> vector_3 pt2 = vector_3 ( ABL_PROBE_PT_2_X , ABL_PROBE_PT_2_Y , z_at_pt_2 ) ; <nl> vector_3 pt3 = vector_3 ( ABL_PROBE_PT_3_X , ABL_PROBE_PT_3_Y , z_at_pt_3 ) ; <nl> + vector_3 planeNormal = vector_3 : : cross ( pt1 - pt2 , pt3 - pt2 ) . get_normal ( ) ; <nl> <nl> - vector_3 from_2_to_1 = ( pt1 - pt2 ) . get_normal ( ) ; <nl> - vector_3 from_2_to_3 = ( pt3 - pt2 ) . get_normal ( ) ; <nl> - vector_3 planeNormal = vector_3 : : cross ( from_2_to_1 , from_2_to_3 ) . get_normal ( ) ; <nl> - planeNormal = vector_3 ( planeNormal . x , planeNormal . y , abs ( planeNormal . z ) ) ; <nl> + if ( planeNormal . z < 0 ) { <nl> + planeNormal . x = - planeNormal . x ; <nl> + planeNormal . y = - planeNormal . y ; <nl> + planeNormal . z = - planeNormal . z ; <nl> + } <nl> <nl> plan_bed_level_matrix = matrix_3x3 : : create_look_at ( planeNormal ) ; <nl> <nl> static void set_bed_level_equation_3pts ( float z_at_pt_1 , float z_at_pt_2 , float <nl> current_position [ Y_AXIS ] = corrected_position . y ; <nl> current_position [ Z_AXIS ] = corrected_position . z ; <nl> <nl> - / / put the bed at 0 so we don ' t go below it . 
<nl> - current_position [ Z_AXIS ] = zprobe_zoffset ; <nl> - <nl> plan_set_position ( current_position [ X_AXIS ] , current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , current_position [ E_AXIS ] ) ; <nl> - <nl> } <nl> <nl> # endif / / AUTO_BED_LEVELING_GRID <nl> inline void gcode_G28 ( ) { <nl> endstops_hit_on_purpose ( ) ; <nl> } <nl> <nl> - # if defined ( MESH_BED_LEVELING ) <nl> + # ifdef MESH_BED_LEVELING <nl> <nl> + / * * <nl> + * G29 : Mesh - based Z - Probe , probes a grid and produces a <nl> + * mesh to compensate for variable bed height <nl> + * <nl> + * Parameters With MESH_BED_LEVELING : <nl> + * <nl> + * S0 Produce a mesh report <nl> + * S1 Start probing mesh points <nl> + * S2 Probe the next mesh point <nl> + * <nl> + * / <nl> inline void gcode_G29 ( ) { <nl> static int probe_point = - 1 ; <nl> int state = 0 ; <nl> inline void gcode_G28 ( ) { <nl> } else if ( state = = 2 ) { / / Goto next point <nl> <nl> if ( probe_point < 0 ) { <nl> - SERIAL_PROTOCOLPGM ( " Mesh probing not started . \ n " ) ; <nl> + SERIAL_PROTOCOLPGM ( " Start mesh probing with \ " G29 S1 \ " first . \ n " ) ; <nl> return ; <nl> } <nl> int ix , iy ; <nl> inline void gcode_G28 ( ) { <nl> } else { <nl> ix = ( probe_point - 1 ) % MESH_NUM_X_POINTS ; <nl> iy = ( probe_point - 1 ) / MESH_NUM_X_POINTS ; <nl> - if ( iy & 1 ) { / / Zig zag <nl> - ix = ( MESH_NUM_X_POINTS - 1 ) - ix ; <nl> - } <nl> + if ( iy & 1 ) ix = ( MESH_NUM_X_POINTS - 1 ) - ix ; / / zig - zag <nl> mbl . set_z ( ix , iy , current_position [ Z_AXIS ] ) ; <nl> current_position [ Z_AXIS ] = MESH_HOME_SEARCH_Z ; <nl> plan_buffer_line ( current_position [ X_AXIS ] , current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , current_position [ E_AXIS ] , homing_feedrate [ X_AXIS ] / 60 , active_extruder ) ; <nl> st_synchronize ( ) ; <nl> } <nl> - if ( probe_point = = MESH_NUM_X_POINTS * MESH_NUM_Y_POINTS ) { <nl> - SERIAL_PROTOCOLPGM ( " Mesh done . \ n " ) ; <nl> + if ( probe_point = = MESH_NUM_X_POINTS * MESH_NUM_Y_POINTS ) { <nl> + SERIAL_PROTOCOLPGM ( " Mesh probing done . \ n " ) ; <nl> probe_point = - 1 ; <nl> mbl . active = 1 ; <nl> enquecommands_P ( PSTR ( " G28 " ) ) ; <nl> inline void gcode_G28 ( ) { <nl> } <nl> ix = probe_point % MESH_NUM_X_POINTS ; <nl> iy = probe_point / MESH_NUM_X_POINTS ; <nl> - if ( iy & 1 ) { / / Zig zag <nl> - ix = ( MESH_NUM_X_POINTS - 1 ) - ix ; <nl> - } <nl> + if ( iy & 1 ) ix = ( MESH_NUM_X_POINTS - 1 ) - ix ; / / zig - zag <nl> current_position [ X_AXIS ] = mbl . get_x ( ix ) ; <nl> current_position [ Y_AXIS ] = mbl . get_y ( iy ) ; <nl> plan_buffer_line ( current_position [ X_AXIS ] , current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , current_position [ E_AXIS ] , homing_feedrate [ X_AXIS ] / 60 , active_extruder ) ; <nl> inline void gcode_G28 ( ) { <nl> } <nl> } <nl> <nl> - # endif <nl> - <nl> - # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # elif defined ( ENABLE_AUTO_BED_LEVELING ) <nl> <nl> / * * <nl> * G29 : Detailed Z - Probe , probes the bed at 3 or more points . <nl> inline void gcode_G28 ( ) { <nl> * <nl> * S Set the XY travel speed between probe points ( in mm / min ) <nl> * <nl> - * D Dry - Run mode . Just evaluate the bed Topology - It does not apply or clean the rotation Matrix <nl> - * Useful to check the topology after a first run of G29 . <nl> + * D Dry - Run mode . Just evaluate the bed Topology - Don ' t apply <nl> + * or clean the rotation Matrix . Useful to check the topology <nl> + * after a first run of G29 . <nl> * <nl> * V Set the verbose level ( 0 - 4 ) . 
Example : " G29 V3 " <nl> * <nl> inline void gcode_G28 ( ) { <nl> <nl> # ifdef AUTO_BED_LEVELING_GRID <nl> <nl> - # ifndef DELTA <nl> - bool do_topography_map = verbose_level > 2 | | code_seen ( ' T ' ) | | code_seen ( ' t ' ) ; <nl> - # endif <nl> + # ifndef DELTA <nl> + bool do_topography_map = verbose_level > 2 | | code_seen ( ' T ' ) | | code_seen ( ' t ' ) ; <nl> + # endif <nl> <nl> if ( verbose_level > 0 ) <nl> { <nl> inline void gcode_G28 ( ) { <nl> <nl> # ifdef Z_PROBE_SLED <nl> dock_sled ( false ) ; / / engage ( un - dock ) the probe <nl> - # elif defined ( Z_PROBE_ALLEN_KEY ) <nl> + # elif defined ( Z_PROBE_ALLEN_KEY ) / / | | defined ( SERVO_LEVELING ) <nl> engage_z_probe ( ) ; <nl> # endif <nl> <nl> inline void gcode_G28 ( ) { <nl> { <nl> # ifdef DELTA <nl> reset_bed_level ( ) ; <nl> - # else <nl> - <nl> - / / make sure the bed_level_rotation_matrix is identity or the planner will get it incorectly <nl> - / / vector_3 corrected_position = plan_get_position_mm ( ) ; <nl> - / / corrected_position . debug ( " position before G29 " ) ; <nl> - plan_bed_level_matrix . set_to_identity ( ) ; <nl> - vector_3 uncorrected_position = plan_get_position ( ) ; <nl> - / / uncorrected_position . debug ( " position during G29 " ) ; <nl> - <nl> - current_position [ X_AXIS ] = uncorrected_position . x ; <nl> - current_position [ Y_AXIS ] = uncorrected_position . y ; <nl> - current_position [ Z_AXIS ] = uncorrected_position . z ; <nl> - plan_set_position ( current_position [ X_AXIS ] , current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , current_position [ E_AXIS ] ) ; <nl> + # else / / ! DELTA <nl> + <nl> + / / make sure the bed_level_rotation_matrix is identity or the planner will get it incorectly <nl> + / / vector_3 corrected_position = plan_get_position_mm ( ) ; <nl> + / / corrected_position . debug ( " position before G29 " ) ; <nl> + plan_bed_level_matrix . set_to_identity ( ) ; <nl> + vector_3 uncorrected_position = plan_get_position ( ) ; <nl> + / / uncorrected_position . debug ( " position during G29 " ) ; <nl> + current_position [ X_AXIS ] = uncorrected_position . x ; <nl> + current_position [ Y_AXIS ] = uncorrected_position . y ; <nl> + current_position [ Z_AXIS ] = uncorrected_position . z ; <nl> + plan_set_position ( current_position [ X_AXIS ] , current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , current_position [ E_AXIS ] ) ; <nl> <nl> # endif <nl> } <nl> inline void gcode_G28 ( ) { <nl> const int xGridSpacing = ( right_probe_bed_position - left_probe_bed_position ) / ( auto_bed_leveling_grid_points - 1 ) ; <nl> const int yGridSpacing = ( back_probe_bed_position - front_probe_bed_position ) / ( auto_bed_leveling_grid_points - 1 ) ; <nl> <nl> - # ifndef DELTA <nl> - / / solve the plane equation ax + by + d = z <nl> - / / A is the matrix with rows [ x y 1 ] for all the probed points <nl> - / / B is the vector of the Z positions <nl> - / / the normal vector to the plane is formed by the coefficients of the plane equation in the standard form , which is Vx * x + Vy * y + Vz * z + d = 0 <nl> - / / so Vx = - a Vy = - b Vz = 1 ( we want the vector facing towards positive Z <nl> - <nl> - int abl2 = auto_bed_leveling_grid_points * auto_bed_leveling_grid_points ; <nl> - <nl> - double eqnAMatrix [ abl2 * 3 ] , / / " A " matrix of the linear system of equations <nl> - eqnBVector [ abl2 ] , / / " B " vector of Z points <nl> - mean = 0 . 
0 ; <nl> - <nl> - # else <nl> - delta_grid_spacing [ 0 ] = xGridSpacing ; <nl> - delta_grid_spacing [ 1 ] = yGridSpacing ; <nl> - <nl> - float z_offset = Z_PROBE_OFFSET_FROM_EXTRUDER ; <nl> - if ( code_seen ( axis_codes [ Z_AXIS ] ) ) z_offset + = code_value ( ) ; <nl> - # endif <nl> + # ifdef DELTA <nl> + delta_grid_spacing [ 0 ] = xGridSpacing ; <nl> + delta_grid_spacing [ 1 ] = yGridSpacing ; <nl> + float z_offset = Z_PROBE_OFFSET_FROM_EXTRUDER ; <nl> + if ( code_seen ( axis_codes [ Z_AXIS ] ) ) z_offset + = code_value ( ) ; <nl> + # else / / ! DELTA <nl> + / / solve the plane equation ax + by + d = z <nl> + / / A is the matrix with rows [ x y 1 ] for all the probed points <nl> + / / B is the vector of the Z positions <nl> + / / the normal vector to the plane is formed by the coefficients of the plane equation in the standard form , which is Vx * x + Vy * y + Vz * z + d = 0 <nl> + / / so Vx = - a Vy = - b Vz = 1 ( we want the vector facing towards positive Z <nl> + <nl> + int abl2 = auto_bed_leveling_grid_points * auto_bed_leveling_grid_points ; <nl> + <nl> + double eqnAMatrix [ abl2 * 3 ] , / / " A " matrix of the linear system of equations <nl> + eqnBVector [ abl2 ] , / / " B " vector of Z points <nl> + mean = 0 . 0 ; <nl> + # endif / / ! DELTA <nl> <nl> int probePointCounter = 0 ; <nl> bool zig = true ; <nl> inline void gcode_G28 ( ) { <nl> float measured_z , <nl> z_before = probePointCounter = = 0 ? Z_RAISE_BEFORE_PROBING : current_position [ Z_AXIS ] + Z_RAISE_BETWEEN_PROBINGS ; <nl> <nl> - # ifdef DELTA <nl> - / / Avoid probing the corners ( outside the round or hexagon print surface ) on a delta printer . <nl> - float distance_from_center = sqrt ( xProbe * xProbe + yProbe * yProbe ) ; <nl> - if ( distance_from_center > DELTA_PROBABLE_RADIUS ) <nl> - continue ; <nl> - # endif / / DELTA <nl> + # ifdef DELTA <nl> + / / Avoid probing the corners ( outside the round or hexagon print surface ) on a delta printer . 
<nl> + float distance_from_center = sqrt ( xProbe * xProbe + yProbe * yProbe ) ; <nl> + if ( distance_from_center > DELTA_PROBABLE_RADIUS ) <nl> + continue ; <nl> + # endif / / DELTA <nl> <nl> / / Enhanced G29 - Do not retract servo between probes <nl> ProbeAction act ; <nl> inline void gcode_G28 ( ) { <nl> <nl> measured_z = probe_pt ( xProbe , yProbe , z_before , act , verbose_level ) ; <nl> <nl> - # ifndef DELTA <nl> - mean + = measured_z ; <nl> + # ifndef DELTA <nl> + mean + = measured_z ; <nl> <nl> - eqnBVector [ probePointCounter ] = measured_z ; <nl> - eqnAMatrix [ probePointCounter + 0 * abl2 ] = xProbe ; <nl> - eqnAMatrix [ probePointCounter + 1 * abl2 ] = yProbe ; <nl> - eqnAMatrix [ probePointCounter + 2 * abl2 ] = 1 ; <nl> - # else <nl> - bed_level [ xCount ] [ yCount ] = measured_z + z_offset ; <nl> - # endif <nl> + eqnBVector [ probePointCounter ] = measured_z ; <nl> + eqnAMatrix [ probePointCounter + 0 * abl2 ] = xProbe ; <nl> + eqnAMatrix [ probePointCounter + 1 * abl2 ] = yProbe ; <nl> + eqnAMatrix [ probePointCounter + 2 * abl2 ] = 1 ; <nl> + # else <nl> + bed_level [ xCount ] [ yCount ] = measured_z + z_offset ; <nl> + # endif <nl> <nl> probePointCounter + + ; <nl> } / / xProbe <nl> inline void gcode_G28 ( ) { <nl> <nl> clean_up_after_endstop_move ( ) ; <nl> <nl> - # ifndef DELTA <nl> - / / solve lsq problem <nl> - double * plane_equation_coefficients = qr_solve ( abl2 , 3 , eqnAMatrix , eqnBVector ) ; <nl> - <nl> - mean / = abl2 ; <nl> - <nl> - if ( verbose_level ) { <nl> - SERIAL_PROTOCOLPGM ( " Eqn coefficients : a : " ) ; <nl> - SERIAL_PROTOCOL_F ( plane_equation_coefficients [ 0 ] , 8 ) ; <nl> - SERIAL_PROTOCOLPGM ( " b : " ) ; <nl> - SERIAL_PROTOCOL_F ( plane_equation_coefficients [ 1 ] , 8 ) ; <nl> - SERIAL_PROTOCOLPGM ( " d : " ) ; <nl> - SERIAL_PROTOCOL_F ( plane_equation_coefficients [ 2 ] , 8 ) ; <nl> - SERIAL_EOL ; <nl> - if ( verbose_level > 2 ) { <nl> - SERIAL_PROTOCOLPGM ( " Mean of sampled points : " ) ; <nl> - SERIAL_PROTOCOL_F ( mean , 8 ) ; <nl> + # ifdef DELTA <nl> + <nl> + if ( ! dryrun ) extrapolate_unprobed_bed_level ( ) ; <nl> + print_bed_level ( ) ; <nl> + <nl> + # else / / ! DELTA <nl> + <nl> + / / solve lsq problem <nl> + double * plane_equation_coefficients = qr_solve ( abl2 , 3 , eqnAMatrix , eqnBVector ) ; <nl> + <nl> + mean / = abl2 ; <nl> + <nl> + if ( verbose_level ) { <nl> + SERIAL_PROTOCOLPGM ( " Eqn coefficients : a : " ) ; <nl> + SERIAL_PROTOCOL_F ( plane_equation_coefficients [ 0 ] , 8 ) ; <nl> + SERIAL_PROTOCOLPGM ( " b : " ) ; <nl> + SERIAL_PROTOCOL_F ( plane_equation_coefficients [ 1 ] , 8 ) ; <nl> + SERIAL_PROTOCOLPGM ( " d : " ) ; <nl> + SERIAL_PROTOCOL_F ( plane_equation_coefficients [ 2 ] , 8 ) ; <nl> SERIAL_EOL ; <nl> + if ( verbose_level > 2 ) { <nl> + SERIAL_PROTOCOLPGM ( " Mean of sampled points : " ) ; <nl> + SERIAL_PROTOCOL_F ( mean , 8 ) ; <nl> + SERIAL_EOL ; <nl> + } <nl> } <nl> - } <nl> <nl> - / / Show the Topography map if enabled <nl> - if ( do_topography_map ) { <nl> - <nl> - SERIAL_PROTOCOLPGM ( " \ nBed Height Topography : \ n " ) ; <nl> - SERIAL_PROTOCOLPGM ( " + mmmmmmmmm - - + \ n " ) ; <nl> - SERIAL_PROTOCOLPGM ( " | . . . Back . . . . | \ n " ) ; <nl> - SERIAL_PROTOCOLPGM ( " | Left . . Right | \ n " ) ; <nl> - SERIAL_PROTOCOLPGM ( " | . . . Front . . . 
| \ n " ) ; <nl> - SERIAL_PROTOCOLPGM ( " + mmmmmmmmm - - + \ n " ) ; <nl> - <nl> - for ( int yy = auto_bed_leveling_grid_points - 1 ; yy > = 0 ; yy - - ) { <nl> - for ( int xx = 0 ; xx < auto_bed_leveling_grid_points ; xx + + ) { <nl> - int ind = yy * auto_bed_leveling_grid_points + xx ; <nl> - float diff = eqnBVector [ ind ] - mean ; <nl> - if ( diff > = 0 . 0 ) <nl> - SERIAL_PROTOCOLPGM ( " + " ) ; / / Include + for column alignment <nl> - else <nl> - SERIAL_PROTOCOLPGM ( " " ) ; <nl> - SERIAL_PROTOCOL_F ( diff , 5 ) ; <nl> - } / / xx <nl> + / / Show the Topography map if enabled <nl> + if ( do_topography_map ) { <nl> + <nl> + SERIAL_PROTOCOLPGM ( " \ nBed Height Topography : \ n " ) ; <nl> + SERIAL_PROTOCOLPGM ( " + mmmmmmmmm - - + \ n " ) ; <nl> + SERIAL_PROTOCOLPGM ( " | . . . Back . . . . | \ n " ) ; <nl> + SERIAL_PROTOCOLPGM ( " | Left . . Right | \ n " ) ; <nl> + SERIAL_PROTOCOLPGM ( " | . . . Front . . . | \ n " ) ; <nl> + SERIAL_PROTOCOLPGM ( " + mmmmmmmmm - - + \ n " ) ; <nl> + <nl> + for ( int yy = auto_bed_leveling_grid_points - 1 ; yy > = 0 ; yy - - ) { <nl> + for ( int xx = 0 ; xx < auto_bed_leveling_grid_points ; xx + + ) { <nl> + int ind = yy * auto_bed_leveling_grid_points + xx ; <nl> + float diff = eqnBVector [ ind ] - mean ; <nl> + if ( diff > = 0 . 0 ) <nl> + SERIAL_PROTOCOLPGM ( " + " ) ; / / Include + for column alignment <nl> + else <nl> + SERIAL_PROTOCOLPGM ( " " ) ; <nl> + SERIAL_PROTOCOL_F ( diff , 5 ) ; <nl> + } / / xx <nl> + SERIAL_EOL ; <nl> + } / / yy <nl> SERIAL_EOL ; <nl> - } / / yy <nl> - SERIAL_EOL ; <nl> <nl> - } / / do_topography_map <nl> + } / / do_topography_map <nl> + <nl> <nl> + if ( ! dryrun ) set_bed_level_equation_lsq ( plane_equation_coefficients ) ; <nl> + free ( plane_equation_coefficients ) ; <nl> <nl> - if ( ! dryrun ) set_bed_level_equation_lsq ( plane_equation_coefficients ) ; <nl> - free ( plane_equation_coefficients ) ; <nl> - # else / / Delta <nl> - if ( ! dryrun ) extrapolate_unprobed_bed_level ( ) ; <nl> - print_bed_level ( ) ; <nl> - # endif / / Delta <nl> + # endif / / ! DELTA <nl> <nl> # else / / ! AUTO_BED_LEVELING_GRID <nl> <nl> inline void gcode_G28 ( ) { <nl> <nl> # endif / / ! AUTO_BED_LEVELING_GRID <nl> <nl> - # ifndef DELTA <nl> - if ( verbose_level > 0 ) plan_bed_level_matrix . debug ( " \ n \ nBed Level Correction Matrix : " ) ; <nl> + # ifndef DELTA <nl> + if ( verbose_level > 0 ) <nl> + plan_bed_level_matrix . debug ( " \ n \ nBed Level Correction Matrix : " ) ; <nl> <nl> - / / Correct the Z height difference from z - probe position and hotend tip position . <nl> - / / The Z height on homing is measured by Z - Probe , but the probe is quite far from the hotend . <nl> - / / When the bed is uneven , this height must be corrected . <nl> - if ( ! dryrun ) <nl> - { <nl> - real_z = float ( st_get_position ( Z_AXIS ) ) / axis_steps_per_unit [ Z_AXIS ] ; / / get the real Z ( since the auto bed leveling is already correcting the plane ) <nl> - x_tmp = current_position [ X_AXIS ] + X_PROBE_OFFSET_FROM_EXTRUDER ; <nl> - y_tmp = current_position [ Y_AXIS ] + Y_PROBE_OFFSET_FROM_EXTRUDER ; <nl> - z_tmp = current_position [ Z_AXIS ] ; <nl> + / / Correct the Z height difference from z - probe position and hotend tip position . <nl> + / / The Z height on homing is measured by Z - Probe , but the probe is quite far from the hotend . <nl> + / / When the bed is uneven , this height must be corrected . <nl> + if ( ! 
dryrun ) <nl> + { <nl> + real_z = float ( st_get_position ( Z_AXIS ) ) / axis_steps_per_unit [ Z_AXIS ] ; / / get the real Z ( since the auto bed leveling is already correcting the plane ) <nl> + x_tmp = current_position [ X_AXIS ] + X_PROBE_OFFSET_FROM_EXTRUDER ; <nl> + y_tmp = current_position [ Y_AXIS ] + Y_PROBE_OFFSET_FROM_EXTRUDER ; <nl> + z_tmp = current_position [ Z_AXIS ] ; <nl> <nl> - apply_rotation_xyz ( plan_bed_level_matrix , x_tmp , y_tmp , z_tmp ) ; / / Apply the correction sending the probe offset <nl> - current_position [ Z_AXIS ] = z_tmp - real_z + current_position [ Z_AXIS ] ; / / The difference is added to current position and sent to planner . <nl> - plan_set_position ( current_position [ X_AXIS ] , current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , current_position [ E_AXIS ] ) ; <nl> - } <nl> - # endif <nl> + apply_rotation_xyz ( plan_bed_level_matrix , x_tmp , y_tmp , z_tmp ) ; / / Apply the correction sending the probe offset <nl> + current_position [ Z_AXIS ] = z_tmp - real_z + current_position [ Z_AXIS ] ; / / The difference is added to current position and sent to planner . <nl> + plan_set_position ( current_position [ X_AXIS ] , current_position [ Y_AXIS ] , current_position [ Z_AXIS ] , current_position [ E_AXIS ] ) ; <nl> + } <nl> + # endif / / ! DELTA <nl> <nl> - # ifdef Z_PROBE_SLED <nl> - dock_sled ( true , - SLED_DOCKING_OFFSET ) ; / / dock the probe , correcting for over - travel <nl> - # elif defined ( Z_PROBE_ALLEN_KEY ) <nl> - retract_z_probe ( ) ; <nl> - # endif <nl> - <nl> - # ifdef Z_PROBE_END_SCRIPT <nl> - enquecommands_P ( PSTR ( Z_PROBE_END_SCRIPT ) ) ; <nl> - st_synchronize ( ) ; <nl> - # endif <nl> + # ifdef Z_PROBE_SLED <nl> + dock_sled ( true , - SLED_DOCKING_OFFSET ) ; / / dock the probe , correcting for over - travel <nl> + # elif defined ( Z_PROBE_ALLEN_KEY ) / / | | defined ( SERVO_LEVELING ) <nl> + retract_z_probe ( ) ; <nl> + # endif <nl> + <nl> + # ifdef Z_PROBE_END_SCRIPT <nl> + enquecommands_P ( PSTR ( Z_PROBE_END_SCRIPT ) ) ; <nl> + st_synchronize ( ) ; <nl> + # endif <nl> } <nl> <nl> # ifndef Z_PROBE_SLED <nl> inline void gcode_M42 ( ) { <nl> do_blocking_move_to ( X_probe_location , Y_probe_location , Z_start_location ) ; / / Make sure we are at the probe location <nl> <nl> if ( n_legs ) { <nl> - double radius = 0 . 0 , theta = 0 . 0 , x_sweep , y_sweep ; <nl> + double radius = 0 . 0 , theta = 0 . 0 ; <nl> int l ; <nl> int rotational_direction = ( unsigned long ) millis ( ) & 0x0001 ; / / clockwise or counter clockwise <nl> radius = ( unsigned long ) millis ( ) % ( long ) ( X_MAX_LENGTH / 4 ) ; / / limit how far out to go <nl> inline void gcode_M200 ( ) { <nl> } <nl> } <nl> <nl> - float area = . 
0 ; <nl> if ( code_seen ( ' D ' ) ) { <nl> float diameter = code_value ( ) ; <nl> / / setting any extruder filament size disables volumetric on the assumption that <nl> inline void gcode_M502 ( ) { <nl> * M503 : print settings currently in memory <nl> * / <nl> inline void gcode_M503 ( ) { <nl> - Config_PrintSettings ( code_seen ( ' S ' ) & & code_value = = 0 ) ; <nl> + Config_PrintSettings ( code_seen ( ' S ' ) & & code_value ( ) = = 0 ) ; <nl> } <nl> <nl> # ifdef ABORT_ON_ENDSTOP_HIT_FEATURE_ENABLED <nl> inline void gcode_T ( ) { <nl> SERIAL_ECHOLN ( MSG_INVALID_EXTRUDER ) ; <nl> } <nl> else { <nl> - boolean make_move = false ; <nl> + # if EXTRUDERS > 1 <nl> + bool make_move = false ; <nl> + # endif <nl> + <nl> if ( code_seen ( ' F ' ) ) { <nl> - make_move = true ; <nl> + # if EXTRUDERS > 1 <nl> + make_move = true ; <nl> + # endif <nl> next_feedrate = code_value ( ) ; <nl> if ( next_feedrate > 0 . 0 ) feedrate = next_feedrate ; <nl> } <nl> void ClearToSend ( ) <nl> SERIAL_PROTOCOLLNPGM ( MSG_OK ) ; <nl> } <nl> <nl> - void get_coordinates ( ) <nl> - { <nl> - bool seen [ 4 ] = { false , false , false , false } ; <nl> - for ( int8_t i = 0 ; i < NUM_AXIS ; i + + ) { <nl> - if ( code_seen ( axis_codes [ i ] ) ) <nl> - { <nl> - destination [ i ] = ( float ) code_value ( ) + ( axis_relative_modes [ i ] | | relative_mode ) * current_position [ i ] ; <nl> - seen [ i ] = true ; <nl> + void get_coordinates ( ) { <nl> + for ( int i = 0 ; i < NUM_AXIS ; i + + ) { <nl> + float dest ; <nl> + if ( code_seen ( axis_codes [ i ] ) ) { <nl> + dest = code_value ( ) ; <nl> + if ( axis_relative_modes [ i ] | | relative_mode ) <nl> + dest + = current_position [ i ] ; <nl> } <nl> - else destination [ i ] = current_position [ i ] ; / / Are these else lines really needed ? <nl> + else <nl> + dest = current_position [ i ] ; <nl> + <nl> + destination [ i ] = dest ; <nl> } <nl> - if ( code_seen ( ' F ' ) ) { <nl> + if ( code_seen ( ' F ' ) ) { <nl> next_feedrate = code_value ( ) ; <nl> - if ( next_feedrate > 0 . 0 ) feedrate = next_feedrate ; <nl> + if ( next_feedrate > 0 . 0 ) feedrate = next_feedrate ; <nl> } <nl> } <nl> <nl> mmm a / Marlin / SanityCheck . h <nl> ppp b / Marlin / SanityCheck . h <nl> <nl> <nl> / / Make sure probing points are reachable <nl> # if LEFT_PROBE_BED_POSITION < MIN_PROBE_X <nl> - # error The given LEFT_PROBE_BED_POSITION can not be reached by the probe . <nl> + # error " The given LEFT_PROBE_BED_POSITION can ' t be reached by the probe . " <nl> # elif RIGHT_PROBE_BED_POSITION > MAX_PROBE_X <nl> - # error The given RIGHT_PROBE_BED_POSITION can not be reached by the probe . <nl> + # error " The given RIGHT_PROBE_BED_POSITION can ' t be reached by the probe . " <nl> # elif FRONT_PROBE_BED_POSITION < MIN_PROBE_Y <nl> - # error The given FRONT_PROBE_BED_POSITION can not be reached by the probe . <nl> + # error " The given FRONT_PROBE_BED_POSITION can ' t be reached by the probe . " <nl> # elif BACK_PROBE_BED_POSITION > MAX_PROBE_Y <nl> - # error The given BACK_PROBE_BED_POSITION can not be reached by the probe . <nl> + # error " The given BACK_PROBE_BED_POSITION can ' t be reached by the probe . " <nl> # endif <nl> <nl> # define PROBE_SIZE_X ( X_PROBE_OFFSET_FROM_EXTRUDER * ( AUTO_BED_LEVELING_GRID_POINTS - 1 ) ) <nl> mmm a / Marlin / configurator / config / Configuration . h <nl> ppp b / Marlin / configurator / config / Configuration . 
h <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> / / @ section extras <nl> mmm a / Marlin / example_configurations / Felix / Configuration . h <nl> ppp b / Marlin / example_configurations / Felix / Configuration . h <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / example_configurations / Felix / Configuration_DUAL . h <nl> ppp b / Marlin / example_configurations / Felix / Configuration_DUAL . h <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / example_configurations / Hephestos / Configuration . h <nl> ppp b / Marlin / example_configurations / Hephestos / Configuration . h <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / example_configurations / K8200 / Configuration . h <nl> ppp b / Marlin / example_configurations / K8200 / Configuration . h <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / example_configurations / SCARA / Configuration . h <nl> ppp b / Marlin / example_configurations / SCARA / Configuration . 
h <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Custom M code points <nl> / / # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / example_configurations / WITBOX / Configuration . h <nl> ppp b / Marlin / example_configurations / WITBOX / Configuration . h <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / example_configurations / delta / generic / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / generic / Configuration . h <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / example_configurations / delta / kossel_mini / Configuration . h <nl> ppp b / Marlin / example_configurations / delta / kossel_mini / Configuration . h <nl> const bool Z_MAX_ENDSTOP_INVERTING = false ; / / set to true to invert the logic o <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / example_configurations / makibox / Configuration . h <nl> ppp b / Marlin / example_configurations / makibox / Configuration . h <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / example_configurations / tvrrug / Round2 / Configuration . h <nl> ppp b / Marlin / example_configurations / tvrrug / Round2 / Configuration . 
h <nl> const bool Z_MAX_ENDSTOP_INVERTING = true ; / / set to true to invert the logic of <nl> / / Custom M code points <nl> # define CUSTOM_M_CODES <nl> # ifdef CUSTOM_M_CODES <nl> - # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> - # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> - # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # define CUSTOM_M_CODE_SET_Z_PROBE_OFFSET 851 <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 15 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX - 5 <nl> + # endif <nl> # endif <nl> <nl> <nl> mmm a / Marlin / mesh_bed_leveling . cpp <nl> ppp b / Marlin / mesh_bed_leveling . cpp <nl> <nl> # include " mesh_bed_leveling . h " <nl> <nl> - # if defined ( MESH_BED_LEVELING ) <nl> + # ifdef MESH_BED_LEVELING <nl> <nl> - mesh_bed_leveling mbl ; <nl> + mesh_bed_leveling mbl ; <nl> <nl> - mesh_bed_leveling : : mesh_bed_leveling ( ) { <nl> - reset ( ) ; <nl> - } <nl> - <nl> - void mesh_bed_leveling : : reset ( ) { <nl> - for ( int y = 0 ; y < MESH_NUM_Y_POINTS ; y + + ) { <nl> - for ( int x = 0 ; x < MESH_NUM_X_POINTS ; x + + ) { <nl> - z_values [ y ] [ x ] = 0 ; <nl> - } <nl> - } <nl> + mesh_bed_leveling : : mesh_bed_leveling ( ) { reset ( ) ; } <nl> + <nl> + void mesh_bed_leveling : : reset ( ) { <nl> active = 0 ; <nl> - } <nl> + for ( int y = 0 ; y < MESH_NUM_Y_POINTS ; y + + ) <nl> + for ( int x = 0 ; x < MESH_NUM_X_POINTS ; x + + ) <nl> + z_values [ y ] [ x ] = 0 ; <nl> + } <nl> <nl> # endif / / MESH_BED_LEVELING <nl> mmm a / Marlin / mesh_bed_leveling . h <nl> ppp b / Marlin / mesh_bed_leveling . h <nl> <nl> <nl> # if defined ( MESH_BED_LEVELING ) <nl> <nl> - # define MESH_X_DIST ( ( MESH_MAX_X - MESH_MIN_X ) / ( MESH_NUM_X_POINTS - 1 ) ) <nl> - # define MESH_Y_DIST ( ( MESH_MAX_Y - MESH_MIN_Y ) / ( MESH_NUM_Y_POINTS - 1 ) ) <nl> + # define MESH_X_DIST ( ( MESH_MAX_X - MESH_MIN_X ) / ( MESH_NUM_X_POINTS - 1 ) ) <nl> + # define MESH_Y_DIST ( ( MESH_MAX_Y - MESH_MIN_Y ) / ( MESH_NUM_Y_POINTS - 1 ) ) <nl> <nl> - class mesh_bed_leveling { <nl> - public : <nl> + class mesh_bed_leveling { <nl> + public : <nl> uint8_t active ; <nl> float z_values [ MESH_NUM_Y_POINTS ] [ MESH_NUM_X_POINTS ] ; <nl> <nl> class mesh_bed_leveling { <nl> <nl> void reset ( ) ; <nl> <nl> - float get_x ( int i ) { return MESH_MIN_X + MESH_X_DIST * i ; } <nl> - float get_y ( int i ) { return MESH_MIN_Y + MESH_Y_DIST * i ; } <nl> + float get_x ( int i ) { return MESH_MIN_X + MESH_X_DIST * i ; } <nl> + float get_y ( int i ) { return MESH_MIN_Y + MESH_Y_DIST * i ; } <nl> void set_z ( int ix , int iy , float z ) { z_values [ iy ] [ ix ] = z ; } <nl> <nl> int select_x_index ( float x ) { <nl> - int i = 1 ; <nl> - while ( x > get_x ( i ) & & i < MESH_NUM_X_POINTS - 1 ) { <nl> - i + + ; <nl> - } <nl> - return i - 1 ; <nl> + int i = 1 ; <nl> + while ( x > get_x ( i ) & & i < MESH_NUM_X_POINTS - 1 ) i + + ; <nl> + return i - 1 ; <nl> } <nl> <nl> int select_y_index ( float y ) { <nl> - int i = 1 ; <nl> - while ( y > get_y ( i ) & & i < MESH_NUM_Y_POINTS - 1 ) { <nl> - i + + ; <nl> - } <nl> - return i - 1 ; <nl> + int i = 1 ; <nl> + while ( y > get_y ( i ) & & i < MESH_NUM_Y_POINTS - 1 ) i + + ; <nl> + return i - 1 ; <nl> } <nl> <nl> float calc_z0 ( float a0 , float a1 , float z1 , float a2 , float z2 ) { <nl> - float delta_z = ( z2 - z1 ) / ( a2 - a1 ) ; <nl> - float delta_a = a0 - a1 ; <nl> - return z1 + delta_a * delta_z ; <nl> + float delta_z = ( z2 - z1 ) / ( a2 - a1 ) ; <nl> + float delta_a = a0 - a1 ; <nl> + return z1 + delta_a * delta_z ; <nl> } <nl> <nl> float get_z ( float x0 , 
float y0 ) { <nl> - int x_index = select_x_index ( x0 ) ; <nl> - int y_index = select_y_index ( y0 ) ; <nl> - float z1 = calc_z0 ( x0 , <nl> - get_x ( x_index ) , z_values [ y_index ] [ x_index ] , <nl> - get_x ( x_index + 1 ) , z_values [ y_index ] [ x_index + 1 ] ) ; <nl> - float z2 = calc_z0 ( x0 , <nl> - get_x ( x_index ) , z_values [ y_index + 1 ] [ x_index ] , <nl> - get_x ( x_index + 1 ) , z_values [ y_index + 1 ] [ x_index + 1 ] ) ; <nl> - float z0 = calc_z0 ( y0 , <nl> - get_y ( y_index ) , z1 , <nl> - get_y ( y_index + 1 ) , z2 ) ; <nl> - return z0 ; <nl> + int x_index = select_x_index ( x0 ) ; <nl> + int y_index = select_y_index ( y0 ) ; <nl> + float z1 = calc_z0 ( x0 , <nl> + get_x ( x_index ) , z_values [ y_index ] [ x_index ] , <nl> + get_x ( x_index + 1 ) , z_values [ y_index ] [ x_index + 1 ] ) ; <nl> + float z2 = calc_z0 ( x0 , <nl> + get_x ( x_index ) , z_values [ y_index + 1 ] [ x_index ] , <nl> + get_x ( x_index + 1 ) , z_values [ y_index + 1 ] [ x_index + 1 ] ) ; <nl> + float z0 = calc_z0 ( y0 , <nl> + get_y ( y_index ) , z1 , <nl> + get_y ( y_index + 1 ) , z2 ) ; <nl> + return z0 ; <nl> } <nl> - } ; <nl> + } ; <nl> <nl> - extern mesh_bed_leveling mbl ; <nl> + extern mesh_bed_leveling mbl ; <nl> <nl> # endif / / MESH_BED_LEVELING <nl> mmm a / Marlin / stepper . cpp <nl> ppp b / Marlin / stepper . cpp <nl> void digipot_current ( uint8_t driver , int current ) { <nl> } <nl> <nl> void microstep_init ( ) { <nl> - const uint8_t microstep_modes [ ] = MICROSTEP_MODES ; <nl> - <nl> # if defined ( E1_MS1_PIN ) & & E1_MS1_PIN > = 0 <nl> pinMode ( E1_MS1_PIN , OUTPUT ) ; <nl> pinMode ( E1_MS2_PIN , OUTPUT ) ; <nl> void microstep_init ( ) { <nl> pinMode ( Z_MS2_PIN , OUTPUT ) ; <nl> pinMode ( E0_MS1_PIN , OUTPUT ) ; <nl> pinMode ( E0_MS2_PIN , OUTPUT ) ; <nl> - for ( int i = 0 ; i < = 4 ; i + + ) microstep_mode ( i , microstep_modes [ i ] ) ; <nl> + const uint8_t microstep_modes [ ] = MICROSTEP_MODES ; <nl> + for ( int i = 0 ; i < sizeof ( microstep_modes ) / sizeof ( microstep_modes [ 0 ] ) ; i + + ) <nl> + microstep_mode ( i , microstep_modes [ i ] ) ; <nl> # endif <nl> } <nl> <nl> mmm a / Marlin / temperature . cpp <nl> ppp b / Marlin / temperature . cpp <nl> static void set_current_temp_raw ( ) { <nl> # endif <nl> # if HAS_TEMP_1 <nl> # ifdef TEMP_SENSOR_1_AS_REDUNDANT <nl> - redundant_temperature_raw = <nl> + redundant_temperature_raw = raw_temp_value [ 1 ] ; <nl> + # else <nl> + current_temperature_raw [ 1 ] = raw_temp_value [ 1 ] ; <nl> # endif <nl> - current_temperature_raw [ 1 ] = raw_temp_value [ 1 ] ; <nl> # if HAS_TEMP_2 <nl> current_temperature_raw [ 2 ] = raw_temp_value [ 2 ] ; <nl> # if HAS_TEMP_3 <nl> mmm a / Marlin / temperature . h <nl> ppp b / Marlin / temperature . h <nl> void manage_heater ( ) ; / / it is critical that this is called periodically . <nl> <nl> / / low level conversion routines <nl> / / do not use these routines and variables outside of temperature . cpp <nl> - extern int target_temperature [ EXTRUDERS ] ; <nl> - extern float current_temperature [ EXTRUDERS ] ; <nl> + extern int target_temperature [ 4 ] ; <nl> + extern float current_temperature [ 4 ] ; <nl> # ifdef SHOW_TEMP_ADC_VALUES <nl> - extern int current_temperature_raw [ EXTRUDERS ] ; <nl> + extern int current_temperature_raw [ 4 ] ; <nl> extern int current_temperature_bed_raw ; <nl> # endif <nl> extern int target_temperature_bed ; <nl> mmm a / Marlin / ultralcd . cpp <nl> ppp b / Marlin / ultralcd . 
cpp <nl> static void lcd_control_motion_menu ( ) { <nl> START_MENU ( ) ; <nl> MENU_ITEM ( back , MSG_CONTROL , lcd_control_menu ) ; <nl> # ifdef ENABLE_AUTO_BED_LEVELING <nl> - MENU_ITEM_EDIT ( float32 , MSG_ZPROBE_ZOFFSET , & zprobe_zoffset , 0 . 0 , 50 ) ; <nl> + MENU_ITEM_EDIT ( float32 , MSG_ZPROBE_ZOFFSET , & zprobe_zoffset , Z_PROBE_OFFSET_RANGE_MIN , Z_PROBE_OFFSET_RANGE_MAX ) ; <nl> # endif <nl> MENU_ITEM_EDIT ( float5 , MSG_ACC , & acceleration , 10 , 99000 ) ; <nl> MENU_ITEM_EDIT ( float3 , MSG_VXY_JERK , & max_xy_jerk , 1 , 990 ) ; <nl> mmm a / Marlin / vector_3 . cpp <nl> ppp b / Marlin / vector_3 . cpp <nl> vector_3 : : vector_3 ( ) : x ( 0 ) , y ( 0 ) , z ( 0 ) { } <nl> <nl> vector_3 : : vector_3 ( float x_ , float y_ , float z_ ) : x ( x_ ) , y ( y_ ) , z ( z_ ) { } <nl> <nl> - vector_3 vector_3 : : cross ( vector_3 left , vector_3 right ) <nl> - { <nl> + vector_3 vector_3 : : cross ( vector_3 left , vector_3 right ) { <nl> return vector_3 ( left . y * right . z - left . z * right . y , <nl> left . z * right . x - left . x * right . z , <nl> left . x * right . y - left . y * right . x ) ; <nl> } <nl> <nl> - vector_3 vector_3 : : operator + ( vector_3 v ) <nl> - { <nl> - return vector_3 ( ( x + v . x ) , ( y + v . y ) , ( z + v . z ) ) ; <nl> - } <nl> - <nl> - vector_3 vector_3 : : operator - ( vector_3 v ) <nl> - { <nl> - return vector_3 ( ( x - v . x ) , ( y - v . y ) , ( z - v . z ) ) ; <nl> - } <nl> + vector_3 vector_3 : : operator + ( vector_3 v ) { return vector_3 ( ( x + v . x ) , ( y + v . y ) , ( z + v . z ) ) ; } <nl> + vector_3 vector_3 : : operator - ( vector_3 v ) { return vector_3 ( ( x - v . x ) , ( y - v . y ) , ( z - v . z ) ) ; } <nl> <nl> - vector_3 vector_3 : : get_normal ( ) <nl> - { <nl> + vector_3 vector_3 : : get_normal ( ) { <nl> vector_3 normalized = vector_3 ( x , y , z ) ; <nl> normalized . normalize ( ) ; <nl> return normalized ; <nl> } <nl> <nl> - float vector_3 : : get_length ( ) <nl> - { <nl> - float length = sqrt ( ( x * x ) + ( y * y ) + ( z * z ) ) ; <nl> - return length ; <nl> - } <nl> - <nl> - void vector_3 : : normalize ( ) <nl> - { <nl> + float vector_3 : : get_length ( ) { return sqrt ( ( x * x ) + ( y * y ) + ( z * z ) ) ; } <nl> + <nl> + void vector_3 : : normalize ( ) { <nl> float length = get_length ( ) ; <nl> x / = length ; <nl> y / = length ; <nl> z / = length ; <nl> } <nl> <nl> - void vector_3 : : apply_rotation ( matrix_3x3 matrix ) <nl> - { <nl> + void vector_3 : : apply_rotation ( matrix_3x3 matrix ) { <nl> float resultX = x * matrix . matrix [ 3 * 0 + 0 ] + y * matrix . matrix [ 3 * 1 + 0 ] + z * matrix . matrix [ 3 * 2 + 0 ] ; <nl> float resultY = x * matrix . matrix [ 3 * 0 + 1 ] + y * matrix . matrix [ 3 * 1 + 1 ] + z * matrix . matrix [ 3 * 2 + 1 ] ; <nl> float resultZ = x * matrix . matrix [ 3 * 0 + 2 ] + y * matrix . matrix [ 3 * 1 + 2 ] + z * matrix . matrix [ 3 * 2 + 2 ] ; <nl> - <nl> x = resultX ; <nl> y = resultY ; <nl> z = resultZ ; <nl> } <nl> <nl> - void vector_3 : : debug ( char * title ) <nl> - { <nl> + void vector_3 : : debug ( const char title [ ] ) { <nl> SERIAL_PROTOCOL ( title ) ; <nl> SERIAL_PROTOCOLPGM ( " x : " ) ; <nl> SERIAL_PROTOCOL_F ( x , 6 ) ; <nl> void vector_3 : : debug ( char * title ) <nl> SERIAL_EOL ; <nl> } <nl> <nl> - void apply_rotation_xyz ( matrix_3x3 matrix , float & x , float & y , float & z ) <nl> - { <nl> + void apply_rotation_xyz ( matrix_3x3 matrix , float & x , float & y , float & z ) { <nl> vector_3 vector = vector_3 ( x , y , z ) ; <nl> vector . 
apply_rotation ( matrix ) ; <nl> x = vector . x ; <nl> void apply_rotation_xyz ( matrix_3x3 matrix , float & x , float & y , float & z ) <nl> z = vector . z ; <nl> } <nl> <nl> - matrix_3x3 matrix_3x3 : : create_from_rows ( vector_3 row_0 , vector_3 row_1 , vector_3 row_2 ) <nl> - { <nl> - / / row_0 . debug ( " row_0 " ) ; <nl> - / / row_1 . debug ( " row_1 " ) ; <nl> - / / row_2 . debug ( " row_2 " ) ; <nl> + matrix_3x3 matrix_3x3 : : create_from_rows ( vector_3 row_0 , vector_3 row_1 , vector_3 row_2 ) { <nl> + / / row_0 . debug ( " row_0 " ) ; <nl> + / / row_1 . debug ( " row_1 " ) ; <nl> + / / row_2 . debug ( " row_2 " ) ; <nl> matrix_3x3 new_matrix ; <nl> new_matrix . matrix [ 0 ] = row_0 . x ; new_matrix . matrix [ 1 ] = row_0 . y ; new_matrix . matrix [ 2 ] = row_0 . z ; <nl> new_matrix . matrix [ 3 ] = row_1 . x ; new_matrix . matrix [ 4 ] = row_1 . y ; new_matrix . matrix [ 5 ] = row_1 . z ; <nl> new_matrix . matrix [ 6 ] = row_2 . x ; new_matrix . matrix [ 7 ] = row_2 . y ; new_matrix . matrix [ 8 ] = row_2 . z ; <nl> - / / new_matrix . debug ( " new_matrix " ) ; <nl> - <nl> + / / new_matrix . debug ( " new_matrix " ) ; <nl> return new_matrix ; <nl> } <nl> <nl> - void matrix_3x3 : : set_to_identity ( ) <nl> - { <nl> + void matrix_3x3 : : set_to_identity ( ) { <nl> matrix [ 0 ] = 1 ; matrix [ 1 ] = 0 ; matrix [ 2 ] = 0 ; <nl> matrix [ 3 ] = 0 ; matrix [ 4 ] = 1 ; matrix [ 5 ] = 0 ; <nl> matrix [ 6 ] = 0 ; matrix [ 7 ] = 0 ; matrix [ 8 ] = 1 ; <nl> } <nl> <nl> - matrix_3x3 matrix_3x3 : : create_look_at ( vector_3 target ) <nl> - { <nl> - vector_3 z_row = target . get_normal ( ) ; <nl> - vector_3 x_row = vector_3 ( 1 , 0 , - target . x / target . z ) . get_normal ( ) ; <nl> - vector_3 y_row = vector_3 : : cross ( z_row , x_row ) . get_normal ( ) ; <nl> + matrix_3x3 matrix_3x3 : : create_look_at ( vector_3 target ) { <nl> + vector_3 z_row = target . get_normal ( ) ; <nl> + vector_3 x_row = vector_3 ( 1 , 0 , - target . x / target . z ) . get_normal ( ) ; <nl> + vector_3 y_row = vector_3 : : cross ( z_row , x_row ) . get_normal ( ) ; <nl> <nl> - / / x_row . debug ( " x_row " ) ; <nl> - / / y_row . debug ( " y_row " ) ; <nl> - / / z_row . debug ( " z_row " ) ; <nl> + / / x_row . debug ( " x_row " ) ; <nl> + / / y_row . debug ( " y_row " ) ; <nl> + / / z_row . debug ( " z_row " ) ; <nl> <nl> - <nl> - / / create the matrix already correctly transposed <nl> - matrix_3x3 rot = matrix_3x3 : : create_from_rows ( x_row , y_row , z_row ) ; <nl> + / / create the matrix already correctly transposed <nl> + matrix_3x3 rot = matrix_3x3 : : create_from_rows ( x_row , y_row , z_row ) ; <nl> <nl> - / / rot . debug ( " rot " ) ; <nl> - return rot ; <nl> + / / rot . debug ( " rot " ) ; <nl> + return rot ; <nl> } <nl> <nl> - <nl> - matrix_3x3 matrix_3x3 : : transpose ( matrix_3x3 original ) <nl> - { <nl> + matrix_3x3 matrix_3x3 : : transpose ( matrix_3x3 original ) { <nl> matrix_3x3 new_matrix ; <nl> new_matrix . matrix [ 0 ] = original . matrix [ 0 ] ; new_matrix . matrix [ 1 ] = original . matrix [ 3 ] ; new_matrix . matrix [ 2 ] = original . matrix [ 6 ] ; <nl> new_matrix . matrix [ 3 ] = original . matrix [ 1 ] ; new_matrix . matrix [ 4 ] = original . matrix [ 4 ] ; new_matrix . matrix [ 5 ] = original . 
matrix [ 7 ] ; <nl> matrix_3x3 matrix_3x3 : : transpose ( matrix_3x3 original ) <nl> return new_matrix ; <nl> } <nl> <nl> - void matrix_3x3 : : debug ( char * title ) { <nl> + void matrix_3x3 : : debug ( const char title [ ] ) { <nl> SERIAL_PROTOCOLLN ( title ) ; <nl> int count = 0 ; <nl> for ( int i = 0 ; i < 3 ; i + + ) { <nl> for ( int j = 0 ; j < 3 ; j + + ) { <nl> + if ( matrix [ count ] > = 0 . 0 ) SERIAL_PROTOCOLPGM ( " + " ) ; <nl> SERIAL_PROTOCOL_F ( matrix [ count ] , 6 ) ; <nl> SERIAL_PROTOCOLPGM ( " " ) ; <nl> count + + ; <nl> void matrix_3x3 : : debug ( char * title ) { <nl> } <nl> } <nl> <nl> - # endif / / # ifdef ENABLE_AUTO_BED_LEVELING <nl> + # endif / / ENABLE_AUTO_BED_LEVELING <nl> <nl> mmm a / Marlin / vector_3 . h <nl> ppp b / Marlin / vector_3 . h <nl> struct vector_3 <nl> float get_length ( ) ; <nl> vector_3 get_normal ( ) ; <nl> <nl> - void debug ( char * title ) ; <nl> + void debug ( const char title [ ] ) ; <nl> <nl> void apply_rotation ( matrix_3x3 matrix ) ; <nl> } ; <nl> struct matrix_3x3 <nl> <nl> void set_to_identity ( ) ; <nl> <nl> - void debug ( char * title ) ; <nl> + void debug ( const char title [ ] ) ; <nl> } ; <nl> <nl> <nl>
|
Merge pull request from thinkyhead/fixup_leveling
|
MarlinFirmware/Marlin
|
1aec2f437c45e176b6d96f05c971dfe0fb09432e
|
2015-03-27T23:37:47Z
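A note on the const-correctness change in the record above (debug(char *title) becoming debug(const char title[])): passing a string literal such as "row_0" to a non-const char * parameter is deprecated in C++ and ill-formed since C++11, so every debug call on a literal warned. A minimal standalone sketch of the fix, using a hypothetical log_label function in place of Marlin's serial macros:

#include <cstdio>

// 'const char title[]' decays to 'const char *', so string literals
// bind to the parameter without any deprecated conversion.
void log_label(const char title[], float value) {
  std::printf("%s: %.6f\n", title, value);
}

int main() {
  log_label("row_0", 1.0f);  // a literal is fine here
  return 0;
}

The same record also replaces the magic bounds 0.0/50 on the Z-probe offset menu item with the named Z_PROBE_OFFSET_RANGE_MIN/MAX configuration constants, keeping the LCD menu and the probe logic in sync.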
|
mmm a / src / mongo / base / initializer_dependency_graph_test . cpp <nl> ppp b / src / mongo / base / initializer_dependency_graph_test . cpp <nl> <nl> namespace mongo { <nl> namespace { <nl> <nl> - Status doNothing ( InitializationContext * ) { return Status : : OK ; } <nl> + Status doNothing ( InitializerContext * ) { return Status : : OK ; } <nl> <nl> TEST ( InitializerDependencyGraphTest , InsertNullFunctionFails ) { <nl> InitializerDependencyGraph graph ; <nl> mmm a / src / mongo / base / initializer_function . h <nl> ppp b / src / mongo / base / initializer_function . h <nl> <nl> <nl> namespace mongo { <nl> <nl> - class InitializationContext ; <nl> + class InitializerContext ; <nl> <nl> / * * <nl> - * An InitializerFunction implements the behavior of an initialization operation . <nl> + * An InitializerFunction implements the behavior of an initializer operation . <nl> * <nl> * On successful execution , an InitializerFunction returns Status : : OK . It may <nl> - * inspect and mutate the supplied InitializationContext . <nl> + * inspect and mutate the supplied InitializerContext . <nl> * / <nl> - typedef boost : : function < Status ( InitializationContext * ) > InitializerFunction ; <nl> + typedef boost : : function < Status ( InitializerContext * ) > InitializerFunction ; <nl> <nl> } / / namespace mongo <nl>
|
Rename InitializationContext to InitializerContext, for consistency with other type names.
|
mongodb/mongo
|
ac4eb3fba2927f4f4c7083520cf825dbb74f0c12
|
2012-09-25T19:04:03Z
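The rename above touches a callback alias; the underlying pattern is a function-object type over a context pointer. A self-contained sketch of that pattern, substituting std::function for the boost::function used in the source and stubbing out Status and InitializerContext so it compiles on its own:

#include <functional>
#include <iostream>

struct Status { bool ok; };           // stand-in for mongo::Status
struct InitializerContext {};         // opaque context handed to each initializer

// Mirrors: typedef boost::function<Status(InitializerContext*)> InitializerFunction;
using InitializerFunction = std::function<Status(InitializerContext*)>;

Status doNothing(InitializerContext*) { return Status{true}; }

int main() {
  InitializerFunction fn = doNothing;  // any callable with this signature fits
  InitializerContext ctx;
  std::cout << (fn(&ctx).ok ? "OK" : "FAILED") << '\n';
  return 0;
}

Because the alias erases the concrete callable type, free functions, lambdas, and bound member functions can all be registered in the dependency graph interchangeably.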
|
mmm a / stdlib / public / runtime / Casting . cpp <nl> ppp b / stdlib / public / runtime / Casting . cpp <nl> namespace { <nl> return End ; <nl> } <nl> } ; <nl> - <nl> + <nl> struct ConformanceCacheEntry { <nl> private : <nl> const void * Type ; <nl> namespace { <nl> / / is more than enough invalid pointer values for any realistic generation <nl> / / number . It ' s a little easier to overflow on 32 - bit , so we need an extra <nl> / / bit there . <nl> - # if ! __LP64__ <nl> + # if ! __LP64__ <nl> bool Success ; <nl> - # endif <nl> - <nl> - public : <nl> + # endif <nl> + <nl> ConformanceCacheEntry ( const void * type , <nl> const ProtocolDescriptor * proto , <nl> uintptr_t Data , bool Success ) <nl> : Type ( type ) , Proto ( proto ) , Data ( Data ) <nl> - # if ! __LP64__ <nl> + # if ! __LP64__ <nl> , Success ( Success ) <nl> - # endif <nl> - { } <nl> - <nl> + # endif <nl> + { <nl> + # if __LP64__ <nl> + # if __APPLE__ <nl> + assert ( ( ! Success & & Data < = 0xFFFFFFFFU ) | | <nl> + ( Success & & Data > 0xFFFFFFFFU ) ) ; <nl> + # elif __linux__ <nl> + assert ( ( ! Success & & Data < = 0x0FFFU ) | | <nl> + ( Success & & Data > 0x0FFFU ) ) ; <nl> + # else <nl> + # error " port me " <nl> + # endif <nl> + # endif <nl> + } <nl> + <nl> + public : <nl> ConformanceCacheEntry ( ) = default ; <nl> - <nl> + <nl> + static ConformanceCacheEntry createSuccess ( <nl> + const void * type , const ProtocolDescriptor * proto , <nl> + const swift : : WitnessTable * witness ) { <nl> + return ConformanceCacheEntry ( type , proto , ( uintptr_t ) witness , true ) ; <nl> + } <nl> + <nl> + static ConformanceCacheEntry createFailure ( <nl> + const void * type , const ProtocolDescriptor * proto , <nl> + unsigned failureGeneration ) { <nl> + return ConformanceCacheEntry ( type , proto , ( uintptr_t ) failureGeneration , <nl> + false ) ; <nl> + } <nl> + <nl> / / / \ returns true if the entry represents an entry for the pair \ p type <nl> / / / and \ p proto . <nl> bool matches ( const void * type , const ProtocolDescriptor * proto ) { <nl> namespace { <nl> } <nl> <nl> bool isSuccessful ( ) const { <nl> - # if __LP64__ <nl> + # if __LP64__ <nl> + # if __APPLE__ <nl> return Data > 0xFFFFFFFFU ; <nl> - # else <nl> + # elif __linux__ <nl> + return Data > 0x0FFFU ; <nl> + # else <nl> + # error " port me " <nl> + # endif <nl> + # else <nl> return Success ; <nl> - # endif <nl> + # endif <nl> } <nl> <nl> / / / Get the cached witness table , if successful . <nl> static void _addImageProtocolConformances ( const mach_header * mh , <nl> # elif defined ( __ELF__ ) <nl> static int _addImageProtocolConformances ( struct dl_phdr_info * info , <nl> size_t size , void * / * data * / ) { <nl> - / / Skip the executable and ld - linux . so , which both have a null or empty name . <nl> - if ( ! info - > dlpi_name | | info - > dlpi_name [ 0 ] = = ' \ 0 ' ) <nl> - return 0 ; <nl> - <nl> - void * handle = dlopen ( info - > dlpi_name , RTLD_LAZY | RTLD_NOLOAD ) ; <nl> + void * handle ; <nl> + if ( ! info - > dlpi_name | | info - > dlpi_name [ 0 ] = = ' \ 0 ' ) { <nl> + handle = dlopen ( nullptr , RTLD_LAZY ) ; <nl> + } else <nl> + handle = dlopen ( info - > dlpi_name , RTLD_LAZY | RTLD_NOLOAD ) ; <nl> auto conformances = reinterpret_cast < const uint8_t * > ( <nl> dlsym ( handle , SWIFT_PROTOCOL_CONFORMANCES_SECTION ) ) ; <nl> <nl> swift : : swift_conformsToProtocol ( const Metadata * type , <nl> size_t hash = hashTypeProtocolPair ( type , protocol ) ; <nl> ConcurrentList < ConformanceCacheEntry > & Bucket = <nl> ConformanceCache . 
findOrAllocateNode ( hash ) ; <nl> - Bucket . push_front ( ConformanceCacheEntry ( type , protocol , <nl> - ProtocolConformanceGeneration , <nl> - false ) ) ; <nl> + Bucket . push_front ( ConformanceCacheEntry : : createFailure ( <nl> + type , protocol , ProtocolConformanceGeneration ) ) ; <nl> pthread_mutex_unlock ( & SectionsToScanLock ) ; <nl> return nullptr ; <nl> } <nl> swift : : swift_conformsToProtocol ( const Metadata * type , <nl> <nl> auto witness = record . getWitnessTable ( metadata ) ; <nl> if ( witness ) <nl> - Bucket . push_front ( ConformanceCacheEntry ( metadata , P , <nl> - ( uintptr_t ) witness , true ) ) ; <nl> + Bucket . push_front ( <nl> + ConformanceCacheEntry : : createSuccess ( metadata , P , witness ) ) ; <nl> else <nl> - Bucket . push_front ( ConformanceCacheEntry ( metadata , P , <nl> - ProtocolConformanceGeneration , <nl> - false ) ) ; <nl> + Bucket . push_front ( ConformanceCacheEntry : : createFailure ( <nl> + metadata , P , ProtocolConformanceGeneration ) ) ; <nl> <nl> / / If the record provides a nondependent witness table for all instances <nl> / / of a generic type , cache it for the generic pattern . <nl> swift : : swift_conformsToProtocol ( const Metadata * type , <nl> size_t hash = hashTypeProtocolPair ( R , P ) ; <nl> ConcurrentList < ConformanceCacheEntry > & Bucket = <nl> ConformanceCache . findOrAllocateNode ( hash ) ; <nl> - Bucket . push_front ( ConformanceCacheEntry ( R , P , <nl> - ( uintptr_t ) record . getStaticWitnessTable ( ) , <nl> - true ) ) ; <nl> + Bucket . push_front ( ConformanceCacheEntry : : createSuccess ( <nl> + R , P , record . getStaticWitnessTable ( ) ) ) ; <nl> } <nl> } <nl> } <nl> mmm a / test / 1_stdlib / ArrayCore . swift <nl> ppp b / test / 1_stdlib / ArrayCore . swift <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / RUN : % target - run - stdlib - swift | FileCheck % s <nl> <nl> - / / XFAIL : linux <nl> - <nl> import Swift <nl> <nl> / / = = = mmm class Tracked mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> mmm a / test / 1_stdlib / Interval . swift <nl> ppp b / test / 1_stdlib / Interval . swift <nl> func = = < T : Comparable > ( lhs : X < T > , rhs : X < T > ) - > Bool { <nl> return lhs . a = = rhs . a <nl> } <nl> <nl> - IntervalTestSuite . test ( " Printable / DebugPrintable " ) <nl> - . xfail ( . LinuxAny ( reason : " dynamic casting is not implemented completely " ) ) <nl> - . code { <nl> + IntervalTestSuite . test ( " Printable / DebugPrintable " ) { <nl> expectEqual ( " 0 . 0 . . < 0 . 1 " , toString ( X ( 0 . 0 ) . . < X ( 0 . 1 ) ) ) <nl> expectEqual ( " 0 . 0 . . . 0 . 1 " , toString ( X ( 0 . 0 ) . . . X ( 0 . 1 ) ) ) <nl> <nl> mmm a / test / 1_stdlib / Range . swift <nl> ppp b / test / 1_stdlib / Range . swift <nl> <nl> / / RUN : % target - run - simple - swift <nl> <nl> / / XFAIL : interpret <nl> - / / XFAIL : linux <nl> <nl> import StdlibUnittest <nl> <nl> mmm a / test / Interpreter / formal_access . swift <nl> ppp b / test / Interpreter / formal_access . swift <nl> <nl> / / RUN : % target - run - simple - swift | FileCheck % s <nl> <nl> - / / XFAIL : linux <nl> - <nl> class C : Printable { <nl> var value : Int <nl> init ( _ v : Int ) { value = v } <nl> doit ( & global [ 0 ] ) <nl> println ( " 8 . global [ 0 ] = = \ ( global [ 0 ] ) " ) <nl> println ( " End " ) <nl> / / CHECK - NEXT : 8 . 
global [ 0 ] = = 2 <nl> - / / CHECK - NEXT : End <nl> \ No newline at end of file <nl> + / / CHECK - NEXT : End <nl> mmm a / test / Interpreter / protocol_lookup . swift <nl> ppp b / test / Interpreter / protocol_lookup . swift <nl> <nl> - / / RUN : % target - run - simple - swift | FileCheck % s <nl> - <nl> - / / XFAIL : linux <nl> + / / RUN : % target - run - simple - swift | FileCheck % s - - check - prefix = CHECK - - check - prefix = CHECK - % target - runtime <nl> <nl> / / Note : JIT mode is checked in Interpreter / protocol_lookup_jit . swift . <nl> <nl> protocol Runcible : class { <nl> func runce ( ) <nl> } <nl> <nl> + # if _runtime ( _objc ) <nl> @ objc protocol Fungible : class { <nl> func funge ( ) <nl> } <nl> + # endif <nl> <nl> extension C : Runcible { <nl> func runce ( ) { println ( " C " ) } <nl> } <nl> <nl> + # if _runtime ( _objc ) <nl> extension D : Fungible { <nl> @ objc func funge ( ) { println ( " D " ) } <nl> } <nl> + # endif <nl> <nl> let c1 : AnyObject = C ( ) <nl> let c2 : Any = C ( ) <nl> if let fruncible = c2 as ? protocol < Fooable , Runcible > { <nl> println ( " not fooable and runcible " ) <nl> } <nl> <nl> + # if _runtime ( _objc ) <nl> let d : D = D ( ) <nl> let d1 : AnyObject = D ( ) <nl> let d2 : Any = D ( ) <nl> if let frungible = d1 as ? protocol < Fooable , Runcible , Fungible > { <nl> - frungible . foo ( ) / / CHECK - NEXT : D <nl> - frungible . runce ( ) / / CHECK - NEXT : C <nl> - frungible . funge ( ) / / CHECK - NEXT : D <nl> + frungible . foo ( ) / / CHECK - objc - NEXT : D <nl> + frungible . runce ( ) / / CHECK - objc - NEXT : C <nl> + frungible . funge ( ) / / CHECK - objc - NEXT : D <nl> } else { <nl> println ( " not fooable , runcible , and fungible " ) <nl> } <nl> let inttype : Any . Type = Int . self <nl> if let frungibleType = inttype as ? protocol < Fooable , Runcible , Fungible > . Type { <nl> println ( " is fooable , runcible , and fungible " ) <nl> } else { <nl> - println ( " not fooable , runcible , and fungible " ) / / CHECK - NEXT : not <nl> + println ( " not fooable , runcible , and fungible " ) / / CHECK - objc - NEXT : not <nl> } <nl> <nl> let dtype : Any . Type = D . self <nl> if let frungibleType = dtype as ? protocol < Fooable , Runcible , Fungible > . Type { <nl> - println ( " is fooable , runcible , and fungible " ) / / CHECK - NEXT : is <nl> + println ( " is fooable , runcible , and fungible " ) / / CHECK - objc - NEXT : is <nl> } else { <nl> println ( " not fooable , runcible , and fungible " ) <nl> } <nl> func genericCast < U : AnyObject > ( x : AnyObject , _ : U . Type ) - > U ? { <nl> } <nl> <nl> if let fungible = genericCast ( d , Fungible . self ) { <nl> - fungible . funge ( ) / / CHECK - NEXT : D <nl> + fungible . funge ( ) / / CHECK - objc - NEXT : D <nl> } else { <nl> println ( " not fungible " ) <nl> } <nl> + # endif <nl> + <nl> mmm a / test / Interpreter / protocol_lookup_jit . swift <nl> ppp b / test / Interpreter / protocol_lookup_jit . swift <nl> <nl> / / Test protocol_lookup . swift in JIT mode . <nl> - / / RUN : % swift - interpret % S / protocol_lookup . swift | FileCheck % S / protocol_lookup . swift <nl> + / / RUN : % swift - interpret % S / protocol_lookup . swift | FileCheck % s - - check - prefix = CHECK - - check - prefix = CHECK - % target - runtime <nl> <nl> / / REQUIRES : swift_interpreter <nl> <nl>
|
runtime: fix two bugs in swift_conformsToProtocol on Linux
|
apple/swift
|
85764fd6aa5657eafdd1d13951ad171764713499
|
2015-03-11T05:54:11Z
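One of the two fixes above replaces ConformanceCacheEntry's raw (data, bool) constructor with named createSuccess/createFailure factories that assert the encoding invariant: on 64-bit platforms a successful entry stores a witness-table pointer, which must sit above the small integer range used for failure generations. A hedged sketch of that factory-plus-invariant pattern, with an illustrative 0xFFFF threshold and an explicit flag rather than the real per-platform encoding:

#include <cassert>
#include <cstdint>

class CacheEntry {
  std::uintptr_t data_;
  bool success_;

  // Private constructor: callers must go through the named factories,
  // so the flag can never be paired with the wrong kind of payload.
  CacheEntry(std::uintptr_t data, bool success) : data_(data), success_(success) {
    // Illustrative invariant: pointers live above the failure-generation range.
    assert(success ? data > 0xFFFFu : data <= 0xFFFFu);
  }

public:
  static CacheEntry createSuccess(const void *witness) {
    return CacheEntry(reinterpret_cast<std::uintptr_t>(witness), true);
  }
  static CacheEntry createFailure(unsigned generation) {
    return CacheEntry(generation, false);
  }
  bool isSuccessful() const { return success_; }
};

int main() {
  static int witnessTable;  // stand-in for a real witness table
  CacheEntry hit = CacheEntry::createSuccess(&witnessTable);
  CacheEntry miss = CacheEntry::createFailure(7);
  return (hit.isSuccessful() && !miss.isSuccessful()) ? 0 : 1;
}

The other fix in the record stops skipping the main executable when scanning ELF images for protocol conformances: when dlpi_name is null or empty, it now calls dlopen with a null handle (the running program) instead of returning early.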
|
mmm a / xbmc / Application . cpp <nl> ppp b / xbmc / Application . cpp <nl> bool CApplication : : LoadUserWindows ( ) <nl> continue ; <nl> } <nl> pWindow - > SetVisibleCondition ( visibleCondition ) ; <nl> + pWindow - > SetLoadType ( CGUIWindow : : KEEP_IN_MEMORY ) ; <nl> g_windowManager . AddCustomWindow ( pWindow ) ; <nl> } <nl> } <nl> mmm a / xbmc / GUIInfoManager . cpp <nl> ppp b / xbmc / GUIInfoManager . cpp <nl> CStdString CGUIInfoManager : : GetSkinVariableString ( int info , <nl> <nl> return " " ; <nl> } <nl> + <nl> + bool CGUIInfoManager : : ConditionsChangedValues ( const std : : map < int , bool > & map ) <nl> + { <nl> + for ( std : : map < int , bool > : : const_iterator it = map . begin ( ) ; it ! = map . end ( ) ; it + + ) <nl> + { <nl> + if ( GetBoolValue ( it - > first ) ! = it - > second ) <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> mmm a / xbmc / GUIInfoManager . h <nl> ppp b / xbmc / GUIInfoManager . h <nl> class CGUIInfoManager : public IMsgTargetCallback , public Observable <nl> int RegisterSkinVariableString ( const INFO : : CSkinVariableString * info ) ; <nl> int TranslateSkinVariableString ( const CStdString & name , int context ) ; <nl> CStdString GetSkinVariableString ( int info , bool preferImage = false , const CGUIListItem * item = NULL ) ; <nl> + <nl> + / / / \ brief iterates through boolean conditions and compares their stored values to current values . Returns true if any condition changed value . <nl> + bool ConditionsChangedValues ( const std : : map < int , bool > & map ) ; <nl> protected : <nl> friend class INFO : : InfoSingle ; <nl> bool GetBool ( int condition , int contextWindow = 0 , const CGUIListItem * item = NULL ) ; <nl> mmm a / xbmc / addons / AddonCallbacksGUI . cpp <nl> ppp b / xbmc / addons / AddonCallbacksGUI . cpp <nl> CGUIAddonWindow : : CGUIAddonWindow ( int id , CStdString strXML , CAddon * addon ) <nl> , m_actionEvent ( true ) <nl> , m_addon ( addon ) <nl> { <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> CBOnInit = NULL ; <nl> CBOnFocus = NULL ; <nl> CBOnClick = NULL ; <nl> CGUIAddonWindowDialog : : CGUIAddonWindowDialog ( int id , CStdString strXML , CAddon * <nl> : CGUIAddonWindow ( id , strXML , addon ) <nl> { <nl> m_bRunning = false ; <nl> - m_loadOnDemand = false ; <nl> m_bIsDialog = true ; <nl> } <nl> <nl> mmm a / xbmc / addons / GUIDialogAddonInfo . cpp <nl> ppp b / xbmc / addons / GUIDialogAddonInfo . cpp <nl> CGUIDialogAddonInfo : : CGUIDialogAddonInfo ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_ADDON_INFO , " DialogAddonInfo . xml " ) <nl> { <nl> m_item = CFileItemPtr ( new CFileItem ) ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogAddonInfo : : ~ CGUIDialogAddonInfo ( void ) <nl> mmm a / xbmc / addons / Skin . cpp <nl> ppp b / xbmc / addons / Skin . cpp <nl> void CSkinInfo : : LoadIncludes ( ) <nl> m_includes . LoadIncludes ( includesPath ) ; <nl> } <nl> <nl> - void CSkinInfo : : ResolveIncludes ( TiXmlElement * node ) <nl> + void CSkinInfo : : ResolveIncludes ( TiXmlElement * node , std : : map < int , bool > * xmlIncludeConditions / * = NULL * / ) <nl> { <nl> - m_includes . ResolveIncludes ( node ) ; <nl> + if ( xmlIncludeConditions ) <nl> + xmlIncludeConditions - > clear ( ) ; <nl> + <nl> + m_includes . ResolveIncludes ( node , xmlIncludeConditions ) ; <nl> } <nl> <nl> int CSkinInfo : : GetStartWindow ( ) const <nl> mmm a / xbmc / addons / Skin . h <nl> ppp b / xbmc / addons / Skin . 
h <nl> class CSkinInfo : public CAddon <nl> * / <nl> static bool TranslateResolution ( const CStdString & name , RESOLUTION_INFO & res ) ; <nl> <nl> - void ResolveIncludes ( TiXmlElement * node ) ; <nl> + void ResolveIncludes ( TiXmlElement * node , std : : map < int , bool > * xmlIncludeConditions = NULL ) ; <nl> <nl> float GetEffectsSlowdown ( ) const { return m_effectsSlowDown ; } ; <nl> <nl> mmm a / xbmc / dialogs / GUIDialogBoxBase . cpp <nl> ppp b / xbmc / dialogs / GUIDialogBoxBase . cpp <nl> <nl> <nl> using namespace std ; <nl> <nl> + # define CONTROL_HEADING 1 <nl> + # define CONTROL_LINES_START 2 <nl> + # define CONTROL_CHOICES_START 10 <nl> + <nl> CGUIDialogBoxBase : : CGUIDialogBoxBase ( int id , const CStdString & xmlFile ) <nl> : CGUIDialog ( id , xmlFile ) <nl> { <nl> m_bConfirmed = false ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogBoxBase : : ~ CGUIDialogBoxBase ( void ) <nl> bool CGUIDialogBoxBase : : IsConfirmed ( ) const <nl> <nl> void CGUIDialogBoxBase : : SetHeading ( const CVariant & heading ) <nl> { <nl> - Initialize ( ) ; <nl> - CGUIMessage msg ( GUI_MSG_LABEL_SET , GetID ( ) , 1 ) ; <nl> - msg . SetLabel ( GetLocalized ( heading ) ) ; <nl> - <nl> - if ( g_application . IsCurrentThread ( ) ) <nl> - OnMessage ( msg ) ; <nl> - else <nl> - g_windowManager . SendThreadMessage ( msg , GetID ( ) ) ; <nl> + m_strHeading = GetLocalized ( heading ) ; <nl> + if ( IsActive ( ) ) <nl> + SET_CONTROL_LABEL_THREAD_SAFE ( 1 , m_strHeading ) ; <nl> } <nl> <nl> void CGUIDialogBoxBase : : SetLine ( int iLine , const CVariant & line ) <nl> { <nl> - Initialize ( ) ; <nl> - CGUIMessage msg ( GUI_MSG_LABEL_SET , GetID ( ) , iLine + 2 ) ; <nl> - msg . SetLabel ( GetLocalized ( line ) ) ; <nl> - <nl> - if ( g_application . IsCurrentThread ( ) ) <nl> - OnMessage ( msg ) ; <nl> - else <nl> - g_windowManager . SendThreadMessage ( msg , GetID ( ) ) ; <nl> + if ( iLine < 0 | | iLine > = DIALOG_MAX_LINES ) <nl> + return ; <nl> + <nl> + m_strLines [ iLine ] = GetLocalized ( line ) ; <nl> + if ( IsActive ( ) ) <nl> + SET_CONTROL_LABEL_THREAD_SAFE ( CONTROL_LINES_START + iLine , m_strLines [ iLine ] ) ; <nl> } <nl> <nl> void CGUIDialogBoxBase : : SetChoice ( int iButton , const CVariant & choice ) / / iButton = = 0 for no , 1 for yes <nl> { <nl> - Initialize ( ) ; <nl> - CGUIMessage msg ( GUI_MSG_LABEL_SET , GetID ( ) , 10 + iButton ) ; <nl> - msg . SetLabel ( GetLocalized ( choice ) ) ; <nl> - <nl> - if ( g_application . IsCurrentThread ( ) ) <nl> - OnMessage ( msg ) ; <nl> - else <nl> - g_windowManager . 
SendThreadMessage ( msg , GetID ( ) ) ; <nl> + if ( iButton < 0 | | iButton > = DIALOG_MAX_CHOICES ) <nl> + return ; <nl> + <nl> + m_strChoices [ iButton ] = GetLocalized ( choice ) ; <nl> + if ( IsActive ( ) ) <nl> + SET_CONTROL_LABEL_THREAD_SAFE ( CONTROL_CHOICES_START + iButton , m_strChoices [ iButton ] ) ; <nl> } <nl> <nl> void CGUIDialogBoxBase : : OnInitWindow ( ) <nl> { <nl> / / set focus to default <nl> m_lastControlID = m_defaultControl ; <nl> + <nl> + / / set control labels <nl> + SET_CONTROL_LABEL ( CONTROL_HEADING , m_strHeading ) ; <nl> + for ( int i = 0 ; i < DIALOG_MAX_LINES ; + + i ) <nl> + SET_CONTROL_LABEL ( CONTROL_LINES_START + i , m_strLines [ i ] ) ; <nl> + for ( int i = 0 ; i < DIALOG_MAX_CHOICES ; + + i ) <nl> + SET_CONTROL_LABEL ( CONTROL_CHOICES_START + i , m_strChoices [ i ] ) ; <nl> + <nl> CGUIDialog : : OnInitWindow ( ) ; <nl> } <nl> <nl> + void CGUIDialogBoxBase : : OnDeinitWindow ( int nextWindowID ) <nl> + { <nl> + / / make sure we set default labels for heading , lines and choices <nl> + SetHeading ( GetDefaultLabel ( CONTROL_HEADING ) ) ; <nl> + for ( int i = 0 ; i < DIALOG_MAX_LINES ; + + i ) <nl> + SetLine ( i , GetDefaultLabel ( CONTROL_LINES_START + i ) ) ; <nl> + for ( int i = 0 ; i < DIALOG_MAX_CHOICES ; + + i ) <nl> + SetChoice ( i , GetDefaultLabel ( CONTROL_CHOICES_START + i ) ) ; <nl> + <nl> + CGUIDialog : : OnDeinitWindow ( nextWindowID ) ; <nl> + } <nl> + <nl> CStdString CGUIDialogBoxBase : : GetLocalized ( const CVariant & var ) const <nl> { <nl> if ( var . isString ( ) ) <nl> CStdString CGUIDialogBoxBase : : GetLocalized ( const CVariant & var ) const <nl> return g_localizeStrings . Get ( ( uint32_t ) var . asInteger ( ) ) ; <nl> return " " ; <nl> } <nl> + <nl> + CStdString CGUIDialogBoxBase : : GetDefaultLabel ( int controlId ) const <nl> + { <nl> + int labelId = GetDefaultLabelID ( controlId ) ; <nl> + return labelId ! = - 1 ? g_localizeStrings . Get ( labelId ) : " " ; <nl> + } <nl> + <nl> + int CGUIDialogBoxBase : : GetDefaultLabelID ( int controlId ) const <nl> + { <nl> + return - 1 ; <nl> + } <nl> mmm a / xbmc / dialogs / GUIDialogBoxBase . h <nl> ppp b / xbmc / dialogs / GUIDialogBoxBase . h <nl> <nl> # include " guilib / GUIDialog . h " <nl> # include " utils / Variant . h " <nl> <nl> + # define DIALOG_MAX_LINES 3 <nl> + # define DIALOG_MAX_CHOICES 2 <nl> + <nl> class CGUIDialogBoxBase : <nl> public CGUIDialog <nl> { <nl> class CGUIDialogBoxBase : <nl> void SetHeading ( const CVariant & heading ) ; <nl> void SetChoice ( int iButton , const CVariant & choice ) ; <nl> protected : <nl> + CStdString GetDefaultLabel ( int controlId ) const ; <nl> + virtual int GetDefaultLabelID ( int controlId ) const ; <nl> / * ! \ brief Get a localized string from a variant <nl> If the varaint is already a string we return directly , else if it ' s an integer we return the corresponding <nl> localized string . <nl> class CGUIDialogBoxBase : <nl> CStdString GetLocalized ( const CVariant & var ) const ; <nl> <nl> virtual void OnInitWindow ( ) ; <nl> + virtual void OnDeinitWindow ( int nextWindowID ) ; <nl> + <nl> bool m_bConfirmed ; <nl> + <nl> + / / actual strings <nl> + std : : string m_strHeading ; <nl> + std : : string m_strLines [ DIALOG_MAX_LINES ] ; <nl> + std : : string m_strChoices [ DIALOG_MAX_CHOICES ] ; <nl> } ; <nl> mmm a / xbmc / dialogs / GUIDialogBusy . cpp <nl> ppp b / xbmc / dialogs / GUIDialogBusy . cpp <nl> <nl> CGUIDialogBusy : : CGUIDialogBusy ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_BUSY , " DialogBusy . 
xml " ) , m_bLastVisible ( false ) <nl> { <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> m_bModal = true ; <nl> } <nl> <nl> mmm a / xbmc / dialogs / GUIDialogButtonMenu . cpp <nl> ppp b / xbmc / dialogs / GUIDialogButtonMenu . cpp <nl> <nl> CGUIDialogButtonMenu : : CGUIDialogButtonMenu ( int id , const CStdString & xmlFile ) <nl> : CGUIDialog ( id , xmlFile ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogButtonMenu : : ~ CGUIDialogButtonMenu ( void ) <nl> mmm a / xbmc / dialogs / GUIDialogContextMenu . cpp <nl> ppp b / xbmc / dialogs / GUIDialogContextMenu . cpp <nl> void CContextButtons : : Add ( unsigned int button , int label ) <nl> push_back ( pair < unsigned int , CStdString > ( button , g_localizeStrings . Get ( label ) ) ) ; <nl> } <nl> <nl> - CGUIDialogContextMenu : : CGUIDialogContextMenu ( void ) : CGUIDialog ( WINDOW_DIALOG_CONTEXT_MENU , " DialogContextMenu . xml " ) <nl> + CGUIDialogContextMenu : : CGUIDialogContextMenu ( void ) <nl> + : CGUIDialog ( WINDOW_DIALOG_CONTEXT_MENU , " DialogContextMenu . xml " ) <nl> { <nl> m_clickedButton = - 1 ; <nl> + m_backgroundImageSize = 0 ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogContextMenu : : ~ CGUIDialogContextMenu ( void ) <nl> void CGUIDialogContextMenu : : SetupButtons ( ) <nl> if ( pGroupList - > GetOrientation ( ) = = VERTICAL ) <nl> { <nl> / / keep gap between bottom edges of grouplist and background image <nl> - pControl - > SetHeight ( pControl - > GetHeight ( ) - pGroupList - > Size ( ) + pGroupList - > GetHeight ( ) ) ; <nl> + pControl - > SetHeight ( m_backgroundImageSize - pGroupList - > Size ( ) + pGroupList - > GetHeight ( ) ) ; <nl> } <nl> else <nl> { <nl> / / keep gap between right edges of grouplist and background image <nl> - pControl - > SetWidth ( pControl - > GetWidth ( ) - pGroupList - > Size ( ) + pGroupList - > GetWidth ( ) ) ; <nl> + pControl - > SetWidth ( m_backgroundImageSize - pGroupList - > Size ( ) + pGroupList - > GetWidth ( ) ) ; <nl> } <nl> } <nl> # if PRE_SKIN_VERSION_11_COMPATIBILITY <nl> CMediaSource * CGUIDialogContextMenu : : GetShare ( const CStdString & type , const CFil <nl> <nl> void CGUIDialogContextMenu : : OnWindowLoaded ( ) <nl> { <nl> + m_coordX = m_posX ; <nl> + m_coordY = m_posY ; <nl> + <nl> + const CGUIControlGroupList * pGroupList = NULL ; <nl> + const CGUIControl * pControl = GetControl ( GROUP_LIST ) ; <nl> + if ( pControl & & pControl - > GetControlType ( ) = = GUICONTROL_GROUPLIST ) <nl> + pGroupList = ( CGUIControlGroupList * ) pControl ; <nl> + <nl> + pControl = ( CGUIControl * ) GetControl ( BACKGROUND_IMAGE ) ; <nl> + if ( pControl & & pGroupList ) <nl> + { <nl> + if ( pGroupList - > GetOrientation ( ) = = VERTICAL ) <nl> + m_backgroundImageSize = pControl - > GetHeight ( ) ; <nl> + else <nl> + m_backgroundImageSize = pControl - > GetWidth ( ) ; <nl> + } <nl> + <nl> CGUIDialog : : OnWindowLoaded ( ) ; <nl> - SetInitialVisibility ( ) ; <nl> - SetupButtons ( ) ; <nl> } <nl> <nl> - void CGUIDialogContextMenu : : OnWindowUnload ( ) <nl> + void CGUIDialogContextMenu : : OnDeinitWindow ( int nextWindowID ) <nl> { <nl> + / / we can ' t be sure that controls are removed on window unload <nl> + / / we have to remove them to be sure that they won ' t stay for next use of context menu <nl> + for ( unsigned int i = 0 ; i < m_buttons . 
size ( ) ; i + + ) <nl> + { <nl> + const CGUIControl * control = GetControl ( BUTTON_START + i ) ; <nl> + if ( control ) <nl> + RemoveControl ( control ) ; <nl> + } <nl> + <nl> m_buttons . clear ( ) ; <nl> - CGUIDialog : : OnWindowUnload ( ) ; <nl> + CGUIDialog : : OnDeinitWindow ( nextWindowID ) ; <nl> } <nl> <nl> CStdString CGUIDialogContextMenu : : GetDefaultShareNameByType ( const CStdString & strType ) <nl> int CGUIDialogContextMenu : : ShowAndGetChoice ( const CContextButtons & choices ) <nl> { <nl> pMenu - > m_buttons = choices ; <nl> pMenu - > Initialize ( ) ; <nl> + pMenu - > SetupButtons ( ) ; <nl> pMenu - > PositionAtCurrentFocus ( ) ; <nl> pMenu - > DoModal ( ) ; <nl> return pMenu - > m_clickedButton ; <nl> void CGUIDialogContextMenu : : PositionAtCurrentFocus ( ) <nl> { <nl> CPoint pos = focusedControl - > GetRenderPosition ( ) + CPoint ( focusedControl - > GetWidth ( ) * 0 . 5f , focusedControl - > GetHeight ( ) * 0 . 5f ) <nl> + window - > GetRenderPosition ( ) ; <nl> - SetPosition ( m_posX + pos . x - GetWidth ( ) * 0 . 5f , m_posY + pos . y - GetHeight ( ) * 0 . 5f ) ; <nl> + SetPosition ( m_coordX + pos . x - GetWidth ( ) * 0 . 5f , m_coordY + pos . y - GetHeight ( ) * 0 . 5f ) ; <nl> return ; <nl> } <nl> } <nl> mmm a / xbmc / dialogs / GUIDialogContextMenu . h <nl> ppp b / xbmc / dialogs / GUIDialogContextMenu . h <nl> class CGUIDialogContextMenu : <nl> virtual float GetHeight ( ) const ; <nl> virtual void OnInitWindow ( ) ; <nl> virtual void OnWindowLoaded ( ) ; <nl> - virtual void OnWindowUnload ( ) ; <nl> + virtual void OnDeinitWindow ( int nextWindowID ) ; <nl> static CStdString GetDefaultShareNameByType ( const CStdString & strType ) ; <nl> static void SetDefault ( const CStdString & strType , const CStdString & strDefault ) ; <nl> static void ClearDefault ( const CStdString & strType ) ; <nl> static CMediaSource * GetShare ( const CStdString & type , const CFileItem * item ) ; <nl> <nl> private : <nl> + float m_coordX , m_coordY ; <nl> + / / / \ brief Stored size of background image ( height or width depending on grouplist orientation ) <nl> + float m_backgroundImageSize ; <nl> int m_clickedButton ; <nl> CContextButtons m_buttons ; <nl> } ; <nl> mmm a / xbmc / dialogs / GUIDialogExtendedProgressBar . cpp <nl> ppp b / xbmc / dialogs / GUIDialogExtendedProgressBar . cpp <nl> void CGUIDialogProgressBarHandle : : SetProgress ( int currentItem , int itemCount ) <nl> CGUIDialogExtendedProgressBar : : CGUIDialogExtendedProgressBar ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_EXT_PROGRESS , " DialogExtendedProgressBar . xml " ) <nl> { <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> m_iLastSwitchTime = 0 ; <nl> m_iCurrentItem = 0 ; <nl> } <nl> mmm a / xbmc / dialogs / GUIDialogFavourites . cpp <nl> ppp b / xbmc / dialogs / GUIDialogFavourites . cpp <nl> CGUIDialogFavourites : : CGUIDialogFavourites ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_FAVOURITES , " DialogFavourites . xml " ) <nl> { <nl> m_favourites = new CFileItemList ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogFavourites : : ~ CGUIDialogFavourites ( void ) <nl> mmm a / xbmc / dialogs / GUIDialogFileBrowser . cpp <nl> ppp b / xbmc / dialogs / GUIDialogFileBrowser . cpp <nl> CGUIDialogFileBrowser : : CGUIDialogFileBrowser ( ) <nl> m_thumbLoader . 
SetObserver ( this ) ; <nl> m_flipEnabled = false ; <nl> m_multipleSelection = false ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogFileBrowser : : ~ CGUIDialogFileBrowser ( ) <nl> mmm a / xbmc / dialogs / GUIDialogKaiToast . cpp <nl> ppp b / xbmc / dialogs / GUIDialogKaiToast . cpp <nl> CGUIDialogKaiToast : : CGUIDialogKaiToast ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_KAI_TOAST , " DialogKaiToast . xml " ) <nl> { <nl> m_defaultIcon = " " ; <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> m_timer = 0 ; <nl> m_toastDisplayTime = 0 ; <nl> m_toastMessageTime = 0 ; <nl> mmm a / xbmc / dialogs / GUIDialogKeyboardGeneric . cpp <nl> ppp b / xbmc / dialogs / GUIDialogKeyboardGeneric . cpp <nl> CGUIDialogKeyboardGeneric : : CGUIDialogKeyboardGeneric ( ) <nl> m_keyType = LOWER ; <nl> m_strHeading = " " ; <nl> m_lastRemoteClickTime = 0 ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> void CGUIDialogKeyboardGeneric : : OnInitWindow ( ) <nl> mmm a / xbmc / dialogs / GUIDialogMediaSource . cpp <nl> ppp b / xbmc / dialogs / GUIDialogMediaSource . cpp <nl> CGUIDialogMediaSource : : CGUIDialogMediaSource ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_MEDIA_SOURCE , " DialogMediaSource . xml " ) <nl> { <nl> m_paths = new CFileItemList ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogMediaSource : : ~ CGUIDialogMediaSource ( ) <nl> vector < CStdString > CGUIDialogMediaSource : : GetPaths ( ) <nl> } <nl> return paths ; <nl> } <nl> + <nl> + void CGUIDialogMediaSource : : OnDeinitWindow ( int nextWindowID ) <nl> + { <nl> + CGUIDialog : : OnDeinitWindow ( nextWindowID ) ; <nl> + <nl> + / / clear paths container <nl> + CGUIMessage msg ( GUI_MSG_LABEL_RESET , GetID ( ) , CONTROL_PATH , 0 ) ; <nl> + OnMessage ( msg ) ; <nl> + } <nl> mmm a / xbmc / dialogs / GUIDialogMediaSource . h <nl> ppp b / xbmc / dialogs / GUIDialogMediaSource . h <nl> class CGUIDialogMediaSource : <nl> CGUIDialogMediaSource ( void ) ; <nl> virtual ~ CGUIDialogMediaSource ( void ) ; <nl> virtual bool OnMessage ( CGUIMessage & message ) ; <nl> + virtual void OnDeinitWindow ( int nextWindowID ) ; <nl> virtual bool OnBack ( int actionID ) ; <nl> virtual void OnWindowLoaded ( ) ; <nl> static bool ShowAndAddMediaSource ( const CStdString & type ) ; <nl> mmm a / xbmc / dialogs / GUIDialogMuteBug . cpp <nl> ppp b / xbmc / dialogs / GUIDialogMuteBug . cpp <nl> <nl> CGUIDialogMuteBug : : CGUIDialogMuteBug ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_MUTE_BUG , " DialogMuteBug . xml " ) <nl> { <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> } <nl> <nl> CGUIDialogMuteBug : : ~ CGUIDialogMuteBug ( void ) <nl> mmm a / xbmc / dialogs / GUIDialogNumeric . cpp <nl> ppp b / xbmc / dialogs / GUIDialogNumeric . cpp <nl> CGUIDialogNumeric : : CGUIDialogNumeric ( void ) <nl> m_block = 0 ; <nl> memset ( & m_datetime , 0 , sizeof ( SYSTEMTIME ) ) ; <nl> m_dirty = false ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogNumeric : : ~ CGUIDialogNumeric ( void ) <nl> mmm a / xbmc / dialogs / GUIDialogOK . cpp <nl> ppp b / xbmc / dialogs / GUIDialogOK . 
cpp <nl> void CGUIDialogOK : : ShowAndGetInput ( const CVariant & heading , const CVariant & line <nl> dialog - > SetLine ( 2 , line2 ) ; <nl> dialog - > DoModal ( ) ; <nl> } <nl> + <nl> + int CGUIDialogOK : : GetDefaultLabelID ( int controlId ) const <nl> + { <nl> + if ( controlId = = ID_BUTTON_OK ) <nl> + return 186 ; <nl> + return CGUIDialogBoxBase : : GetDefaultLabelID ( controlId ) ; <nl> + } <nl> mmm a / xbmc / dialogs / GUIDialogOK . h <nl> ppp b / xbmc / dialogs / GUIDialogOK . h <nl> class CGUIDialogOK : <nl> virtual ~ CGUIDialogOK ( void ) ; <nl> virtual bool OnMessage ( CGUIMessage & message ) ; <nl> static void ShowAndGetInput ( const CVariant & heading , const CVariant & line0 , const CVariant & line1 , const CVariant & line2 ) ; <nl> + protected : <nl> + virtual int GetDefaultLabelID ( int controlId ) const ; <nl> } ; <nl> mmm a / xbmc / dialogs / GUIDialogPlayerControls . cpp <nl> ppp b / xbmc / dialogs / GUIDialogPlayerControls . cpp <nl> <nl> CGUIDialogPlayerControls : : CGUIDialogPlayerControls ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_PLAYER_CONTROLS , " PlayerControls . xml " ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogPlayerControls : : ~ CGUIDialogPlayerControls ( void ) <nl> mmm a / xbmc / dialogs / GUIDialogProgress . cpp <nl> ppp b / xbmc / dialogs / GUIDialogProgress . cpp <nl> void CGUIDialogProgress : : ShowProgressBar ( bool bOnOff ) <nl> g_windowManager . SendThreadMessage ( msg , GetID ( ) ) ; <nl> } <nl> <nl> - void CGUIDialogProgress : : SetHeading ( const string & strLine ) <nl> + int CGUIDialogProgress : : GetDefaultLabelID ( int controlId ) const <nl> { <nl> - m_strHeading = strLine ; <nl> - CGUIDialogBoxBase : : SetHeading ( m_strHeading ) ; <nl> + if ( controlId = = CONTROL_CANCEL_BUTTON ) <nl> + return 222 ; <nl> + return CGUIDialogBoxBase : : GetDefaultLabelID ( controlId ) ; <nl> } <nl> - <nl> - void CGUIDialogProgress : : SetHeading ( int iString ) <nl> - { <nl> - m_strHeading = g_localizeStrings . Get ( iString ) ; <nl> - CGUIDialogBoxBase : : SetHeading ( m_strHeading ) ; <nl> - } <nl> - <nl> mmm a / xbmc / dialogs / GUIDialogProgress . h <nl> ppp b / xbmc / dialogs / GUIDialogProgress . h <nl> class CGUIDialogProgress : <nl> void SetPercentage ( int iPercentage ) ; <nl> int GetPercentage ( ) const { return m_percentage ; } ; <nl> void ShowProgressBar ( bool bOnOff ) ; <nl> - void SetHeading ( const std : : string & strLine ) ; <nl> - void SetHeading ( int iString ) ; / / for convenience to lookup in strings . xml <nl> <nl> / / Implements IProgressCallback <nl> virtual void SetProgressMax ( int iMax ) ; <nl> class CGUIDialogProgress : <nl> void SetCanCancel ( bool bCanCancel ) ; <nl> <nl> protected : <nl> + virtual int GetDefaultLabelID ( int controlId ) const ; <nl> + <nl> bool m_bCanCancel ; <nl> bool m_bCanceled ; <nl> - std : : string m_strHeading ; <nl> <nl> int m_iCurrent ; <nl> int m_iMax ; <nl> mmm a / xbmc / dialogs / GUIDialogSeekBar . cpp <nl> ppp b / xbmc / dialogs / GUIDialogSeekBar . cpp <nl> <nl> CGUIDialogSeekBar : : CGUIDialogSeekBar ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_SEEK_BAR , " DialogSeekBar . xml " ) <nl> { <nl> - m_loadOnDemand = false ; / / the application class handles our resources <nl> + m_loadType = LOAD_ON_GUI_INIT ; / / the application class handles our resources <nl> } <nl> <nl> CGUIDialogSeekBar : : ~ CGUIDialogSeekBar ( void ) <nl> mmm a / xbmc / dialogs / GUIDialogSelect . cpp <nl> ppp b / xbmc / dialogs / GUIDialogSelect . 
cpp <nl> CGUIDialogSelect : : CGUIDialogSelect ( void ) <nl> m_multiSelection = false ; <nl> m_vecList = m_vecListInternal ; <nl> m_iSelected = - 1 ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogSelect : : ~ CGUIDialogSelect ( void ) <nl> bool CGUIDialogSelect : : OnMessage ( CGUIMessage & message ) <nl> case GUI_MSG_WINDOW_DEINIT : <nl> { <nl> CGUIDialog : : OnMessage ( message ) ; <nl> - m_viewControl . Reset ( ) ; <nl> + m_viewControl . Clear ( ) ; <nl> <nl> m_bButtonEnabled = false ; <nl> m_useDetails = false ; <nl> void CGUIDialogSelect : : OnInitWindow ( ) <nl> if ( m_iSelected > = 0 ) <nl> m_viewControl . SetSelectedItem ( m_iSelected ) ; <nl> } <nl> + <nl> + void CGUIDialogSelect : : OnWindowUnload ( ) <nl> + { <nl> + CGUIDialog : : OnWindowUnload ( ) ; <nl> + m_viewControl . Reset ( ) ; <nl> + } <nl> mmm a / xbmc / dialogs / GUIDialogSelect . h <nl> ppp b / xbmc / dialogs / GUIDialogSelect . h <nl> class CGUIDialogSelect : <nl> virtual CGUIControl * GetFirstFocusableControl ( int id ) ; <nl> virtual void OnWindowLoaded ( ) ; <nl> virtual void OnInitWindow ( ) ; <nl> + virtual void OnWindowUnload ( ) ; <nl> <nl> bool m_bButtonEnabled ; <nl> bool m_bButtonPressed ; <nl> mmm a / xbmc / dialogs / GUIDialogSlider . cpp <nl> ppp b / xbmc / dialogs / GUIDialogSlider . cpp <nl> CGUIDialogSlider : : CGUIDialogSlider ( void ) <nl> { <nl> m_callback = NULL ; <nl> m_callbackData = NULL ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogSlider : : ~ CGUIDialogSlider ( void ) <nl> bool CGUIDialogSlider : : OnMessage ( CGUIMessage & message ) <nl> } <nl> } <nl> break ; <nl> + case GUI_MSG_WINDOW_DEINIT : <nl> + m_callback = NULL ; <nl> + m_callbackData = NULL ; <nl> + break ; <nl> } <nl> return CGUIDialog : : OnMessage ( message ) ; <nl> } <nl> mmm a / xbmc / dialogs / GUIDialogSmartPlaylistEditor . cpp <nl> ppp b / xbmc / dialogs / GUIDialogSmartPlaylistEditor . cpp <nl> CGUIDialogSmartPlaylistEditor : : CGUIDialogSmartPlaylistEditor ( void ) <nl> { <nl> m_cancelled = false ; <nl> m_ruleLabels = new CFileItemList ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogSmartPlaylistEditor : : ~ CGUIDialogSmartPlaylistEditor ( ) <nl> bool CGUIDialogSmartPlaylistEditor : : OnMessage ( CGUIMessage & message ) <nl> return true ; <nl> } <nl> break ; <nl> - case GUI_MSG_WINDOW_INIT : <nl> - { <nl> - m_cancelled = false ; <nl> - UpdateButtons ( ) ; <nl> - } <nl> - break ; <nl> - case GUI_MSG_WINDOW_DEINIT : <nl> - { <nl> - CGUIDialog : : OnMessage ( message ) ; <nl> - / / clear the rule list <nl> - CGUIMessage msg ( GUI_MSG_LABEL_RESET , GetID ( ) , CONTROL_RULE_LIST ) ; <nl> - OnMessage ( msg ) ; <nl> - m_ruleLabels - > Clear ( ) ; <nl> - } <nl> - break ; <nl> case GUI_MSG_FOCUSED : <nl> if ( message . GetControlId ( ) = = CONTROL_RULE_REMOVE | | <nl> message . GetControlId ( ) = = CONTROL_RULE_EDIT ) <nl> void CGUIDialogSmartPlaylistEditor : : OnWindowLoaded ( ) <nl> msg . SetLabel ( label ) ; <nl> OnMessage ( msg ) ; <nl> } <nl> + } <nl> + <nl> + void CGUIDialogSmartPlaylistEditor : : OnInitWindow ( ) <nl> + { <nl> + m_cancelled = false ; <nl> + UpdateButtons ( ) ; <nl> + <nl> SendMessage ( GUI_MSG_ITEM_SELECT , CONTROL_LIMIT , m_playlist . m_limit ) ; <nl> <nl> vector < PLAYLIST_TYPE > allowedTypes ; <nl> void CGUIDialogSmartPlaylistEditor : : OnWindowLoaded ( ) <nl> <nl> SendMessage ( GUI_MSG_ITEM_SELECT , CONTROL_TYPE , type ) ; <nl> m_playlist . 
SetType ( ConvertType ( type ) ) ; <nl> + <nl> + CGUIDialog : : OnInitWindow ( ) ; <nl> + } <nl> + <nl> + void CGUIDialogSmartPlaylistEditor : : OnDeinitWindow ( int nextWindowID ) <nl> + { <nl> + CGUIDialog : : OnDeinitWindow ( nextWindowID ) ; <nl> + CGUIMessage msg ( GUI_MSG_LABEL_RESET , GetID ( ) , CONTROL_RULE_LIST ) ; <nl> + OnMessage ( msg ) ; <nl> + m_ruleLabels - > Clear ( ) ; <nl> } <nl> <nl> CGUIDialogSmartPlaylistEditor : : PLAYLIST_TYPE CGUIDialogSmartPlaylistEditor : : ConvertType ( const CStdString & type ) <nl> mmm a / xbmc / dialogs / GUIDialogSmartPlaylistEditor . h <nl> ppp b / xbmc / dialogs / GUIDialogSmartPlaylistEditor . h <nl> class CGUIDialogSmartPlaylistEditor : <nl> virtual bool OnMessage ( CGUIMessage & message ) ; <nl> virtual bool OnBack ( int actionID ) ; <nl> virtual void OnWindowLoaded ( ) ; <nl> + virtual void OnInitWindow ( ) ; <nl> + virtual void OnDeinitWindow ( int nextWindowID ) ; <nl> <nl> static bool EditPlaylist ( const CStdString & path , const CStdString & type = " " ) ; <nl> static bool NewPlaylist ( const CStdString & type ) ; <nl> mmm a / xbmc / dialogs / GUIDialogSmartPlaylistRule . cpp <nl> ppp b / xbmc / dialogs / GUIDialogSmartPlaylistRule . cpp <nl> CGUIDialogSmartPlaylistRule : : CGUIDialogSmartPlaylistRule ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_SMART_PLAYLIST_RULE , " SmartPlaylistRule . xml " ) <nl> { <nl> m_cancelled = false ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogSmartPlaylistRule : : ~ CGUIDialogSmartPlaylistRule ( ) <nl> void CGUIDialogSmartPlaylistRule : : AddOperatorLabel ( CSmartPlaylistRule : : SEARCH_OP <nl> OnMessage ( select ) ; <nl> } <nl> <nl> - void CGUIDialogSmartPlaylistRule : : OnInitWindow ( ) <nl> + void CGUIDialogSmartPlaylistRule : : OnWindowLoaded ( ) <nl> { <nl> + CGUIWindow : : OnWindowLoaded ( ) ; <nl> ChangeButtonToEdit ( CONTROL_VALUE , true ) ; / / true for single label <nl> + } <nl> + <nl> + void CGUIDialogSmartPlaylistRule : : OnInitWindow ( ) <nl> + { <nl> CGUIDialog : : OnInitWindow ( ) ; <nl> + <nl> + SendMessage ( GUI_MSG_LABEL_RESET , CONTROL_FIELD ) ; <nl> / / add the fields to the field spincontrol <nl> vector < Field > fields = CSmartPlaylistRule : : GetFields ( m_type ) ; <nl> for ( unsigned int i = 0 ; i < fields . size ( ) ; i + + ) <nl> void CGUIDialogSmartPlaylistRule : : OnInitWindow ( ) <nl> UpdateButtons ( ) ; <nl> } <nl> <nl> + void CGUIDialogSmartPlaylistRule : : OnDeinitWindow ( int nextWindowID ) <nl> + { <nl> + CGUIDialog : : OnDeinitWindow ( nextWindowID ) ; <nl> + <nl> + / / reset field spincontrolex <nl> + SendMessage ( GUI_MSG_LABEL_RESET , CONTROL_FIELD ) ; <nl> + / / reset operator spincontrolex <nl> + SendMessage ( GUI_MSG_LABEL_RESET , CONTROL_OPERATOR ) ; <nl> + } <nl> + <nl> bool CGUIDialogSmartPlaylistRule : : EditRule ( CSmartPlaylistRule & rule , const CStdString & type ) <nl> { <nl> CGUIDialogSmartPlaylistRule * editor = ( CGUIDialogSmartPlaylistRule * ) g_windowManager . GetWindow ( WINDOW_DIALOG_SMART_PLAYLIST_RULE ) ; <nl> mmm a / xbmc / dialogs / GUIDialogSmartPlaylistRule . h <nl> ppp b / xbmc / dialogs / GUIDialogSmartPlaylistRule . 
h <nl> class CGUIDialogSmartPlaylistRule : <nl> virtual bool OnMessage ( CGUIMessage & message ) ; <nl> virtual bool OnBack ( int actionID ) ; <nl> virtual void OnInitWindow ( ) ; <nl> + virtual void OnWindowLoaded ( ) ; <nl> + virtual void OnDeinitWindow ( int nextWindowID ) ; <nl> <nl> static bool EditRule ( CSmartPlaylistRule & rule , const CStdString & type = " songs " ) ; <nl> <nl> mmm a / xbmc / dialogs / GUIDialogTextViewer . cpp <nl> ppp b / xbmc / dialogs / GUIDialogTextViewer . cpp <nl> <nl> <nl> CGUIDialogTextViewer : : CGUIDialogTextViewer ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_TEXT_VIEWER , " DialogTextViewer . xml " ) <nl> - { } <nl> + { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> + } <nl> <nl> CGUIDialogTextViewer : : ~ CGUIDialogTextViewer ( void ) <nl> { } <nl> void CGUIDialogTextViewer : : SetHeading ( ) <nl> OnMessage ( msg ) ; <nl> } <nl> <nl> + void CGUIDialogTextViewer : : OnDeinitWindow ( int nextWindowID ) <nl> + { <nl> + CGUIDialog : : OnDeinitWindow ( nextWindowID ) ; <nl> + <nl> + / / reset text area <nl> + CGUIMessage msgReset ( GUI_MSG_LABEL_RESET , GetID ( ) , CONTROL_TEXTAREA ) ; <nl> + OnMessage ( msgReset ) ; <nl> + <nl> + / / reset heading <nl> + SET_CONTROL_LABEL ( CONTROL_HEADING , " " ) ; <nl> + } <nl> mmm a / xbmc / dialogs / GUIDialogTextViewer . h <nl> ppp b / xbmc / dialogs / GUIDialogTextViewer . h <nl> class CGUIDialogTextViewer : <nl> void SetText ( const CStdString & strText ) { m_strText = strText ; } <nl> void SetHeading ( const CStdString & strHeading ) { m_strHeading = strHeading ; } <nl> protected : <nl> + virtual void OnDeinitWindow ( int nextWindowID ) ; <nl> + <nl> CStdString m_strText ; <nl> CStdString m_strHeading ; <nl> <nl> mmm a / xbmc / dialogs / GUIDialogVolumeBar . cpp <nl> ppp b / xbmc / dialogs / GUIDialogVolumeBar . cpp <nl> <nl> CGUIDialogVolumeBar : : CGUIDialogVolumeBar ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_VOLUME_BAR , " DialogVolumeBar . xml " ) <nl> { <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> SetAutoClose ( VOLUME_BAR_DISPLAY_TIME ) ; <nl> } <nl> <nl> mmm a / xbmc / dialogs / GUIDialogYesNo . cpp <nl> ppp b / xbmc / dialogs / GUIDialogYesNo . cpp <nl> <nl> # include " GUIDialogYesNo . h " <nl> # include " guilib / GUIWindowManager . h " <nl> <nl> + # define CONTROL_NO_BUTTON 10 <nl> + # define CONTROL_YES_BUTTON 11 <nl> + <nl> CGUIDialogYesNo : : CGUIDialogYesNo ( int overrideId / * = - 1 * / ) <nl> : CGUIDialogBoxBase ( overrideId = = - 1 ? WINDOW_DIALOG_YES_NO : overrideId , " DialogYesNo . xml " ) <nl> { <nl> bool CGUIDialogYesNo : : OnMessage ( CGUIMessage & message ) <nl> int iAction = message . GetParam1 ( ) ; <nl> if ( 1 | | ACTION_SELECT_ITEM = = iAction ) <nl> { <nl> - if ( iControl = = 10 ) <nl> + if ( iControl = = CONTROL_NO_BUTTON ) <nl> { <nl> m_bConfirmed = false ; <nl> Close ( ) ; <nl> return true ; <nl> } <nl> - if ( iControl = = 11 ) <nl> + if ( iControl = = CONTROL_YES_BUTTON ) <nl> { <nl> m_bConfirmed = true ; <nl> Close ( ) ; <nl> bool CGUIDialogYesNo : : ShowAndGetInput ( int heading , int line0 , int line1 , int lin <nl> dialog - > SetAutoClose ( autoCloseTime ) ; <nl> if ( iNoLabel ! = - 1 ) <nl> dialog - > SetChoice ( 0 , iNoLabel ) ; <nl> + else <nl> + dialog - > SetChoice ( 0 , 106 ) ; <nl> if ( iYesLabel ! 
= - 1 ) <nl> dialog - > SetChoice ( 1 , iYesLabel ) ; <nl> + else <nl> + dialog - > SetChoice ( 1 , 107 ) ; <nl> dialog - > m_bCanceled = false ; <nl> dialog - > DoModal ( ) ; <nl> bCanceled = dialog - > m_bCanceled ; <nl> bool CGUIDialogYesNo : : ShowAndGetInput ( const CStdString & heading , const CStdStrin <nl> dialog - > m_bCanceled = false ; <nl> if ( ! noLabel . IsEmpty ( ) ) <nl> dialog - > SetChoice ( 0 , noLabel ) ; <nl> + else <nl> + dialog - > SetChoice ( 0 , 106 ) ; <nl> if ( ! yesLabel . IsEmpty ( ) ) <nl> dialog - > SetChoice ( 1 , yesLabel ) ; <nl> + else <nl> + dialog - > SetChoice ( 1 , 107 ) ; <nl> dialog - > DoModal ( ) ; <nl> bCanceled = dialog - > m_bCanceled ; <nl> return ( dialog - > IsConfirmed ( ) ) ? true : false ; <nl> } <nl> <nl> + int CGUIDialogYesNo : : GetDefaultLabelID ( int controlId ) const <nl> + { <nl> + if ( controlId = = CONTROL_NO_BUTTON ) <nl> + return 106 ; <nl> + else if ( controlId = = CONTROL_YES_BUTTON ) <nl> + return 107 ; <nl> + return CGUIDialogBoxBase : : GetDefaultLabelID ( controlId ) ; <nl> + } <nl> mmm a / xbmc / dialogs / GUIDialogYesNo . h <nl> ppp b / xbmc / dialogs / GUIDialogYesNo . h <nl> class CGUIDialogYesNo : <nl> static bool ShowAndGetInput ( const CStdString & heading , const CStdString & line0 , const CStdString & line1 , const CStdString & line2 , const CStdString & noLabel = " " , const CStdString & yesLabel = " " ) ; <nl> static bool ShowAndGetInput ( const CStdString & heading , const CStdString & line0 , const CStdString & line1 , const CStdString & line2 , bool & bCanceled , const CStdString & noLabel = " " , const CStdString & yesLabel = " " ) ; <nl> protected : <nl> + virtual int GetDefaultLabelID ( int controlId ) const ; <nl> + <nl> bool m_bCanceled ; <nl> } ; <nl> mmm a / xbmc / guilib / GUIBaseContainer . cpp <nl> ppp b / xbmc / guilib / GUIBaseContainer . cpp <nl> void CGUIBaseContainer : : UpdateStaticItems ( bool refreshItems ) <nl> } <nl> } <nl> <nl> + void CGUIBaseContainer : : SetInitialVisibility ( ) <nl> + { <nl> + UpdateStaticItems ( true ) ; <nl> + CGUIControl : : SetInitialVisibility ( ) ; <nl> + } <nl> + <nl> void CGUIBaseContainer : : CalculateLayout ( ) <nl> { <nl> CGUIListItemLayout * oldFocusedLayout = m_focusedLayout ; <nl> void CGUIBaseContainer : : LoadContent ( TiXmlElement * content ) <nl> } <nl> item = item - > NextSiblingElement ( " item " ) ; <nl> } <nl> - SetStaticContent ( items ) ; <nl> + SetStaticContent ( items , false ) ; <nl> } <nl> <nl> - void CGUIBaseContainer : : SetStaticContent ( const vector < CGUIListItemPtr > & items ) <nl> + void CGUIBaseContainer : : SetStaticContent ( const vector < CGUIListItemPtr > & items , bool forceUpdate / * = true * / ) <nl> { <nl> m_staticContent = true ; <nl> m_staticUpdateTime = 0 ; <nl> m_staticItems . clear ( ) ; <nl> m_staticItems . assign ( items . begin ( ) , items . end ( ) ) ; <nl> - UpdateStaticItems ( true ) ; <nl> + if ( forceUpdate ) <nl> + UpdateStaticItems ( true ) ; <nl> } <nl> <nl> void CGUIBaseContainer : : SetRenderOffset ( const CPoint & offset ) <nl> mmm a / xbmc / guilib / GUIBaseContainer . h <nl> ppp b / xbmc / guilib / GUIBaseContainer . 
h <nl> class CGUIBaseContainer : public CGUIControl <nl> virtual void AllocResources ( ) ; <nl> virtual void FreeResources ( bool immediately = false ) ; <nl> virtual void UpdateVisibility ( const CGUIListItem * item = NULL ) ; <nl> + virtual void SetInitialVisibility ( ) ; <nl> <nl> virtual unsigned int GetRows ( ) const ; <nl> <nl> class CGUIBaseContainer : public CGUIControl <nl> virtual bool GetCondition ( int condition , int data ) const ; <nl> CStdString GetLabel ( int info ) const ; <nl> <nl> - void SetStaticContent ( const std : : vector < CGUIListItemPtr > & items ) ; <nl> + void SetStaticContent ( const std : : vector < CGUIListItemPtr > & items , bool forceUpdate = true ) ; <nl> <nl> / * ! \ brief Set the offset of the first item in the container from the container ' s position <nl> Useful for lists / panels where the focused item may be larger than the non - focused items and thus <nl> mmm a / xbmc / guilib / GUIIncludes . cpp <nl> ppp b / xbmc / guilib / GUIIncludes . cpp <nl> bool CGUIIncludes : : HasIncludeFile ( const CStdString & file ) const <nl> return false ; <nl> } <nl> <nl> - void CGUIIncludes : : ResolveIncludes ( TiXmlElement * node ) <nl> + void CGUIIncludes : : ResolveIncludes ( TiXmlElement * node , std : : map < int , bool > * xmlIncludeConditions / * = NULL * / ) <nl> { <nl> if ( ! node ) <nl> return ; <nl> - ResolveIncludesForNode ( node ) ; <nl> + ResolveIncludesForNode ( node , xmlIncludeConditions ) ; <nl> <nl> TiXmlElement * child = node - > FirstChildElement ( ) ; <nl> while ( child ) <nl> { <nl> - ResolveIncludes ( child ) ; <nl> + ResolveIncludes ( child , xmlIncludeConditions ) ; <nl> child = child - > NextSiblingElement ( ) ; <nl> } <nl> } <nl> <nl> - void CGUIIncludes : : ResolveIncludesForNode ( TiXmlElement * node ) <nl> + void CGUIIncludes : : ResolveIncludesForNode ( TiXmlElement * node , std : : map < int , bool > * xmlIncludeConditions / * = NULL * / ) <nl> { <nl> / / we have a node , find any < include file = " fileName " > tagName < / include > tags and replace <nl> / / recursively with their real includes <nl> void CGUIIncludes : : ResolveIncludesForNode ( TiXmlElement * node ) <nl> const char * condition = include - > Attribute ( " condition " ) ; <nl> if ( condition ) <nl> { / / check this condition <nl> - if ( ! g_infoManager . EvaluateBool ( condition ) ) <nl> + int conditionID = g_infoManager . Register ( condition ) ; <nl> + bool value = g_infoManager . GetBoolValue ( conditionID ) ; <nl> + <nl> + if ( xmlIncludeConditions ) <nl> + ( * xmlIncludeConditions ) [ conditionID ] = value ; <nl> + <nl> + if ( ! value ) <nl> { <nl> include = include - > NextSiblingElement ( " include " ) ; <nl> continue ; <nl> mmm a / xbmc / guilib / GUIIncludes . h <nl> ppp b / xbmc / guilib / GUIIncludes . h <nl> class CGUIIncludes <nl> " bar " from the include file " foo " . <nl> \ param node an XML Element - all child elements are traversed . 
<nl> * / <nl> - void ResolveIncludes ( TiXmlElement * node ) ; <nl> + void ResolveIncludes ( TiXmlElement * node , std : : map < int , bool > * xmlIncludeConditions = NULL ) ; <nl> const INFO : : CSkinVariableString * CreateSkinVariable ( const CStdString & name , int context ) ; <nl> <nl> private : <nl> - void ResolveIncludesForNode ( TiXmlElement * node ) ; <nl> + void ResolveIncludesForNode ( TiXmlElement * node , std : : map < int , bool > * xmlIncludeConditions = NULL ) ; <nl> CStdString ResolveConstant ( const CStdString & constant ) const ; <nl> bool HasIncludeFile ( const CStdString & includeFile ) const ; <nl> std : : map < CStdString , TiXmlElement > m_includes ; <nl> mmm a / xbmc / guilib / GUIMessage . h <nl> ppp b / xbmc / guilib / GUIMessage . h <nl> do { \ <nl> OnMessage ( msg ) ; \ <nl> } while ( 0 ) <nl> <nl> + / * ! <nl> + \ ingroup winmsg <nl> + \ brief Set the label of the current control <nl> + * / <nl> + # define SET_CONTROL_LABEL_THREAD_SAFE ( controlID , label ) \ <nl> + { \ <nl> + CGUIMessage msg ( GUI_MSG_LABEL_SET , GetID ( ) , controlID ) ; \ <nl> + msg . SetLabel ( label ) ; \ <nl> + if ( g_application . IsCurrentThread ( ) ) \ <nl> + OnMessage ( msg ) ; \ <nl> + else \ <nl> + g_windowManager . SendThreadMessage ( msg , GetID ( ) ) ; \ <nl> + } <nl> + <nl> / * ! <nl> \ ingroup winmsg <nl> \ brief Set the second label of the current control <nl> mmm a / xbmc / guilib / GUIWindow . cpp <nl> ppp b / xbmc / guilib / GUIWindow . cpp <nl> CGUIWindow : : CGUIWindow ( int id , const CStdString & xmlFile ) <nl> m_isDialog = false ; <nl> m_needsScaling = true ; <nl> m_windowLoaded = false ; <nl> - m_loadOnDemand = true ; <nl> + m_loadType = LOAD_EVERY_TIME ; <nl> m_closing = false ; <nl> m_active = false ; <nl> m_renderOrder = 0 ; <nl> CGUIWindow : : CGUIWindow ( int id , const CStdString & xmlFile ) <nl> m_manualRunActions = false ; <nl> m_exclusiveMouseControl = 0 ; <nl> m_clearBackground = 0xff000000 ; / / opaque black - > always clear <nl> + m_windowXMLRootElement = NULL ; <nl> } <nl> <nl> CGUIWindow : : ~ CGUIWindow ( void ) <nl> - { } <nl> + { <nl> + delete m_windowXMLRootElement ; <nl> + } <nl> <nl> bool CGUIWindow : : Load ( const CStdString & strFileName , bool bContainsPath ) <nl> { <nl> bool CGUIWindow : : Load ( const CStdString & strFileName , bool bContainsPath ) <nl> int64_t start ; <nl> start = CurrentHostCounter ( ) ; <nl> # endif <nl> - CLog : : Log ( LOGINFO , " Loading skin file : % s " , strFileName . c_str ( ) ) ; <nl> + const char * strLoadType ; <nl> + switch ( m_loadType ) <nl> + { <nl> + case LOAD_ON_GUI_INIT : <nl> + strLoadType = " LOAD_ON_GUI_INIT " ; <nl> + break ; <nl> + case KEEP_IN_MEMORY : <nl> + strLoadType = " KEEP_IN_MEMORY " ; <nl> + break ; <nl> + case LOAD_EVERY_TIME : <nl> + default : <nl> + strLoadType = " LOAD_EVERY_TIME " ; <nl> + break ; <nl> + } <nl> + CLog : : Log ( LOGINFO , " Loading skin file : % s , load type : % s " , strFileName . c_str ( ) , strLoadType ) ; <nl> <nl> / / Find appropriate skin folder + resolution to load from <nl> CStdString strPath ; <nl> bool CGUIWindow : : Load ( const CStdString & strFileName , bool bContainsPath ) <nl> <nl> bool CGUIWindow : : LoadXML ( const CStdString & strPath , const CStdString & strLowerPath ) <nl> { <nl> - CXBMCTinyXML xmlDoc ; <nl> - if ( ! xmlDoc . LoadFile ( strPath ) & & ! xmlDoc . LoadFile ( CStdString ( strPath ) . ToLower ( ) ) & & ! xmlDoc . LoadFile ( strLowerPath ) ) <nl> + / / load window xml if we don ' t have it stored yet <nl> + if ( ! 
m_windowXMLRootElement ) <nl> { <nl> - CLog : : Log ( LOGERROR , " unable to load : % s , Line % d \ n % s " , strPath . c_str ( ) , xmlDoc . ErrorRow ( ) , xmlDoc . ErrorDesc ( ) ) ; <nl> - SetID ( WINDOW_INVALID ) ; <nl> - return false ; <nl> + CXBMCTinyXML xmlDoc ; <nl> + if ( ! xmlDoc . LoadFile ( strPath ) & & ! xmlDoc . LoadFile ( CStdString ( strPath ) . ToLower ( ) ) & & ! xmlDoc . LoadFile ( strLowerPath ) ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " unable to load : % s , Line % d \ n % s " , strPath . c_str ( ) , xmlDoc . ErrorRow ( ) , xmlDoc . ErrorDesc ( ) ) ; <nl> + SetID ( WINDOW_INVALID ) ; <nl> + return false ; <nl> + } <nl> + m_windowXMLRootElement = ( TiXmlElement * ) xmlDoc . RootElement ( ) - > Clone ( ) ; <nl> } <nl> + else <nl> + CLog : : Log ( LOGDEBUG , " Using already stored xml root node for % s " , strPath . c_str ( ) ) ; <nl> <nl> - return Load ( xmlDoc ) ; <nl> + return Load ( m_windowXMLRootElement ) ; <nl> } <nl> <nl> - bool CGUIWindow : : Load ( CXBMCTinyXML & xmlDoc ) <nl> + bool CGUIWindow : : Load ( TiXmlElement * pRootElement ) <nl> { <nl> - TiXmlElement * pRootElement = xmlDoc . RootElement ( ) ; <nl> + if ( ! pRootElement ) <nl> + return false ; <nl> + <nl> if ( strcmpi ( pRootElement - > Value ( ) , " window " ) ) <nl> { <nl> CLog : : Log ( LOGERROR , " file : XML file doesnt contain < window > " ) ; <nl> bool CGUIWindow : : Load ( CXBMCTinyXML & xmlDoc ) <nl> / / be done with respect to the correct aspect ratio <nl> g_graphicsContext . SetScalingResolution ( m_coordsRes , m_needsScaling ) ; <nl> <nl> - / / Resolve any includes that may be present <nl> - g_SkinInfo - > ResolveIncludes ( pRootElement ) ; <nl> + / / Resolve any includes that may be present and save conditions used to do it <nl> + g_SkinInfo - > ResolveIncludes ( pRootElement , & m_xmlIncludeConditions ) ; <nl> / / now load in the skin file <nl> SetDefaults ( ) ; <nl> <nl> void CGUIWindow : : AllocResources ( bool forceLoad / * = FALSE * / ) <nl> int64_t start ; <nl> start = CurrentHostCounter ( ) ; <nl> # endif <nl> - / / load skin xml fil <nl> - CStdString xmlFile = GetProperty ( " xmlfile " ) . asString ( ) ; <nl> - bool bHasPath = false ; <nl> - if ( xmlFile . Find ( " \ \ " ) > - 1 | | xmlFile . Find ( " / " ) > - 1 ) <nl> - bHasPath = true ; <nl> - if ( xmlFile . size ( ) & & ( forceLoad | | m_loadOnDemand | | ! m_windowLoaded ) ) <nl> - Load ( xmlFile , bHasPath ) ; <nl> + / / use forceLoad to determine if xml file needs loading <nl> + forceLoad | = ( m_loadType = = LOAD_EVERY_TIME ) ; <nl> + <nl> + / / if window is loaded ( not cleared before ) and we aren ' t forced to load <nl> + / / we will have to load it only if include conditions values were changed <nl> + if ( m_windowLoaded & & ! forceLoad ) <nl> + forceLoad = g_infoManager . ConditionsChangedValues ( m_xmlIncludeConditions ) ; <nl> + <nl> + / / if window is loaded and load is forced we have to free window resources first <nl> + if ( m_windowLoaded & & forceLoad ) <nl> + FreeResources ( true ) ; <nl> + <nl> + / / load skin xml file only if we are forced to load or window isn ' t loaded yet <nl> + forceLoad | = ! m_windowLoaded ; <nl> + if ( forceLoad ) <nl> + { <nl> + CStdString xmlFile = GetProperty ( " xmlfile " ) . asString ( ) ; <nl> + if ( xmlFile . size ( ) ) <nl> + { <nl> + bool bHasPath = xmlFile . Find ( " \ \ " ) > - 1 | | xmlFile . 
Find ( " / " ) > - 1 ; <nl> + Load ( xmlFile , bHasPath ) ; <nl> + } <nl> + } <nl> <nl> int64_t slend ; <nl> slend = CurrentHostCounter ( ) ; <nl> void CGUIWindow : : AllocResources ( bool forceLoad / * = FALSE * / ) <nl> int64_t end , freq ; <nl> end = CurrentHostCounter ( ) ; <nl> freq = CurrentHostFrequency ( ) ; <nl> - CLog : : Log ( LOGDEBUG , " Alloc resources : % . 2fms ( % . 2f ms skin load ) " , 1000 . f * ( end - start ) / freq , 1000 . f * ( slend - start ) / freq ) ; <nl> + if ( forceLoad ) <nl> + CLog : : Log ( LOGDEBUG , " Alloc resources : % . 2fms ( % . 2f ms skin load ) " , 1000 . f * ( end - start ) / freq , 1000 . f * ( slend - start ) / freq ) ; <nl> + else <nl> + { <nl> + CLog : : Log ( LOGDEBUG , " Window % s was already loaded " , GetProperty ( " xmlfile " ) . c_str ( ) ) ; <nl> + CLog : : Log ( LOGDEBUG , " Alloc resources : % . 2fm " , 1000 . f * ( end - start ) / freq ) ; <nl> + } <nl> # endif <nl> m_bAllocated = true ; <nl> } <nl> void CGUIWindow : : FreeResources ( bool forceUnload / * = FALSE * / ) <nl> CGUIControlGroup : : FreeResources ( ) ; <nl> / / g_TextureManager . Dump ( ) ; <nl> / / unload the skin <nl> - if ( m_loadOnDemand | | forceUnload ) ClearAll ( ) ; <nl> + if ( m_loadType = = LOAD_EVERY_TIME | | forceUnload ) ClearAll ( ) ; <nl> + if ( forceUnload ) <nl> + { <nl> + delete m_windowXMLRootElement ; <nl> + m_windowXMLRootElement = NULL ; <nl> + } <nl> } <nl> <nl> void CGUIWindow : : DynamicResourceAlloc ( bool bOnOff ) <nl> mmm a / xbmc / guilib / GUIWindow . h <nl> ppp b / xbmc / guilib / GUIWindow . h <nl> class CGUIWindow : public CGUIControlGroup , protected CCriticalSection <nl> public : <nl> <nl> enum WINDOW_TYPE { WINDOW = 0 , MODAL_DIALOG , MODELESS_DIALOG , BUTTON_MENU , SUB_MENU } ; <nl> + enum LOAD_TYPE { LOAD_EVERY_TIME , LOAD_ON_GUI_INIT , KEEP_IN_MEMORY } ; <nl> <nl> CGUIWindow ( int id , const CStdString & xmlFile ) ; <nl> virtual ~ CGUIWindow ( void ) ; <nl> class CGUIWindow : public CGUIControlGroup , protected CCriticalSection <nl> virtual bool IsActive ( ) const ; <nl> void SetCoordsRes ( const RESOLUTION_INFO & res ) { m_coordsRes = res ; } ; <nl> const RESOLUTION_INFO & GetCoordsRes ( ) const { return m_coordsRes ; } ; <nl> - void LoadOnDemand ( bool loadOnDemand ) { m_loadOnDemand = loadOnDemand ; } ; <nl> - bool GetLoadOnDemand ( ) { return m_loadOnDemand ; } <nl> + void SetLoadType ( LOAD_TYPE loadType ) { m_loadType = loadType ; } ; <nl> + LOAD_TYPE GetLoadType ( ) { return m_loadType ; } const <nl> int GetRenderOrder ( ) { return m_renderOrder ; } ; <nl> virtual void SetInitialVisibility ( ) ; <nl> virtual bool IsVisible ( ) const { return true ; } ; / / windows are always considered visible as they implement their own <nl> class CGUIWindow : public CGUIControlGroup , protected CCriticalSection <nl> protected : <nl> virtual EVENT_RESULT OnMouseEvent ( const CPoint & point , const CMouseEvent & event ) ; <nl> virtual bool LoadXML ( const CStdString & strPath , const CStdString & strLowerPath ) ; / / / < Loads from the given file <nl> - bool Load ( CXBMCTinyXML & xmlDoc ) ; / / / < Loads from the given XML document <nl> + bool Load ( TiXmlElement * pRootElement ) ; / / / < Loads from the given XML root element <nl> virtual void LoadAdditionalTags ( TiXmlElement * root ) { } ; / / / < Load additional information from the XML document <nl> <nl> virtual void SetDefaults ( ) ; <nl> class CGUIWindow : public CGUIControlGroup , protected CCriticalSection <nl> RESOLUTION_INFO m_coordsRes ; / / resolution that the window coordinates 
are in . <nl> bool m_needsScaling ; <nl> bool m_windowLoaded ; / / true if the window ' s xml file has been loaded <nl> - bool m_loadOnDemand ; / / true if the window should be loaded only as needed <nl> + LOAD_TYPE m_loadType ; <nl> bool m_isDialog ; / / true if we have a dialog , false otherwise . <nl> bool m_dynamicResourceAlloc ; <nl> bool m_closing ; <nl> class CGUIWindow : public CGUIControlGroup , protected CCriticalSection <nl> CGUIAction m_loadActions ; <nl> CGUIAction m_unloadActions ; <nl> <nl> + TiXmlElement * m_windowXMLRootElement ; <nl> + <nl> bool m_manualRunActions ; <nl> <nl> int m_exclusiveMouseControl ; / / / < \ brief id of child control that wishes to receive all mouse events \ sa GUI_MSG_EXCLUSIVE_MOUSE <nl> <nl> private : <nl> std : : map < CStdString , CVariant , icompare > m_mapProperties ; <nl> - <nl> + std : : map < int , bool > m_xmlIncludeConditions ; / / / < \ brief used to store conditions used to resolve includes for this window <nl> } ; <nl> <nl> # endif <nl> mmm a / xbmc / guilib / GUIWindowManager . cpp <nl> ppp b / xbmc / guilib / GUIWindowManager . cpp <nl> void CGUIWindowManager : : LoadNotOnDemandWindows ( ) <nl> for ( WindowMap : : iterator it = m_mapWindows . begin ( ) ; it ! = m_mapWindows . end ( ) ; it + + ) <nl> { <nl> CGUIWindow * pWindow = ( * it ) . second ; <nl> - if ( ! pWindow - > GetLoadOnDemand ( ) ) <nl> + if ( pWindow - > GetLoadType ( ) = = CGUIWindow : : LOAD_ON_GUI_INIT ) <nl> { <nl> pWindow - > FreeResources ( true ) ; <nl> pWindow - > Initialize ( ) ; <nl> void CGUIWindowManager : : UnloadNotOnDemandWindows ( ) <nl> for ( WindowMap : : iterator it = m_mapWindows . begin ( ) ; it ! = m_mapWindows . end ( ) ; it + + ) <nl> { <nl> CGUIWindow * pWindow = ( * it ) . second ; <nl> - if ( ! pWindow - > GetLoadOnDemand ( ) ) <nl> + if ( pWindow - > GetLoadType ( ) = = CGUIWindow : : LOAD_ON_GUI_INIT | | <nl> + pWindow - > GetLoadType ( ) = = CGUIWindow : : KEEP_IN_MEMORY ) <nl> { <nl> pWindow - > FreeResources ( true ) ; <nl> } <nl> mmm a / xbmc / interfaces / python / xbmcmodule / GUIPythonWindow . cpp <nl> ppp b / xbmc / interfaces / python / xbmcmodule / GUIPythonWindow . cpp <nl> CGUIPythonWindow : : CGUIPythonWindow ( int id ) <nl> { <nl> pCallbackWindow = NULL ; <nl> m_threadState = NULL ; <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> m_destroyAfterDeinit = false ; <nl> } <nl> <nl> mmm a / xbmc / interfaces / python / xbmcmodule / GUIPythonWindowDialog . cpp <nl> ppp b / xbmc / interfaces / python / xbmcmodule / GUIPythonWindowDialog . cpp <nl> <nl> CGUIPythonWindowDialog : : CGUIPythonWindowDialog ( int id ) <nl> : CGUIPythonWindow ( id ) <nl> { <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> } <nl> <nl> CGUIPythonWindowDialog : : ~ CGUIPythonWindowDialog ( void ) <nl> mmm a / xbmc / interfaces / python / xbmcmodule / GUIPythonWindowXML . cpp <nl> ppp b / xbmc / interfaces / python / xbmcmodule / GUIPythonWindowXML . cpp <nl> CGUIPythonWindowXML : : CGUIPythonWindowXML ( int id , CStdString strXML , CStdString s <nl> { <nl> pCallbackWindow = NULL ; <nl> m_threadState = NULL ; <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> m_scriptPath = strFallBackPath ; <nl> m_destroyAfterDeinit = false ; <nl> } <nl> bool CGUIPythonWindowXML : : LoadXML ( const CStdString & strPath , const CStdString & s <nl> if ( xmlDoc . Error ( ) ) <nl> return false ; <nl> <nl> - return Load ( xmlDoc ) ; <nl> + return Load ( xmlDoc . 
RootElement ( ) ) ; <nl> } <nl> <nl> void CGUIPythonWindowXML : : FreeResources ( bool forceUnLoad / * = FALSE * / ) <nl> mmm a / xbmc / interfaces / python / xbmcmodule / GUIPythonWindowXMLDialog . cpp <nl> ppp b / xbmc / interfaces / python / xbmcmodule / GUIPythonWindowXMLDialog . cpp <nl> <nl> CGUIPythonWindowXMLDialog : : CGUIPythonWindowXMLDialog ( int id , CStdString strXML , CStdString strFallBackPath ) <nl> : CGUIPythonWindowXML ( id , strXML , strFallBackPath ) <nl> { <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> } <nl> <nl> CGUIPythonWindowXMLDialog : : ~ CGUIPythonWindowXMLDialog ( void ) <nl> mmm a / xbmc / music / dialogs / GUIDialogMusicInfo . cpp <nl> ppp b / xbmc / music / dialogs / GUIDialogMusicInfo . cpp <nl> CGUIDialogMusicInfo : : CGUIDialogMusicInfo ( void ) <nl> { <nl> m_bRefresh = false ; <nl> m_albumSongs = new CFileItemList ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogMusicInfo : : ~ CGUIDialogMusicInfo ( void ) <nl> mmm a / xbmc / music / dialogs / GUIDialogMusicOSD . cpp <nl> ppp b / xbmc / music / dialogs / GUIDialogMusicOSD . cpp <nl> <nl> CGUIDialogMusicOSD : : CGUIDialogMusicOSD ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_MUSIC_OSD , " MusicOSD . xml " ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogMusicOSD : : ~ CGUIDialogMusicOSD ( void ) <nl> mmm a / xbmc / music / dialogs / GUIDialogMusicOverlay . cpp <nl> ppp b / xbmc / music / dialogs / GUIDialogMusicOverlay . cpp <nl> CGUIDialogMusicOverlay : : CGUIDialogMusicOverlay ( ) <nl> : CGUIDialog ( WINDOW_DIALOG_MUSIC_OVERLAY , " MusicOverlay . xml " ) <nl> { <nl> m_renderOrder = 0 ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogMusicOverlay : : ~ CGUIDialogMusicOverlay ( ) <nl> mmm a / xbmc / music / dialogs / GUIDialogMusicScan . cpp <nl> ppp b / xbmc / music / dialogs / GUIDialogMusicScan . cpp <nl> using namespace MUSIC_INFO ; <nl> CGUIDialogMusicScan : : CGUIDialogMusicScan ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_MUSIC_SCAN , " DialogMusicScan . xml " ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogMusicScan : : ~ CGUIDialogMusicScan ( void ) <nl> mmm a / xbmc / music / dialogs / GUIDialogSongInfo . cpp <nl> ppp b / xbmc / music / dialogs / GUIDialogSongInfo . cpp <nl> CGUIDialogSongInfo : : CGUIDialogSongInfo ( void ) <nl> m_cancelled = false ; <nl> m_needsUpdate = false ; <nl> m_startRating = - 1 ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogSongInfo : : ~ CGUIDialogSongInfo ( void ) <nl> mmm a / xbmc / music / dialogs / GUIDialogVisualisationPresetList . cpp <nl> ppp b / xbmc / music / dialogs / GUIDialogVisualisationPresetList . cpp <nl> CGUIDialogVisualisationPresetList : : CGUIDialogVisualisationPresetList ( void ) <nl> m_currentPreset = 0 ; <nl> m_vecPresets = new CFileItemList ; <nl> m_viz = NULL ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogVisualisationPresetList : : ~ CGUIDialogVisualisationPresetList ( void ) <nl> mmm a / xbmc / music / windows / GUIWindowVisualisation . cpp <nl> ppp b / xbmc / music / windows / GUIWindowVisualisation . cpp <nl> CGUIWindowVisualisation : : CGUIWindowVisualisation ( void ) <nl> m_initTimer ( true ) , m_lockedTimer ( true ) <nl> { <nl> m_bShowPreset = false ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> bool CGUIWindowVisualisation : : OnAction ( const CAction & action ) <nl> mmm a / xbmc / network / GUIDialogNetworkSetup . cpp <nl> ppp b / xbmc / network / GUIDialogNetworkSetup . 
cpp <nl> CGUIDialogNetworkSetup : : CGUIDialogNetworkSetup ( void ) <nl> { <nl> m_protocol = NET_PROTOCOL_SMB ; <nl> m_confirmed = false ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogNetworkSetup : : ~ CGUIDialogNetworkSetup ( ) <nl> bool CGUIDialogNetworkSetup : : ShowAndGetNetworkAddress ( CStdString & path ) <nl> return dialog - > IsConfirmed ( ) ; <nl> } <nl> <nl> - void CGUIDialogNetworkSetup : : OnInitWindow ( ) <nl> + void CGUIDialogNetworkSetup : : OnWindowLoaded ( ) <nl> { <nl> / / replace our buttons with edits <nl> ChangeButtonToEdit ( CONTROL_SERVER_ADDRESS ) ; <nl> void CGUIDialogNetworkSetup : : OnInitWindow ( ) <nl> ChangeButtonToEdit ( CONTROL_PORT_NUMBER ) ; <nl> ChangeButtonToEdit ( CONTROL_PASSWORD ) ; <nl> <nl> + CGUIDialog : : OnWindowLoaded ( ) ; <nl> + } <nl> + <nl> + void CGUIDialogNetworkSetup : : OnInitWindow ( ) <nl> + { <nl> / / start as unconfirmed <nl> m_confirmed = false ; <nl> <nl> void CGUIDialogNetworkSetup : : OnInitWindow ( ) <nl> OnProtocolChange ( ) ; <nl> } <nl> <nl> + void CGUIDialogNetworkSetup : : OnDeinitWindow ( int nextWindowID ) <nl> + { <nl> + / / clear protocol spinner <nl> + CGUISpinControlEx * pSpin = ( CGUISpinControlEx * ) GetControl ( CONTROL_PROTOCOL ) ; <nl> + if ( pSpin ) <nl> + pSpin - > Clear ( ) ; <nl> + <nl> + CGUIDialog : : OnDeinitWindow ( nextWindowID ) ; <nl> + } <nl> + <nl> void CGUIDialogNetworkSetup : : OnServerBrowse ( ) <nl> { <nl> / / open a filebrowser dialog with the current address <nl> mmm a / xbmc / network / GUIDialogNetworkSetup . h <nl> ppp b / xbmc / network / GUIDialogNetworkSetup . h <nl> class CGUIDialogNetworkSetup : <nl> virtual bool OnMessage ( CGUIMessage & message ) ; <nl> virtual bool OnBack ( int actionID ) ; <nl> virtual void OnInitWindow ( ) ; <nl> + virtual void OnWindowLoaded ( ) ; <nl> + virtual void OnDeinitWindow ( int nextWindowID ) ; <nl> <nl> static bool ShowAndGetNetworkAddress ( CStdString & path ) ; <nl> <nl> mmm a / xbmc / peripherals / dialogs / GUIDialogPeripheralManager . cpp <nl> ppp b / xbmc / peripherals / dialogs / GUIDialogPeripheralManager . cpp <nl> CGUIDialogPeripheralManager : : CGUIDialogPeripheralManager ( void ) : <nl> m_iSelected ( 0 ) , <nl> m_peripheralItems ( new CFileItemList ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogPeripheralManager : : ~ CGUIDialogPeripheralManager ( void ) <nl> bool CGUIDialogPeripheralManager : : OnAction ( const CAction & action ) <nl> return CGUIDialog : : OnAction ( action ) ; <nl> } <nl> <nl> - bool CGUIDialogPeripheralManager : : OnMessageInit ( CGUIMessage & message ) <nl> + void CGUIDialogPeripheralManager : : OnInitWindow ( ) <nl> { <nl> - CGUIWindow : : OnMessage ( message ) ; <nl> + CGUIWindow : : OnInitWindow ( ) ; <nl> m_iSelected = 0 ; <nl> Update ( ) ; <nl> - <nl> - return true ; <nl> } <nl> <nl> bool CGUIDialogPeripheralManager : : OnClickList ( CGUIMessage & message ) <nl> bool CGUIDialogPeripheralManager : : OnMessage ( CGUIMessage & message ) <nl> break ; <nl> case GUI_MSG_ITEM_SELECT : <nl> return true ; <nl> - case GUI_MSG_WINDOW_INIT : <nl> - { <nl> - OnMessageInit ( message ) ; <nl> - break ; <nl> - } <nl> case GUI_MSG_CLICKED : <nl> return OnMessageClick ( message ) ; <nl> } <nl> mmm a / xbmc / peripherals / dialogs / GUIDialogPeripheralManager . h <nl> ppp b / xbmc / peripherals / dialogs / GUIDialogPeripheralManager . 
h <nl> namespace PERIPHERALS <nl> virtual ~ CGUIDialogPeripheralManager ( void ) ; <nl> virtual bool OnMessage ( CGUIMessage & message ) ; <nl> virtual bool OnAction ( const CAction & action ) ; <nl> + virtual void OnInitWindow ( ) ; <nl> virtual void OnWindowLoaded ( void ) ; <nl> virtual void OnWindowUnload ( void ) ; <nl> virtual bool HasListItems ( ) const { return true ; } ; <nl> namespace PERIPHERALS <nl> virtual void Update ( void ) ; <nl> <nl> protected : <nl> - virtual bool OnMessageInit ( CGUIMessage & message ) ; <nl> virtual bool OnMessageClick ( CGUIMessage & message ) ; <nl> <nl> virtual bool OnClickList ( CGUIMessage & message ) ; <nl> mmm a / xbmc / pictures / GUIDialogPictureInfo . cpp <nl> ppp b / xbmc / pictures / GUIDialogPictureInfo . cpp <nl> CGUIDialogPictureInfo : : CGUIDialogPictureInfo ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_PICTURE_INFO , " DialogPictureInfo . xml " ) <nl> { <nl> m_pictureInfo = new CFileItemList ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogPictureInfo : : ~ CGUIDialogPictureInfo ( void ) <nl> mmm a / xbmc / pictures / GUIWindowSlideShow . cpp <nl> ppp b / xbmc / pictures / GUIWindowSlideShow . cpp <nl> CGUIWindowSlideShow : : CGUIWindowSlideShow ( void ) <nl> m_pBackgroundLoader = NULL ; <nl> m_slides = new CFileItemList ; <nl> m_Resolution = RES_INVALID ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> Reset ( ) ; <nl> } <nl> <nl> void CGUIWindowSlideShow : : Reset ( ) <nl> m_Resolution = g_graphicsContext . GetVideoResolution ( ) ; <nl> } <nl> <nl> - void CGUIWindowSlideShow : : FreeResources ( ) <nl> - { / / wait for any outstanding picture loads <nl> + void CGUIWindowSlideShow : : OnDeinitWindow ( int nextWindowID ) <nl> + { <nl> + if ( m_Resolution ! = g_guiSettings . m_LookAndFeelResolution ) <nl> + { <nl> + / / FIXME : Use GUI resolution for now <nl> + / / g_graphicsContext . SetVideoResolution ( g_guiSettings . m_LookAndFeelResolution , TRUE ) ; <nl> + } <nl> + <nl> + / / Reset ( ) ; <nl> + if ( nextWindowID ! = WINDOW_PICTURES ) <nl> + m_ImageLib . Unload ( ) ; <nl> + <nl> + g_windowManager . ShowOverlay ( OVERLAY_STATE_SHOWN ) ; <nl> + <nl> + / / wait for any outstanding picture loads <nl> if ( m_pBackgroundLoader ) <nl> { <nl> / / sleep until the loader finishes loading the current pic <nl> void CGUIWindowSlideShow : : FreeResources ( ) <nl> m_Image [ 0 ] . Close ( ) ; <nl> m_Image [ 1 ] . Close ( ) ; <nl> g_infoManager . ResetCurrentSlide ( ) ; <nl> + <nl> + CGUIWindow : : OnDeinitWindow ( nextWindowID ) ; <nl> } <nl> <nl> void CGUIWindowSlideShow : : Add ( const CFileItem * picture ) <nl> bool CGUIWindowSlideShow : : OnMessage ( CGUIMessage & message ) <nl> { <nl> switch ( message . GetMessage ( ) ) <nl> { <nl> - case GUI_MSG_WINDOW_DEINIT : <nl> - { <nl> - if ( m_Resolution ! = g_guiSettings . m_LookAndFeelResolution ) <nl> - { <nl> - / / FIXME : Use GUI resolution for now <nl> - / / g_graphicsContext . SetVideoResolution ( g_guiSettings . m_LookAndFeelResolution , TRUE ) ; <nl> - } <nl> - <nl> - / / Reset ( ) ; <nl> - if ( message . GetParam1 ( ) ! = WINDOW_PICTURES ) <nl> - m_ImageLib . Unload ( ) ; <nl> - <nl> - g_windowManager . ShowOverlay ( OVERLAY_STATE_SHOWN ) ; <nl> - FreeResources ( ) ; <nl> - } <nl> - break ; <nl> - <nl> case GUI_MSG_WINDOW_INIT : <nl> { <nl> m_Resolution = ( RESOLUTION ) g_guiSettings . GetInt ( " pictures . displayresolution " ) ; <nl> mmm a / xbmc / pictures / GUIWindowSlideShow . h <nl> ppp b / xbmc / pictures / GUIWindowSlideShow . 
h <nl> class CGUIWindowSlideShow : public CGUIWindow <nl> virtual bool OnAction ( const CAction & action ) ; <nl> virtual void Render ( ) ; <nl> virtual void Process ( unsigned int currentTime , CDirtyRegionList & regions ) ; <nl> - virtual void FreeResources ( ) ; <nl> + virtual void OnDeinitWindow ( int nextWindowID ) ; <nl> void OnLoadPic ( int iPic , int iSlideNumber , CBaseTexture * pTexture , bool bFullSize ) ; <nl> int NumSlides ( ) const ; <nl> int CurrentSlide ( ) const ; <nl> mmm a / xbmc / settings / GUIDialogSettings . cpp <nl> ppp b / xbmc / settings / GUIDialogSettings . cpp <nl> CGUIDialogSettings : : CGUIDialogSettings ( int id , const char * xmlFile ) <nl> m_pOriginalSlider = NULL ; <nl> m_pOriginalSeparator = NULL ; <nl> m_usePopupSliders = false ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogSettings : : ~ CGUIDialogSettings ( void ) <nl> mmm a / xbmc / settings / GUIWindowSettings . cpp <nl> ppp b / xbmc / settings / GUIWindowSettings . cpp <nl> <nl> CGUIWindowSettings : : CGUIWindowSettings ( void ) <nl> : CGUIWindow ( WINDOW_SETTINGS_MENU , " Settings . xml " ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIWindowSettings : : ~ CGUIWindowSettings ( void ) <nl> mmm a / xbmc / settings / GUIWindowSettingsCategory . cpp <nl> ppp b / xbmc / settings / GUIWindowSettingsCategory . cpp <nl> using namespace PERIPHERALS ; <nl> CGUIWindowSettingsCategory : : CGUIWindowSettingsCategory ( void ) <nl> : CGUIWindow ( WINDOW_SETTINGS_MYPICTURES , " SettingsCategory . xml " ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> m_pOriginalSpin = NULL ; <nl> m_pOriginalRadioButton = NULL ; <nl> m_pOriginalButton = NULL ; <nl> mmm a / xbmc / settings / GUIWindowSettingsProfile . cpp <nl> ppp b / xbmc / settings / GUIWindowSettingsProfile . cpp <nl> CGUIWindowSettingsProfile : : CGUIWindowSettingsProfile ( void ) <nl> : CGUIWindow ( WINDOW_SETTINGS_PROFILES , " SettingsProfile . xml " ) <nl> { <nl> m_listItems = new CFileItemList ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIWindowSettingsProfile : : ~ CGUIWindowSettingsProfile ( void ) <nl> mmm a / xbmc / video / dialogs / GUIDialogFullScreenInfo . cpp <nl> ppp b / xbmc / video / dialogs / GUIDialogFullScreenInfo . cpp <nl> <nl> CGUIDialogFullScreenInfo : : CGUIDialogFullScreenInfo ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_FULLSCREEN_INFO , " DialogFullScreenInfo . xml " ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogFullScreenInfo : : ~ CGUIDialogFullScreenInfo ( void ) <nl> mmm a / xbmc / video / dialogs / GUIDialogVideoBookmarks . cpp <nl> ppp b / xbmc / video / dialogs / GUIDialogVideoBookmarks . cpp <nl> CGUIDialogVideoBookmarks : : CGUIDialogVideoBookmarks ( ) <nl> : CGUIDialog ( WINDOW_DIALOG_VIDEO_BOOKMARKS , " VideoOSDBookmarks . xml " ) <nl> { <nl> m_vecItems = new CFileItemList ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogVideoBookmarks : : ~ CGUIDialogVideoBookmarks ( ) <nl> mmm a / xbmc / video / dialogs / GUIDialogVideoInfo . cpp <nl> ppp b / xbmc / video / dialogs / GUIDialogVideoInfo . cpp <nl> CGUIDialogVideoInfo : : CGUIDialogVideoInfo ( void ) <nl> m_bRefresh = false ; <nl> m_hasUpdatedThumb = false ; <nl> m_castList = new CFileItemList ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogVideoInfo : : ~ CGUIDialogVideoInfo ( void ) <nl> mmm a / xbmc / video / dialogs / GUIDialogVideoOSD . cpp <nl> ppp b / xbmc / video / dialogs / GUIDialogVideoOSD . 
cpp <nl> using namespace PVR ; <nl> CGUIDialogVideoOSD : : CGUIDialogVideoOSD ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_VIDEO_OSD , " VideoOSD . xml " ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogVideoOSD : : ~ CGUIDialogVideoOSD ( void ) <nl> mmm a / xbmc / video / dialogs / GUIDialogVideoOverlay . cpp <nl> ppp b / xbmc / video / dialogs / GUIDialogVideoOverlay . cpp <nl> CGUIDialogVideoOverlay : : CGUIDialogVideoOverlay ( ) <nl> : CGUIDialog ( WINDOW_DIALOG_VIDEO_OVERLAY , " VideoOverlay . xml " ) <nl> { <nl> m_renderOrder = 0 ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogVideoOverlay : : ~ CGUIDialogVideoOverlay ( ) <nl> mmm a / xbmc / video / dialogs / GUIDialogVideoScan . cpp <nl> ppp b / xbmc / video / dialogs / GUIDialogVideoScan . cpp <nl> using namespace VIDEO ; <nl> CGUIDialogVideoScan : : CGUIDialogVideoScan ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_VIDEO_SCAN , " DialogVideoScan . xml " ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIDialogVideoScan : : ~ CGUIDialogVideoScan ( void ) <nl> void CGUIDialogVideoScan : : UpdateState ( ) <nl> CGUIProgressControl * pProgressCtrl = ( CGUIProgressControl * ) GetControl ( CONTROL_CURRENT_PROGRESS ) ; <nl> if ( pProgressCtrl ) pProgressCtrl - > SetPercentage ( m_fCurrentPercentDone ) ; <nl> } <nl> + else <nl> + SET_CONTROL_HIDDEN ( CONTROL_CURRENT_PROGRESS ) ; <nl> + <nl> if ( m_fPercentDone > - 1 . 0f ) <nl> { <nl> SET_CONTROL_VISIBLE ( CONTROL_PROGRESS ) ; <nl> CGUIProgressControl * pProgressCtrl = ( CGUIProgressControl * ) GetControl ( CONTROL_PROGRESS ) ; <nl> if ( pProgressCtrl ) pProgressCtrl - > SetPercentage ( m_fPercentDone ) ; <nl> } <nl> + else <nl> + SET_CONTROL_HIDDEN ( CONTROL_PROGRESS ) ; <nl> } <nl> else <nl> { <nl> mmm a / xbmc / video / windows / GUIWindowFullScreen . cpp <nl> ppp b / xbmc / video / windows / GUIWindowFullScreen . cpp <nl> CGUIWindowFullScreen : : CGUIWindowFullScreen ( void ) <nl> m_subsLayout = NULL ; <nl> m_bGroupSelectShow = false ; <nl> m_sliderAction = 0 ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> / / audio <nl> / / - language <nl> / / - volume <nl> CGUIWindowFullScreen : : CGUIWindowFullScreen ( void ) <nl> CGUIWindowFullScreen : : ~ CGUIWindowFullScreen ( void ) <nl> { } <nl> <nl> - void CGUIWindowFullScreen : : AllocResources ( bool forceLoad ) <nl> - { <nl> - CGUIWindow : : AllocResources ( forceLoad ) ; <nl> - DynamicResourceAlloc ( false ) ; <nl> - } <nl> - <nl> - void CGUIWindowFullScreen : : FreeResources ( bool forceUnload ) <nl> - { <nl> - g_settings . Save ( ) ; <nl> - DynamicResourceAlloc ( true ) ; <nl> - CGUIWindow : : FreeResources ( forceUnload ) ; <nl> - } <nl> - <nl> bool CGUIWindowFullScreen : : OnAction ( const CAction & action ) <nl> { <nl> if ( g_application . m_pPlayer ! = NULL & & g_application . m_pPlayer - > OnAction ( action ) ) <nl> bool CGUIWindowFullScreen : : OnMessage ( CGUIMessage & message ) <nl> } <nl> case GUI_MSG_WINDOW_DEINIT : <nl> { <nl> - CGUIWindow : : OnMessage ( message ) ; <nl> - <nl> CGUIDialog * pDialog = ( CGUIDialog * ) g_windowManager . GetWindow ( WINDOW_DIALOG_OSD_TELETEXT ) ; <nl> if ( pDialog ) pDialog - > Close ( true ) ; <nl> CGUIDialogSlider * slider = ( CGUIDialogSlider * ) g_windowManager . GetWindow ( WINDOW_DIALOG_SLIDER ) ; <nl> bool CGUIWindowFullScreen : : OnMessage ( CGUIMessage & message ) <nl> pDialog = ( CGUIDialog * ) g_windowManager . 
GetWindow ( WINDOW_DIALOG_PVR_OSD_CUTTER ) ; <nl> if ( pDialog ) pDialog - > Close ( true ) ; <nl> <nl> - FreeResources ( true ) ; <nl> + CGUIWindow : : OnMessage ( message ) ; <nl> + <nl> + g_settings . Save ( ) ; <nl> <nl> CSingleLock lock ( g_graphicsContext ) ; <nl> g_graphicsContext . SetFullScreenVideo ( false ) ; <nl> mmm a / xbmc / video / windows / GUIWindowFullScreen . h <nl> ppp b / xbmc / video / windows / GUIWindowFullScreen . h <nl> class CGUIWindowFullScreen : <nl> public : <nl> CGUIWindowFullScreen ( void ) ; <nl> virtual ~ CGUIWindowFullScreen ( void ) ; <nl> - virtual void AllocResources ( bool forceLoad = false ) ; <nl> - virtual void FreeResources ( bool forceUnLoad = false ) ; <nl> virtual bool OnMessage ( CGUIMessage & message ) ; <nl> virtual bool OnAction ( const CAction & action ) ; <nl> virtual void FrameMove ( ) ; <nl> mmm a / xbmc / windows / GUIMediaWindow . cpp <nl> ppp b / xbmc / windows / GUIMediaWindow . cpp <nl> using namespace ADDON ; <nl> CGUIMediaWindow : : CGUIMediaWindow ( int id , const char * xmlFile ) <nl> : CGUIWindow ( id , xmlFile ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> m_vecItems = new CFileItemList ; <nl> m_unfilteredItems = new CFileItemList ; <nl> m_vecItems - > SetPath ( " ? " ) ; <nl> mmm a / xbmc / windows / GUIWindowFileManager . cpp <nl> ppp b / xbmc / windows / GUIWindowFileManager . cpp <nl> CGUIWindowFileManager : : CGUIWindowFileManager ( void ) <nl> m_Directory [ 0 ] - > m_bIsFolder = true ; <nl> m_Directory [ 1 ] - > m_bIsFolder = true ; <nl> bCheckShareConnectivity = true ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIWindowFileManager : : ~ CGUIWindowFileManager ( void ) <nl> mmm a / xbmc / windows / GUIWindowHome . cpp <nl> ppp b / xbmc / windows / GUIWindowHome . cpp <nl> CGUIWindowHome : : CGUIWindowHome ( void ) : CGUIWindow ( WINDOW_HOME , " Home . xml " ) , <nl> m_cumulativeUpdateFlag ( 0 ) <nl> { <nl> m_updateRA = ( Audio | Video | Totals ) ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> <nl> CAnnouncementManager : : AddAnnouncer ( this ) ; <nl> } <nl> mmm a / xbmc / windows / GUIWindowLoginScreen . cpp <nl> ppp b / xbmc / windows / GUIWindowLoginScreen . cpp <nl> CGUIWindowLoginScreen : : CGUIWindowLoginScreen ( void ) <nl> watch . StartZero ( ) ; <nl> m_vecItems = new CFileItemList ; <nl> m_iSelectedItem = - 1 ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIWindowLoginScreen : : ~ CGUIWindowLoginScreen ( void ) <nl> mmm a / xbmc / windows / GUIWindowPointer . cpp <nl> ppp b / xbmc / windows / GUIWindowPointer . cpp <nl> CGUIWindowPointer : : CGUIWindowPointer ( void ) <nl> : CGUIDialog ( WINDOW_DIALOG_POINTER , " Pointer . xml " ) <nl> { <nl> m_pointer = 0 ; <nl> - m_loadOnDemand = false ; <nl> + m_loadType = LOAD_ON_GUI_INIT ; <nl> m_needsScaling = false ; <nl> m_active = false ; <nl> m_renderOrder = INT_MAX - 1 ; <nl> mmm a / xbmc / windows / GUIWindowSystemInfo . cpp <nl> ppp b / xbmc / windows / GUIWindowSystemInfo . cpp <nl> CGUIWindowSystemInfo : : CGUIWindowSystemInfo ( void ) <nl> : CGUIWindow ( WINDOW_SYSTEM_INFORMATION , " SettingsSystemInfo . xml " ) <nl> { <nl> m_section = CONTROL_BT_DEFAULT ; <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> CGUIWindowSystemInfo : : ~ CGUIWindowSystemInfo ( void ) <nl> { <nl> bool CGUIWindowSystemInfo : : OnMessage ( CGUIMessage & message ) <nl> { <nl> CGUIWindow : : OnMessage ( message ) ; <nl> m_diskUsage . clear ( ) ; <nl> + ResetLabels ( ) ; <nl> return true ; <nl> } <nl> break ; <nl> mmm a / xbmc / windows / GUIWindowWeather . 
cpp <nl> ppp b / xbmc / windows / GUIWindowWeather . cpp <nl> FIXME ' S <nl> CGUIWindowWeather : : CGUIWindowWeather ( void ) <nl> : CGUIWindow ( WINDOW_WEATHER , " MyWeather . xml " ) , m_maxLocation ( 0 ) <nl> { <nl> + m_loadType = KEEP_IN_MEMORY ; <nl> } <nl> <nl> CGUIWindowWeather : : ~ CGUIWindowWeather ( void ) <nl>
|
Merge pull request from pieh / window_load_on_demand
|
xbmc/xbmc
|
2cc5bc96f770ad89d4e302e6151a6df881e3106d
|
2012-09-08T13:15:05Z
|
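A minimal C++ sketch (hypothetical names, not the actual CGUIWindow code) of the load strategy this commit introduces: LOAD_EVERY_TIME windows reload their XML on each activation, LOAD_ON_GUI_INIT windows are (re)loaded when the GUI initializes, and KEEP_IN_MEMORY windows cache the parsed XML, reloading only when forced or when the include conditions used to resolve the skin have changed.

#include <cstdio>

enum LoadType { LOAD_EVERY_TIME, LOAD_ON_GUI_INIT, KEEP_IN_MEMORY };

struct Window {
  LoadType loadType = LOAD_EVERY_TIME;
  bool loaded = false;
  bool includeConditionsChanged = false;  // stand-in for g_infoManager.ConditionsChangedValues()

  void AllocResources(bool forceLoad = false) {
    forceLoad |= (loadType == LOAD_EVERY_TIME);
    if (loaded && !forceLoad)
      forceLoad = includeConditionsChanged;  // skin includes may need re-resolving
    if (loaded && forceLoad)
      FreeResources(true);                   // free before reloading
    if (forceLoad || !loaded)
      LoadXML();
  }

  void FreeResources(bool forceUnload = false) {
    if (loadType == LOAD_EVERY_TIME || forceUnload)
      loaded = false;  // KEEP_IN_MEMORY windows survive a plain FreeResources()
  }

  void LoadXML() { loaded = true; std::puts("loading skin xml"); }
};

int main() {
  Window w;
  w.loadType = KEEP_IN_MEMORY;
  w.AllocResources();   // loads the XML
  w.FreeResources();    // keeps the parsed XML cached
  w.AllocResources();   // no reload needed
  return 0;
}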
mmm a / tensorflow / compiler / xla / service / gpu / cudnn_convolution_algorithm_picker . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / cudnn_convolution_algorithm_picker . cc <nl> se : : port : : StatusOr < se : : DeviceMemory < uint8 > > ScratchAllocator : : AllocateBytes ( <nl> / / Determines whether we can safely perform a winograd non - fused convolution for <nl> / / the given input and output shapes . This works around b / 68264959 , an integer <nl> / / overflow in cuDNNv5 and cuDNNv6 . <nl> - / / <nl> - / / TODO ( jlebar ) : We shouldn ' t need this check for cuDNNv7 . <nl> - bool ShouldIncludeWinogradNonfusedAlgo ( <nl> - const Shape & input_shape , const Shape & output_shape , <nl> - const ConvolutionDimensionNumbers & dnums ) { <nl> + bool ShouldIncludeWinogradNonfusedAlgo ( const Shape & input_shape , <nl> + const Shape & output_shape , <nl> + const ConvolutionDimensionNumbers & dnums , <nl> + se : : StreamExecutor * stream_exec ) { <nl> + / / Skip this check for cudnn7 and newer . <nl> + se : : port : : StatusOr < std : : tuple < int , int , int > > version = <nl> + stream_exec - > AsDnn ( ) - > GetVersion ( ) ; <nl> + if ( version . ok ( ) & & std : : get < 0 > ( version . ValueOrDie ( ) ) > = 7 ) { <nl> + return true ; <nl> + } <nl> + <nl> int64 batch = input_shape . dimensions ( dnums . input_batch_dimension ( ) ) ; <nl> int64 in_depths = input_shape . dimensions ( dnums . input_feature_dimension ( ) ) ; <nl> int64 in_rows = input_shape . dimensions ( dnums . input_spatial_dimensions ( 0 ) ) ; <nl> bool ShouldIncludeWinogradNonfusedAlgo ( <nl> <nl> std : : vector < AlgorithmDesc > GetAlgorithms ( CudnnConvKind kind , <nl> bool with_winograd_nonfused , <nl> - se : : StreamExecutor * stream_exec_ ) { <nl> + se : : StreamExecutor * stream_exec ) { <nl> std : : vector < AlgorithmDesc > algorithms ; <nl> switch ( kind ) { <nl> case CudnnConvKind : : kBackwardFilter : <nl> - CHECK ( stream_exec_ - > GetConvolveBackwardFilterAlgorithms ( <nl> + CHECK ( stream_exec - > GetConvolveBackwardFilterAlgorithms ( <nl> with_winograd_nonfused , & algorithms ) ) ; <nl> break ; <nl> case CudnnConvKind : : kBackwardInput : <nl> - CHECK ( stream_exec_ - > GetConvolveBackwardDataAlgorithms ( <nl> + CHECK ( stream_exec - > GetConvolveBackwardDataAlgorithms ( <nl> with_winograd_nonfused , & algorithms ) ) ; <nl> break ; <nl> case CudnnConvKind : : kForward : <nl> - CHECK ( stream_exec_ - > GetConvolveAlgorithms ( with_winograd_nonfused , <nl> - & algorithms ) ) ; <nl> + CHECK ( stream_exec - > GetConvolveAlgorithms ( with_winograd_nonfused , <nl> + & algorithms ) ) ; <nl> break ; <nl> } <nl> <nl> CudnnConvolutionAlgorithmPicker : : PickBestAlgorithm ( <nl> return nullopt ; <nl> } <nl> <nl> - const bool use_winograd_nonfused = <nl> - ShouldIncludeWinogradNonfusedAlgo ( input_shape , output_shape , dnums ) ; <nl> + const bool use_winograd_nonfused = ShouldIncludeWinogradNonfusedAlgo ( <nl> + input_shape , output_shape , dnums , stream_exec_ ) ; <nl> se : : dnn : : ProfileResult best_result ; <nl> int64 best_result_bytes_used = 0 ; <nl> <nl> mmm a / tensorflow / stream_executor / cuda / cuda_dnn . cc <nl> ppp b / tensorflow / stream_executor / cuda / cuda_dnn . 
cc <nl> port : : Status CudnnSupport : : Init ( ) { <nl> ToString ( status ) ) } ; <nl> } <nl> <nl> + port : : StatusOr < std : : tuple < int , int , int > > CudnnSupport : : GetVersion ( ) { <nl> + CudnnVersion version ; <nl> + TF_RETURN_IF_ERROR ( GetLoadedCudnnVersion ( & version ) ) ; <nl> + return std : : make_tuple ( version . major_version , version . minor_version , <nl> + version . patch_level ) ; <nl> + } <nl> + <nl> / / Turns a BatchDescriptor structure into a cudnn tensor handle within a scope . <nl> class ScopedTensorDescriptor { <nl> public : <nl> mmm a / tensorflow / stream_executor / cuda / cuda_dnn . h <nl> ppp b / tensorflow / stream_executor / cuda / cuda_dnn . h <nl> class CudnnSupport : public dnn : : DnnSupport { <nl> ~ CudnnSupport ( ) override ; <nl> <nl> port : : Status Init ( ) override ; <nl> + port : : StatusOr < std : : tuple < int , int , int > > GetVersion ( ) override ; <nl> <nl> port : : StatusOr < std : : unique_ptr < dnn : : RnnDescriptor > > createRnnDescriptor ( <nl> int num_layers , int hidden_size , int input_size , <nl> mmm a / tensorflow / stream_executor / dnn . h <nl> ppp b / tensorflow / stream_executor / dnn . h <nl> limitations under the License . <nl> # include < functional > <nl> # include < limits > <nl> # include < memory > <nl> + # include < tuple > <nl> <nl> # include " tensorflow / stream_executor / device_memory . h " <nl> # include " tensorflow / stream_executor / lib / array_slice . h " <nl> class DnnSupport { <nl> <nl> virtual port : : Status Init ( ) = 0 ; <nl> <nl> + / / Gets the version of the backing library , as a { major , minor , patch } tuple . <nl> + virtual port : : StatusOr < std : : tuple < int , int , int > > GetVersion ( ) { <nl> + return port : : UnimplementedError ( <nl> + " DnnSupport : : GetVersion not implemented on this platform . " ) ; <nl> + } <nl> + <nl> / / Performs a single - precision forward batch normalization operation onto <nl> / / the stream . <nl> / / <nl>
|
[ XLA : GPU ] Eliminate the guard around Winograd non - fused convolutions with cudnn7 .
|
tensorflow/tensorflow
|
1cd76c209ce6f74298843568a7fc397c2e6f958f
|
2018-04-07T18:45:04Z
|
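A small sketch of the version gate added in ShouldIncludeWinogradNonfusedAlgo above. GetVersion() here is a stand-in returning a fixed {major, minor, patch} tuple; the real code queries the cuDNN backend through the StreamExecutor's DnnSupport.

#include <tuple>
#include <iostream>

std::tuple<int, int, int> GetVersion() {
  return std::make_tuple(7, 0, 5);  // stand-in; real code asks the cuDNN backend
}

bool ShouldIncludeWinogradNonfusedAlgo(/* input/output shapes elided */) {
  std::tuple<int, int, int> version = GetVersion();
  if (std::get<0>(version) >= 7)
    return true;  // cuDNN 7+ fixed the b/68264959 overflow, so skip the check
  // ... otherwise fall through to the shape-based overflow estimate ...
  return false;
}

int main() {
  std::cout << std::boolalpha << ShouldIncludeWinogradNonfusedAlgo() << "\n";  // true
  return 0;
}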
mmm a / lib / Sema / TypeCheckConstraints . cpp <nl> ppp b / lib / Sema / TypeCheckConstraints . cpp <nl> TypeExpr * PreCheckExpression : : simplifyTypeExpr ( Expr * E ) { <nl> new ( TC . Context ) OptionalTypeRepr ( InnerTypeRepr , QuestionLoc ) ; <nl> return new ( TC . Context ) TypeExpr ( TypeLoc ( NewTypeRepr , Type ( ) ) ) ; <nl> } <nl> - <nl> + <nl> + / / Fold T ! into an IUO type when T is a TypeExpr . <nl> + if ( auto * FVE = dyn_cast < ForceValueExpr > ( E ) ) { <nl> + auto * TyExpr = dyn_cast < TypeExpr > ( FVE - > getSubExpr ( ) ) ; <nl> + if ( ! TyExpr ) return nullptr ; <nl> + <nl> + auto * InnerTypeRepr = TyExpr - > getTypeRepr ( ) ; <nl> + assert ( ! TyExpr - > isImplicit ( ) & & InnerTypeRepr & & <nl> + " This doesn ' t work on implicit TypeExpr ' s , " <nl> + " the TypeExpr should have been built correctly in the first place " ) ; <nl> + <nl> + auto * NewTypeRepr = <nl> + new ( TC . Context ) ImplicitlyUnwrappedOptionalTypeRepr ( InnerTypeRepr , <nl> + FVE - > getExclaimLoc ( ) ) ; <nl> + return new ( TC . Context ) TypeExpr ( TypeLoc ( NewTypeRepr , Type ( ) ) ) ; <nl> + } <nl> + <nl> / / Fold ( T ) into a type T with parens around it . <nl> if ( auto * PE = dyn_cast < ParenExpr > ( E ) ) { <nl> auto * TyExpr = dyn_cast < TypeExpr > ( PE - > getSubExpr ( ) ) ; <nl> mmm a / test / type / types . swift <nl> ppp b / test / type / types . swift <nl> y17 = z17 <nl> let tupleTypeWithNames = ( age : Int , count : Int ) ( 4 , 5 ) <nl> let dictWithTuple = [ String : ( age : Int , count : Int ) ] ( ) <nl> <nl> - <nl> + / / < rdar : / / problem / 21684837 > typeexpr not being formed for postfix ! <nl> + let bb2 = [ Int ! ] ( count : 2 , repeatedValue : nil ) <nl>
|
fix < rdar : / / problem / 21684837 > typeexpr not being formed for postfix !
|
apple/swift
|
07b669c6aee78f4f03854c1f4baedc3fc28c8415
|
2015-07-06T19:51:34Z
|
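The fold itself is small: when the parser produced a ForceValueExpr whose subexpression is a TypeExpr, the precheck rewrites the pair into a single TypeExpr carrying an implicitly-unwrapped-optional type repr. A toy C++ mini-AST sketch of the same shape (hypothetical types, not Swift's actual AST):

#include <iostream>
#include <memory>
#include <string>

struct Expr { virtual ~Expr() = default; };

struct TypeExpr : Expr {
  std::string typeRepr;
  explicit TypeExpr(std::string t) : typeRepr(std::move(t)) {}
};

struct ForceValueExpr : Expr {  // models a postfix '!' applied to a subexpression
  std::unique_ptr<Expr> sub;
  explicit ForceValueExpr(std::unique_ptr<Expr> s) : sub(std::move(s)) {}
};

// Returns the folded TypeExpr, or nullptr when the subexpression is not a type.
std::unique_ptr<TypeExpr> simplifyTypeExpr(Expr* e) {
  if (auto* fve = dynamic_cast<ForceValueExpr*>(e))
    if (auto* ty = dynamic_cast<TypeExpr*>(fve->sub.get()))
      return std::make_unique<TypeExpr>(ty->typeRepr + "!");  // T -> T! (IUO)
  return nullptr;
}

int main() {
  auto e = std::make_unique<ForceValueExpr>(std::make_unique<TypeExpr>("Int"));
  if (auto folded = simplifyTypeExpr(e.get()))
    std::cout << folded->typeRepr << "\n";  // prints "Int!"
  return 0;
}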
mmm a / src / code - stubs . h <nl> ppp b / src / code - stubs . h <nl> class CEntryStub : public PlatformCodeStub { <nl> bool NeedsImmovableCode ( ) ; <nl> <nl> class SaveDoublesBits : public BitField < bool , 0 , 1 > { } ; <nl> - class ResultSizeBits : public BitField < int , 3 , 1 > { } ; <nl> + class ResultSizeBits : public BitField < int , 1 , 3 > { } ; <nl> } ; <nl> <nl> <nl>
|
Use correct BitField arguments in CEntryStub .
|
v8/v8
|
a53b7ff6b3b0d68024677bd155710bb57319cf2c
|
2014-08-25T14:44:06Z
|
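The bug above is easy to miss because both argument orders compile. A self-contained sketch of a BitField template with the <type, shift, size> ordering shows why <int, 3, 1> (a one-bit field at bit 3) cannot hold the intended three-bit result size, while <int, 1, 3> can; V8's real template differs in detail, so treat this as an illustration:

#include <cassert>
#include <cstdint>

template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits & kMask) >> shift);
  }
};

int main() {
  using SaveDoublesBits = BitField<bool, 0, 1>;  // bit 0
  using ResultSizeBits  = BitField<int, 1, 3>;   // bits 1..3, as in the fix
  uint32_t key = SaveDoublesBits::encode(true) | ResultSizeBits::encode(2);
  assert(SaveDoublesBits::decode(key) == true);
  assert(ResultSizeBits::decode(key) == 2);
  // With the old BitField<int, 3, 1>, the mask is a single bit at position 3,
  // so encode(2) is truncated to 0 and the result size is silently lost.
  using OldResultSizeBits = BitField<int, 3, 1>;
  assert(OldResultSizeBits::encode(2) == 0);
  return 0;
}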
mmm a / stdlib / public / SDK / Foundation / AffineTransform . swift <nl> ppp b / stdlib / public / SDK / Foundation / AffineTransform . swift <nl> public struct AffineTransform : ReferenceConvertible , Hashable , CustomStringConv <nl> [ 0 0 1 ] <nl> * / <nl> public mutating func rotate ( byRadians angle : CGFloat ) { <nl> - let α = Double ( angle ) <nl> - <nl> - let sine = CGFloat ( sin ( α ) ) <nl> - let cosine = CGFloat ( cos ( α ) ) <nl> - <nl> - append ( AffineTransform ( m11 : cosine , m12 : sine , m21 : - sine , m22 : cosine , tX : 0 , tY : 0 ) ) <nl> + let t2 = self <nl> + let t1 = AffineTransform ( rotationByRadians : angle ) <nl> + <nl> + var t = AffineTransform . identity <nl> + <nl> + t . m11 = t1 . m11 * t2 . m11 + t1 . m12 * t2 . m21 <nl> + t . m12 = t1 . m11 * t2 . m12 + t1 . m12 * t2 . m22 <nl> + t . m21 = t1 . m21 * t2 . m11 + t1 . m22 * t2 . m21 <nl> + t . m22 = t1 . m21 * t2 . m12 + t1 . m22 * t2 . m22 <nl> + t . tX = t1 . tX * t2 . m11 + t1 . tY * t2 . m21 + t2 . tX <nl> + t . tY = t1 . tX * t2 . m12 + t1 . tY * t2 . m22 + t2 . tY <nl> + <nl> + self = t <nl> } <nl> <nl> / * * <nl> mmm a / test / stdlib / TestAffineTransform . swift <nl> ppp b / test / stdlib / TestAffineTransform . swift <nl> class TestAffineTransform : TestAffineTransformSuper { <nl> func test_unconditionallyBridgeFromObjectiveC ( ) { <nl> expectEqual ( AffineTransform ( ) , AffineTransform . _unconditionallyBridgeFromObjectiveC ( nil ) ) <nl> } <nl> + <nl> + func test_rotation_compose ( ) { <nl> + var t = AffineTransform . identity <nl> + t . translate ( x : 1 . 0 , y : 1 . 0 ) <nl> + t . rotate ( byDegrees : 90 ) <nl> + t . translate ( x : - 1 . 0 , y : - 1 . 0 ) <nl> + let result = t . transform ( NSPoint ( x : 1 . 0 , y : 2 . 0 ) ) <nl> + expectEqualWithAccuracy ( 0 . 0 , Double ( result . x ) , accuracy : accuracyThreshold ) <nl> + expectEqualWithAccuracy ( 1 . 0 , Double ( result . y ) , accuracy : accuracyThreshold ) <nl> + } <nl> } <nl> <nl> # if ! FOUNDATION_XCTEST <nl> AffineTransformTests . test ( " test_hashing_values " ) { TestAffineTransform ( ) . test_ha <nl> AffineTransformTests . test ( " test_AnyHashableContainingAffineTransform " ) { TestAffineTransform ( ) . test_AnyHashableContainingAffineTransform ( ) } <nl> AffineTransformTests . test ( " test_AnyHashableCreatedFromNSAffineTransform " ) { TestAffineTransform ( ) . test_AnyHashableCreatedFromNSAffineTransform ( ) } <nl> AffineTransformTests . test ( " test_unconditionallyBridgeFromObjectiveC " ) { TestAffineTransform ( ) . test_unconditionallyBridgeFromObjectiveC ( ) } <nl> + AffineTransformTests . test ( " test_rotation_compose " ) { TestAffineTransform ( ) . test_rotation_compose ( ) } <nl> runAllTests ( ) <nl> # endif <nl> <nl>
|
Merge pull request from phausler / affine_transform_minimal_whitespace
|
apple/swift
|
cfb2a87dc5f35d0b6984619c8cfb5f009f7ec423
|
2017-08-04T21:48:43Z
|
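The six products in the new rotate(byRadians:) are an explicit row-vector 3x3 multiply, T = T1 * T2 with rows [m11 m12 0], [m21 m22 0], [tX tY 1], and each new operation prepended so it applies to points first. A standalone C++ sketch (hypothetical struct, not Apple's source) reproduces the added test: translate(1,1), rotate 90 degrees, translate(-1,-1) maps (1,2) to (0,1).

#include <cassert>
#include <cmath>
#include <cstdio>

struct Affine {
  double m11 = 1, m12 = 0, m21 = 0, m22 = 1, tX = 0, tY = 0;

  // t = t1 * t2: the same six products used in rotate(byRadians:) above.
  static Affine compose(const Affine& t1, const Affine& t2) {
    Affine t;
    t.m11 = t1.m11 * t2.m11 + t1.m12 * t2.m21;
    t.m12 = t1.m11 * t2.m12 + t1.m12 * t2.m22;
    t.m21 = t1.m21 * t2.m11 + t1.m22 * t2.m21;
    t.m22 = t1.m21 * t2.m12 + t1.m22 * t2.m22;
    t.tX  = t1.tX  * t2.m11 + t1.tY  * t2.m21 + t2.tX;
    t.tY  = t1.tX  * t2.m12 + t1.tY  * t2.m22 + t2.tY;
    return t;
  }

  void rotateRadians(double a) {
    Affine r;
    r.m11 = std::cos(a);  r.m12 = std::sin(a);
    r.m21 = -std::sin(a); r.m22 = std::cos(a);
    *this = compose(r, *this);  // prepend: rotation applies to points first
  }

  void translate(double x, double y) {
    Affine tr;
    tr.tX = x; tr.tY = y;
    *this = compose(tr, *this);
  }

  void transform(double x, double y, double* ox, double* oy) const {
    *ox = x * m11 + y * m21 + tX;
    *oy = x * m12 + y * m22 + tY;
  }
};

int main() {
  const double kPi = 3.14159265358979323846;
  Affine t;
  t.translate(1.0, 1.0);
  t.rotateRadians(kPi / 2.0);  // 90 degrees
  t.translate(-1.0, -1.0);
  double x, y;
  t.transform(1.0, 2.0, &x, &y);
  assert(std::fabs(x - 0.0) < 1e-9 && std::fabs(y - 1.0) < 1e-9);
  std::printf("(%f, %f)\n", x, y);  // (0, 1), as in test_rotation_compose
  return 0;
}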
mmm a / . travis - deps . sh <nl> ppp b / . travis - deps . sh <nl> if [ " $ TRAVIS_OS_NAME " = " linux " - o - z " $ TRAVIS_OS_NAME " ] ; then <nl> | tar - xz - C $ HOME / . local - - strip - components = 1 <nl> <nl> ( <nl> - git clone https : / / github . com / glfw / glfw . git - - branch 3 . 1 . 1 - - depth 1 <nl> - mkdir glfw / build & & cd glfw / build <nl> - cmake - DBUILD_SHARED_LIBS = ON \ <nl> - - DGLFW_BUILD_EXAMPLES = OFF \ <nl> - - DGLFW_BUILD_TESTS = OFF \ <nl> - - DCMAKE_INSTALL_PREFIX = $ HOME / . local \ <nl> - . . <nl> + wget http : / / libsdl . org / release / SDL2 - 2 . 0 . 4 . tar . gz - O - | tar xz <nl> + cd SDL2 - 2 . 0 . 4 <nl> + . / configure - - prefix = $ HOME / . local <nl> make - j4 & & make install <nl> ) <nl> - <nl> elif [ " $ TRAVIS_OS_NAME " = " osx " ] ; then <nl> brew update > / dev / null # silence the very verbose output <nl> - brew install qt5 glfw3 <nl> + brew install qt5 sdl2 <nl> gem install xcpretty <nl> fi <nl> mmm a / . travis . yml <nl> ppp b / . travis . yml <nl> addons : <nl> - gcc - 4 . 9 <nl> - g + + - 4 . 9 <nl> - xorg - dev <nl> - - libglu1 - mesa - dev <nl> - - libxcursor - dev <nl> - lib32stdc + + 6 # For CMake <nl> - lftp # To upload builds <nl> <nl> mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> endfunction ( ) <nl> <nl> project ( citra ) <nl> <nl> - option ( ENABLE_GLFW " Enable the GLFW frontend " ON ) <nl> - option ( CITRA_USE_BUNDLED_GLFW " Download bundled GLFW binaries " OFF ) <nl> + option ( ENABLE_SDL2 " Enable the SDL2 frontend " ON ) <nl> + option ( CITRA_USE_BUNDLED_SDL2 " Download bundled SDL2 binaries " OFF ) <nl> <nl> option ( ENABLE_QT " Enable the Qt frontend " ON ) <nl> option ( CITRA_USE_BUNDLED_QT " Download bundled Qt binaries " OFF ) <nl> list ( APPEND CMAKE_MODULE_PATH " $ { CMAKE_SOURCE_DIR } / externals / cmake - modules " ) <nl> find_package ( OpenGL REQUIRED ) <nl> include_directories ( $ { OPENGL_INCLUDE_DIR } ) <nl> <nl> - if ( ENABLE_GLFW ) <nl> - if ( CITRA_USE_BUNDLED_GLFW ) <nl> + if ( ENABLE_SDL2 ) <nl> + if ( CITRA_USE_BUNDLED_SDL2 ) <nl> # Detect toolchain and platform <nl> if ( MSVC14 AND ARCHITECTURE_x86_64 ) <nl> - set ( GLFW_VER " glfw - 3 . 1 . 1 - msvc2015_64 " ) <nl> - elseif ( MSVC12 AND ARCHITECTURE_x86_64 ) <nl> - set ( GLFW_VER " glfw - 3 . 1 . 1 - msvc2013_64 " ) <nl> + set ( SDL2_VER " SDL2 - 2 . 0 . 4 " ) <nl> else ( ) <nl> - message ( FATAL_ERROR " No bundled GLFW binaries for your toolchain . Disable CITRA_USE_BUNDLED_GLFW and provide your own . " ) <nl> + message ( FATAL_ERROR " No bundled SDL2 binaries for your toolchain . Disable CITRA_USE_BUNDLED_SDL2 and provide your own . " ) <nl> endif ( ) <nl> <nl> - if ( DEFINED GLFW_VER ) <nl> - download_bundled_external ( " glfw / " $ { GLFW_VER } GLFW_PREFIX ) <nl> + if ( DEFINED SDL2_VER ) <nl> + download_bundled_external ( " sdl2 / " $ { SDL2_VER } SDL2_PREFIX ) <nl> endif ( ) <nl> <nl> - set ( GLFW_INCLUDE_DIRS " $ { GLFW_PREFIX } / include " CACHE PATH " Path to GLFW3 headers " ) <nl> - set ( GLFW_LIBRARY_DIRS " $ { GLFW_PREFIX } / lib " CACHE PATH " Path to GLFW3 libraries " ) <nl> - set ( GLFW_LIBRARIES glfw3 ) <nl> + set ( SDL2_INCLUDE_DIR " $ { SDL2_PREFIX } / include " CACHE PATH " Path to SDL2 headers " ) <nl> + set ( SDL2_LIBRARY " $ { SDL2_PREFIX } / lib / x64 / SDL2 . lib " CACHE PATH " Path to SDL2 library " ) <nl> + set ( SDL2_DLL_DIR " $ { SDL2_PREFIX } / lib / x64 / " CACHE PATH " Path to SDL2 . 
dll " ) <nl> else ( ) <nl> - find_package ( PkgConfig REQUIRED ) <nl> - pkg_search_module ( GLFW REQUIRED glfw3 ) <nl> + find_package ( SDL2 REQUIRED ) <nl> endif ( ) <nl> endif ( ) <nl> <nl> IF ( APPLE ) <nl> FIND_LIBRARY ( COCOA_LIBRARY Cocoa ) # Umbrella framework for everything GUI - related <nl> - FIND_LIBRARY ( IOKIT_LIBRARY IOKit ) # GLFW dependency <nl> - FIND_LIBRARY ( COREVIDEO_LIBRARY CoreVideo ) # GLFW dependency <nl> set ( PLATFORM_LIBRARIES iconv $ { COCOA_LIBRARY } $ { IOKIT_LIBRARY } $ { COREVIDEO_LIBRARY } ) <nl> <nl> set ( CMAKE_CXX_FLAGS " $ { CMAKE_CXX_FLAGS } - stdlib = libc + + " ) <nl> mmm a / appveyor . yml <nl> ppp b / appveyor . yml <nl> install : <nl> before_build : <nl> - mkdir build <nl> - cd build <nl> - - cmake - G " Visual Studio 14 2015 Win64 " - DCITRA_USE_BUNDLED_GLFW = 1 - DCITRA_USE_BUNDLED_QT = 1 . . <nl> + - cmake - G " Visual Studio 14 2015 Win64 " - DCITRA_USE_BUNDLED_QT = 1 - DCITRA_USE_BUNDLED_SDL2 = 1 . . <nl> - cd . . <nl> <nl> after_build : <nl> new file mode 100644 <nl> index 00000000000 . . 0af86840a46 <nl> mmm / dev / null <nl> ppp b / externals / cmake - modules / FindSDL2 . cmake <nl> <nl> + <nl> + # This module defines <nl> + # SDL2_LIBRARY , the name of the library to link against <nl> + # SDL2_FOUND , if false , do not try to link to SDL2 <nl> + # SDL2_INCLUDE_DIR , where to find SDL . h <nl> + # <nl> + # This module responds to the the flag : <nl> + # SDL2_BUILDING_LIBRARY <nl> + # If this is defined , then no SDL2main will be linked in because <nl> + # only applications need main ( ) . <nl> + # Otherwise , it is assumed you are building an application and this <nl> + # module will attempt to locate and set the the proper link flags <nl> + # as part of the returned SDL2_LIBRARY variable . <nl> + # <nl> + # Don ' t forget to include SDLmain . h and SDLmain . m your project for the <nl> + # OS X framework based version . ( Other versions link to - lSDL2main which <nl> + # this module will try to find on your behalf . ) Also for OS X , this <nl> + # module will automatically add the - framework Cocoa on your behalf . <nl> + # <nl> + # <nl> + # Additional Note : If you see an empty SDL2_LIBRARY_TEMP in your configuration <nl> + # and no SDL2_LIBRARY , it means CMake did not find your SDL2 library <nl> + # ( SDL2 . dll , libsdl2 . so , SDL2 . framework , etc ) . <nl> + # Set SDL2_LIBRARY_TEMP to point to your SDL2 library , and configure again . <nl> + # Similarly , if you see an empty SDL2MAIN_LIBRARY , you should set this value <nl> + # as appropriate . These values are used to generate the final SDL2_LIBRARY <nl> + # variable , but when these values are unset , SDL2_LIBRARY does not get created . <nl> + # <nl> + # <nl> + # $ SDL2DIR is an environment variable that would <nl> + # correspond to the . / configure - - prefix = $ SDL2DIR <nl> + # used in building SDL2 . <nl> + # l . e . galup 9 - 20 - 02 <nl> + # <nl> + # Modified by Eric Wing . <nl> + # Added code to assist with automated building by using environmental variables <nl> + # and providing a more controlled / consistent search behavior . <nl> + # Added new modifications to recognize OS X frameworks and <nl> + # additional Unix paths ( FreeBSD , etc ) . <nl> + # Also corrected the header search path to follow " proper " SDL guidelines . <nl> + # Added a search for SDL2main which is needed by some platforms . <nl> + # Added a search for threads which is needed by some platforms . <nl> + # Added needed compile switches for MinGW . 
<nl> + # <nl> + # On OSX , this will prefer the Framework version ( if found ) over others . <nl> + # People will have to manually change the cache values of <nl> + # SDL2_LIBRARY to override this selection or set the CMake environment <nl> + # CMAKE_INCLUDE_PATH to modify the search paths . <nl> + # <nl> + # Note that the header path has changed from SDL2 / SDL . h to just SDL . h <nl> + # This needed to change because " proper " SDL convention <nl> + # is # include " SDL . h " , not < SDL2 / SDL . h > . This is done for portability <nl> + # reasons because not all systems place things in SDL2 / ( see FreeBSD ) . <nl> + <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + # Copyright 2003 - 2009 Kitware , Inc . <nl> + # <nl> + # Distributed under the OSI - approved BSD License ( the " License " ) . <nl> + # <nl> + # This software is distributed WITHOUT ANY WARRANTY ; without even the <nl> + # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . <nl> + # See the License for more information . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + # CMake - Cross Platform Makefile Generator <nl> + # Copyright 2000 - 2016 Kitware , Inc . <nl> + # Copyright 2000 - 2011 Insight Software Consortium <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions <nl> + # are met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # <nl> + # * Redistributions in binary form must reproduce the above copyright <nl> + # notice , this list of conditions and the following disclaimer in the <nl> + # documentation and / or other materials provided with the distribution . <nl> + # <nl> + # * Neither the names of Kitware , Inc . , the Insight Software Consortium , <nl> + # nor the names of their contributors may be used to endorse or promote <nl> + # products derived from this software without specific prior written <nl> + # permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + # <nl> + # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + # <nl> + # The above copyright and license notice applies to distributions of <nl> + # CMake in source and binary form . 
Some source files contain additional <nl> + # notices of original copyright by their contributors ; see each source <nl> + # for details . Third - party software packages supplied with CMake under <nl> + # compatible licenses provide their own copyright notices documented in <nl> + # corresponding subdirectories . <nl> + # <nl> + # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + # <nl> + # CMake was initially developed by Kitware with the following sponsorship : <nl> + # <nl> + # * National Library of Medicine at the National Institutes of Health <nl> + # as part of the Insight Segmentation and Registration Toolkit ( ITK ) . <nl> + # <nl> + # * US National Labs ( Los Alamos , Livermore , Sandia ) ASC Parallel <nl> + # Visualization Initiative . <nl> + # <nl> + # * National Alliance for Medical Image Computing ( NAMIC ) is funded by the <nl> + # National Institutes of Health through the NIH Roadmap for Medical Research , <nl> + # Grant U54 EB005149 . <nl> + # <nl> + # * Kitware , Inc . <nl> + # <nl> + <nl> + message ( " < FindSDL2 . cmake > " ) <nl> + <nl> + SET ( SDL2_SEARCH_PATHS <nl> + ~ / Library / Frameworks <nl> + / Library / Frameworks <nl> + / usr / local <nl> + / usr <nl> + / sw # Fink <nl> + / opt / local # DarwinPorts <nl> + / opt / csw # Blastwave <nl> + / opt <nl> + $ { SDL2_PATH } <nl> + ) <nl> + <nl> + FIND_LIBRARY ( SDL2_LIBRARY_TEMP <nl> + NAMES SDL2 <nl> + HINTS <nl> + $ ENV { SDL2DIR } <nl> + PATH_SUFFIXES lib64 lib <nl> + PATHS $ { SDL2_SEARCH_PATHS } <nl> + ) <nl> + <nl> + IF ( SDL2_LIBRARY_TEMP ) <nl> + FIND_PATH ( SDL2_INCLUDE_DIR SDL . h <nl> + HINTS <nl> + $ ENV { SDL2DIR } <nl> + PATH_SUFFIXES include / SDL2 include <nl> + PATHS $ { SDL2_SEARCH_PATHS } <nl> + ) <nl> + <nl> + IF ( NOT SDL2_BUILDING_LIBRARY ) <nl> + IF ( NOT $ { SDL2_INCLUDE_DIR } MATCHES " . framework " ) <nl> + # Non - OS X framework versions expect you to also dynamically link to <nl> + # SDL2main . This is mainly for Windows and OS X . Other ( Unix ) platforms <nl> + # seem to provide SDL2main for compatibility even though they don ' t <nl> + # necessarily need it . <nl> + FIND_LIBRARY ( SDL2MAIN_LIBRARY <nl> + NAMES SDL2main <nl> + HINTS <nl> + $ ENV { SDL2DIR } <nl> + PATH_SUFFIXES lib64 lib <nl> + PATHS $ { SDL2_SEARCH_PATHS } <nl> + ) <nl> + ENDIF ( NOT $ { SDL2_INCLUDE_DIR } MATCHES " . framework " ) <nl> + ENDIF ( NOT SDL2_BUILDING_LIBRARY ) <nl> + <nl> + # SDL2 may require threads on your system . <nl> + # The Apple build may not need an explicit flag because one of the <nl> + # frameworks may already provide it . <nl> + # But for non - OSX systems , I will use the CMake Threads package . <nl> + IF ( NOT APPLE ) <nl> + FIND_PACKAGE ( Threads ) <nl> + ENDIF ( NOT APPLE ) <nl> + <nl> + # MinGW needs an additional library , mwindows <nl> + # It ' s total link flags should look like - lmingw32 - lSDL2main - lSDL2 - lmwindows <nl> + # ( Actually on second look , I think it only needs one of the m * libraries . ) <nl> + IF ( MINGW ) <nl> + SET ( MINGW32_LIBRARY mingw32 CACHE STRING " mwindows for MinGW " ) <nl> + ENDIF ( MINGW ) <nl> + <nl> + # For SDL2main <nl> + IF ( NOT SDL2_BUILDING_LIBRARY ) <nl> + IF ( SDL2MAIN_LIBRARY ) <nl> + SET ( SDL2_LIBRARY_TEMP $ { SDL2MAIN_LIBRARY } $ { SDL2_LIBRARY_TEMP } ) <nl> + ENDIF ( SDL2MAIN_LIBRARY ) <nl> + ENDIF ( NOT SDL2_BUILDING_LIBRARY ) <nl> + <nl> + # For OS X , SDL2 uses Cocoa as a backend so it must link to Cocoa . 
<nl> + # CMake doesn ' t display the - framework Cocoa string in the UI even <nl> + # though it actually is there if I modify a pre - used variable . <nl> + # I think it has something to do with the CACHE STRING . <nl> + # So I use a temporary variable until the end so I can set the <nl> + # " real " variable in one - shot . <nl> + IF ( APPLE ) <nl> + SET ( SDL2_LIBRARY_TEMP $ { SDL2_LIBRARY_TEMP } " - framework Cocoa " ) <nl> + ENDIF ( APPLE ) <nl> + <nl> + # For threads , as mentioned Apple doesn ' t need this . <nl> + # In fact , there seems to be a problem if I used the Threads package <nl> + # and try using this line , so I ' m just skipping it entirely for OS X . <nl> + IF ( NOT APPLE ) <nl> + SET ( SDL2_LIBRARY_TEMP $ { SDL2_LIBRARY_TEMP } $ { CMAKE_THREAD_LIBS_INIT } ) <nl> + ENDIF ( NOT APPLE ) <nl> + <nl> + # For MinGW library <nl> + IF ( MINGW ) <nl> + SET ( SDL2_LIBRARY_TEMP $ { MINGW32_LIBRARY } $ { SDL2_LIBRARY_TEMP } ) <nl> + ENDIF ( MINGW ) <nl> + <nl> + # Set the final string here so the GUI reflects the final state . <nl> + SET ( SDL2_LIBRARY $ { SDL2_LIBRARY_TEMP } CACHE STRING " Where the SDL2 Library can be found " ) <nl> + <nl> + # Unset the temp variable to INTERNAL so it is not seen in the CMake GUI <nl> + UNSET ( SDL2_LIBRARY_TEMP ) <nl> + ENDIF ( SDL2_LIBRARY_TEMP ) <nl> + <nl> + message ( " < / FindSDL2 . cmake > " ) <nl> + <nl> + INCLUDE ( FindPackageHandleStandardArgs ) <nl> + <nl> + FIND_PACKAGE_HANDLE_STANDARD_ARGS ( SDL2 REQUIRED_VARS SDL2_LIBRARY SDL2_INCLUDE_DIR ) <nl> new file mode 100644 <nl> index 00000000000 . . cd0c2ce4740 <nl> mmm / dev / null <nl> ppp b / externals / cmake - modules / WindowsCopyFiles . cmake <nl> <nl> + # Copyright 2016 Citra Emulator Project <nl> + # Licensed under GPLv2 or any later version <nl> + # Refer to the license . txt file included . <nl> + <nl> + # This file provides the function windows_copy_files . <nl> + # This is only valid on Windows . <nl> + <nl> + # Include guard <nl> + if ( __windows_copy_files ) <nl> + return ( ) <nl> + endif ( ) <nl> + set ( __windows_copy_files YES ) <nl> + <nl> + # Any number of files to copy from SOURCE_DIR to DEST_DIR can be specified after DEST_DIR . <nl> + # This copying happens post - build . <nl> + function ( windows_copy_files TARGET SOURCE_DIR DEST_DIR ) <nl> + # windows commandline expects the / to be \ so switch them <nl> + string ( REPLACE " / " " \ \ \ \ " SOURCE_DIR $ { SOURCE_DIR } ) <nl> + string ( REPLACE " / " " \ \ \ \ " DEST_DIR $ { DEST_DIR } ) <nl> + <nl> + # / NJH / NJS / NDL / NFL / NC / NS / NP - Silence any output <nl> + # cmake adds an extra check for command success which doesn ' t work too well with robocopy <nl> + # so trick it into thinking the command was successful with the | | cmd / c " exit / b 0 " <nl> + add_custom_command ( TARGET $ { TARGET } POST_BUILD <nl> + COMMAND if not exist $ { DEST_DIR } mkdir $ { DEST_DIR } 2 > nul <nl> + COMMAND robocopy $ { SOURCE_DIR } $ { DEST_DIR } $ { ARGN } / NJH / NJS / NDL / NFL / NC / NS / NP | | cmd / c " exit / b 0 " <nl> + ) <nl> + endfunction ( ) <nl> \ No newline at end of file <nl> mmm a / src / CMakeLists . txt <nl> ppp b / src / CMakeLists . txt <nl> add_subdirectory ( common ) <nl> add_subdirectory ( core ) <nl> add_subdirectory ( video_core ) <nl> add_subdirectory ( audio_core ) <nl> - if ( ENABLE_GLFW ) <nl> + if ( ENABLE_SDL2 ) <nl> add_subdirectory ( citra ) <nl> endif ( ) <nl> if ( ENABLE_QT ) <nl> mmm a / src / citra / CMakeLists . txt <nl> ppp b / src / citra / CMakeLists . 
txt <nl> <nl> set ( SRCS <nl> - emu_window / emu_window_glfw . cpp <nl> + emu_window / emu_window_sdl2 . cpp <nl> citra . cpp <nl> config . cpp <nl> citra . rc <nl> ) <nl> set ( HEADERS <nl> - emu_window / emu_window_glfw . h <nl> + emu_window / emu_window_sdl2 . h <nl> config . h <nl> default_ini . h <nl> resource . h <nl> set ( HEADERS <nl> <nl> create_directory_groups ( $ { SRCS } $ { HEADERS } ) <nl> <nl> - include_directories ( $ { GLFW_INCLUDE_DIRS } ) <nl> - link_directories ( $ { GLFW_LIBRARY_DIRS } ) <nl> + include_directories ( $ { SDL2_INCLUDE_DIR } ) <nl> <nl> add_executable ( citra $ { SRCS } $ { HEADERS } ) <nl> target_link_libraries ( citra core video_core audio_core common ) <nl> - target_link_libraries ( citra $ { GLFW_LIBRARIES } $ { OPENGL_gl_LIBRARY } inih glad ) <nl> + target_link_libraries ( citra $ { SDL2_LIBRARY } $ { OPENGL_gl_LIBRARY } inih glad ) <nl> if ( MSVC ) <nl> target_link_libraries ( citra getopt ) <nl> endif ( ) <nl> target_link_libraries ( citra $ { PLATFORM_LIBRARIES } ) <nl> if ( $ { CMAKE_SYSTEM_NAME } MATCHES " Linux | FreeBSD | OpenBSD | NetBSD " ) <nl> install ( TARGETS citra RUNTIME DESTINATION " $ { CMAKE_INSTALL_PREFIX } / bin " ) <nl> endif ( ) <nl> + <nl> + if ( MSVC ) <nl> + include ( WindowsCopyFiles ) <nl> + <nl> + set ( DLL_DEST " $ { CMAKE_BINARY_DIR } / bin / $ < CONFIG > / " ) <nl> + <nl> + windows_copy_files ( citra $ { SDL2_DLL_DIR } $ { DLL_DEST } SDL2 . dll ) <nl> + <nl> + unset ( DLL_DEST ) <nl> + endif ( ) <nl> mmm a / src / citra / citra . cpp <nl> ppp b / src / citra / citra . cpp <nl> <nl> # include " core / loader / loader . h " <nl> <nl> # include " citra / config . h " <nl> - # include " citra / emu_window / emu_window_glfw . h " <nl> + # include " citra / emu_window / emu_window_sdl2 . h " <nl> <nl> # include " video_core / video_core . h " <nl> <nl> int main ( int argc , char * * argv ) { <nl> GDBStub : : ToggleServer ( Settings : : values . use_gdbstub ) ; <nl> GDBStub : : SetServerPort ( static_cast < u32 > ( Settings : : values . gdbstub_port ) ) ; <nl> <nl> - EmuWindow_GLFW * emu_window = new EmuWindow_GLFW ; <nl> + EmuWindow_SDL2 * emu_window = new EmuWindow_SDL2 ; <nl> <nl> VideoCore : : g_hw_renderer_enabled = Settings : : values . use_hw_renderer ; <nl> VideoCore : : g_shader_jit_enabled = Settings : : values . use_shader_jit ; <nl> mmm a / src / citra / config . cpp <nl> ppp b / src / citra / config . cpp <nl> <nl> / / Licensed under GPLv2 or any later version <nl> / / Refer to the license . txt file included . <nl> <nl> - # define GLFW_INCLUDE_NONE <nl> - # include < GLFW / glfw3 . h > <nl> # include < inih / cpp / INIReader . h > <nl> <nl> + # include < SDL . h > <nl> + <nl> # include " citra / default_ini . h " <nl> <nl> # include " common / file_util . h " <nl> <nl> <nl> Config : : Config ( ) { <nl> / / TODO : Don ' t hardcode the path ; let the frontend decide where to put the config files . <nl> - glfw_config_loc = FileUtil : : GetUserPath ( D_CONFIG_IDX ) + " glfw - config . ini " ; <nl> - glfw_config = new INIReader ( glfw_config_loc ) ; <nl> + sdl2_config_loc = FileUtil : : GetUserPath ( D_CONFIG_IDX ) + " sdl2 - config . 
ini " ; <nl> + sdl2_config = new INIReader ( sdl2_config_loc ) ; <nl> <nl> Reload ( ) ; <nl> } <nl> bool Config : : LoadINI ( INIReader * config , const char * location , const std : : string & <nl> } <nl> <nl> static const std : : array < int , Settings : : NativeInput : : NUM_INPUTS > defaults = { <nl> - GLFW_KEY_A , GLFW_KEY_S , GLFW_KEY_Z , GLFW_KEY_X , <nl> - GLFW_KEY_Q , GLFW_KEY_W , GLFW_KEY_1 , GLFW_KEY_2 , <nl> - GLFW_KEY_M , GLFW_KEY_N , GLFW_KEY_B , <nl> - GLFW_KEY_T , GLFW_KEY_G , GLFW_KEY_F , GLFW_KEY_H , <nl> - GLFW_KEY_UP , GLFW_KEY_DOWN , GLFW_KEY_LEFT , GLFW_KEY_RIGHT , <nl> - GLFW_KEY_I , GLFW_KEY_K , GLFW_KEY_J , GLFW_KEY_L <nl> + SDL_SCANCODE_A , SDL_SCANCODE_S , SDL_SCANCODE_Z , SDL_SCANCODE_X , <nl> + SDL_SCANCODE_Q , SDL_SCANCODE_W , SDL_SCANCODE_1 , SDL_SCANCODE_2 , <nl> + SDL_SCANCODE_M , SDL_SCANCODE_N , SDL_SCANCODE_B , <nl> + SDL_SCANCODE_T , SDL_SCANCODE_G , SDL_SCANCODE_F , SDL_SCANCODE_H , <nl> + SDL_SCANCODE_UP , SDL_SCANCODE_DOWN , SDL_SCANCODE_LEFT , SDL_SCANCODE_RIGHT , <nl> + SDL_SCANCODE_I , SDL_SCANCODE_K , SDL_SCANCODE_J , SDL_SCANCODE_L <nl> } ; <nl> <nl> void Config : : ReadValues ( ) { <nl> / / Controls <nl> for ( int i = 0 ; i < Settings : : NativeInput : : NUM_INPUTS ; + + i ) { <nl> Settings : : values . input_mappings [ Settings : : NativeInput : : All [ i ] ] = <nl> - glfw_config - > GetInteger ( " Controls " , Settings : : NativeInput : : Mapping [ i ] , defaults [ i ] ) ; <nl> + sdl2_config - > GetInteger ( " Controls " , Settings : : NativeInput : : Mapping [ i ] , defaults [ i ] ) ; <nl> } <nl> <nl> / / Core <nl> - Settings : : values . frame_skip = glfw_config - > GetInteger ( " Core " , " frame_skip " , 0 ) ; <nl> + Settings : : values . frame_skip = sdl2_config - > GetInteger ( " Core " , " frame_skip " , 0 ) ; <nl> <nl> / / Renderer <nl> - Settings : : values . use_hw_renderer = glfw_config - > GetBoolean ( " Renderer " , " use_hw_renderer " , false ) ; <nl> - Settings : : values . use_shader_jit = glfw_config - > GetBoolean ( " Renderer " , " use_shader_jit " , true ) ; <nl> + Settings : : values . use_hw_renderer = sdl2_config - > GetBoolean ( " Renderer " , " use_hw_renderer " , false ) ; <nl> + Settings : : values . use_shader_jit = sdl2_config - > GetBoolean ( " Renderer " , " use_shader_jit " , true ) ; <nl> <nl> - Settings : : values . bg_red = ( float ) glfw_config - > GetReal ( " Renderer " , " bg_red " , 1 . 0 ) ; <nl> - Settings : : values . bg_green = ( float ) glfw_config - > GetReal ( " Renderer " , " bg_green " , 1 . 0 ) ; <nl> - Settings : : values . bg_blue = ( float ) glfw_config - > GetReal ( " Renderer " , " bg_blue " , 1 . 0 ) ; <nl> + Settings : : values . bg_red = ( float ) sdl2_config - > GetReal ( " Renderer " , " bg_red " , 1 . 0 ) ; <nl> + Settings : : values . bg_green = ( float ) sdl2_config - > GetReal ( " Renderer " , " bg_green " , 1 . 0 ) ; <nl> + Settings : : values . bg_blue = ( float ) sdl2_config - > GetReal ( " Renderer " , " bg_blue " , 1 . 0 ) ; <nl> <nl> / / Data Storage <nl> - Settings : : values . use_virtual_sd = glfw_config - > GetBoolean ( " Data Storage " , " use_virtual_sd " , true ) ; <nl> + Settings : : values . use_virtual_sd = sdl2_config - > GetBoolean ( " Data Storage " , " use_virtual_sd " , true ) ; <nl> <nl> / / System Region <nl> - Settings : : values . region_value = glfw_config - > GetInteger ( " System Region " , " region_value " , 1 ) ; <nl> + Settings : : values . 
region_value = sdl2_config - > GetInteger ( " System Region " , " region_value " , 1 ) ; <nl> <nl> / / Miscellaneous <nl> - Settings : : values . log_filter = glfw_config - > Get ( " Miscellaneous " , " log_filter " , " * : Info " ) ; <nl> + Settings : : values . log_filter = sdl2_config - > Get ( " Miscellaneous " , " log_filter " , " * : Info " ) ; <nl> <nl> / / Debugging <nl> - Settings : : values . use_gdbstub = glfw_config - > GetBoolean ( " Debugging " , " use_gdbstub " , false ) ; <nl> - Settings : : values . gdbstub_port = glfw_config - > GetInteger ( " Debugging " , " gdbstub_port " , 24689 ) ; <nl> + Settings : : values . use_gdbstub = sdl2_config - > GetBoolean ( " Debugging " , " use_gdbstub " , false ) ; <nl> + Settings : : values . gdbstub_port = sdl2_config - > GetInteger ( " Debugging " , " gdbstub_port " , 24689 ) ; <nl> } <nl> <nl> void Config : : Reload ( ) { <nl> - LoadINI ( glfw_config , glfw_config_loc . c_str ( ) , DefaultINI : : glfw_config_file ) ; <nl> + LoadINI ( sdl2_config , sdl2_config_loc . c_str ( ) , DefaultINI : : sdl2_config_file ) ; <nl> ReadValues ( ) ; <nl> } <nl> <nl> Config : : ~ Config ( ) { <nl> - delete glfw_config ; <nl> + delete sdl2_config ; <nl> } <nl> mmm a / src / citra / config . h <nl> ppp b / src / citra / config . h <nl> <nl> class INIReader ; <nl> <nl> class Config { <nl> - INIReader * glfw_config ; <nl> - std : : string glfw_config_loc ; <nl> + INIReader * sdl2_config ; <nl> + std : : string sdl2_config_loc ; <nl> <nl> bool LoadINI ( INIReader * config , const char * location , const std : : string & default_contents = " " , bool retry = true ) ; <nl> void ReadValues ( ) ; <nl> mmm a / src / citra / default_ini . h <nl> ppp b / src / citra / default_ini . h <nl> <nl> <nl> namespace DefaultINI { <nl> <nl> - const char * glfw_config_file = R " ( <nl> + const char * sdl2_config_file = R " ( <nl> [ Controls ] <nl> pad_start = <nl> pad_select = <nl> deleted file mode 100644 <nl> index 9453b1f486d . . 00000000000 <nl> mmm a / src / citra / emu_window / emu_window_glfw . cpp <nl> ppp / dev / null <nl> <nl> - / / Copyright 2014 Citra Emulator Project <nl> - / / Licensed under GPLv2 or any later version <nl> - / / Refer to the license . txt file included . <nl> - <nl> - # include < algorithm > <nl> - # include < cstdlib > <nl> - # include < string > <nl> - <nl> - / / Let ’ s use our own GL header , instead of one from GLFW . <nl> - # include < glad / glad . h > <nl> - # define GLFW_INCLUDE_NONE <nl> - # include < GLFW / glfw3 . h > <nl> - <nl> - # include " common / assert . h " <nl> - # include " common / key_map . h " <nl> - # include " common / logging / log . h " <nl> - # include " common / scm_rev . h " <nl> - # include " common / string_util . h " <nl> - <nl> - # include " video_core / video_core . h " <nl> - <nl> - # include " core / settings . h " <nl> - # include " core / hle / service / hid / hid . h " <nl> - <nl> - # include " citra / emu_window / emu_window_glfw . 
h " <nl> - <nl> - EmuWindow_GLFW * EmuWindow_GLFW : : GetEmuWindow ( GLFWwindow * win ) { <nl> - return static_cast < EmuWindow_GLFW * > ( glfwGetWindowUserPointer ( win ) ) ; <nl> - } <nl> - <nl> - void EmuWindow_GLFW : : OnMouseButtonEvent ( GLFWwindow * win , int button , int action , int mods ) { <nl> - if ( button = = GLFW_MOUSE_BUTTON_LEFT ) { <nl> - auto emu_window = GetEmuWindow ( win ) ; <nl> - auto layout = emu_window - > GetFramebufferLayout ( ) ; <nl> - double x , y ; <nl> - glfwGetCursorPos ( win , & x , & y ) ; <nl> - <nl> - if ( action = = GLFW_PRESS ) <nl> - emu_window - > TouchPressed ( static_cast < unsigned > ( x ) , static_cast < unsigned > ( y ) ) ; <nl> - else if ( action = = GLFW_RELEASE ) <nl> - emu_window - > TouchReleased ( ) ; <nl> - } <nl> - } <nl> - <nl> - void EmuWindow_GLFW : : OnCursorPosEvent ( GLFWwindow * win , double x , double y ) { <nl> - GetEmuWindow ( win ) - > TouchMoved ( static_cast < unsigned > ( std : : max ( x , 0 . 0 ) ) , static_cast < unsigned > ( std : : max ( y , 0 . 0 ) ) ) ; <nl> - } <nl> - <nl> - / / / Called by GLFW when a key event occurs <nl> - void EmuWindow_GLFW : : OnKeyEvent ( GLFWwindow * win , int key , int scancode , int action , int mods ) { <nl> - auto emu_window = GetEmuWindow ( win ) ; <nl> - int keyboard_id = emu_window - > keyboard_id ; <nl> - <nl> - if ( action = = GLFW_PRESS ) { <nl> - emu_window - > KeyPressed ( { key , keyboard_id } ) ; <nl> - } else if ( action = = GLFW_RELEASE ) { <nl> - emu_window - > KeyReleased ( { key , keyboard_id } ) ; <nl> - } <nl> - } <nl> - <nl> - / / / Whether the window is still open , and a close request hasn ' t yet been sent <nl> - const bool EmuWindow_GLFW : : IsOpen ( ) { <nl> - return glfwWindowShouldClose ( m_render_window ) = = 0 ; <nl> - } <nl> - <nl> - void EmuWindow_GLFW : : OnFramebufferResizeEvent ( GLFWwindow * win , int width , int height ) { <nl> - GetEmuWindow ( win ) - > NotifyFramebufferLayoutChanged ( EmuWindow : : FramebufferLayout : : DefaultScreenLayout ( width , height ) ) ; <nl> - } <nl> - <nl> - void EmuWindow_GLFW : : OnClientAreaResizeEvent ( GLFWwindow * win , int width , int height ) { <nl> - / / NOTE : GLFW provides no proper way to set a minimal window size . <nl> - / / Hence , we just ignore the corresponding EmuWindow hint . <nl> - OnFramebufferResizeEvent ( win , width , height ) ; <nl> - } <nl> - <nl> - / / / EmuWindow_GLFW constructor <nl> - EmuWindow_GLFW : : EmuWindow_GLFW ( ) { <nl> - keyboard_id = KeyMap : : NewDeviceId ( ) ; <nl> - <nl> - ReloadSetKeymaps ( ) ; <nl> - <nl> - glfwSetErrorCallback ( [ ] ( int error , const char * desc ) { <nl> - LOG_ERROR ( Frontend , " GLFW 0x % 08x : % s " , error , desc ) ; <nl> - } ) ; <nl> - <nl> - / / Initialize the window <nl> - if ( glfwInit ( ) ! = GL_TRUE ) { <nl> - LOG_CRITICAL ( Frontend , " Failed to initialize GLFW ! Exiting . . . " ) ; <nl> - exit ( 1 ) ; <nl> - } <nl> - glfwWindowHint ( GLFW_CONTEXT_VERSION_MAJOR , 3 ) ; <nl> - glfwWindowHint ( GLFW_CONTEXT_VERSION_MINOR , 3 ) ; <nl> - / / GLFW on OSX requires these window hints to be set to create a 3 . 2 + GL context . 
<nl> - glfwWindowHint ( GLFW_OPENGL_FORWARD_COMPAT , GL_TRUE ) ; <nl> - glfwWindowHint ( GLFW_OPENGL_PROFILE , GLFW_OPENGL_CORE_PROFILE ) ; <nl> - <nl> - std : : string window_title = Common : : StringFromFormat ( " Citra | % s - % s " , Common : : g_scm_branch , Common : : g_scm_desc ) ; <nl> - m_render_window = glfwCreateWindow ( VideoCore : : kScreenTopWidth , <nl> - ( VideoCore : : kScreenTopHeight + VideoCore : : kScreenBottomHeight ) , <nl> - window_title . c_str ( ) , nullptr , nullptr ) ; <nl> - <nl> - if ( m_render_window = = nullptr ) { <nl> - LOG_CRITICAL ( Frontend , " Failed to create GLFW window ! Exiting . . . " ) ; <nl> - exit ( 1 ) ; <nl> - } <nl> - <nl> - glfwSetWindowUserPointer ( m_render_window , this ) ; <nl> - <nl> - / / Notify base interface about window state <nl> - int width , height ; <nl> - glfwGetFramebufferSize ( m_render_window , & width , & height ) ; <nl> - OnFramebufferResizeEvent ( m_render_window , width , height ) ; <nl> - <nl> - glfwGetWindowSize ( m_render_window , & width , & height ) ; <nl> - OnClientAreaResizeEvent ( m_render_window , width , height ) ; <nl> - <nl> - / / Setup callbacks <nl> - glfwSetKeyCallback ( m_render_window , OnKeyEvent ) ; <nl> - glfwSetMouseButtonCallback ( m_render_window , OnMouseButtonEvent ) ; <nl> - glfwSetCursorPosCallback ( m_render_window , OnCursorPosEvent ) ; <nl> - glfwSetFramebufferSizeCallback ( m_render_window , OnFramebufferResizeEvent ) ; <nl> - glfwSetWindowSizeCallback ( m_render_window , OnClientAreaResizeEvent ) ; <nl> - <nl> - DoneCurrent ( ) ; <nl> - } <nl> - <nl> - / / / EmuWindow_GLFW destructor <nl> - EmuWindow_GLFW : : ~ EmuWindow_GLFW ( ) { <nl> - glfwTerminate ( ) ; <nl> - } <nl> - <nl> - / / / Swap buffers to display the next frame <nl> - void EmuWindow_GLFW : : SwapBuffers ( ) { <nl> - glfwSwapBuffers ( m_render_window ) ; <nl> - } <nl> - <nl> - / / / Polls window events <nl> - void EmuWindow_GLFW : : PollEvents ( ) { <nl> - glfwPollEvents ( ) ; <nl> - } <nl> - <nl> - / / / Makes the GLFW OpenGL context current for the caller thread <nl> - void EmuWindow_GLFW : : MakeCurrent ( ) { <nl> - glfwMakeContextCurrent ( m_render_window ) ; <nl> - } <nl> - <nl> - / / / Releases ( dunno if this is the " right " word ) the GLFW context from the caller thread <nl> - void EmuWindow_GLFW : : DoneCurrent ( ) { <nl> - glfwMakeContextCurrent ( nullptr ) ; <nl> - } <nl> - <nl> - void EmuWindow_GLFW : : ReloadSetKeymaps ( ) { <nl> - for ( int i = 0 ; i < Settings : : NativeInput : : NUM_INPUTS ; + + i ) { <nl> - KeyMap : : SetKeyMapping ( { Settings : : values . input_mappings [ Settings : : NativeInput : : All [ i ] ] , keyboard_id } , Service : : HID : : pad_mapping [ i ] ) ; <nl> - } <nl> - } <nl> - <nl> - void EmuWindow_GLFW : : OnMinimalClientAreaChangeRequest ( const std : : pair < unsigned , unsigned > & minimal_size ) { <nl> - std : : pair < int , int > current_size ; <nl> - glfwGetWindowSize ( m_render_window , & current_size . first , & current_size . second ) ; <nl> - <nl> - DEBUG_ASSERT ( ( int ) minimal_size . first > 0 & & ( int ) minimal_size . second > 0 ) ; <nl> - int new_width = std : : max ( current_size . first , ( int ) minimal_size . first ) ; <nl> - int new_height = std : : max ( current_size . second , ( int ) minimal_size . second ) ; <nl> - <nl> - if ( current_size ! = std : : make_pair ( new_width , new_height ) ) <nl> - glfwSetWindowSize ( m_render_window , new_width , new_height ) ; <nl> - } <nl> deleted file mode 100644 <nl> index 7ccd5e6aa68 . . 
00000000000 <nl> mmm a / src / citra / emu_window / emu_window_glfw . h <nl> ppp / dev / null <nl> <nl> - / / Copyright 2014 Citra Emulator Project <nl> - / / Licensed under GPLv2 or any later version <nl> - / / Refer to the license . txt file included . <nl> - <nl> - # pragma once <nl> - <nl> - # include < utility > <nl> - <nl> - # include " common / emu_window . h " <nl> - <nl> - struct GLFWwindow ; <nl> - <nl> - class EmuWindow_GLFW : public EmuWindow { <nl> - public : <nl> - EmuWindow_GLFW ( ) ; <nl> - ~ EmuWindow_GLFW ( ) ; <nl> - <nl> - / / / Swap buffers to display the next frame <nl> - void SwapBuffers ( ) override ; <nl> - <nl> - / / / Polls window events <nl> - void PollEvents ( ) override ; <nl> - <nl> - / / / Makes the graphics context current for the caller thread <nl> - void MakeCurrent ( ) override ; <nl> - <nl> - / / / Releases ( dunno if this is the " right " word ) the GLFW context from the caller thread <nl> - void DoneCurrent ( ) override ; <nl> - <nl> - static void OnKeyEvent ( GLFWwindow * win , int key , int scancode , int action , int mods ) ; <nl> - <nl> - static void OnMouseButtonEvent ( GLFWwindow * window , int button , int action , int mods ) ; <nl> - <nl> - static void OnCursorPosEvent ( GLFWwindow * window , double x , double y ) ; <nl> - <nl> - / / / Whether the window is still open , and a close request hasn ' t yet been sent <nl> - const bool IsOpen ( ) ; <nl> - <nl> - static void OnClientAreaResizeEvent ( GLFWwindow * win , int width , int height ) ; <nl> - <nl> - static void OnFramebufferResizeEvent ( GLFWwindow * win , int width , int height ) ; <nl> - <nl> - void ReloadSetKeymaps ( ) override ; <nl> - <nl> - private : <nl> - void OnMinimalClientAreaChangeRequest ( const std : : pair < unsigned , unsigned > & minimal_size ) override ; <nl> - <nl> - static EmuWindow_GLFW * GetEmuWindow ( GLFWwindow * win ) ; <nl> - <nl> - GLFWwindow * m_render_window ; / / / < Internal GLFW render window <nl> - <nl> - / / / Device id of keyboard for use with KeyMap <nl> - int keyboard_id ; <nl> - } ; <nl> new file mode 100644 <nl> index 00000000000 . . 1fed82e78bf <nl> mmm / dev / null <nl> ppp b / src / citra / emu_window / emu_window_sdl2 . cpp <nl> <nl> + / / Copyright 2016 Citra Emulator Project <nl> + / / Licensed under GPLv2 or any later version <nl> + / / Refer to the license . txt file included . <nl> + <nl> + # include < algorithm > <nl> + # include < cstdlib > <nl> + # include < string > <nl> + <nl> + # define SDL_MAIN_HANDLED <nl> + # include < SDL . h > <nl> + <nl> + # include " common / key_map . h " <nl> + # include " common / logging / log . h " <nl> + # include " common / scm_rev . h " <nl> + # include " common / string_util . h " <nl> + <nl> + # include " core / settings . h " <nl> + # include " core / hle / service / hid / hid . h " <nl> + <nl> + # include " citra / emu_window / emu_window_sdl2 . h " <nl> + <nl> + # include " video_core / video_core . h " <nl> + <nl> + void EmuWindow_SDL2 : : OnMouseMotion ( s32 x , s32 y ) { <nl> + TouchMoved ( ( unsigned ) std : : max ( x , 0 ) , ( unsigned ) std : : max ( y , 0 ) ) ; <nl> + } <nl> + <nl> + void EmuWindow_SDL2 : : OnMouseButton ( u32 button , u8 state , s32 x , s32 y ) { <nl> + if ( button ! 
= SDL_BUTTON_LEFT ) <nl> + return ; <nl> + <nl> + if ( state = = SDL_PRESSED ) { <nl> + TouchPressed ( ( unsigned ) std : : max ( x , 0 ) , ( unsigned ) std : : max ( y , 0 ) ) ; <nl> + } else { <nl> + TouchReleased ( ) ; <nl> + } <nl> + } <nl> + <nl> + void EmuWindow_SDL2 : : OnKeyEvent ( int key , u8 state ) { <nl> + if ( state = = SDL_PRESSED ) { <nl> + KeyPressed ( { key , keyboard_id } ) ; <nl> + } else if ( state = = SDL_RELEASED ) { <nl> + KeyReleased ( { key , keyboard_id } ) ; <nl> + } <nl> + } <nl> + <nl> + bool EmuWindow_SDL2 : : IsOpen ( ) const { <nl> + return is_open ; <nl> + } <nl> + <nl> + void EmuWindow_SDL2 : : OnResize ( ) { <nl> + int width , height ; <nl> + <nl> + SDL_GetWindowSize ( render_window , & width , & height ) ; <nl> + <nl> + NotifyFramebufferLayoutChanged ( EmuWindow : : FramebufferLayout : : DefaultScreenLayout ( width , height ) ) ; <nl> + } <nl> + <nl> + EmuWindow_SDL2 : : EmuWindow_SDL2 ( ) { <nl> + keyboard_id = KeyMap : : NewDeviceId ( ) ; <nl> + <nl> + ReloadSetKeymaps ( ) ; <nl> + <nl> + SDL_SetMainReady ( ) ; <nl> + <nl> + / / Initialize the window <nl> + if ( SDL_Init ( SDL_INIT_VIDEO ) < 0 ) { <nl> + LOG_CRITICAL ( Frontend , " Failed to initialize SDL2 ! Exiting . . . " ) ; <nl> + exit ( 1 ) ; <nl> + } <nl> + <nl> + SDL_GL_SetAttribute ( SDL_GL_CONTEXT_MAJOR_VERSION , 3 ) ; <nl> + SDL_GL_SetAttribute ( SDL_GL_CONTEXT_MINOR_VERSION , 3 ) ; <nl> + SDL_GL_SetAttribute ( SDL_GL_CONTEXT_PROFILE_MASK , SDL_GL_CONTEXT_PROFILE_CORE ) ; <nl> + SDL_GL_SetAttribute ( SDL_GL_DOUBLEBUFFER , 1 ) ; <nl> + <nl> + std : : string window_title = Common : : StringFromFormat ( " Citra | % s - % s " , Common : : g_scm_branch , Common : : g_scm_desc ) ; <nl> + render_window = SDL_CreateWindow ( window_title . c_str ( ) , <nl> + SDL_WINDOWPOS_UNDEFINED , / / x position <nl> + SDL_WINDOWPOS_UNDEFINED , / / y position <nl> + VideoCore : : kScreenTopWidth , <nl> + VideoCore : : kScreenTopHeight + VideoCore : : kScreenBottomHeight , <nl> + SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE | SDL_WINDOW_ALLOW_HIGHDPI ) ; <nl> + <nl> + if ( render_window = = nullptr ) { <nl> + LOG_CRITICAL ( Frontend , " Failed to create SDL2 window ! Exiting . . . " ) ; <nl> + exit ( 1 ) ; <nl> + } <nl> + <nl> + gl_context = SDL_GL_CreateContext ( render_window ) ; <nl> + <nl> + if ( gl_context = = nullptr ) { <nl> + LOG_CRITICAL ( Frontend , " Failed to create SDL2 GL context ! Exiting . . . " ) ; <nl> + exit ( 1 ) ; <nl> + } <nl> + <nl> + OnResize ( ) ; <nl> + OnMinimalClientAreaChangeRequest ( GetActiveConfig ( ) . min_client_area_size ) ; <nl> + SDL_PumpEvents ( ) ; <nl> + <nl> + DoneCurrent ( ) ; <nl> + } <nl> + <nl> + EmuWindow_SDL2 : : ~ EmuWindow_SDL2 ( ) { <nl> + SDL_GL_DeleteContext ( gl_context ) ; <nl> + SDL_Quit ( ) ; <nl> + } <nl> + <nl> + void EmuWindow_SDL2 : : SwapBuffers ( ) { <nl> + SDL_GL_SwapWindow ( render_window ) ; <nl> + } <nl> + <nl> + void EmuWindow_SDL2 : : PollEvents ( ) { <nl> + SDL_Event event ; <nl> + <nl> + / / SDL_PollEvent returns 0 when there are no more events in the event queue <nl> + while ( SDL_PollEvent ( & event ) ) { <nl> + switch ( event . type ) { <nl> + case SDL_WINDOWEVENT : <nl> + switch ( event . window . 
event ) { <nl> + case SDL_WINDOWEVENT_SIZE_CHANGED : <nl> + case SDL_WINDOWEVENT_RESIZED : <nl> + case SDL_WINDOWEVENT_MAXIMIZED : <nl> + case SDL_WINDOWEVENT_RESTORED : <nl> + case SDL_WINDOWEVENT_MINIMIZED : <nl> + OnResize ( ) ; <nl> + break ; <nl> + case SDL_WINDOWEVENT_CLOSE : <nl> + is_open = false ; <nl> + break ; <nl> + } <nl> + break ; <nl> + case SDL_KEYDOWN : <nl> + case SDL_KEYUP : <nl> + OnKeyEvent ( static_cast < int > ( event . key . keysym . scancode ) , event . key . state ) ; <nl> + break ; <nl> + case SDL_MOUSEMOTION : <nl> + OnMouseMotion ( event . motion . x , event . motion . y ) ; <nl> + break ; <nl> + case SDL_MOUSEBUTTONDOWN : <nl> + case SDL_MOUSEBUTTONUP : <nl> + OnMouseButton ( event . button . button , event . button . state , event . button . x , event . button . y ) ; <nl> + break ; <nl> + case SDL_QUIT : <nl> + is_open = false ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void EmuWindow_SDL2 : : MakeCurrent ( ) { <nl> + SDL_GL_MakeCurrent ( render_window , gl_context ) ; <nl> + } <nl> + <nl> + void EmuWindow_SDL2 : : DoneCurrent ( ) { <nl> + SDL_GL_MakeCurrent ( render_window , nullptr ) ; <nl> + } <nl> + <nl> + void EmuWindow_SDL2 : : ReloadSetKeymaps ( ) { <nl> + for ( int i = 0 ; i < Settings : : NativeInput : : NUM_INPUTS ; + + i ) { <nl> + KeyMap : : SetKeyMapping ( { Settings : : values . input_mappings [ Settings : : NativeInput : : All [ i ] ] , keyboard_id } , Service : : HID : : pad_mapping [ i ] ) ; <nl> + } <nl> + } <nl> + <nl> + void EmuWindow_SDL2 : : OnMinimalClientAreaChangeRequest ( const std : : pair < unsigned , unsigned > & minimal_size ) { <nl> + SDL_SetWindowMinimumSize ( render_window , minimal_size . first , minimal_size . second ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 77279f0224c <nl> mmm / dev / null <nl> ppp b / src / citra / emu_window / emu_window_sdl2 . h <nl> <nl> + / / Copyright 2016 Citra Emulator Project <nl> + / / Licensed under GPLv2 or any later version <nl> + / / Refer to the license . txt file included . <nl> + <nl> + # pragma once <nl> + <nl> + # include < utility > <nl> + <nl> + # include " common / emu_window . h " <nl> + <nl> + struct SDL_Window ; <nl> + <nl> + class EmuWindow_SDL2 : public EmuWindow { <nl> + public : <nl> + EmuWindow_SDL2 ( ) ; <nl> + ~ EmuWindow_SDL2 ( ) ; <nl> + <nl> + / / / Swap buffers to display the next frame <nl> + void SwapBuffers ( ) override ; <nl> + <nl> + / / / Polls window events <nl> + void PollEvents ( ) override ; <nl> + <nl> + / / / Makes the graphics context current for the caller thread <nl> + void MakeCurrent ( ) override ; <nl> + <nl> + / / / Releases the GL context from the caller thread <nl> + void DoneCurrent ( ) override ; <nl> + <nl> + / / / Whether the window is still open , and a close request hasn ' t yet been sent <nl> + bool IsOpen ( ) const ; <nl> + <nl> + / / / Load keymap from configuration <nl> + void ReloadSetKeymaps ( ) override ; <nl> + <nl> + private : <nl> + / / / Called by PollEvents when a key is pressed or released . <nl> + void OnKeyEvent ( int key , u8 state ) ; <nl> + <nl> + / / / Called by PollEvents when the mouse moves . 
<nl> + void OnMouseMotion ( s32 x , s32 y ) ; <nl> + <nl> + / / / Called by PollEvents when a mouse button is pressed or released <nl> + void OnMouseButton ( u32 button , u8 state , s32 x , s32 y ) ; <nl> + <nl> + / / / Called by PollEvents when any event that may cause the window to be resized occurs <nl> + void OnResize ( ) ; <nl> + <nl> + / / / Called when a configuration change affects the minimal size of the window <nl> + void OnMinimalClientAreaChangeRequest ( const std : : pair < unsigned , unsigned > & minimal_size ) override ; <nl> + <nl> + / / / Is the window still open ? <nl> + bool is_open = true ; <nl> + <nl> + / / / Internal SDL2 render window <nl> + SDL_Window * render_window ; <nl> + <nl> + using SDL_GLContext = void * ; <nl> + / / / The OpenGL context associated with the window <nl> + SDL_GLContext gl_context ; <nl> + <nl> + / / / Device id of keyboard for use with KeyMap <nl> + int keyboard_id ; <nl> + } ; <nl> mmm a / src / citra_qt / CMakeLists . txt <nl> ppp b / src / citra_qt / CMakeLists . txt <nl> if ( $ { CMAKE_SYSTEM_NAME } MATCHES " Linux | FreeBSD | OpenBSD | NetBSD " ) <nl> endif ( ) <nl> <nl> if ( Qt5_FOUND AND MSVC ) <nl> + include ( WindowsCopyFiles ) <nl> + <nl> set ( Qt5_DLL_DIR " $ { Qt5_DIR } / . . / . . / . . / bin " ) <nl> set ( Qt5_PLATFORMS_DIR " $ { Qt5_DIR } / . . / . . / . . / plugins / platforms / " ) <nl> - set ( Qt5_DLLS <nl> + set ( DLL_DEST " $ { CMAKE_BINARY_DIR } / bin / $ < CONFIG > / " ) <nl> + set ( PLATFORMS $ { DLL_DEST } platforms / ) <nl> + <nl> + windows_copy_files ( citra - qt $ { Qt5_DLL_DIR } $ { DLL_DEST } <nl> icudt * . dll <nl> icuin * . dll <nl> icuuc * . dll <nl> if ( Qt5_FOUND AND MSVC ) <nl> Qt5OpenGL $ < $ < CONFIG : Debug > : d > . * <nl> Qt5Widgets $ < $ < CONFIG : Debug > : d > . * <nl> ) <nl> - set ( DLL_DEST " $ { CMAKE_BINARY_DIR } / bin / $ < CONFIG > / " ) <nl> - set ( PLATFORMS $ { DLL_DEST } platforms / ) <nl> + windows_copy_files ( citra - qt $ { Qt5_PLATFORMS_DIR } $ { PLATFORMS } qwindows $ < $ < CONFIG : Debug > : d > . * ) <nl> <nl> - # windows commandline expects the / to be \ so switch them <nl> - string ( REPLACE " / " " \ \ \ \ " Qt5_DLL_DIR $ { Qt5_DLL_DIR } ) <nl> - string ( REPLACE " / " " \ \ \ \ " Qt5_PLATFORMS_DIR $ { Qt5_PLATFORMS_DIR } ) <nl> - string ( REPLACE " / " " \ \ \ \ " DLL_DEST $ { DLL_DEST } ) <nl> - string ( REPLACE " / " " \ \ \ \ " PLATFORMS $ { PLATFORMS } ) <nl> - <nl> - # / NJH / NJS / NDL / NFL / NC / NS / NP - Silence any output <nl> - # cmake adds an extra check for command success which doesn ' t work too well with robocopy <nl> - # so trick it into thinking the command was successful with the | | cmd / c " exit / b 0 " <nl> - add_custom_command ( TARGET citra - qt POST_BUILD <nl> - COMMAND robocopy $ { Qt5_DLL_DIR } $ { DLL_DEST } $ { Qt5_DLLS } / NJH / NJS / NDL / NFL / NC / NS / NP | | cmd / c " exit / b 0 " <nl> - COMMAND if not exist $ { PLATFORMS } mkdir $ { PLATFORMS } 2 > nul <nl> - COMMAND robocopy $ { Qt5_PLATFORMS_DIR } $ { PLATFORMS } qwindows $ < $ < CONFIG : Debug > : d > . * / NJH / NJS / NDL / NFL / NC / NS / NP | | cmd / c " exit / b 0 " <nl> - ) <nl> - unset ( Qt5_DLLS ) <nl> unset ( Qt5_DLL_DIR ) <nl> unset ( Qt5_PLATFORMS_DIR ) <nl> unset ( DLL_DEST ) <nl>
|
Dependencies : Remove GLFW , Add SDL2
|
yuzu-emu/yuzu
|
ba2a54a9dd6e5a263c5e6886e55b3bc55b95b4ab
|
2016-03-02T14:09:02Z
|
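The commit above ports Citra's frontend from GLFW's registered callbacks to SDL2's single polled event queue, which is the structural heart of the change. Below is a minimal standalone sketch of that SDL2 pattern (hypothetical demo code, not Citra's; it assumes only the SDL2 headers and a working video driver):

#define SDL_MAIN_HANDLED   // we provide our own main(), as the commit does
#include <SDL.h>
#include <cstdio>

int main(int, char**) {
    SDL_SetMainReady();
    if (SDL_Init(SDL_INIT_VIDEO) < 0) {
        std::fprintf(stderr, "SDL_Init failed: %s\n", SDL_GetError());
        return 1;
    }
    // Request a 3.3 core-profile GL context, as EmuWindow_SDL2 does.
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);

    SDL_Window* window = SDL_CreateWindow("demo",
        SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
        400, 480, SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);
    if (!window) return 1;
    SDL_GLContext gl_context = SDL_GL_CreateContext(window);
    if (!gl_context) return 1;

    bool is_open = true;
    while (is_open) {
        // One polled queue replaces GLFW's per-event callbacks;
        // SDL_PollEvent returns 0 once the queue is drained.
        SDL_Event event;
        while (SDL_PollEvent(&event)) {
            if (event.type == SDL_QUIT ||
                (event.type == SDL_WINDOWEVENT &&
                 event.window.event == SDL_WINDOWEVENT_CLOSE))
                is_open = false;
        }
        SDL_GL_SwapWindow(window);
    }

    SDL_GL_DeleteContext(gl_context);
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}

The same queue carries window, keyboard, and mouse events, which is why EmuWindow_SDL2::PollEvents above dispatches on event.type instead of installing one callback per event kind.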
new file mode 100644 <nl> index 000000000000 . . b0f6244de2b3 <nl> mmm / dev / null <nl> ppp b / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxBXA . cpp <nl> <nl> + / * <nl> + * Copyright ( C ) 2012 Team XBMC <nl> + * http : / / www . xbmc . org <nl> + * <nl> + * This Program is free software ; you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> + * any later version . <nl> + * <nl> + * This Program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with XBMC ; see the file COPYING . If not , see <nl> + * < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + # include " DVDInputStreams / DVDInputStream . h " <nl> + # include " DVDDemuxBXA . h " <nl> + # include " DVDDemuxUtils . h " <nl> + # include " utils / log . h " <nl> + # include " . . / DVDClock . h " <nl> + <nl> + / / AirTunes audio Demuxer . <nl> + <nl> + using namespace std ; <nl> + <nl> + class CDemuxStreamAudioBXA <nl> + : public CDemuxStreamAudio <nl> + { <nl> + CDVDDemuxBXA * m_parent ; <nl> + string m_codec ; <nl> + public : <nl> + CDemuxStreamAudioBXA ( CDVDDemuxBXA * parent , const string & codec ) <nl> + : m_parent ( parent ) <nl> + , m_codec ( codec ) <nl> + <nl> + { } <nl> + void GetStreamInfo ( string & strInfo ) <nl> + { <nl> + CStdString info ; <nl> + info . Format ( " % s " , m_codec . c_str ( ) ) ; <nl> + strInfo = info ; <nl> + } <nl> + } ; <nl> + <nl> + CDVDDemuxBXA : : CDVDDemuxBXA ( ) : CDVDDemux ( ) <nl> + { <nl> + m_pInput = NULL ; <nl> + m_stream = NULL ; <nl> + memset ( & m_header , 0x0 , sizeof ( Demux_BXA_FmtHeader ) ) ; <nl> + } <nl> + <nl> + CDVDDemuxBXA : : ~ CDVDDemuxBXA ( ) <nl> + { <nl> + Dispose ( ) ; <nl> + } <nl> + <nl> + bool CDVDDemuxBXA : : Open ( CDVDInputStream * pInput ) <nl> + { <nl> + Abort ( ) ; <nl> + <nl> + Dispose ( ) ; <nl> + <nl> + if ( ! pInput | | ! pInput - > IsStreamType ( DVDSTREAM_TYPE_FILE ) ) <nl> + return false ; <nl> + <nl> + if ( pInput - > Read ( ( BYTE * ) & m_header , sizeof ( Demux_BXA_FmtHeader ) ) < 1 ) <nl> + return false ; <nl> + <nl> + / / file valid ? <nl> + if ( strncmp ( m_header . fourcc , " BXA " , 4 ) ! = 0 | | m_header . type ! = BXA_PACKET_TYPE_FMT_DEMUX ) <nl> + { <nl> + pInput - > Seek ( 0 , SEEK_SET ) ; <nl> + return false ; <nl> + } <nl> + <nl> + m_pInput = pInput ; <nl> + <nl> + m_stream = new CDemuxStreamAudioBXA ( this , " BXA " ) ; <nl> + <nl> + if ( ! m_stream ) <nl> + return false ; <nl> + <nl> + m_stream - > iSampleRate = m_header . sampleRate ; <nl> + m_stream - > iBitsPerSample = m_header . bitsPerSample ; <nl> + m_stream - > iBitRate = m_header . sampleRate * m_header . channels * m_header . bitsPerSample ; <nl> + m_stream - > iChannels = m_header . 
channels ; <nl> + m_stream - > type = STREAM_AUDIO ; <nl> + m_stream - > codec = CODEC_ID_PCM_S16LE ; <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + void CDVDDemuxBXA : : Dispose ( ) <nl> + { <nl> + delete m_stream ; <nl> + m_stream = NULL ; <nl> + <nl> + m_pInput = NULL ; <nl> + m_pts = 0 ; <nl> + <nl> + memset ( & m_header , 0x0 , sizeof ( Demux_BXA_FmtHeader ) ) ; <nl> + } <nl> + <nl> + void CDVDDemuxBXA : : Reset ( ) <nl> + { <nl> + CDVDInputStream * pInputStream = m_pInput ; <nl> + Dispose ( ) ; <nl> + Open ( pInputStream ) ; <nl> + } <nl> + <nl> + void CDVDDemuxBXA : : Abort ( ) <nl> + { <nl> + if ( m_pInput ) <nl> + return m_pInput - > Abort ( ) ; <nl> + } <nl> + <nl> + void CDVDDemuxBXA : : Flush ( ) <nl> + { <nl> + } <nl> + <nl> + # define BXA_READ_SIZE 4096 <nl> + DemuxPacket * CDVDDemuxBXA : : Read ( ) <nl> + { <nl> + if ( ! m_pInput ) <nl> + return NULL ; <nl> + <nl> + DemuxPacket * pPacket = CDVDDemuxUtils : : AllocateDemuxPacket ( BXA_READ_SIZE ) ; <nl> + <nl> + if ( ! pPacket ) <nl> + { <nl> + if ( m_pInput ) <nl> + m_pInput - > Close ( ) ; <nl> + return NULL ; <nl> + } <nl> + <nl> + pPacket - > iSize = m_pInput - > Read ( pPacket - > pData , BXA_READ_SIZE ) ; <nl> + pPacket - > iStreamId = 0 ; <nl> + <nl> + if ( pPacket - > iSize < 1 ) <nl> + { <nl> + delete pPacket ; <nl> + pPacket = NULL ; <nl> + } <nl> + else <nl> + { <nl> + int n = ( m_header . channels * m_header . bitsPerSample * m_header . sampleRate ) > > 3 ; <nl> + if ( n > 0 ) <nl> + { <nl> + m_pts + = ( ( double ) pPacket - > iSize * DVD_TIME_BASE ) / n ; <nl> + pPacket - > dts = m_pts ; <nl> + pPacket - > pts = m_pts ; <nl> + } <nl> + else <nl> + { <nl> + pPacket - > dts = DVD_NOPTS_VALUE ; <nl> + pPacket - > pts = DVD_NOPTS_VALUE ; <nl> + } <nl> + } <nl> + <nl> + return pPacket ; <nl> + } <nl> + <nl> + CDemuxStream * CDVDDemuxBXA : : GetStream ( int iStreamId ) <nl> + { <nl> + if ( iStreamId ! = 0 ) <nl> + return NULL ; <nl> + <nl> + return m_stream ; <nl> + } <nl> + <nl> + int CDVDDemuxBXA : : GetNrOfStreams ( ) <nl> + { <nl> + return ( m_stream = = NULL ? 0 : 1 ) ; <nl> + } <nl> + <nl> + std : : string CDVDDemuxBXA : : GetFileName ( ) <nl> + { <nl> + if ( m_pInput ) <nl> + return m_pInput - > GetFileName ( ) ; <nl> + else <nl> + return " " ; <nl> + } <nl> + <nl> + void CDVDDemuxBXA : : GetStreamCodecName ( int iStreamId , CStdString & strName ) <nl> + { <nl> + if ( m_stream & & iStreamId = = 0 ) <nl> + strName = " BXA " ; <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . 3132bbd78092 <nl> mmm / dev / null <nl> ppp b / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxBXA . h <nl> <nl> + # pragma once <nl> + / * <nl> + * Copyright ( C ) 2012 Team XBMC <nl> + * http : / / www . xbmc . org <nl> + * <nl> + * This Program is free software ; you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> + * any later version . <nl> + * <nl> + * This Program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with XBMC ; see the file COPYING . If not , see <nl> + * < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + # include " DVDDemux . 
h " <nl> + <nl> + # ifdef _WIN32 <nl> + # define __attribute__ ( dummy_val ) <nl> + # else <nl> + # include < config . h > <nl> + # endif <nl> + <nl> + # ifdef _WIN32 <nl> + # pragma pack ( push ) <nl> + # pragma pack ( 1 ) <nl> + # endif <nl> + <nl> + typedef struct <nl> + { <nl> + char fourcc [ 4 ] ; <nl> + uint32_t type ; <nl> + uint32_t channels ; <nl> + uint32_t sampleRate ; <nl> + uint32_t bitsPerSample ; <nl> + uint64_t durationMs ; <nl> + } __attribute__ ( ( __packed__ ) ) Demux_BXA_FmtHeader ; <nl> + <nl> + # ifdef _WIN32 <nl> + # pragma pack ( pop ) <nl> + # endif <nl> + <nl> + # include < vector > <nl> + <nl> + # define BXA_PACKET_TYPE_FMT_DEMUX 1 <nl> + <nl> + class CDemuxStreamAudioBXA ; <nl> + <nl> + class CDVDDemuxBXA : public CDVDDemux <nl> + { <nl> + public : <nl> + <nl> + CDVDDemuxBXA ( ) ; <nl> + ~ CDVDDemuxBXA ( ) ; <nl> + <nl> + bool Open ( CDVDInputStream * pInput ) ; <nl> + void Dispose ( ) ; <nl> + void Reset ( ) ; <nl> + void Abort ( ) ; <nl> + void Flush ( ) ; <nl> + DemuxPacket * Read ( ) ; <nl> + bool SeekTime ( int time , bool backwords = false , double * startpts = NULL ) { return false ; } <nl> + void SetSpeed ( int iSpeed ) { } ; <nl> + int GetStreamLength ( ) { return m_header . durationMs ; } <nl> + CDemuxStream * GetStream ( int iStreamId ) ; <nl> + int GetNrOfStreams ( ) ; <nl> + std : : string GetFileName ( ) ; <nl> + virtual void GetStreamCodecName ( int iStreamId , CStdString & strName ) ; <nl> + <nl> + protected : <nl> + friend class CDemuxStreamAudioBXA ; <nl> + CDVDInputStream * m_pInput ; <nl> + double m_pts ; <nl> + <nl> + CDemuxStreamAudioBXA * m_stream ; <nl> + <nl> + Demux_BXA_FmtHeader m_header ; <nl> + } ; <nl> + <nl> mmm a / xbmc / cores / dvdplayer / DVDDemuxers / DVDFactoryDemuxer . cpp <nl> ppp b / xbmc / cores / dvdplayer / DVDDemuxers / DVDFactoryDemuxer . cpp <nl> <nl> # ifdef HAS_FILESYSTEM_HTSP <nl> # include " DVDDemuxHTSP . h " <nl> # endif <nl> + # include " DVDDemuxBXA . h " <nl> # include " DVDDemuxPVRClient . h " <nl> # include " pvr / PVRManager . h " <nl> # include " pvr / addons / PVRClients . h " <nl> using namespace PVR ; <nl> <nl> CDVDDemux * CDVDFactoryDemuxer : : CreateDemuxer ( CDVDInputStream * pInputStream ) <nl> { <nl> + / / Try to open the AirTunes demuxer <nl> + if ( pInputStream - > IsStreamType ( DVDSTREAM_TYPE_FILE ) & & pInputStream - > GetContent ( ) . compare ( " audio / x - xbmc - pcm " ) = = 0 ) <nl> + { <nl> + auto_ptr < CDVDDemuxBXA > demuxer ( new CDVDDemuxBXA ( ) ) ; <nl> + if ( demuxer - > Open ( pInputStream ) ) <nl> + return demuxer . release ( ) ; <nl> + else <nl> + return NULL ; <nl> + } <nl> + <nl> if ( pInputStream - > IsStreamType ( DVDSTREAM_TYPE_HTTP ) ) <nl> { <nl> CDVDInputStreamHttp * pHttpStream = ( CDVDInputStreamHttp * ) pInputStream ; <nl> mmm a / xbmc / cores / dvdplayer / DVDDemuxers / Makefile . in <nl> ppp b / xbmc / cores / dvdplayer / DVDDemuxers / Makefile . in <nl> <nl> INCLUDES + = - I @ abs_top_srcdir @ / xbmc / cores / dvdplayer <nl> <nl> SRCS = DVDDemux . cpp <nl> + SRCS + = DVDDemuxBXA . cpp <nl> SRCS + = DVDDemuxFFmpeg . cpp <nl> SRCS + = DVDDemuxHTSP . cpp <nl> SRCS + = DVDDemuxPVRClient . cpp <nl>
|
Merge pull request from huceke / bxa
|
xbmc/xbmc
|
a40dd9bf9a8fb90ef2f943266fb8b19ec50c76fe
|
2012-10-07T07:15:03Z
|
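The demuxer above clocks raw PCM purely by byte count: bytes per second is (channels * bitsPerSample * sampleRate) / 8 (the ">> 3" in Read()), and each packet advances the PTS by iSize * DVD_TIME_BASE / n. A small arithmetic sketch of that timestamp logic (hypothetical standalone code; DVD_TIME_BASE is assumed here to be 1,000,000, i.e. microseconds, following XBMC's DVDClock convention):

#include <cstdint>
#include <cstdio>

int main() {
    // Assumption: DVD_TIME_BASE = 1,000,000 (microseconds), per DVDClock.
    const double DVD_TIME_BASE = 1000000.0;
    const uint32_t channels = 2, bitsPerSample = 16, sampleRate = 44100;

    // Bytes of PCM per second; the ">> 3" in the demuxer is "/ 8".
    const int n = (channels * bitsPerSample * sampleRate) >> 3;  // 176400

    double pts = 0.0;
    const int packet_size = 4096;  // BXA_READ_SIZE
    for (int i = 0; i < 3; ++i) {
        pts += (double)packet_size * DVD_TIME_BASE / n;  // as in Read()
        std::printf("packet %d: pts = %.2f us\n", i, pts);
    }
    // Every full 4096-byte packet advances the clock by ~23219.95 us
    // at 44.1 kHz / 16-bit / stereo.
    return 0;
}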
mmm a / core / class_db . cpp <nl> ppp b / core / class_db . cpp <nl> void ClassDB : : _add_class2 ( const StringName & p_class , const StringName & p_inherit <nl> } <nl> } <nl> <nl> + # ifdef DEBUG_METHODS_ENABLED <nl> + static MethodInfo info_from_bind ( MethodBind * p_method ) { <nl> + MethodInfo minfo ; <nl> + minfo . name = p_method - > get_name ( ) ; <nl> + minfo . id = p_method - > get_method_id ( ) ; <nl> + <nl> + for ( int i = 0 ; i < p_method - > get_argument_count ( ) ; i + + ) { <nl> + minfo . arguments . push_back ( p_method - > get_argument_info ( i ) ) ; <nl> + } <nl> + <nl> + minfo . return_val = p_method - > get_return_info ( ) ; <nl> + minfo . flags = p_method - > get_hint_flags ( ) ; <nl> + <nl> + for ( int i = 0 ; i < p_method - > get_argument_count ( ) ; i + + ) { <nl> + if ( p_method - > has_default_argument ( i ) ) { <nl> + minfo . default_arguments . push_back ( p_method - > get_default_argument ( i ) ) ; <nl> + } <nl> + } <nl> + <nl> + return minfo ; <nl> + } <nl> + # endif <nl> + <nl> void ClassDB : : get_method_list ( StringName p_class , List < MethodInfo > * p_methods , bool p_no_inheritance , bool p_exclude_from_properties ) { <nl> OBJTYPE_RLOCK ; <nl> <nl> void ClassDB : : get_method_list ( StringName p_class , List < MethodInfo > * p_methods , b <nl> } <nl> <nl> for ( List < StringName > : : Element * E = type - > method_order . front ( ) ; E ; E = E - > next ( ) ) { <nl> - MethodBind * method = type - > method_map . get ( E - > get ( ) ) ; <nl> - MethodInfo minfo ; <nl> - minfo . name = E - > get ( ) ; <nl> - minfo . id = method - > get_method_id ( ) ; <nl> - <nl> - if ( p_exclude_from_properties & & type - > methods_in_properties . has ( minfo . name ) ) { <nl> + if ( p_exclude_from_properties & & type - > methods_in_properties . has ( E - > get ( ) ) ) { <nl> continue ; <nl> } <nl> <nl> - for ( int i = 0 ; i < method - > get_argument_count ( ) ; i + + ) { <nl> - / / Variant : : Type t = method - > get_argument_type ( i ) ; <nl> - <nl> - minfo . arguments . push_back ( method - > get_argument_info ( i ) ) ; <nl> - } <nl> - <nl> - minfo . return_val = method - > get_return_info ( ) ; <nl> - minfo . flags = method - > get_hint_flags ( ) ; <nl> - <nl> - for ( int i = 0 ; i < method - > get_argument_count ( ) ; i + + ) { <nl> - if ( method - > has_default_argument ( i ) ) { <nl> - minfo . default_arguments . push_back ( method - > get_default_argument ( i ) ) ; <nl> - } <nl> - } <nl> + MethodBind * method = type - > method_map . get ( E - > get ( ) ) ; <nl> + MethodInfo minfo = info_from_bind ( method ) ; <nl> <nl> p_methods - > push_back ( minfo ) ; <nl> } <nl> void ClassDB : : get_method_list ( StringName p_class , List < MethodInfo > * p_methods , b <nl> } <nl> } <nl> <nl> + bool ClassDB : : get_method_info ( StringName p_class , StringName p_method , MethodInfo * r_info , bool p_no_inheritance , bool p_exclude_from_properties ) { <nl> + OBJTYPE_RLOCK ; <nl> + <nl> + ClassInfo * type = classes . getptr ( p_class ) ; <nl> + <nl> + while ( type ) { <nl> + if ( type - > disabled ) { <nl> + if ( p_no_inheritance ) { <nl> + break ; <nl> + } <nl> + <nl> + type = type - > inherits_ptr ; <nl> + continue ; <nl> + } <nl> + <nl> + # ifdef DEBUG_METHODS_ENABLED <nl> + MethodBind * * method = type - > method_map . getptr ( p_method ) ; <nl> + if ( method & & * method ) { <nl> + if ( r_info ! = nullptr ) { <nl> + MethodInfo minfo = info_from_bind ( * method ) ; <nl> + * r_info = minfo ; <nl> + } <nl> + return true ; <nl> + } else if ( type - > virtual_methods_map . 
has ( p_method ) ) { <nl> + if ( r_info ) { <nl> + * r_info = type - > virtual_methods_map [ p_method ] ; <nl> + } <nl> + return true ; <nl> + } <nl> + # else <nl> + if ( type - > method_map . has ( p_method ) ) { <nl> + if ( r_info ) { <nl> + MethodBind * m = type - > method_map [ p_method ] ; <nl> + MethodInfo mi ; <nl> + mi . name = m - > get_name ( ) ; <nl> + * r_info = mi ; <nl> + } <nl> + return true ; <nl> + } <nl> + # endif <nl> + <nl> + if ( p_no_inheritance ) { <nl> + break ; <nl> + } <nl> + <nl> + type = type - > inherits_ptr ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + <nl> MethodBind * ClassDB : : get_method ( StringName p_class , StringName p_name ) { <nl> OBJTYPE_RLOCK ; <nl> <nl> int ClassDB : : get_integer_constant ( const StringName & p_class , const StringName & p <nl> return 0 ; <nl> } <nl> <nl> + bool ClassDB : : has_integer_constant ( const StringName & p_class , const StringName & p_name , bool p_no_inheritance ) { <nl> + OBJTYPE_RLOCK ; <nl> + <nl> + ClassInfo * type = classes . getptr ( p_class ) ; <nl> + <nl> + while ( type ) { <nl> + if ( type - > constant_map . has ( p_name ) ) { <nl> + return true ; <nl> + } <nl> + if ( p_no_inheritance ) { <nl> + return false ; <nl> + } <nl> + <nl> + type = type - > inherits_ptr ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + <nl> StringName ClassDB : : get_integer_constant_enum ( const StringName & p_class , const StringName & p_name , bool p_no_inheritance ) { <nl> OBJTYPE_RLOCK ; <nl> <nl> void ClassDB : : get_enum_constants ( const StringName & p_class , const StringName & p_ <nl> } <nl> } <nl> <nl> + bool ClassDB : : has_enum ( const StringName & p_class , const StringName & p_name , bool p_no_inheritance ) { <nl> + OBJTYPE_RLOCK ; <nl> + <nl> + ClassInfo * type = classes . getptr ( p_class ) ; <nl> + <nl> + while ( type ) { <nl> + if ( type - > enum_map . has ( p_name ) ) { <nl> + return true ; <nl> + } <nl> + if ( p_no_inheritance ) { <nl> + return false ; <nl> + } <nl> + <nl> + type = type - > inherits_ptr ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + <nl> void ClassDB : : add_signal ( StringName p_class , const MethodInfo & p_signal ) { <nl> OBJTYPE_WLOCK ; <nl> <nl> void ClassDB : : get_signal_list ( StringName p_class , List < MethodInfo > * p_signals , b <nl> } <nl> } <nl> <nl> - bool ClassDB : : has_signal ( StringName p_class , StringName p_signal ) { <nl> + bool ClassDB : : has_signal ( StringName p_class , StringName p_signal , bool p_no_inheritance ) { <nl> OBJTYPE_RLOCK ; <nl> ClassInfo * type = classes . getptr ( p_class ) ; <nl> ClassInfo * check = type ; <nl> bool ClassDB : : has_signal ( StringName p_class , StringName p_signal ) { <nl> if ( check - > signal_map . has ( p_signal ) ) { <nl> return true ; <nl> } <nl> + if ( p_no_inheritance ) { <nl> + return false ; <nl> + } <nl> check = check - > inherits_ptr ; <nl> } <nl> <nl> void ClassDB : : add_property ( StringName p_class , const PropertyInfo & p_pinfo , cons <nl> OBJTYPE_WLOCK <nl> <nl> type - > property_list . push_back ( p_pinfo ) ; <nl> + type - > property_map [ p_pinfo . name ] = p_pinfo ; <nl> # ifdef DEBUG_METHODS_ENABLED <nl> if ( mb_get ) { <nl> type - > methods_in_properties . 
insert ( p_getter ) ; <nl> void ClassDB : : get_property_list ( StringName p_class , List < PropertyInfo > * p_list , <nl> } <nl> } <nl> <nl> + bool ClassDB : : get_property_info ( StringName p_class , StringName p_property , PropertyInfo * r_info , bool p_no_inheritance , const Object * p_validator ) { <nl> + OBJTYPE_RLOCK ; <nl> + <nl> + ClassInfo * check = classes . getptr ( p_class ) ; <nl> + while ( check ) { <nl> + if ( check - > property_map . has ( p_property ) ) { <nl> + PropertyInfo pinfo = check - > property_map [ p_property ] ; <nl> + if ( p_validator ) { <nl> + p_validator - > _validate_property ( pinfo ) ; <nl> + } <nl> + if ( r_info ) { <nl> + * r_info = pinfo ; <nl> + } <nl> + return true ; <nl> + } <nl> + if ( p_no_inheritance ) { <nl> + break ; <nl> + } <nl> + check = check - > inherits_ptr ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + <nl> bool ClassDB : : set_property ( Object * p_object , const StringName & p_property , const Variant & p_value , bool * r_valid ) { <nl> ClassInfo * type = classes . getptr ( p_object - > get_class_name ( ) ) ; <nl> ClassInfo * check = type ; <nl> void ClassDB : : add_virtual_method ( const StringName & p_class , const MethodInfo & p_ <nl> mi . flags | = METHOD_FLAG_VIRTUAL ; <nl> } <nl> classes [ p_class ] . virtual_methods . push_back ( mi ) ; <nl> + classes [ p_class ] . virtual_methods_map [ p_method . name ] = mi ; <nl> <nl> # endif <nl> } <nl> mmm a / core / class_db . h <nl> ppp b / core / class_db . h <nl> class ClassDB { <nl> HashMap < StringName , List < StringName > > enum_map ; <nl> HashMap < StringName , MethodInfo > signal_map ; <nl> List < PropertyInfo > property_list ; <nl> + HashMap < StringName , PropertyInfo > property_map ; <nl> # ifdef DEBUG_METHODS_ENABLED <nl> List < StringName > constant_order ; <nl> List < StringName > method_order ; <nl> Set < StringName > methods_in_properties ; <nl> List < MethodInfo > virtual_methods ; <nl> + Map < StringName , MethodInfo > virtual_methods_map ; <nl> StringName category ; <nl> # endif <nl> HashMap < StringName , PropertySetGet > property_setget ; <nl> class ClassDB { <nl> } <nl> <nl> static void add_signal ( StringName p_class , const MethodInfo & p_signal ) ; <nl> - static bool has_signal ( StringName p_class , StringName p_signal ) ; <nl> + static bool has_signal ( StringName p_class , StringName p_signal , bool p_no_inheritance = false ) ; <nl> static bool get_signal ( StringName p_class , StringName p_signal , MethodInfo * r_signal ) ; <nl> static void get_signal_list ( StringName p_class , List < MethodInfo > * p_signals , bool p_no_inheritance = false ) ; <nl> <nl> class ClassDB { <nl> static void add_property ( StringName p_class , const PropertyInfo & p_pinfo , const StringName & p_setter , const StringName & p_getter , int p_index = - 1 ) ; <nl> static void set_property_default_value ( StringName p_class , const StringName & p_name , const Variant & p_default ) ; <nl> static void get_property_list ( StringName p_class , List < PropertyInfo > * p_list , bool p_no_inheritance = false , const Object * p_validator = nullptr ) ; <nl> + static bool get_property_info ( StringName p_class , StringName p_property , PropertyInfo * r_info , bool p_no_inheritance = false , const Object * p_validator = nullptr ) ; <nl> static bool set_property ( Object * p_object , const StringName & p_property , const Variant & p_value , bool * r_valid = nullptr ) ; <nl> static bool get_property ( Object * p_object , const StringName & p_property , Variant & r_value ) ; <nl> static bool 
has_property ( const StringName & p_class , const StringName & p_property , bool p_no_inheritance = false ) ; <nl> class ClassDB { <nl> static void set_method_flags ( StringName p_class , StringName p_method , int p_flags ) ; <nl> <nl> static void get_method_list ( StringName p_class , List < MethodInfo > * p_methods , bool p_no_inheritance = false , bool p_exclude_from_properties = false ) ; <nl> + static bool get_method_info ( StringName p_class , StringName p_method , MethodInfo * r_info , bool p_no_inheritance = false , bool p_exclude_from_properties = false ) ; <nl> static MethodBind * get_method ( StringName p_class , StringName p_name ) ; <nl> <nl> static void add_virtual_method ( const StringName & p_class , const MethodInfo & p_method , bool p_virtual = true ) ; <nl> class ClassDB { <nl> static void bind_integer_constant ( const StringName & p_class , const StringName & p_enum , const StringName & p_name , int p_constant ) ; <nl> static void get_integer_constant_list ( const StringName & p_class , List < String > * p_constants , bool p_no_inheritance = false ) ; <nl> static int get_integer_constant ( const StringName & p_class , const StringName & p_name , bool * p_success = nullptr ) ; <nl> + static bool has_integer_constant ( const StringName & p_class , const StringName & p_name , bool p_no_inheritance = false ) ; <nl> <nl> static StringName get_integer_constant_enum ( const StringName & p_class , const StringName & p_name , bool p_no_inheritance = false ) ; <nl> static void get_enum_list ( const StringName & p_class , List < StringName > * p_enums , bool p_no_inheritance = false ) ; <nl> static void get_enum_constants ( const StringName & p_class , const StringName & p_enum , List < StringName > * p_constants , bool p_no_inheritance = false ) ; <nl> + static bool has_enum ( const StringName & p_class , const StringName & p_name , bool p_no_inheritance = false ) ; <nl> <nl> static Variant class_get_default_property_value ( const StringName & p_class , const StringName & p_property , bool * r_valid = nullptr ) ; <nl> <nl>
|
Add methods in ClassDB to get property / method / constant / enum info
|
godotengine/godot
|
14e85b762ee8b0425bb89837e98a36e1efe28c81
|
2020-07-06T22:27:05Z
|
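Every query the commit above adds (has_signal, has_enum, has_integer_constant, get_method_info, get_property_info) follows one shape: take the read lock, look up the class's ClassInfo, and walk inherits_ptr toward the root until a hit, stopping early when p_no_inheritance is set. A generic sketch of that lookup (hypothetical stand-in types, not Godot's actual ClassDB, and with the locking omitted):

#include <cassert>
#include <map>
#include <string>

struct ClassInfo {
    std::map<std::string, int> constant_map;  // stand-in for any member table
    ClassInfo* inherits_ptr = nullptr;        // parent class; null at the root
};

bool has_constant(const ClassInfo* type, const std::string& name,
                  bool no_inheritance = false) {
    while (type) {
        if (type->constant_map.count(name))
            return true;               // found on this class
        if (no_inheritance)
            return false;              // caller forbade looking at parents
        type = type->inherits_ptr;     // climb one level toward the root
    }
    return false;                      // ran off the top of the hierarchy
}

int main() {
    ClassInfo base, derived;
    base.constant_map["MAX"] = 42;
    derived.inherits_ptr = &base;
    assert(has_constant(&derived, "MAX"));         // found via the parent
    assert(!has_constant(&derived, "MAX", true));  // p_no_inheritance analogue
    return 0;
}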
mmm a / taskcluster / darwin - opt - base . tyml <nl> ppp b / taskcluster / darwin - opt - base . tyml <nl> payload : <nl> TENSORFLOW_BUILD_ARTIFACT : $ { build . tensorflow } <nl> SUMMARIZE_GRAPH_BINARY : $ { build . summarize_graph } <nl> DEEPSPEECH_TEST_MODEL : https : / / queue . taskcluster . net / v1 / task / $ { training } / artifacts / public / output_graph . pb <nl> - DEEPSPEECH_PROD_MODEL : https : / / github . com / lissyx / DeepSpeech / releases / download / 0 . 0 . 2 / tc - fake - prod . 988_e120 . LSTM . ldc93s1 . pb <nl> <nl> # There is no VM yet running tasks on OSX <nl> # so one should install by hand : <nl> mmm a / taskcluster / linux - opt - base . tyml <nl> ppp b / taskcluster / linux - opt - base . tyml <nl> then : <nl> TENSORFLOW_BUILD_ARTIFACT : $ { build . tensorflow } <nl> SUMMARIZE_GRAPH_BINARY : $ { build . summarize_graph } <nl> DEEPSPEECH_TEST_MODEL : https : / / queue . taskcluster . net / v1 / task / $ { training } / artifacts / public / output_graph . pb <nl> - DEEPSPEECH_PROD_MODEL : https : / / github . com / lissyx / DeepSpeech / releases / download / 0 . 0 . 2 / tc - fake - prod . 988_e120 . LSTM . ldc93s1 . pb <nl> <nl> command : <nl> - " / bin / bash " <nl>
|
Remove old AOT model
|
mozilla/DeepSpeech
|
1540fa392e4aa64c5a74124a0dd4fd57c993b6f0
|
2018-10-26T12:59:12Z
|
mmm a / src / clustering / immediate_consistency / branch / metadata . hpp <nl> ppp b / src / clustering / immediate_consistency / branch / metadata . hpp <nl> <nl> # include " concurrency / fifo_enforcer . hpp " <nl> # include " protocol_api . hpp " <nl> # include " rpc / mailbox / typed . hpp " <nl> - # include " rpc / semilattice / semilattice / map . hpp " <nl> + # include " rpc / semilattice / joins / map . hpp " <nl> # include " timestamps . hpp " <nl> <nl> / * Every broadcaster generates a UUID when it ' s first created . This is the UUID <nl> similarity index 82 % <nl> rename from src / rpc / semilattice / semilattice / map . hpp <nl> rename to src / rpc / semilattice / joins / map . hpp <nl> mmm a / src / rpc / semilattice / semilattice / map . hpp <nl> ppp b / src / rpc / semilattice / joins / map . hpp <nl> <nl> - # ifndef __RPC_METADATA_SEMILATTICE_MAP_HPP__ <nl> - # define __RPC_METADATA_SEMILATTICE_MAP_HPP__ <nl> + # ifndef __RPC_SEMILATTICE_JOINS_MAP_HPP__ <nl> + # define __RPC_SEMILATTICE_JOINS_MAP_HPP__ <nl> <nl> # include < map > <nl> <nl> void semilattice_join ( std : : map < key_t , value_t > * a , const std : : map < key_t , value_t <nl> <nl> } / * namespace std * / <nl> <nl> - # endif / * __RPC_METADATA_SEMILATTICE_MAP_HPP__ * / <nl> + # endif / * __RPC_SEMILATTICE_JOINS_MAP_HPP__ * / <nl> mmm a / src / unittest / rpc_semilattice . cc <nl> ppp b / src / unittest / rpc_semilattice . cc <nl> <nl> # include " unittest / gtest . hpp " <nl> <nl> # include " rpc / semilattice / semilattice_manager . hpp " <nl> - # include " rpc / semilattice / semilattice / map . hpp " <nl> + # include " rpc / semilattice / joins / map . hpp " <nl> # include " rpc / semilattice / view / field . hpp " <nl> # include " rpc / semilattice / view / member . hpp " <nl> # include " unittest / dummy_metadata_controller . hpp " <nl>
|
A better name for the directory where we keep our prefab semilattice functions
|
rethinkdb/rethinkdb
|
fde8b4dd08514e7ae5cb5383ef78f45383583593
|
2012-02-23T23:55:15Z
|
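The renamed header defines the semilattice join for std::map: the key sets are unioned, and values that collide are joined recursively, which keeps the operation commutative, associative, and idempotent. A hedged sketch of that shape (hypothetical element join, max over ints, standing in for RethinkDB's real per-type semilattice_join overloads):

#include <algorithm>
#include <cassert>
#include <map>
#include <string>

// Hypothetical element-level join; any semilattice operation works here.
void semilattice_join(int* a, int b) { *a = std::max(*a, b); }

// Join for maps: union of keys, recursive join of values on collision.
template <class K, class V>
void semilattice_join(std::map<K, V>* a, const std::map<K, V>& b) {
    for (const auto& kv : b) {
        auto it = a->find(kv.first);
        if (it == a->end())
            a->insert(kv);                            // key only in b: take it
        else
            semilattice_join(&it->second, kv.second); // key in both: join
    }
}

int main() {
    std::map<std::string, int> x{{"a", 1}, {"b", 5}};
    std::map<std::string, int> y{{"b", 2}, {"c", 3}};
    semilattice_join(&x, y);
    assert(x.size() == 3 && x["b"] == 5);  // element-wise max on collisions
    return 0;
}

Joining in either order yields the same map, which is exactly the property the replication metadata relies on.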
mmm a / tools / run_tests / run_tests . py <nl> ppp b / tools / run_tests / run_tests . py <nl> def test_specs ( self ) : <nl> ' EXAMPLE_PATH ' : ' src / objective - c / examples / tvOS - sample ' , <nl> ' FRAMEWORKS ' : ' NO ' <nl> } ) ) <nl> - out . append ( <nl> - self . config . job_spec ( <nl> - [ ' src / objective - c / tests / build_one_example . sh ' ] , <nl> - timeout_seconds = 10 * 60 , <nl> - shortname = ' ios - buildtest - example - tvOS - sample - framework ' , <nl> - cpu_cost = 1e6 , <nl> - environ = { <nl> - ' SCHEME ' : ' tvOS - sample ' , <nl> - ' EXAMPLE_PATH ' : ' src / objective - c / examples / tvOS - sample ' , <nl> - ' FRAMEWORKS ' : ' YES ' <nl> - } ) ) <nl> + # out . append ( <nl> + # self . config . job_spec ( <nl> + # [ ' src / objective - c / tests / build_one_example . sh ' ] , <nl> + # timeout_seconds = 10 * 60 , <nl> + # shortname = ' ios - buildtest - example - tvOS - sample - framework ' , <nl> + # cpu_cost = 1e6 , <nl> + # environ = { <nl> + # ' SCHEME ' : ' tvOS - sample ' , <nl> + # ' EXAMPLE_PATH ' : ' src / objective - c / examples / tvOS - sample ' , <nl> + # ' FRAMEWORKS ' : ' YES ' <nl> + # } ) ) <nl> out . append ( <nl> self . config . job_spec ( <nl> [ ' src / objective - c / tests / build_one_example . sh ' ] , <nl> def test_specs ( self ) : <nl> ' EXAMPLE_PATH ' : ' src / objective - c / examples / watchOS - sample ' , <nl> ' FRAMEWORKS ' : ' NO ' <nl> } ) ) <nl> - out . append ( <nl> - self . config . job_spec ( <nl> - [ ' src / objective - c / tests / build_one_example . sh ' ] , <nl> - timeout_seconds = 20 * 60 , <nl> - shortname = ' ios - buildtest - example - watchOS - sample - framework ' , <nl> - cpu_cost = 1e6 , <nl> - environ = { <nl> - ' SCHEME ' : ' watchOS - sample - WatchKit - App ' , <nl> - ' EXAMPLE_PATH ' : ' src / objective - c / examples / watchOS - sample ' , <nl> - ' FRAMEWORKS ' : ' YES ' <nl> - } ) ) <nl> + # out . append ( <nl> + # self . config . job_spec ( <nl> + # [ ' src / objective - c / tests / build_one_example . sh ' ] , <nl> + # timeout_seconds = 20 * 60 , <nl> + # shortname = ' ios - buildtest - example - watchOS - sample - framework ' , <nl> + # cpu_cost = 1e6 , <nl> + # environ = { <nl> + # ' SCHEME ' : ' watchOS - sample - WatchKit - App ' , <nl> + # ' EXAMPLE_PATH ' : ' src / objective - c / examples / watchOS - sample ' , <nl> + # ' FRAMEWORKS ' : ' YES ' <nl> + # } ) ) <nl> out . append ( <nl> self . config . job_spec ( <nl> [ ' src / objective - c / tests / run_plugin_tests . sh ' ] , <nl>
|
Removed framework tests for tv and watch examples
|
grpc/grpc
|
08795cd20632f1137c03aaff5abaf5c7d0a96eb4
|
2019-08-06T00:07:54Z
|
mmm a / lib / Sema / PlaygroundTransform . cpp <nl> ppp b / lib / Sema / PlaygroundTransform . cpp <nl> class Instrumenter : InstrumenterBase { <nl> return S ; <nl> case StmtKind : : Brace : <nl> return transformBraceStmt ( cast < BraceStmt > ( S ) ) ; <nl> + case StmtKind : : Defer : <nl> + return transformDeferStmt ( cast < DeferStmt > ( S ) ) ; <nl> case StmtKind : : If : <nl> return transformIfStmt ( cast < IfStmt > ( S ) ) ; <nl> case StmtKind : : Guard : <nl> class Instrumenter : InstrumenterBase { <nl> } <nl> } <nl> <nl> + DeferStmt * transformDeferStmt ( DeferStmt * DS ) { <nl> + if ( auto * FD = DS - > getTempDecl ( ) ) { <nl> + auto Implicit = FD - > isImplicit ( ) ; <nl> + FD - > setImplicit ( false ) ; <nl> + auto * D = transformDecl ( FD ) ; <nl> + D - > setImplicit ( Implicit ) ; <nl> + assert ( D = = FD ) ; <nl> + } <nl> + return DS ; <nl> + } <nl> + <nl> / / transform * ( ) return their input if it ' s unmodified , <nl> / / or a modified copy of their input otherwise . <nl> IfStmt * transformIfStmt ( IfStmt * IS ) { <nl>
|
[ PlaygroundTransform ] Implemented support for defer statements .
|
apple/swift
|
0a9a4e7d6f267bfb1b52cdbd3d72bcc628e9e96d
|
2018-01-09T19:21:52Z
|
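The fix above hinges on one detail: the playground instrumenter skips implicit declarations, and the function a defer statement synthesizes is implicit, so transformDeferStmt saves the flag, clears it, runs the transform, then restores it. A sketch of that save/clear/restore pattern, written here as an RAII guard (hypothetical stand-in types; the real code flips the flag inline on Swift AST nodes):

#include <cassert>

struct Decl {
    bool implicit = true;
    bool transformed = false;
};

// RAII guard: clears the flag for the duration of a scope, then restores it.
class ImplicitFlagClearer {
    Decl& decl;
    bool saved;
public:
    explicit ImplicitFlagClearer(Decl& d) : decl(d), saved(d.implicit) {
        decl.implicit = false;
    }
    ~ImplicitFlagClearer() { decl.implicit = saved; }
};

Decl* transformDecl(Decl* d) {
    if (d->implicit) return d;   // instrumenters commonly skip implicit decls
    d->transformed = true;
    return d;
}

int main() {
    Decl defer_fn;               // synthesized by the compiler, hence implicit
    {
        ImplicitFlagClearer guard(defer_fn);
        Decl* result = transformDecl(&defer_fn);  // now actually instrumented
        assert(result == &defer_fn);
    }
    assert(defer_fn.implicit && defer_fn.transformed);  // flag restored
    return 0;
}

A guard is sturdier than the commit's manual restore if the transform can throw, though in the compiler's context the inline save/restore is equivalent.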
mmm a / tensorflow / compiler / mlir / tensorflow / BUILD <nl> ppp b / tensorflow / compiler / mlir / tensorflow / BUILD <nl> cc_library ( <nl> " : mangling_util " , <nl> " : mlir_roundtrip_flags " , <nl> " : tensorflow " , <nl> + " / / tensorflow / cc / saved_model : loader " , <nl> " / / tensorflow / compiler / jit : shape_inference_helpers " , <nl> " / / tensorflow / compiler / xla : status_macros " , <nl> " / / tensorflow / core : core_cpu " , <nl> new file mode 100644 <nl> index 0000000000000 . . 5c49c3900a6a7 <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / mlir / tensorflow / tests / savedmodel2mlir / BUILD <nl> <nl> + load ( " / / tensorflow : tensorflow . bzl " , " tf_cc_test " ) <nl> + <nl> + package ( licenses = [ " notice " ] ) <nl> + <nl> + tf_cc_test ( <nl> + name = " half_plus_two " , <nl> + srcs = [ " half_plus_two . cc " ] , <nl> + data = [ <nl> + " / / tensorflow / cc / saved_model : saved_model_half_plus_two " , <nl> + ] , <nl> + deps = [ <nl> + " / / tensorflow / cc / saved_model : tag_constants " , <nl> + " / / tensorflow / compiler / mlir / tensorflow : translate_lib " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : test " , <nl> + " / / tensorflow / core : test_main " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . 6a5cef5b5514b <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / mlir / tensorflow / tests / savedmodel2mlir / half_plus_two . cc <nl> <nl> + / * Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include < unordered_set > <nl> + <nl> + # include " tensorflow / cc / saved_model / tag_constants . h " <nl> + # include " tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate . h " <nl> + # include " tensorflow / core / lib / io / path . h " <nl> + # include " tensorflow / core / platform / test . h " <nl> + <nl> + / / TODO ( silvasean ) : Add a FileCheck based testing harness for SavedModel to <nl> + / / replace the following . The source should be TensorFlow Python code . Then we <nl> + / / can generate SavedModel directories on the fly and import them . Check <nl> + / / directives can be embedded into the same file as the source . 
<nl> + TEST ( SavedModel , HalfPlusTwo ) { <nl> + const char kSavedModel [ ] = " cc / saved_model / testdata / half_plus_two / 00000123 " ; <nl> + const string saved_model_dir = tensorflow : : io : : JoinPath ( <nl> + tensorflow : : testing : : TensorFlowSrcRoot ( ) , kSavedModel ) ; <nl> + std : : unordered_set < string > tags { tensorflow : : kSavedModelTagServe } ; <nl> + <nl> + mlir : : MLIRContext context ; <nl> + auto module = tensorflow : : SavedModelToMlirImport ( <nl> + saved_model_dir , tags , / * debug_info_file = * / " " , & context ) ; <nl> + auto * block = module - > getBody ( ) ; <nl> + <nl> + / / testdata / half_plus_two does not use any functions . So we only have the <nl> + / / mandatory module terminator op inside its block . <nl> + EXPECT_TRUE ( std : : next ( block - > begin ( ) ) = = block - > end ( ) ) ; <nl> + } <nl> mmm a / tensorflow / compiler / mlir / tensorflow / translate / import_model . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / translate / import_model . cc <nl> limitations under the License . <nl> # include " tensorflow / core / platform / types . h " <nl> # include " tensorflow / core / protobuf / graph_debug_info . pb . h " <nl> <nl> + static inline absl : : string_view StringRefToView ( llvm : : StringRef ref ) { <nl> + return { ref . data ( ) , ref . size ( ) } ; <nl> + } <nl> + <nl> namespace tensorflow { <nl> using stream_executor : : port : : StatusOr ; <nl> <nl> class ImporterBase { <nl> const FunctionLibraryDefinition & flib , const GraphDebugInfo & debug_info , <nl> const NodeSpecs & specs , mlir : : ModuleOp module , <nl> std : : unordered_map < std : : string , std : : string > * tf_name_to_mlir_name ) <nl> - : module_ ( module ) , <nl> + : builder_ ( absl : : make_unique < mlir : : OpBuilder > ( module . getContext ( ) ) ) , <nl> + module_ ( module ) , <nl> context_ ( module . getContext ( ) ) , <nl> tf_name_to_mlir_name_ ( tf_name_to_mlir_name ) , <nl> graph_flib_ ( flib ) , <nl> class ImporterBase { <nl> const absl : : InlinedVector < Node * , 4 > & control_ret_nodes , <nl> llvm : : ArrayRef < mlir : : NamedAttribute > attrs ) ; <nl> <nl> + / / Finds out the function definition for the given function name from the <nl> + / / graph and converts it to a function of the module . This method is called <nl> + / / on demand because the graph flib_def does not provide an iterator <nl> + / / interface . <nl> + Status ConvertLibFunction ( llvm : : StringRef func_name ) ; <nl> + <nl> / / Returns the list of nodes in the graph . Nodes are presented in the reverse <nl> / / order of a post - order depth - first visit starting from the graph ' s source <nl> / / nodes . <nl> class ImporterBase { <nl> Status AddBackedge ( mlir : : Operation * sink , mlir : : Operation * dst , <nl> int dst_input ) ; <nl> <nl> - / / Finds out the function definition for the given function name from the <nl> - / / graph and converts it to a function of the module . This method is called <nl> - / / on demand because the graph flib_def does not provide an iterator <nl> - / / interface . The consequence is that only the referred functions are added to <nl> - / / the MLIR module . <nl> - Status ConvertLibFunction ( const std : : string & func_name ) ; <nl> - <nl> / / Adds the input arguments and return operation to the function . The <nl> / / arguments are added as basic block argument . Also the argument types and <nl> / / the id of the nodes from the input graph needs to be specified . 
<nl> Status UpdateLegacyFedInputNode ( const GraphDef & graph_def , <nl> / / the GraphDef . <nl> / / - Replacing LegacyFedInput nodes with Placeholder nodes if <nl> / / convert_legacy_fed_inputs option is enabled . <nl> - Status PreprocessGraphDef ( const NodeSpecs & specs , GraphDef * graph_def ) { <nl> + Status PreprocessGraphDef ( const NodeSpecs * specs , GraphDef * graph_def ) { <nl> const tensorflow : : OpRegistrationData * op_reg_data ; <nl> for ( auto & node_def : * graph_def - > mutable_node ( ) ) { <nl> / / TODO ( hinsu ) : Completely deprecate support for LegacyFedInput ops . One <nl> / / solution could be have a tool to let users upgrade old serialized graphs . <nl> - if ( specs . convert_legacy_fed_inputs & & node_def . op ( ) = = " LegacyFedInput " ) { <nl> + if ( specs & & specs - > convert_legacy_fed_inputs & & <nl> + node_def . op ( ) = = " LegacyFedInput " ) { <nl> TF_RETURN_IF_ERROR ( <nl> - UpdateLegacyFedInputNode ( * graph_def , specs . inputs , & node_def ) ) ; <nl> + UpdateLegacyFedInputNode ( * graph_def , specs - > inputs , & node_def ) ) ; <nl> } <nl> <nl> auto status = <nl> void ImporterBase : : GetArgsAndRetsFromFunctionBody ( <nl> * control_ret_nodes = fbody . control_ret_nodes ; <nl> } <nl> <nl> - Status ImporterBase : : ConvertLibFunction ( const std : : string & func_name ) { <nl> + Status ImporterBase : : ConvertLibFunction ( llvm : : StringRef func_name ) { <nl> / / If the library function has been converted already , nothing needs to be <nl> / / done . <nl> if ( tf_name_to_mlir_name_ - > find ( func_name ) ! = tf_name_to_mlir_name_ - > end ( ) ) <nl> return Status : : OK ( ) ; <nl> <nl> - std : : string mlir_func_name = graph_flib_ . UniqueFunctionName ( func_name ) ; <nl> + std : : string mlir_func_name = <nl> + graph_flib_ . UniqueFunctionName ( StringRefToView ( func_name ) ) ; <nl> ( * tf_name_to_mlir_name_ ) [ func_name ] = mlir_func_name ; <nl> <nl> const auto & func_lib = graph_flib_ ; <nl> const auto * func_def = func_lib . Find ( func_name ) ; <nl> if ( func_def = = nullptr ) { <nl> return errors : : FailedPrecondition ( <nl> - absl : : StrCat ( " Failed to find function ' " , func_name , <nl> + absl : : StrCat ( " Failed to find function ' " , StringRefToView ( func_name ) , <nl> " ' . The imported TensorFlow GraphDef is ill - formed . " ) ) ; <nl> } <nl> <nl> StatusOr < mlir : : FunctionType > GraphDefImporter : : InferMainFunctionType ( <nl> return builder . getFunctionType ( arg_types , ret_types ) ; <nl> } <nl> <nl> + / / Stateful helper class to import a TensorFlow model expressed in SavedModel <nl> + / / into an MLIR Module . <nl> + class SavedModelImporter : public ImporterBase { <nl> + public : <nl> + / / Main entry point : converts all functions in the given meta graph to an MLIR <nl> + / / Module . 
<nl> + static StatusOr < mlir : : OwningModuleRef > Convert ( <nl> + const MetaGraphDef & meta_graph , const GraphDebugInfo & debug_info , <nl> + bool add_default_attributes , mlir : : MLIRContext * context ) ; <nl> + <nl> + private : <nl> + explicit SavedModelImporter ( <nl> + const FunctionLibraryDefinition & flib , const GraphDebugInfo & debug_info , <nl> + const NodeSpecs & specs , mlir : : ModuleOp module , <nl> + std : : unordered_map < std : : string , std : : string > * tf_name_to_mlir_name ) <nl> + : ImporterBase ( flib , debug_info , specs , module , tf_name_to_mlir_name ) { } <nl> + } ; <nl> + <nl> + StatusOr < mlir : : OwningModuleRef > SavedModelImporter : : Convert ( <nl> + const MetaGraphDef & meta_graph , const GraphDebugInfo & debug_info , <nl> + bool add_default_attributes , mlir : : MLIRContext * context ) { <nl> + NodeSpecs specs ; <nl> + mlir : : OwningModuleRef module = <nl> + mlir : : ModuleOp : : create ( mlir : : UnknownLoc : : get ( context ) ) ; <nl> + std : : unordered_map < std : : string , std : : string > tf_name_to_mlir_name ; <nl> + <nl> + const auto & graphdef = meta_graph . graph_def ( ) ; <nl> + GraphConstructorOptions options ; <nl> + options . allow_internal_ops = true ; <nl> + Graph graph ( OpRegistry : : Global ( ) ) ; <nl> + <nl> + GraphDef preprocessed_graphdef ( graphdef ) ; <nl> + if ( add_default_attributes ) { <nl> + TF_RETURN_IF_ERROR ( PreprocessGraphDef ( nullptr , & preprocessed_graphdef ) ) ; <nl> + } <nl> + <nl> + TF_RETURN_IF_ERROR ( <nl> + ConvertGraphDefToGraph ( options , preprocessed_graphdef , & graph ) ) ; <nl> + <nl> + SavedModelImporter importer ( graph . flib_def ( ) , debug_info , specs , module . get ( ) , <nl> + & tf_name_to_mlir_name ) ; <nl> + <nl> + auto fn_names = graph . flib_def ( ) . ListFunctionNames ( ) ; <nl> + for ( const auto & fn_name : fn_names ) { <nl> + TF_RETURN_IF_ERROR ( importer . ConvertLibFunction ( fn_name ) ) ; <nl> + } <nl> + return module ; <nl> + } <nl> } / / namespace <nl> <nl> StatusOr < mlir : : OwningModuleRef > ConvertGraphdefToMlir ( <nl> StatusOr < mlir : : OwningModuleRef > ConvertGraphdefToMlir ( <nl> <nl> GraphDef preprocessed_graphdef ( graphdef ) ; <nl> if ( add_default_attributes ) { <nl> - TF_RETURN_IF_ERROR ( PreprocessGraphDef ( specs , & preprocessed_graphdef ) ) ; <nl> + TF_RETURN_IF_ERROR ( PreprocessGraphDef ( & specs , & preprocessed_graphdef ) ) ; <nl> } <nl> TF_RETURN_IF_ERROR ( ConvertGraphDefToGraph ( <nl> options , std : : move ( preprocessed_graphdef ) , & graph ) ) ; <nl> StatusOr < mlir : : OwningModuleRef > ConvertGraphToMlir ( <nl> return GraphDefImporter : : Convert ( context , graph , debug_info , flib_def , specs ) ; <nl> } <nl> <nl> + StatusOr < mlir : : OwningModuleRef > ConvertSavedModelToMlir ( <nl> + const SavedModelBundle & saved_model , const GraphDebugInfo & debug_info , <nl> + mlir : : MLIRContext * context , bool add_default_attributes ) { <nl> + return SavedModelImporter : : Convert ( saved_model . meta_graph_def , debug_info , <nl> + add_default_attributes , context ) ; <nl> + } <nl> + <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / compiler / mlir / tensorflow / translate / import_model . h <nl> ppp b / tensorflow / compiler / mlir / tensorflow / translate / import_model . h <nl> limitations under the License . <nl> <nl> # include " mlir / IR / MLIRContext . h " / / TF : local_config_mlir <nl> # include " mlir / IR / Module . h " / / TF : local_config_mlir <nl> + # include " tensorflow / cc / saved_model / loader . 
h " <nl> # include " tensorflow / compiler / mlir / tensorflow / translate / mlir_roundtrip_flags . h " <nl> # include " tensorflow / core / framework / function . h " <nl> # include " tensorflow / core / framework / graph . pb . h " <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> <nl> - / / Given a GraphDef , returns a MLIR module containing the graph in control - flow <nl> - / / form . <nl> + / / Given a GraphDef , returns a MLIR module containing the graph , expressed with <nl> + / / tf_executor dialect . <nl> stream_executor : : port : : StatusOr < mlir : : OwningModuleRef > ConvertGraphdefToMlir ( <nl> const GraphDef & graphdef , const GraphDebugInfo & debug_info , <nl> const NodeSpecs & specs , mlir : : MLIRContext * context , <nl> bool add_default_attributes = true ) ; <nl> <nl> - / / Given a Graph , returns a MLIR module containing the graph in control - flow <nl> - / / form . <nl> + / / Given a Graph , returns a MLIR module containing the graph , expressed with <nl> + / / tf_executor dialect . <nl> stream_executor : : port : : StatusOr < mlir : : OwningModuleRef > ConvertGraphToMlir ( <nl> const Graph & graph , const GraphDebugInfo & debug_info , <nl> const FunctionLibraryDefinition & flib_def , const NodeSpecs & specs , <nl> mlir : : MLIRContext * context ) ; <nl> <nl> + / / Given a SavedModel , returns a MLIR module containing the functions , expressed <nl> + / / with tf_executor dialect . <nl> + stream_executor : : port : : StatusOr < mlir : : OwningModuleRef > ConvertSavedModelToMlir ( <nl> + const SavedModelBundle & saved_model , const GraphDebugInfo & debug_info , <nl> + mlir : : MLIRContext * context , bool add_default_attributes = true ) ; <nl> + <nl> } / / namespace tensorflow <nl> <nl> # endif / / TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSLATE_IMPORT_MODEL_H_ <nl> mmm a / tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate . cc <nl> mlir : : OwningModuleRef GraphdefToMlirTranslateFunction ( <nl> return module_or . ConsumeValueOrDie ( ) ; <nl> } <nl> <nl> + mlir : : OwningModuleRef SavedModelToMlirImport ( <nl> + absl : : string_view saved_model_dir , <nl> + const std : : unordered_set < std : : string > & tags , <nl> + absl : : string_view debug_info_file , mlir : : MLIRContext * context ) { <nl> + SessionOptions session_options ; <nl> + RunOptions run_options ; <nl> + tensorflow : : SavedModelBundle bundle ; <nl> + auto load_status = LoadSavedModel ( <nl> + session_options , run_options , <nl> + std : : string ( saved_model_dir . data ( ) , saved_model_dir . length ( ) ) , tags , <nl> + & bundle ) ; <nl> + if ( ! load_status . ok ( ) ) { <nl> + LOG ( ERROR ) < < " Failed to load saved model : " < < saved_model_dir ; <nl> + return nullptr ; <nl> + } <nl> + <nl> + GraphDebugInfo debug_info ; <nl> + if ( ! debug_info_file . empty ( ) ) { <nl> + if ( ! LoadProtoFromFile ( debug_info_file , & debug_info ) . ok ( ) ) { <nl> + LOG ( ERROR ) < < " Failed to load debug info file : " < < debug_info_file ; <nl> + return nullptr ; <nl> + } <nl> + } <nl> + <nl> + auto module_or = ConvertSavedModelToMlir ( bundle , debug_info , context ) ; <nl> + <nl> + if ( ! module_or . status ( ) . ok ( ) ) { <nl> + LOG ( ERROR ) < < " SavedModel import failed : " < < module_or . status ( ) ; <nl> + return nullptr ; <nl> + } <nl> + return module_or . 
ConsumeValueOrDie ( ) ; <nl> + } <nl> + <nl> mlir : : OwningModuleRef GraphdefToSplattedMlirTranslateFunction ( <nl> absl : : string_view input_filename , absl : : string_view debug_info_file , <nl> absl : : string_view input_arrays , absl : : string_view input_dtypes , <nl> mmm a / tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate . h <nl> ppp b / tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate . h <nl> limitations under the License . <nl> # ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSLATE_TF_MLIR_TRANSLATE_H_ <nl> # define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSLATE_TF_MLIR_TRANSLATE_H_ <nl> <nl> + # include < string > <nl> + # include < unordered_set > <nl> + <nl> # include " absl / strings / string_view . h " <nl> # include " mlir / IR / MLIRContext . h " / / TF : local_config_mlir <nl> # include " mlir / IR / Module . h " / / TF : local_config_mlir <nl> mlir : : OwningModuleRef GraphdefToSplattedMlirTranslateFunction ( <nl> absl : : string_view max_values , bool prune_unused_nodes , <nl> bool convert_legacy_fed_inputs , bool graph_as_function , <nl> mlir : : MLIRContext * context ) ; <nl> + <nl> + / / Converts a TensorFlow SavedModel stored in the directory with the given <nl> + / / ` saved_model_dir ` into a MLIR module . Creates MLIR entities into the <nl> + / / given MLIR ` context ` . <nl> + mlir : : OwningModuleRef SavedModelToMlirImport ( <nl> + absl : : string_view saved_model_dir , <nl> + const std : : unordered_set < std : : string > & tags , <nl> + absl : : string_view debug_info_file , mlir : : MLIRContext * context ) ; <nl> } / / namespace tensorflow <nl> <nl> # endif / / TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSLATE_TF_MLIR_TRANSLATE_H_ <nl> mmm a / tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate_cl . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate_cl . cc <nl> opt < bool > convert_legacy_fed_inputs ( <nl> opt < bool > graph_as_function ( " tf - graph - as - function " , <nl> llvm : : cl : : desc ( " Treat main graph as a function " ) , <nl> llvm : : cl : : init ( false ) ) ; <nl> + <nl> + / / NOLINTNEXTLINE <nl> + opt < std : : string > saved_model_tags ( <nl> + " tf - savedmodel - tags " , <nl> + llvm : : cl : : desc ( " Tags used to indicate which MetaGraphDef to import , " <nl> + " separated by ' , ' " ) , <nl> + llvm : : cl : : init ( " " ) ) ; <nl> mmm a / tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate_cl . h <nl> ppp b / tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate_cl . h <nl> extern llvm : : cl : : opt < bool > prune_unused_nodes ; <nl> extern llvm : : cl : : opt < bool > convert_legacy_fed_inputs ; <nl> extern llvm : : cl : : opt < bool > graph_as_function ; <nl> <nl> + extern llvm : : cl : : opt < std : : string > saved_model_tags ; <nl> + <nl> # endif / / TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSLATE_TF_MLIR_TRANSLATE_CL_H_ <nl> mmm a / tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate_registration . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / translate / tf_mlir_translate_registration .
cc <nl> static OwningModuleRef GraphdefToMlirTranslateFunction ( <nl> static TranslateToMLIRRegistration GraphdefToMlirTranslate ( <nl> " graphdef - to - mlir " , GraphdefToMlirTranslateFunction ) ; <nl> <nl> + static OwningModuleRef SavedModelToMlirTranslateFunction ( <nl> + llvm : : StringRef input_filename , MLIRContext * context ) { <nl> + std : : unordered_set < std : : string > tags = absl : : StrSplit ( saved_model_tags , ' , ' ) ; <nl> + return tensorflow : : SavedModelToMlirImport ( StringRefToView ( input_filename ) , <nl> + tags , debug_info_file , context ) ; <nl> + } <nl> + <nl> + static TranslateToMLIRRegistration SavedModelToMlirTranslate ( <nl> + " savedmodel - to - mlir " , SavedModelToMlirTranslateFunction ) ; <nl> + <nl> static OwningModuleRef GraphdefToSplattedMlirTranslateFunction ( <nl> llvm : : StringRef input_filename , MLIRContext * context ) { <nl> return tensorflow : : GraphdefToSplattedMlirTranslateFunction ( <nl>
|
Add basic plumbing for a SavedModel importer
|
tensorflow/tensorflow
|
aed86648b4b5463feb6c282637d57bfa6f01a5a7
|
2019-08-22T02:46:12Z
|
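The commit above exposes the import both as a library call (`ConvertSavedModelToMlir` / `SavedModelToMlirImport`) and as a registered translation. A minimal sketch of calling the new entry point programmatically follows; it mirrors the half_plus_two test from the diff, but the SavedModel path and the final `dump()` call are illustrative assumptions rather than part of the commit.

```cpp
// Minimal usage sketch. Assumptions: the SavedModel directory below is
// hypothetical, and dump() is the standard MLIR Op debugging helper.
#include <string>
#include <unordered_set>

#include "mlir/IR/MLIRContext.h"  // TF:local_config_mlir
#include "tensorflow/cc/saved_model/tag_constants.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"

int main() {
  // Hypothetical SavedModel directory; any exported model would do.
  const std::string saved_model_dir = "/tmp/half_plus_two/00000123";
  std::unordered_set<std::string> tags{tensorflow::kSavedModelTagServe};

  mlir::MLIRContext context;
  // Returns a null module (after logging the error) if loading the
  // SavedModel or converting it to MLIR fails.
  mlir::OwningModuleRef module = tensorflow::SavedModelToMlirImport(
      saved_model_dir, tags, /*debug_info_file=*/"", &context);
  if (!module) return 1;

  // Print the imported tf_executor-dialect module for inspection.
  module->dump();
  return 0;
}
```

The same import is also reachable from the command line through the newly registered `savedmodel-to-mlir` translation together with the `tf-savedmodel-tags` flag added in this commit.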
mmm a / fdbcli / fdbcli . actor . cpp <nl> ppp b / fdbcli / fdbcli . actor . cpp <nl> ACTOR Future < bool > exclude ( Database db , std : : vector < StringRef > tokens , Referenc <nl> <nl> ACTOR Future < bool > createSnapshot ( Database db , StringRef snapCmd ) { <nl> try { <nl> - UID snapUID = wait ( makeInterruptable ( mgmtSnapCreate ( db , snapCmd , 2 / * version * / ) ) ) ; <nl> - int version = 2 ; <nl> - if ( version = = 1 ) { <nl> - printf ( " Snapshots tagged with UID : % s , check logs for status \ n " , snapUID . toString ( ) . c_str ( ) ) ; <nl> - } else { <nl> - printf ( " Snapshots create succeeded with UID : % s \ n " , snapUID . toString ( ) . c_str ( ) ) ; <nl> - } <nl> + UID snapUID = wait ( makeInterruptable ( mgmtSnapCreate ( db , snapCmd ) ) ) ; <nl> + printf ( " Snapshots create succeeded with UID : % s \ n " , snapUID . toString ( ) . c_str ( ) ) ; <nl> } catch ( Error & e ) { <nl> fprintf ( stderr , " Snapshot create failed , % d ( % s ) . " <nl> " Please cleanup any instance level snapshots created . \ n " , e . code ( ) , e . what ( ) ) ; <nl> mmm a / fdbclient / CommitTransaction . h <nl> ppp b / fdbclient / CommitTransaction . h <nl> static const char * typeString [ ] = { " SetValue " , <nl> " ByteMax " , <nl> " MinV2 " , <nl> " AndV2 " , <nl> - " CompareAndClear " , <nl> - " Exec " } ; <nl> + " CompareAndClear " } ; <nl> <nl> struct MutationRef { <nl> static const int OVERHEAD_BYTES = 12 ; / / 12 is the size of Header in MutationList entries <nl> struct MutationRef { <nl> MinV2 , <nl> AndV2 , <nl> CompareAndClear , <nl> - / / ExecOp is always set with FIRST_IN_BATCH option to quickly identify <nl> - / / the op in a transaction batch while parsing it in TLog <nl> - Exec , <nl> MAX_ATOMIC_OP <nl> } ; <nl> / / This is stored this way for serialization purposes . <nl> mmm a / fdbclient / ManagementAPI . actor . cpp <nl> ppp b / fdbclient / ManagementAPI . actor . cpp <nl> ACTOR Future < std : : set < NetworkAddress > > checkForExcludingServers ( Database cx , vec <nl> return inProgressExclusion ; <nl> } <nl> <nl> - ACTOR Future < UID > mgmtSnapCreate ( Database cx , StringRef snapCmd , int version ) { <nl> + ACTOR Future < UID > mgmtSnapCreate ( Database cx , StringRef snapCmd ) { <nl> state int retryCount = 0 ; <nl> <nl> loop { <nl> state UID snapUID = deterministicRandom ( ) - > randomUniqueID ( ) ; <nl> try { <nl> - wait ( snapCreate ( cx , snapCmd , snapUID , version ) ) ; <nl> + wait ( snapCreate ( cx , snapCmd , snapUID ) ) ; <nl> TraceEvent ( " SnapCreateSucceeded " ) . detail ( " snapUID " , snapUID ) ; <nl> return snapUID ; <nl> } catch ( Error & e ) { <nl> mmm a / fdbclient / ManagementAPI . actor . h <nl> ppp b / fdbclient / ManagementAPI . actor . h <nl> bool schemaMatch ( json_spirit : : mValue const & schema , json_spirit : : mValue const & <nl> <nl> / / execute payload in ' snapCmd ' on all the coordinators , TLogs and <nl> / / storage nodes <nl> - ACTOR Future < UID > mgmtSnapCreate ( Database cx , StringRef snapCmd , int version ) ; <nl> + ACTOR Future < UID > mgmtSnapCreate ( Database cx , StringRef snapCmd ) ; <nl> <nl> # include " flow / unactorcompiler . h " <nl> # endif <nl> mmm a / fdbclient / MasterProxyInterface . h <nl> ppp b / fdbclient / MasterProxyInterface . 
h <nl> struct MasterProxyInterface { <nl> RequestStream < struct GetRawCommittedVersionRequest > getRawCommittedVersion ; <nl> RequestStream < struct TxnStateRequest > txnState ; <nl> RequestStream < struct GetHealthMetricsRequest > getHealthMetrics ; <nl> - RequestStream < struct ExecRequest > execReq ; <nl> RequestStream < struct ProxySnapRequest > proxySnapReq ; <nl> <nl> UID id ( ) const { return commit . getEndpoint ( ) . token ; } <nl> struct MasterProxyInterface { <nl> void serialize ( Archive & ar ) { <nl> serializer ( ar , locality , provisional , commit , getConsistentReadVersion , getKeyServersLocations , <nl> waitFailure , getStorageServerRejoinInfo , getRawCommittedVersion , <nl> - txnState , getHealthMetrics , execReq , proxySnapReq ) ; <nl> + txnState , getHealthMetrics , proxySnapReq ) ; <nl> } <nl> <nl> void initEndpoints ( ) { <nl> struct GetHealthMetricsRequest <nl> } <nl> } ; <nl> <nl> - struct ExecRequest <nl> - { <nl> - constexpr static FileIdentifier file_identifier = 22403900 ; <nl> - Arena arena ; <nl> - StringRef execPayload ; <nl> - ReplyPromise < Void > reply ; <nl> - Optional < UID > debugID ; <nl> - <nl> - explicit ExecRequest ( Optional < UID > const & debugID = Optional < UID > ( ) ) : debugID ( debugID ) { } <nl> - explicit ExecRequest ( StringRef exec , Optional < UID > debugID = Optional < UID > ( ) ) : execPayload ( exec ) , debugID ( debugID ) { } <nl> - <nl> - template < class Ar > <nl> - void serialize ( Ar & ar ) { <nl> - serializer ( ar , execPayload , reply , arena , debugID ) ; <nl> - } <nl> - } ; <nl> - <nl> struct ProxySnapRequest <nl> { <nl> constexpr static FileIdentifier file_identifier = 22204900 ; <nl> mmm a / fdbclient / NativeAPI . actor . cpp <nl> ppp b / fdbclient / NativeAPI . actor . cpp <nl> void Transaction : : atomicOp ( const KeyRef & key , const ValueRef & operand , MutationR <nl> TEST ( true ) ; / / NativeAPI atomic operation <nl> } <nl> <nl> - ACTOR Future < Void > executeCoordinators ( DatabaseContext * cx , StringRef execPayload , Optional < UID > debugID ) { <nl> - try { <nl> - if ( debugID . present ( ) ) { <nl> - g_traceBatch . addEvent ( " TransactionDebug " , debugID . get ( ) . first ( ) , " NativeAPI . executeCoordinators . Before " ) ; <nl> - } <nl> - <nl> - state ExecRequest req ( execPayload , debugID ) ; <nl> - if ( debugID . present ( ) ) { <nl> - g_traceBatch . addEvent ( " TransactionDebug " , debugID . get ( ) . first ( ) , <nl> - " NativeAPI . executeCoordinators . Inside loop " ) ; <nl> - } <nl> - wait ( loadBalance ( cx - > getMasterProxies ( false ) , & MasterProxyInterface : : execReq , req , cx - > taskID ) ) ; <nl> - if ( debugID . present ( ) ) <nl> - g_traceBatch . addEvent ( " TransactionDebug " , debugID . get ( ) . first ( ) , <nl> - " NativeAPI . executeCoordinators . After " ) ; <nl> - return Void ( ) ; <nl> - } catch ( Error & e ) { <nl> - TraceEvent ( " NativeAPI . executeCoordinatorsError " ) . error ( e ) ; <nl> - throw ; <nl> - } <nl> - } <nl> - <nl> - void Transaction : : execute ( const KeyRef & cmdType , const ValueRef & cmdPayload ) { <nl> - TraceEvent ( " Execute operation " ) . detail ( " Key " , cmdType . toString ( ) ) . detail ( " Value " , cmdPayload . toString ( ) ) ; <nl> - <nl> - if ( cmdType . size ( ) > CLIENT_KNOBS - > KEY_SIZE_LIMIT ) throw key_too_large ( ) ; <nl> - if ( cmdPayload . 
size ( ) > CLIENT_KNOBS - > VALUE_SIZE_LIMIT ) throw value_too_large ( ) ; <nl> - <nl> - auto & req = tr ; <nl> - <nl> - / / Helps with quickly finding the exec op in a tlog batch <nl> - setOption ( FDBTransactionOptions : : FIRST_IN_BATCH ) ; <nl> - <nl> - auto & t = req . transaction ; <nl> - auto r = singleKeyRange ( cmdType , req . arena ) ; <nl> - auto v = ValueRef ( req . arena , cmdPayload ) ; <nl> - t . mutations . push_back ( req . arena , MutationRef ( MutationRef : : Exec , r . begin , v ) ) ; <nl> - } <nl> - <nl> void Transaction : : clear ( const KeyRangeRef & range , bool addConflictRange ) { <nl> auto & req = tr ; <nl> auto & t = req . transaction ; <nl> void enableClientInfoLogging ( ) { <nl> TraceEvent ( SevInfo , " ClientInfoLoggingEnabled " ) ; <nl> } <nl> <nl> - ACTOR Future < Void > snapCreateVersion1 ( Database inputCx , StringRef snapCmd , UID snapUID ) { <nl> - state Transaction tr ( inputCx ) ; <nl> - state DatabaseContext * cx = inputCx . getPtr ( ) ; <nl> - / / remember the client ID before the snap operation <nl> - state UID preSnapClientUID = cx - > clientInfo - > get ( ) . id ; <nl> - <nl> - TraceEvent ( " SnapCreateEnter " ) <nl> - . detail ( " SnapCmd " , snapCmd . toString ( ) ) <nl> - . detail ( " UID " , snapUID ) <nl> - . detail ( " PreSnapClientUID " , preSnapClientUID ) ; <nl> - <nl> - StringRef snapCmdArgs = snapCmd ; <nl> - StringRef snapCmdPart = snapCmdArgs . eat ( " : " ) ; <nl> - Standalone < StringRef > snapUIDRef ( snapUID . toString ( ) ) ; <nl> - state Standalone < StringRef > snapPayloadRef = snapCmdPart <nl> - . withSuffix ( LiteralStringRef ( " : uid = " ) ) <nl> - . withSuffix ( snapUIDRef ) <nl> - . withSuffix ( LiteralStringRef ( " , " ) ) <nl> - . withSuffix ( snapCmdArgs ) ; <nl> - state Standalone < StringRef > <nl> - tLogCmdPayloadRef = LiteralStringRef ( " empty - binary : uid = " ) . withSuffix ( snapUIDRef ) ; <nl> - / / disable popping of TLog <nl> - tr . reset ( ) ; <nl> - loop { <nl> - try { <nl> - tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> - tr . execute ( execDisableTLogPop , tLogCmdPayloadRef ) ; <nl> - wait ( timeoutError ( tr . commit ( ) , 10 ) ) ; <nl> - break ; <nl> - } catch ( Error & e ) { <nl> - TraceEvent ( " DisableTLogPopFailed " ) . error ( e ) ; <nl> - wait ( tr . onError ( e ) ) ; <nl> - } <nl> - } <nl> - <nl> - TraceEvent ( " SnapCreateAfterLockingTLogs " ) . detail ( " UID " , snapUID ) ; <nl> - <nl> - / / snap the storage and Tlogs <nl> - / / if we retry the below command in failure cases with the same snapUID <nl> - / / then the snapCreate can end up creating multiple snapshots with <nl> - / / the same name which needs additional handling , hence we fail in <nl> - / / failure cases and let the caller retry with different snapUID <nl> - tr . reset ( ) ; <nl> - try { <nl> - tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> - tr . execute ( execSnap , snapPayloadRef ) ; <nl> - wait ( tr . commit ( ) ) ; <nl> - } catch ( Error & e ) { <nl> - TraceEvent ( " SnapCreateErroSnapTLogStorage " ) . error ( e ) ; <nl> - throw ; <nl> - } <nl> - <nl> - TraceEvent ( " SnapCreateAfterSnappingTLogStorage " ) . detail ( " UID " , snapUID ) ; <nl> - <nl> - if ( BUGGIFY ) { <nl> - int32_t toDelay = deterministicRandom ( ) - > randomInt ( 1 , 30 ) ; <nl> - wait ( delay ( toDelay ) ) ; <nl> - } <nl> - <nl> - / / enable popping of the TLog <nl> - tr . reset ( ) ; <nl> - loop { <nl> - try { <nl> - tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> - tr . 
execute ( execEnableTLogPop , tLogCmdPayloadRef ) ; <nl> - wait ( tr . commit ( ) ) ; <nl> - break ; <nl> - } catch ( Error & e ) { <nl> - TraceEvent ( " EnableTLogPopFailed " ) . error ( e ) ; <nl> - wait ( tr . onError ( e ) ) ; <nl> - } <nl> - } <nl> - <nl> - TraceEvent ( " SnapCreateAfterUnlockingTLogs " ) . detail ( " UID " , snapUID ) ; <nl> - <nl> - / / snap the coordinators <nl> - try { <nl> - Future < Void > exec = executeCoordinators ( cx , snapPayloadRef , snapUID ) ; <nl> - wait ( timeoutError ( exec , 5 . 0 ) ) ; <nl> - } catch ( Error & e ) { <nl> - TraceEvent ( " SnapCreateErrorSnapCoords " ) . error ( e ) ; <nl> - throw ; <nl> - } <nl> - <nl> - TraceEvent ( " SnapCreateAfterSnappingCoords " ) . detail ( " UID " , snapUID ) ; <nl> - <nl> - / / if the client IDs did not change then we have a clean snapshot <nl> - UID postSnapClientUID = cx - > clientInfo - > get ( ) . id ; <nl> - if ( preSnapClientUID ! = postSnapClientUID ) { <nl> - TraceEvent ( " UID mismatch " ) <nl> - . detail ( " SnapPreSnapClientUID " , preSnapClientUID ) <nl> - . detail ( " SnapPostSnapClientUID " , postSnapClientUID ) ; <nl> - throw coordinators_changed ( ) ; <nl> - } <nl> - <nl> - TraceEvent ( " SnapCreateComplete " ) . detail ( " UID " , snapUID ) ; <nl> - return Void ( ) ; <nl> - } <nl> - <nl> ACTOR Future < Void > snapshotDatabase ( Reference < DatabaseContext > cx , StringRef snapPayload , UID snapUID , Optional < UID > debugID ) { <nl> TraceEvent ( " NativeAPI . SnapshotDatabaseEnter " ) <nl> . detail ( " SnapPayload " , snapPayload ) <nl> ACTOR Future < Void > snapshotDatabase ( Reference < DatabaseContext > cx , StringRef sna <nl> return Void ( ) ; <nl> } <nl> <nl> - ACTOR Future < Void > snapCreateVersion2 ( Database cx , StringRef snapCmd , UID snapUID ) { <nl> + ACTOR Future < Void > snapCreateCore ( Database cx , StringRef snapCmd , UID snapUID ) { <nl> / / remember the client ID before the snap operation <nl> state UID preSnapClientUID = cx - > clientInfo - > get ( ) . id ; <nl> <nl> - TraceEvent ( " SnapCreateEnterVersion2 " ) <nl> + TraceEvent ( " SnapCreateCoreEnter " ) <nl> . detail ( " SnapCmd " , snapCmd . toString ( ) ) <nl> . detail ( " UID " , snapUID ) <nl> . detail ( " PreSnapClientUID " , preSnapClientUID ) ; <nl> ACTOR Future < Void > snapCreateVersion2 ( Database cx , StringRef snapCmd , UID snapUI <nl> Future < Void > exec = snapshotDatabase ( Reference < DatabaseContext > : : addRef ( cx . getPtr ( ) ) , snapPayloadRef , snapUID , snapUID ) ; <nl> wait ( exec ) ; <nl> } catch ( Error & e ) { <nl> - TraceEvent ( " SnapshotDatabaseErrorVersion2 " ) <nl> + TraceEvent ( " SnapCreateCoreError " ) <nl> . detail ( " SnapCmd " , snapCmd . toString ( ) ) <nl> . detail ( " UID " , snapUID ) <nl> . error ( e ) ; <nl> ACTOR Future < Void > snapCreateVersion2 ( Database cx , StringRef snapCmd , UID snapUI <nl> UID postSnapClientUID = cx - > clientInfo - > get ( ) . id ; <nl> if ( preSnapClientUID ! = postSnapClientUID ) { <nl> / / if the client IDs changed then we fail the snapshot <nl> - TraceEvent ( " UIDMismatchVersion2 " ) <nl> + TraceEvent ( " SnapCreateCoreUIDMismatch " ) <nl> . detail ( " SnapPreSnapClientUID " , preSnapClientUID ) <nl> . detail ( " SnapPostSnapClientUID " , postSnapClientUID ) ; <nl> throw coordinators_changed ( ) ; <nl> } <nl> <nl> - TraceEvent ( " SnapCreateExitVersion2 " ) <nl> + TraceEvent ( " SnapCreateCoreExit " ) <nl> . detail ( " SnapCmd " , snapCmd . toString ( ) ) <nl> . detail ( " UID " , snapUID ) <nl> . 
detail ( " PreSnapClientUID " , preSnapClientUID ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> - ACTOR Future < Void > snapCreate ( Database cx , StringRef snapCmd , UID snapUID , int version ) { <nl> - if ( version = = 1 ) { <nl> - wait ( snapCreateVersion1 ( cx , snapCmd , snapUID ) ) ; <nl> - return Void ( ) ; <nl> - } <nl> + ACTOR Future < Void > snapCreate ( Database cx , StringRef snapCmd , UID snapUID ) { <nl> state int oldMode = wait ( setDDMode ( cx , 0 ) ) ; <nl> try { <nl> - wait ( snapCreateVersion2 ( cx , snapCmd , snapUID ) ) ; <nl> + wait ( snapCreateCore ( cx , snapCmd , snapUID ) ) ; <nl> } catch ( Error & e ) { <nl> state Error err = e ; <nl> wait ( success ( setDDMode ( cx , oldMode ) ) ) ; <nl> mmm a / fdbclient / NativeAPI . actor . h <nl> ppp b / fdbclient / NativeAPI . actor . h <nl> class Transaction : NonCopyable { <nl> / / If checkWriteConflictRanges is true , existing write conflict ranges will be searched for this key <nl> void set ( const KeyRef & key , const ValueRef & value , bool addConflictRange = true ) ; <nl> void atomicOp ( const KeyRef & key , const ValueRef & value , MutationRef : : Type operationType , bool addConflictRange = true ) ; <nl> - / / execute operation is similar to set , but the command will reach <nl> - / / one of the proxies , all the TLogs and all the storage nodes . <nl> - / / instead of setting a key and value on the DB , it executes the command <nl> - / / that is passed in the value field . <nl> - / / - cmdType can be used for logging purposes <nl> - / / - cmdPayload contains the details of the command to be executed : <nl> - / / format of the cmdPayload : < binary - path > : < arg1 = val1 > , < arg2 = val2 > . . . <nl> - void execute ( const KeyRef & cmdType , const ValueRef & cmdPayload ) ; <nl> void clear ( const KeyRangeRef & range , bool addConflictRange = true ) ; <nl> void clear ( const KeyRef & key , bool addConflictRange = true ) ; <nl> Future < Void > commit ( ) ; / / Throws not_committed or commit_unknown_result errors in normal operation <nl> int64_t extractIntOption ( Optional < StringRef > value , int64_t minValue = std : : num <nl> <nl> / / Takes a snapshot of the cluster , specifically the following persistent <nl> / / states : coordinator , TLog and storage state <nl> - ACTOR Future < Void > snapCreate ( Database cx , StringRef snapCmd , UID snapUID , int version ) ; <nl> + ACTOR Future < Void > snapCreate ( Database cx , StringRef snapCmd , UID snapUID ) ; <nl> <nl> # include " flow / unactorcompiler . h " <nl> # endif <nl> mmm a / fdbserver / ConflictSet . h <nl> ppp b / fdbserver / ConflictSet . h <nl> struct ConflictBatch { <nl> TransactionConflict = 0 , <nl> TransactionTooOld , <nl> TransactionCommitted , <nl> - TransactionNotPermitted , <nl> - TransactionNotFullyRecovered , <nl> - TransactionExecLogAntiQuorum , <nl> } ; <nl> <nl> void addTransaction ( const CommitTransactionRef & transaction ) ; <nl> mmm a / fdbserver / FDBExecHelper . actor . cpp <nl> ppp b / fdbserver / FDBExecHelper . actor . 
cpp <nl> ACTOR Future < int > spawnProcess ( std : : string binPath , std : : vector < std : : string > par <nl> } <nl> # endif <nl> <nl> - ACTOR Future < int > execHelper ( ExecCmdValueString * execArg , std : : string folder , std : : string role , int snapVersion ) { <nl> + ACTOR Future < int > execHelper ( ExecCmdValueString * execArg , std : : string folder , std : : string role ) { <nl> state StringRef uidStr = execArg - > getBinaryArgValue ( LiteralStringRef ( " uid " ) ) ; <nl> state int err = 0 ; <nl> state Future < int > cmdErr ; <nl> - state double maxWaitTime = ( snapVersion = = 2 ) ? SERVER_KNOBS - > SNAP_CREATE_MAX_TIMEOUT : 3 . 0 ; <nl> + state double maxWaitTime = SERVER_KNOBS - > SNAP_CREATE_MAX_TIMEOUT ; <nl> if ( ! g_network - > isSimulated ( ) ) { <nl> / / get bin path <nl> auto snapBin = execArg - > getBinaryPath ( ) ; <nl> ACTOR Future < int > execHelper ( ExecCmdValueString * execArg , std : : string folder , st <nl> / / copy the files <nl> state std : : string folderFrom = folder + " / . " ; <nl> state std : : string folderTo = folder + " - snap - " + uidStr . toString ( ) ; <nl> - double maxSimDelayTime = 1 . 0 ; <nl> - if ( snapVersion = = 1 ) { <nl> - folderTo = folder + " - snap - " + uidStr . toString ( ) ; <nl> - } else { <nl> - folderTo = folder + " - snap - " + uidStr . toString ( ) + " - " + role ; <nl> - maxSimDelayTime = 10 . 0 ; <nl> - } <nl> + double maxSimDelayTime = 10 . 0 ; <nl> + folderTo = folder + " - snap - " + uidStr . toString ( ) + " - " + role ; <nl> std : : vector < std : : string > paramList ; <nl> std : : string mkdirBin = " / bin / mkdir " ; <nl> paramList . push_back ( folderTo ) ; <nl> mmm a / fdbserver / FDBExecHelper . actor . h <nl> ppp b / fdbserver / FDBExecHelper . actor . h <nl> class ExecCmdValueString { <nl> ACTOR Future < int > spawnProcess ( std : : string binPath , std : : vector < std : : string > paramList , double maxWaitTime , bool isSync , double maxSimDelayTime ) ; <nl> <nl> / / helper to run all the work related to running the exec command <nl> - ACTOR Future < int > execHelper ( ExecCmdValueString * execArg , std : : string folder , std : : string role , int version ) ; <nl> + ACTOR Future < int > execHelper ( ExecCmdValueString * execArg , std : : string folder , std : : string role ) ; <nl> <nl> / / returns true if the execUID op is in progress <nl> bool isExecOpInProgress ( UID execUID ) ; <nl> mmm a / fdbserver / LogSystem . h <nl> ppp b / fdbserver / LogSystem . h <nl> struct CompareFirst { <nl> struct LogPushData : NonCopyable { <nl> / / Log subsequences have to start at 1 ( the MergedPeekCursor relies on this to make sure we never have ! hasMessage ( ) in the middle of data for a version <nl> <nl> - explicit LogPushData ( Reference < ILogSystem > logSystem ) : logSystem ( logSystem ) , subsequence ( 1 ) , hasExecOp ( false ) { <nl> + explicit LogPushData ( Reference < ILogSystem > logSystem ) : logSystem ( logSystem ) , subsequence ( 1 ) { <nl> for ( auto & log : logSystem - > getLogSystemConfig ( ) . tLogs ) { <nl> if ( log . isLocal ) { <nl> for ( int i = 0 ; i < log . tLogs . size ( ) ; i + + ) { <nl> struct LogPushData : NonCopyable { <nl> return messagesWriter [ loc ] . 
toValue ( ) ; <nl> } <nl> <nl> - void setHasExecOp ( ) { hasExecOp = true ; } <nl> - <nl> - bool getHasExecOp ( ) { return hasExecOp ; } <nl> - <nl> private : <nl> Reference < ILogSystem > logSystem ; <nl> std : : vector < Tag > next_message_tags ; <nl> struct LogPushData : NonCopyable { <nl> std : : vector < BinaryWriter > messagesWriter ; <nl> std : : vector < int > msg_locations ; <nl> uint32_t subsequence ; <nl> - bool hasExecOp ; <nl> } ; <nl> <nl> # endif <nl> mmm a / fdbserver / MasterProxyServer . actor . cpp <nl> ppp b / fdbserver / MasterProxyServer . actor . cpp <nl> ACTOR Future < Void > commitBatch ( <nl> toCommit . addTags ( allSources ) ; <nl> } <nl> toCommit . addTypedMessage ( m ) ; <nl> - } else if ( m . type = = MutationRef : : Exec ) { <nl> - state std : : string param2 = m . param2 . toString ( ) ; <nl> - state ExecCmdValueString execArg ( param2 ) ; <nl> - execArg . dbgPrint ( ) ; <nl> - state StringRef binPath = execArg . getBinaryPath ( ) ; <nl> - state StringRef uidStr = execArg . getBinaryArgValue ( LiteralStringRef ( " uid " ) ) ; <nl> - <nl> - auto result = <nl> - self - > txnStateStore - > readValue ( LiteralStringRef ( " log_anti_quorum " ) . withPrefix ( configKeysPrefix ) ) . get ( ) ; <nl> - state int logAntiQuorum = 0 ; <nl> - if ( result . present ( ) ) { <nl> - logAntiQuorum = atoi ( result . get ( ) . toString ( ) . c_str ( ) ) ; <nl> - } <nl> - <nl> - if ( m . param1 ! = execDisableTLogPop <nl> - & & m . param1 ! = execEnableTLogPop <nl> - & & ! isWhitelisted ( self - > whitelistedBinPathVec , binPath ) ) { <nl> - TraceEvent ( " ExecTransactionNotPermitted " ) <nl> - . detail ( " TransactionNum " , transactionNum ) ; <nl> - committed [ transactionNum ] = ConflictBatch : : TransactionNotPermitted ; <nl> - } else if ( self - > db - > get ( ) . recoveryState ! = RecoveryState : : FULLY_RECOVERED ) { <nl> - / / Cluster is not fully recovered and needs TLogs <nl> - / / from previous generation for full recovery . <nl> - / / Currently , snapshot of old tlog generation is not <nl> - / / supported and hence failing the snapshot request until <nl> - / / cluster is fully_recovered . <nl> - TraceEvent ( " ExecTransactionNotFullyRecovered " ) <nl> - . detail ( " TransactionNum " , transactionNum ) ; <nl> - committed [ transactionNum ] = ConflictBatch : : TransactionNotFullyRecovered ; <nl> - } else if ( logAntiQuorum > 0 ) { <nl> - / / exec op is not supported when logAntiQuorum is configured <nl> - / / FIXME : Add support for exec ops in the presence of log anti quorum <nl> - TraceEvent ( " ExecOpNotSupportedWithLogAntiQuorum " ) <nl> - . detail ( " LogAntiQuorum " , logAntiQuorum ) <nl> - . detail ( " TransactionNum " , transactionNum ) ; <nl> - committed [ transactionNum ] = ConflictBatch : : TransactionExecLogAntiQuorum ; <nl> - } else { <nl> - / / Send the ExecOp to <nl> - / / - all the storage nodes in a single region and <nl> - / / - only to storage nodes in local region in multi - region setup <nl> - / / step 1 : get the DatabaseConfiguration <nl> - auto result = <nl> - self - > txnStateStore - > readValue ( LiteralStringRef ( " usable_regions " ) . withPrefix ( configKeysPrefix ) ) . get ( ) ; <nl> - ASSERT ( result . present ( ) ) ; <nl> - state int usableRegions = atoi ( result . get ( ) . toString ( ) . c_str ( ) ) ; <nl> - <nl> - / / step 2 : find the tag . id from locality info of the master <nl> - auto localityKey = <nl> - self - > txnStateStore - > readValue ( tagLocalityListKeyFor ( self - > master . locality . dcId ( ) ) ) . 
get ( ) ; <nl> - <nl> - int8_t locality = tagLocalityInvalid ; <nl> - if ( usableRegions > 1 ) { <nl> - if ( ! localityKey . present ( ) ) { <nl> - TraceEvent ( SevError , " LocalityKeyNotPresentForMasterDCID " ) ; <nl> - ASSERT ( localityKey . present ( ) ) ; <nl> - } <nl> - locality = decodeTagLocalityListValue ( localityKey . get ( ) ) ; <nl> - } <nl> - <nl> - std : : set < Tag > allSources ; <nl> - auto & m = ( * pMutations ) [ mutationNum ] ; <nl> - if ( debugMutation ( " ProxyCommit " , commitVersion , m ) ) <nl> - TraceEvent ( " ProxyCommitTo " , self - > dbgid ) <nl> - . detail ( " To " , " all sources " ) <nl> - . detail ( " Mutation " , m . toString ( ) ) <nl> - . detail ( " Version " , commitVersion ) ; <nl> - <nl> - std : : vector < Tag > localTags ; <nl> - auto tagKeys = self - > txnStateStore - > readRange ( serverTagKeys ) . get ( ) ; <nl> - for ( auto & kv : tagKeys ) { <nl> - Tag t = decodeServerTagValue ( kv . value ) ; <nl> - if ( ( usableRegions > 1 & & t . locality = = locality ) <nl> - | | ( usableRegions = = 1 ) ) { <nl> - localTags . push_back ( t ) ; <nl> - } <nl> - allSources . insert ( localTags . begin ( ) , localTags . end ( ) ) ; <nl> - } <nl> - <nl> - auto te1 = TraceEvent ( " ProxyCommitTo " , self - > dbgid ) ; <nl> - te1 . detail ( " To " , " all sources " ) ; <nl> - te1 . detail ( " UidStr " , uidStr ) ; <nl> - te1 . detail ( " Mutation " , m . toString ( ) ) ; <nl> - te1 . detail ( " Version " , commitVersion ) ; <nl> - te1 . detail ( " NumTags " , allSources . size ( ) ) ; <nl> - for ( auto & tag : allSources ) { <nl> - toCommit . addTag ( tag ) ; <nl> - } <nl> - toCommit . addTypedMessage ( m , true / * allLocations * / ) ; <nl> - toCommit . setHasExecOp ( ) ; <nl> - } <nl> } else <nl> UNREACHABLE ( ) ; <nl> <nl> ACTOR Future < Void > commitBatch ( <nl> else if ( committed [ t ] = = ConflictBatch : : TransactionTooOld ) { <nl> trs [ t ] . reply . sendError ( transaction_too_old ( ) ) ; <nl> } <nl> - else if ( committed [ t ] = = ConflictBatch : : TransactionNotPermitted ) { <nl> - trs [ t ] . reply . sendError ( transaction_not_permitted ( ) ) ; <nl> - } <nl> - else if ( committed [ t ] = = ConflictBatch : : TransactionNotFullyRecovered ) { <nl> - trs [ t ] . reply . sendError ( cluster_not_fully_recovered ( ) ) ; <nl> - } <nl> - else if ( committed [ t ] = = ConflictBatch : : TransactionExecLogAntiQuorum ) { <nl> - trs [ t ] . reply . sendError ( txn_exec_log_anti_quorum ( ) ) ; <nl> - } else { <nl> + else { <nl> trs [ t ] . reply . sendError ( not_committed ( ) ) ; <nl> } <nl> <nl> ACTOR Future < Void > masterProxyServerCore ( <nl> rep . version = commitData . committedVersion . get ( ) ; <nl> req . reply . send ( rep ) ; <nl> } <nl> - when ( ExecRequest _execReq = waitNext ( proxy . execReq . getFuture ( ) ) ) { <nl> - state ExecRequest execReq = _execReq ; <nl> - if ( execReq . debugID . present ( ) ) <nl> - g_traceBatch . addEvent ( " TransactionDebug " , execReq . debugID . get ( ) . first ( ) , <nl> - " MasterProxyServer . masterProxyServerCore . " <nl> - " ExecRequest " ) ; <nl> - <nl> - TraceEvent ( " ExecRequest " ) . detail ( " Payload " , execReq . execPayload . toString ( ) ) ; <nl> - <nl> - / / get the list of coordinators <nl> - state Optional < Value > coordinators = commitData . txnStateStore - > readValue ( coordinatorsKey ) . get ( ) ; <nl> - state std : : vector < NetworkAddress > coordinatorsAddr = <nl> - ClusterConnectionString ( coordinators . get ( ) . toString ( ) ) . 
coordinators ( ) ; <nl> - state std : : set < NetworkAddress > coordinatorsAddrSet ; <nl> - for ( int i = 0 ; i < coordinatorsAddr . size ( ) ; i + + ) { <nl> - TraceEvent ( SevDebug , " CoordinatorAddress " ) . detail ( " Addr " , coordinatorsAddr [ i ] ) ; <nl> - coordinatorsAddrSet . insert ( coordinatorsAddr [ i ] ) ; <nl> - } <nl> - <nl> - / / get the list of workers <nl> - state std : : vector < WorkerDetails > workers = <nl> - wait ( commitData . db - > get ( ) . clusterInterface . getWorkers . getReply ( GetWorkersRequest ( ) ) ) ; <nl> - <nl> - / / send the exec command to the list of workers which are <nl> - / / coordinators <nl> - state vector < Future < Void > > execCoords ; <nl> - for ( int i = 0 ; i < workers . size ( ) ; i + + ) { <nl> - NetworkAddress primary = workers [ i ] . interf . address ( ) ; <nl> - Optional < NetworkAddress > secondary = workers [ i ] . interf . tLog . getEndpoint ( ) . addresses . secondaryAddress ; <nl> - if ( coordinatorsAddrSet . find ( primary ) ! = coordinatorsAddrSet . end ( ) <nl> - | | ( secondary . present ( ) & & ( coordinatorsAddrSet . find ( secondary . get ( ) ) ! = coordinatorsAddrSet . end ( ) ) ) ) { <nl> - TraceEvent ( " ExecReqToCoordinator " ) <nl> - . detail ( " PrimaryWorkerAddr " , primary ) <nl> - . detail ( " SecondaryWorkerAddr " , secondary ) ; <nl> - execCoords . push_back ( brokenPromiseToNever ( workers [ i ] . interf . execReq . getReply ( ExecuteRequest ( execReq . execPayload ) ) ) ) ; <nl> - } <nl> - } <nl> - if ( execCoords . size ( ) < = 0 ) { <nl> - TraceEvent ( SevDebug , " CoordinatorWorkersNotFound " ) ; <nl> - execReq . reply . sendError ( operation_failed ( ) ) ; <nl> - } else { <nl> - try { <nl> - wait ( timeoutError ( waitForAll ( execCoords ) , 10 . 0 ) ) ; <nl> - int numSucc = 0 ; <nl> - for ( auto item : execCoords ) { <nl> - if ( item . isValid ( ) & & item . isReady ( ) ) { <nl> - + + numSucc ; <nl> - } <nl> - } <nl> - bool succ = ( numSucc > = ( ( execCoords . size ( ) + 1 ) / 2 ) ) ; <nl> - succ ? execReq . reply . send ( Void ( ) ) : execReq . reply . sendError ( operation_failed ( ) ) ; <nl> - } catch ( Error & e ) { <nl> - TraceEvent ( " WaitingForAllExecCoords " ) . error ( e ) ; <nl> - execReq . reply . sendError ( broken_promise ( ) ) ; <nl> - } <nl> - } <nl> - } <nl> when ( ProxySnapRequest snapReq = waitNext ( proxy . proxySnapReq . getFuture ( ) ) ) { <nl> addActor . send ( proxySnapCreate ( snapReq , & commitData ) ) ; <nl> } <nl> mmm a / fdbserver / OldTLogServer_6_0 . actor . cpp <nl> ppp b / fdbserver / OldTLogServer_6_0 . actor . cpp <nl> ACTOR Future < Void > commitQueue ( TLogData * self ) { <nl> } <nl> } <nl> <nl> - void execProcessingHelper ( TLogData * self , <nl> - Reference < LogData > logData , <nl> - TLogCommitRequest * req , <nl> - Standalone < VectorRef < Tag > > * execTags , <nl> - ExecCmdValueString * execArg , <nl> - StringRef * execCmd , <nl> - Version * execVersion , <nl> - vector < Future < Void > > * snapFailKeySetters , <nl> - vector < Future < Void > > * ignoredPops ) <nl> - { <nl> - / / inspect the messages to find if there is an Exec type and print <nl> - / / it . message are prefixed by the length of the message and each <nl> - / / field is prefixed by the length too <nl> - uint8_t type = MutationRef : : MAX_ATOMIC_OP ; <nl> - StringRef param2 ; <nl> - ArenaReader rd ( req - > arena , req - > messages , Unversioned ( ) ) ; <nl> - int32_t messageLength , rawLength ; <nl> - uint16_t tagCount ; <nl> - uint32_t sub ; <nl> - while ( ! rd . 
empty ( ) ) { <nl> - Tag tmpTag ; <nl> - bool hasTxsTag = false ; <nl> - rd . checkpoint ( ) ; <nl> - rd > > messageLength > > sub > > tagCount ; <nl> - for ( int i = 0 ; i < tagCount ; i + + ) { <nl> - rd > > tmpTag ; <nl> - if ( tmpTag . locality = = tagLocalityTxs | | tmpTag = = txsTag ) { <nl> - hasTxsTag = true ; <nl> - } <nl> - execTags - > push_back ( execTags - > arena ( ) , tmpTag ) ; <nl> - } <nl> - if ( ! hasTxsTag ) { <nl> - rd > > type ; <nl> - if ( type = = MutationRef : : Exec ) { <nl> - break ; <nl> - } <nl> - } <nl> - rawLength = messageLength + sizeof ( messageLength ) ; <nl> - rd . rewind ( ) ; <nl> - rd . readBytes ( rawLength ) ; <nl> - } <nl> - <nl> - int32_t len = 0 ; <nl> - if ( type = = MutationRef : : Exec ) { <nl> - / / get param1 <nl> - rd > > len ; <nl> - * execCmd = StringRef ( ( uint8_t const * ) rd . readBytes ( len ) , len ) ; <nl> - / / get param2 <nl> - rd > > len ; <nl> - param2 = StringRef ( ( uint8_t const * ) rd . readBytes ( len ) , len ) ; <nl> - <nl> - TraceEvent ( SevDebug , " TLogExecCommandType " , self - > dbgid ) <nl> - . detail ( " Value " , execCmd - > toString ( ) ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - <nl> - execArg - > setCmdValueString ( param2 ) ; <nl> - execArg - > dbgPrint ( ) ; <nl> - StringRef uidStr = execArg - > getBinaryArgValue ( LiteralStringRef ( " uid " ) ) ; <nl> - if ( ! execCmd - > startsWith ( LiteralStringRef ( " \ xff " ) ) ) { <nl> - * execVersion = req - > version ; <nl> - } <nl> - if ( * execCmd = = execSnap ) { <nl> - / / validation check specific to snap request <nl> - std : : string reason ; <nl> - if ( ! self - > ignorePopRequest ) { <nl> - * execVersion = invalidVersion ; <nl> - reason = " SnapFailIgnorePopNotSet " ; <nl> - } else if ( uidStr . toString ( ) ! = self - > ignorePopUid ) { <nl> - * execVersion = invalidVersion ; <nl> - reason = " SnapFailedDisableTLogUidMismatch " ; <nl> - } <nl> - <nl> - if ( * execVersion = = invalidVersion ) { <nl> - TraceEvent ( SevWarn , " TLogSnapFailed " ) <nl> - . detail ( " IgnorePopUid " , self - > ignorePopUid ) <nl> - . detail ( " IgnorePopRequest " , self - > ignorePopRequest ) <nl> - . detail ( " Reason " , reason ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - <nl> - TraceEvent ( " ExecCmdSnapCreate " ) <nl> - . detail ( " Uid " , uidStr . toString ( ) ) <nl> - . detail ( " Status " , - 1 ) <nl> - . detail ( " Tag " , logData - > allTags . begin ( ) - > toString ( ) ) <nl> - . detail ( " Role " , " TLog " ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - if ( g_network - > isSimulated ( ) ) { <nl> - / / write SnapFailedTLog . $ UID <nl> - Standalone < StringRef > keyStr = snapTestFailStatus . withSuffix ( uidStr ) ; <nl> - Standalone < StringRef > valStr = LiteralStringRef ( " Success " ) ; <nl> - TraceEvent ( SevDebug , " TLogKeyStr " ) . detail ( " Value " , keyStr ) ; <nl> - snapFailKeySetters - > push_back ( runRYWTransaction ( self - > cx , [ = ] ( Reference < ReadYourWritesTransaction > tr ) - > Future < Void > <nl> - { tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; tr - > set ( keyStr , valStr ) ; return Void ( ) ; } ) ) ; <nl> - } <nl> - } <nl> - } <nl> - if ( * execCmd = = execDisableTLogPop ) { <nl> - self - > ignorePopRequest = true ; <nl> - if ( self - > ignorePopUid ! = " " ) { <nl> - TraceEvent ( SevWarn , " TLogPopDisableonDisable " ) <nl> - . detail ( " IgnorePopUid " , self - > ignorePopUid ) <nl> - . detail ( " UidStr " , uidStr . toString ( ) ) <nl> - . 
detail ( " Version " , req - > version ) ; <nl> - } <nl> - self - > ignorePopUid = uidStr . toString ( ) ; <nl> - self - > ignorePopDeadline = g_network - > now ( ) + SERVER_KNOBS - > TLOG_IGNORE_POP_AUTO_ENABLE_DELAY ; <nl> - TraceEvent ( " TLogExecCmdPopDisable " ) <nl> - . detail ( " ExecCmd " , execCmd - > toString ( ) ) <nl> - . detail ( " UidStr " , uidStr . toString ( ) ) <nl> - . detail ( " IgnorePopUid " , self - > ignorePopUid ) <nl> - . detail ( " IgnporePopRequest " , self - > ignorePopRequest ) <nl> - . detail ( " IgnporePopDeadline " , self - > ignorePopDeadline ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - } <nl> - if ( * execCmd = = execEnableTLogPop ) { <nl> - if ( self - > ignorePopUid ! = uidStr . toString ( ) ) { <nl> - TraceEvent ( SevWarn , " TLogPopDisableEnableUidMismatch " ) <nl> - . detail ( " IgnorePopUid " , self - > ignorePopUid ) <nl> - . detail ( " UidStr " , uidStr . toString ( ) ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - } <nl> - <nl> - TraceEvent ( " EnableTLogPlayAllIgnoredPops2 " ) ; <nl> - / / use toBePopped and issue all the pops <nl> - std : : map < Tag , Version > : : iterator it ; <nl> - self - > ignorePopRequest = false ; <nl> - self - > ignorePopDeadline = 0 . 0 ; <nl> - self - > ignorePopUid = " " ; <nl> - for ( it = self - > toBePopped . begin ( ) ; it ! = self - > toBePopped . end ( ) ; it + + ) { <nl> - TraceEvent ( " PlayIgnoredPop " ) <nl> - . detail ( " Tag " , it - > first . toString ( ) ) <nl> - . detail ( " Version " , it - > second ) ; <nl> - ignoredPops - > push_back ( tLogPopCore ( self , it - > first , it - > second , logData ) ) ; <nl> - } <nl> - self - > toBePopped . clear ( ) ; <nl> - TraceEvent ( " TLogExecCmdPopEnable " ) <nl> - . detail ( " ExecCmd " , execCmd - > toString ( ) ) <nl> - . detail ( " UidStr " , uidStr . toString ( ) ) <nl> - . detail ( " IgnorePopUid " , self - > ignorePopUid ) <nl> - . detail ( " IgnporePopRequest " , self - > ignorePopRequest ) <nl> - . detail ( " IgnporePopDeadline " , self - > ignorePopDeadline ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - <nl> - ACTOR Future < Void > tLogSnapHelper ( TLogData * self , <nl> - Reference < LogData > logData , <nl> - ExecCmdValueString * execArg , <nl> - Version version , <nl> - Version execVersion , <nl> - StringRef execCmd , <nl> - Standalone < VectorRef < Tag > > execTags ) <nl> - { <nl> - state int err = 0 ; <nl> - state StringRef uidStr = execArg - > getBinaryArgValue ( LiteralStringRef ( " uid " ) ) ; <nl> - state UID execUID = UID : : fromString ( uidStr . toString ( ) ) ; <nl> - state bool otherRoleExeced = false ; <nl> - / / TLog is special , we need to snap at the execVersion . <nl> - / / storage on the same node should not initiate a snap before TLog which will make <nl> - / / the snap version at TLog unpredictable <nl> - ASSERT ( ! isExecOpInProgress ( execUID ) ) ; <nl> - if ( ! otherRoleExeced ) { <nl> - setExecOpInProgress ( execUID ) ; <nl> - int tmpErr = wait ( execHelper ( execArg , self - > dataFolder , " role = tlog " , 1 / * version * / ) ) ; <nl> - err = tmpErr ; <nl> - clearExecOpInProgress ( execUID ) ; <nl> - } <nl> - TraceEvent ( " TLogCommitExecTraceTLog " ) <nl> - . detail ( " UidStr " , uidStr . toString ( ) ) <nl> - . detail ( " Status " , err ) <nl> - . detail ( " Tag " , logData - > allTags . begin ( ) - > toString ( ) ) <nl> - . detail ( " OldTagSize " , logData - > allTags . size ( ) ) <nl> - . 
detail ( " Role " , " TLog " ) ; <nl> - <nl> - / / print the detailed status message <nl> - for ( int i = 0 ; i < execTags . size ( ) ; i + + ) { <nl> - Version poppedTagVersion = - 1 ; <nl> - auto tagv = logData - > getTagData ( execTags [ i ] ) ; <nl> - if ( ! tagv ) { <nl> - continue ; <nl> - } <nl> - poppedTagVersion = tagv - > popped ; <nl> - <nl> - TraceEvent te = TraceEvent ( SevDebug , " TLogExecTraceDetailed " ) ; <nl> - te . detail ( " Uid " , uidStr . toString ( ) ) ; <nl> - te . detail ( " Status " , err ) ; <nl> - te . detail ( " Role " , " TLog " ) ; <nl> - te . detail ( " ExecCmd " , execCmd . toString ( ) ) ; <nl> - te . detail ( " Param2 " , execArg - > getCmdValueString ( ) . toString ( ) ) ; <nl> - te . detail ( " Tag " , tagv - > tag . toString ( ) ) ; <nl> - te . detail ( " Version " , version ) ; <nl> - te . detail ( " PoppedTagVersion " , poppedTagVersion ) ; <nl> - te . detail ( " PersistentDataVersion " , logData - > persistentDataVersion ) ; <nl> - te . detail ( " PersistentDatadurableVersion " , logData - > persistentDataDurableVersion ) ; <nl> - te . detail ( " QueueCommittedVersion " , logData - > queueCommittedVersion . get ( ) ) ; <nl> - te . detail ( " IgnorePopUid " , self - > ignorePopUid ) ; <nl> - } <nl> - return Void ( ) ; <nl> - } <nl> - <nl> ACTOR Future < Void > tLogCommit ( <nl> TLogData * self , <nl> TLogCommitRequest req , <nl> ACTOR Future < Void > tLogCommit ( <nl> wait ( delayJittered ( . 005 , TaskPriority : : TLogCommit ) ) ; <nl> } <nl> <nl> - / / while exec op is being committed , no new transactions will be admitted . <nl> - / / This property is useful for snapshot kind of operations which wants to <nl> - / / take a snap of the disk image at a particular version ( no data from <nl> - / / future version to be included ) <nl> - / / NOTE : execOpCommitInProgress will not be set for exec commands which <nl> - / / start with \ xff <nl> - state bool execOpLockTaken = false ; <nl> - if ( logData - > execOpCommitInProgress ) { <nl> - wait ( logData - > execOpLock . take ( ) ) ; <nl> - execOpLockTaken = true ; <nl> - } <nl> - <nl> if ( logData - > stopped ) { <nl> req . reply . sendError ( tlog_stopped ( ) ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> - state Version execVersion = invalidVersion ; <nl> - state ExecCmdValueString execArg ; <nl> - state TLogQueueEntryRef qe ; <nl> - state StringRef execCmd ; <nl> - state Standalone < VectorRef < Tag > > execTags ; <nl> - state vector < Future < Void > > snapFailKeySetters ; <nl> - state vector < Future < Void > > playIgnoredPops ; <nl> - <nl> if ( logData - > version . get ( ) = = req . prevVersion ) { / / Not a duplicate ( check relies on critical section between here self - > version . set ( ) below ! ) <nl> if ( req . debugID . present ( ) ) <nl> g_traceBatch . addEvent ( " CommitDebug " , tlogDebugID . get ( ) . first ( ) , " TLog . tLogCommit . Before " ) ; <nl> <nl> - if ( req . hasExecOp ) { <nl> - execProcessingHelper ( self , logData , & req , & execTags , & execArg , & execCmd , & execVersion , & snapFailKeySetters , & playIgnoredPops ) ; <nl> - if ( execVersion ! = invalidVersion ) { <nl> - TraceEvent ( SevDebug , " SettingExecOpCommit " ) <nl> - . detail ( " LogId " , logData - > logId ) <nl> - . detail ( " ExecVersion " , execVersion ) <nl> - . detail ( " Version " , req . version ) ; <nl> - logData - > execOpCommitInProgress = true ; <nl> - if ( ! execOpLockTaken ) { <nl> - wait ( logData - > execOpLock . 
take ( ) ) ; <nl> - execOpLockTaken = true ; <nl> - } else { <nl> - ASSERT ( logData - > execOpLock . available ( ) = = 0 ) ; <nl> - } <nl> - ASSERT ( execOpLockTaken ) ; <nl> - } <nl> - } <nl> - <nl> / / TraceEvent ( " TLogCommit " , logData - > logId ) . detail ( " Version " , req . version ) ; <nl> commitMessages ( self , logData , req . version , req . arena , req . messages ) ; <nl> <nl> logData - > knownCommittedVersion = std : : max ( logData - > knownCommittedVersion , req . knownCommittedVersion ) ; <nl> <nl> + TLogQueueEntryRef qe ; <nl> / / Log the changes to the persistent queue , to be committed by commitQueue ( ) <nl> qe . version = req . version ; <nl> qe . knownCommittedVersion = logData - > knownCommittedVersion ; <nl> ACTOR Future < Void > tLogCommit ( <nl> <nl> / / Notifies the commitQueue actor to commit persistentQueue , and also unblocks tLogPeekMessages actors <nl> logData - > version . set ( req . version ) ; <nl> - wait ( waitForAll ( playIgnoredPops ) ) ; <nl> <nl> if ( req . debugID . present ( ) ) <nl> g_traceBatch . addEvent ( " CommitDebug " , tlogDebugID . get ( ) . first ( ) , " TLog . tLogCommit . AfterTLogCommit " ) ; <nl> ACTOR Future < Void > tLogCommit ( <nl> state Future < Void > stopped = logData - > stopCommit . onTrigger ( ) ; <nl> wait ( timeoutWarning ( logData - > queueCommittedVersion . whenAtLeast ( req . version ) | | stopped , 0 . 1 , warningCollectorInput ) ) ; <nl> <nl> - if ( ( execVersion ! = invalidVersion ) & & execVersion < = logData - > queueCommittedVersion . get ( ) ) { <nl> - wait ( tLogSnapHelper ( self , logData , & execArg , qe . version , execVersion , execCmd , execTags ) ) ; <nl> - } <nl> - if ( execVersion ! = invalidVersion & & logData - > execOpCommitInProgress ) { <nl> - ASSERT ( execOpLockTaken ) ; <nl> - logData - > execOpCommitInProgress = false ; <nl> - } <nl> - if ( execOpLockTaken ) { <nl> - logData - > execOpLock . release ( ) ; <nl> - execOpLockTaken = false ; <nl> - } <nl> - execVersion = invalidVersion ; <nl> - <nl> if ( stopped . isReady ( ) ) { <nl> ASSERT ( logData - > stopped ) ; <nl> req . reply . sendError ( tlog_stopped ( ) ) ; <nl> ACTOR Future < Void > tLogCommit ( <nl> g_traceBatch . addEvent ( " CommitDebug " , tlogDebugID . get ( ) . first ( ) , " TLog . tLogCommit . After " ) ; <nl> <nl> req . reply . send ( logData - > durableKnownCommittedVersion ) ; <nl> - if ( g_network - > isSimulated ( ) ) { <nl> - if ( snapFailKeySetters . size ( ) > 0 ) { <nl> - TraceEvent ( SevDebug , " SettingSnapFailKey " ) ; <nl> - wait ( waitForAll ( snapFailKeySetters ) ) ; <nl> - TraceEvent ( SevDebug , " SettingSnapFailKeyDone " ) ; <nl> - } <nl> - } <nl> return Void ( ) ; <nl> } <nl> <nl> tLogSnapCreate ( TLogSnapRequest snapReq , TLogData * self , Reference < LogData > logDa <nl> ExecCmdValueString snapArg ( snapReq . snapPayload ) ; <nl> try { <nl> Standalone < StringRef > role = LiteralStringRef ( " role = " ) . withSuffix ( snapReq . role ) ; <nl> - int err = wait ( execHelper ( & snapArg , self - > dataFolder , role . toString ( ) , 2 / * version * / ) ) ; <nl> + int err = wait ( execHelper ( & snapArg , self - > dataFolder , role . toString ( ) ) ) ; <nl> <nl> std : : string uidStr = snapReq . snapUID . toString ( ) ; <nl> TraceEvent ( " ExecTraceTLog " ) <nl> tLogSnapCreate ( TLogSnapRequest snapReq , TLogData * self , Reference < LogData > logDa <nl> } <nl> snapReq . reply . send ( Void ( ) ) ; <nl> } catch ( Error & e ) { <nl> - TraceEvent ( " TLogExecHelperError " ) . 
error ( e , true / * includeCancelled * / ) ; <nl> + TraceEvent ( " TLogSnapCreateError " ) . error ( e , true / * includeCancelled * / ) ; <nl> if ( e . code ( ) ! = error_code_operation_cancelled ) { <nl> snapReq . reply . sendError ( e ) ; <nl> } else { <nl> mmm a / fdbserver / TLogInterface . h <nl> ppp b / fdbserver / TLogInterface . h <nl> struct TLogCommitRequest { <nl> <nl> ReplyPromise < Version > reply ; <nl> Optional < UID > debugID ; <nl> - bool hasExecOp ; <nl> <nl> TLogCommitRequest ( ) { } <nl> - TLogCommitRequest ( const Arena & a , Version prevVersion , Version version , Version knownCommittedVersion , Version minKnownCommittedVersion , StringRef messages , bool hasExecOp , Optional < UID > debugID ) <nl> - : arena ( a ) , prevVersion ( prevVersion ) , version ( version ) , knownCommittedVersion ( knownCommittedVersion ) , minKnownCommittedVersion ( minKnownCommittedVersion ) , messages ( messages ) , debugID ( debugID ) , hasExecOp ( hasExecOp ) { } <nl> + TLogCommitRequest ( const Arena & a , Version prevVersion , Version version , Version knownCommittedVersion , Version minKnownCommittedVersion , StringRef messages , Optional < UID > debugID ) <nl> + : arena ( a ) , prevVersion ( prevVersion ) , version ( version ) , knownCommittedVersion ( knownCommittedVersion ) , minKnownCommittedVersion ( minKnownCommittedVersion ) , messages ( messages ) , debugID ( debugID ) { } <nl> template < class Ar > <nl> void serialize ( Ar & ar ) { <nl> - serializer ( ar , prevVersion , version , knownCommittedVersion , minKnownCommittedVersion , messages , reply , arena , debugID , hasExecOp ) ; <nl> + serializer ( ar , prevVersion , version , knownCommittedVersion , minKnownCommittedVersion , messages , reply , arena , debugID ) ; <nl> } <nl> } ; <nl> <nl> mmm a / fdbserver / TLogServer . actor . cpp <nl> ppp b / fdbserver / TLogServer . actor . cpp <nl> ACTOR Future < Void > commitQueue ( TLogData * self ) { <nl> } <nl> } <nl> <nl> - void execProcessingHelper ( TLogData * self , <nl> - Reference < LogData > logData , <nl> - TLogCommitRequest * req , <nl> - Standalone < VectorRef < Tag > > * execTags , <nl> - ExecCmdValueString * execArg , <nl> - StringRef * execCmd , <nl> - Version * execVersion , <nl> - vector < Future < Void > > * snapFailKeySetters , <nl> - vector < Future < Void > > * ignoredPops ) <nl> - { <nl> - / / inspect the messages to find if there is an Exec type and print <nl> - / / it . message are prefixed by the length of the message and each <nl> - / / field is prefixed by the length too <nl> - uint8_t type = MutationRef : : MAX_ATOMIC_OP ; <nl> - StringRef param2 ; <nl> - ArenaReader rd ( req - > arena , req - > messages , Unversioned ( ) ) ; <nl> - int32_t messageLength , rawLength ; <nl> - uint16_t tagCount ; <nl> - uint32_t sub ; <nl> - while ( ! rd . empty ( ) ) { <nl> - Tag tmpTag ; <nl> - bool hasTxsTag = false ; <nl> - rd . checkpoint ( ) ; <nl> - rd > > messageLength > > sub > > tagCount ; <nl> - for ( int i = 0 ; i < tagCount ; i + + ) { <nl> - rd > > tmpTag ; <nl> - if ( tmpTag . locality = = tagLocalityTxs | | tmpTag = = txsTag ) { <nl> - hasTxsTag = true ; <nl> - } <nl> - execTags - > push_back ( execTags - > arena ( ) , tmpTag ) ; <nl> - } <nl> - if ( ! hasTxsTag ) { <nl> - rd > > type ; <nl> - if ( type = = MutationRef : : Exec ) { <nl> - break ; <nl> - } <nl> - } <nl> - rawLength = messageLength + sizeof ( messageLength ) ; <nl> - rd . rewind ( ) ; <nl> - rd . 
readBytes ( rawLength ) ; <nl> - } <nl> - <nl> - int32_t len = 0 ; <nl> - if ( type = = MutationRef : : Exec ) { <nl> - / / get param1 <nl> - rd > > len ; <nl> - * execCmd = StringRef ( ( uint8_t const * ) rd . readBytes ( len ) , len ) ; <nl> - / / get param2 <nl> - rd > > len ; <nl> - param2 = StringRef ( ( uint8_t const * ) rd . readBytes ( len ) , len ) ; <nl> - <nl> - TraceEvent ( SevDebug , " TLogExecCommandType " , self - > dbgid ) <nl> - . detail ( " Value " , execCmd - > toString ( ) ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - <nl> - execArg - > setCmdValueString ( param2 ) ; <nl> - execArg - > dbgPrint ( ) ; <nl> - StringRef uidStr = execArg - > getBinaryArgValue ( LiteralStringRef ( " uid " ) ) ; <nl> - if ( ! execCmd - > startsWith ( LiteralStringRef ( " \ xff " ) ) ) { <nl> - * execVersion = req - > version ; <nl> - } <nl> - if ( * execCmd = = execSnap ) { <nl> - / / validation check specific to snap request <nl> - std : : string reason ; <nl> - if ( ! self - > ignorePopRequest ) { <nl> - * execVersion = invalidVersion ; <nl> - reason = " SnapFailIgnorePopNotSet " ; <nl> - } else if ( uidStr . toString ( ) ! = self - > ignorePopUid ) { <nl> - * execVersion = invalidVersion ; <nl> - reason = " SnapFailedDisableTLogUidMismatch " ; <nl> - } <nl> - <nl> - if ( * execVersion = = invalidVersion ) { <nl> - TraceEvent ( SevWarn , " TLogSnapFailed " ) <nl> - . detail ( " IgnorePopUid " , self - > ignorePopUid ) <nl> - . detail ( " IgnorePopRequest " , self - > ignorePopRequest ) <nl> - . detail ( " Reason " , reason ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - <nl> - TraceEvent ( " ExecCmdSnapCreate " ) <nl> - . detail ( " Uid " , uidStr . toString ( ) ) <nl> - . detail ( " Status " , - 1 ) <nl> - . detail ( " Tag " , logData - > allTags . begin ( ) - > toString ( ) ) <nl> - . detail ( " Role " , " TLog " ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - <nl> - if ( g_network - > isSimulated ( ) ) { <nl> - / / write SnapFailedTLog . $ UID <nl> - Standalone < StringRef > keyStr = snapTestFailStatus . withSuffix ( uidStr ) ; <nl> - StringRef valStr = LiteralStringRef ( " Success " ) ; <nl> - TraceEvent ( SevDebug , " TLogKeyStr " ) . detail ( " Value " , keyStr ) ; <nl> - snapFailKeySetters - > push_back ( runRYWTransaction ( self - > cx , [ = ] ( Reference < ReadYourWritesTransaction > tr ) - > Future < Void > <nl> - { tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; tr - > set ( keyStr , valStr ) ; return Void ( ) ; } ) ) ; <nl> - } <nl> - } <nl> - } <nl> - if ( * execCmd = = execDisableTLogPop ) { <nl> - self - > ignorePopRequest = true ; <nl> - if ( self - > ignorePopUid ! = " " ) { <nl> - TraceEvent ( SevWarn , " TLogPopDisableonDisable " ) <nl> - . detail ( " IgnorePopUid " , self - > ignorePopUid ) <nl> - . detail ( " UidStr " , uidStr . toString ( ) ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - } <nl> - self - > ignorePopUid = uidStr . toString ( ) ; <nl> - self - > ignorePopDeadline = g_network - > now ( ) + SERVER_KNOBS - > TLOG_IGNORE_POP_AUTO_ENABLE_DELAY ; <nl> - TraceEvent ( " TLogExecCmdPopDisable " ) <nl> - . detail ( " ExecCmd " , execCmd - > toString ( ) ) <nl> - . detail ( " UidStr " , uidStr . toString ( ) ) <nl> - . detail ( " IgnorePopUid " , self - > ignorePopUid ) <nl> - . detail ( " IgnporePopRequest " , self - > ignorePopRequest ) <nl> - . detail ( " IgnporePopDeadline " , self - > ignorePopDeadline ) <nl> - . 
detail ( " Version " , req - > version ) ; <nl> - } <nl> - if ( * execCmd = = execEnableTLogPop ) { <nl> - if ( self - > ignorePopUid ! = uidStr . toString ( ) ) { <nl> - TraceEvent ( SevWarn , " TLogPopDisableEnableUidMismatch " ) <nl> - . detail ( " IgnorePopUid " , self - > ignorePopUid ) <nl> - . detail ( " UidStr " , uidStr . toString ( ) ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - } <nl> - <nl> - TraceEvent ( " EnableTLogPlayAllIgnoredPops2 " ) ; <nl> - / / use toBePopped and issue all the pops <nl> - std : : map < Tag , Version > : : iterator it ; <nl> - self - > ignorePopRequest = false ; <nl> - self - > ignorePopDeadline = 0 . 0 ; <nl> - self - > ignorePopUid = " " ; <nl> - for ( it = self - > toBePopped . begin ( ) ; it ! = self - > toBePopped . end ( ) ; it + + ) { <nl> - TraceEvent ( " PlayIgnoredPop " ) <nl> - . detail ( " Tag " , it - > first . toString ( ) ) <nl> - . detail ( " Version " , it - > second ) ; <nl> - ignoredPops - > push_back ( tLogPopCore ( self , it - > first , it - > second , logData ) ) ; <nl> - } <nl> - self - > toBePopped . clear ( ) ; <nl> - TraceEvent ( " TLogExecCmdPopEnable " ) <nl> - . detail ( " ExecCmd " , execCmd - > toString ( ) ) <nl> - . detail ( " UidStr " , uidStr . toString ( ) ) <nl> - . detail ( " IgnorePopUid " , self - > ignorePopUid ) <nl> - . detail ( " IgnporePopRequest " , self - > ignorePopRequest ) <nl> - . detail ( " IgnporePopDeadline " , self - > ignorePopDeadline ) <nl> - . detail ( " Version " , req - > version ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - ACTOR Future < Void > tLogSnapHelper ( TLogData * self , <nl> - Reference < LogData > logData , <nl> - ExecCmdValueString * execArg , <nl> - Version version , <nl> - Version execVersion , <nl> - StringRef execCmd , <nl> - Standalone < VectorRef < Tag > > execTags ) <nl> - { <nl> - state int err = 0 ; <nl> - state StringRef uidStr = execArg - > getBinaryArgValue ( LiteralStringRef ( " uid " ) ) ; <nl> - state UID execUID = UID : : fromString ( uidStr . toString ( ) ) ; <nl> - state bool otherRoleExeced = false ; <nl> - / / TLog is special , we need to snap at the execVersion . <nl> - / / storage on the same node should not initiate a snap before TLog which will make <nl> - / / the snap version at TLog unpredictable <nl> - ASSERT ( ! isExecOpInProgress ( execUID ) ) ; <nl> - if ( ! otherRoleExeced ) { <nl> - setExecOpInProgress ( execUID ) ; <nl> - int tmpErr = wait ( execHelper ( execArg , self - > dataFolder , " role = tlog " , 1 / * version * / ) ) ; <nl> - err = tmpErr ; <nl> - clearExecOpInProgress ( execUID ) ; <nl> - } <nl> - TraceEvent ( " TLogCommitExecTraceTLog " ) <nl> - . detail ( " UidStr " , uidStr . toString ( ) ) <nl> - . detail ( " Status " , err ) <nl> - . detail ( " Tag " , logData - > allTags . begin ( ) - > toString ( ) ) <nl> - . detail ( " OldTagSize " , logData - > allTags . size ( ) ) <nl> - . detail ( " Role " , " TLog " ) ; <nl> - <nl> - / / print the detailed status message <nl> - for ( int i = 0 ; i < execTags . size ( ) ; i + + ) { <nl> - Version poppedTagVersion = - 1 ; <nl> - auto tagv = logData - > getTagData ( execTags [ i ] ) ; <nl> - if ( ! tagv ) { <nl> - continue ; <nl> - } <nl> - poppedTagVersion = tagv - > popped ; <nl> - <nl> - TraceEvent te = TraceEvent ( SevDebug , " TLogExecTraceDetailed " ) ; <nl> - te . detail ( " Uid " , uidStr . toString ( ) ) ; <nl> - te . detail ( " Status " , err ) ; <nl> - te . detail ( " Role " , " TLog " ) ; <nl> - te . detail ( " ExecCmd " , execCmd . toString ( ) ) ; <nl> - te . 
detail ( " Param2 " , execArg - > getCmdValueString ( ) . toString ( ) ) ; <nl> - te . detail ( " Tag " , tagv - > tag . toString ( ) ) ; <nl> - te . detail ( " Version " , version ) ; <nl> - te . detail ( " PoppedTagVersion " , poppedTagVersion ) ; <nl> - te . detail ( " PersistentDataVersion " , logData - > persistentDataVersion ) ; <nl> - te . detail ( " PersistentDatadurableVersion " , logData - > persistentDataDurableVersion ) ; <nl> - te . detail ( " QueueCommittedVersion " , logData - > queueCommittedVersion . get ( ) ) ; <nl> - te . detail ( " IgnorePopUid " , self - > ignorePopUid ) ; <nl> - } <nl> - return Void ( ) ; <nl> - } <nl> - <nl> ACTOR Future < Void > tLogCommit ( <nl> TLogData * self , <nl> TLogCommitRequest req , <nl> ACTOR Future < Void > tLogCommit ( <nl> wait ( delayJittered ( . 005 , TaskPriority : : TLogCommit ) ) ; <nl> } <nl> <nl> - / / while exec op is being committed , no new transactions will be admitted . <nl> - / / This property is useful for snapshot kind of operations which wants to <nl> - / / take a snap of the disk image at a particular version ( not data from <nl> - / / future version to be included ) <nl> - / / NOTE : execOpCommitInProgress will not be set for exec commands which <nl> - / / start with \ xff <nl> - state bool execOpLockTaken = false ; <nl> - if ( logData - > execOpCommitInProgress ) { <nl> - wait ( logData - > execOpLock . take ( ) ) ; <nl> - execOpLockTaken = true ; <nl> - } <nl> - <nl> if ( logData - > stopped ) { <nl> req . reply . sendError ( tlog_stopped ( ) ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> - state Version execVersion = invalidVersion ; <nl> - state ExecCmdValueString execArg ; <nl> - state TLogQueueEntryRef qe ; <nl> - state StringRef execCmd ; <nl> - state Standalone < VectorRef < Tag > > execTags ; <nl> - state vector < Future < Void > > playIgnoredPops ; <nl> - state vector < Future < Void > > snapFailKeySetters ; <nl> - <nl> if ( logData - > version . get ( ) = = req . prevVersion ) { / / Not a duplicate ( check relies on critical section between here self - > version . set ( ) below ! ) <nl> if ( req . debugID . present ( ) ) <nl> g_traceBatch . addEvent ( " CommitDebug " , tlogDebugID . get ( ) . first ( ) , " TLog . tLogCommit . Before " ) ; <nl> <nl> - <nl> - if ( req . hasExecOp ) { <nl> - execProcessingHelper ( self , logData , & req , & execTags , & execArg , & execCmd , & execVersion , & snapFailKeySetters , & playIgnoredPops ) ; <nl> - if ( execVersion ! = invalidVersion ) { <nl> - TraceEvent ( SevDebug , " SettingExecOpCommit " ) <nl> - . detail ( " LogId " , logData - > logId ) <nl> - . detail ( " ExecVersion " , execVersion ) <nl> - . detail ( " Version " , req . version ) ; <nl> - logData - > execOpCommitInProgress = true ; <nl> - if ( ! execOpLockTaken ) { <nl> - wait ( logData - > execOpLock . take ( ) ) ; <nl> - execOpLockTaken = true ; <nl> - } else { <nl> - ASSERT ( logData - > execOpLock . available ( ) = = 0 ) ; <nl> - } <nl> - ASSERT ( execOpLockTaken ) ; <nl> - } <nl> - } <nl> - <nl> / / TraceEvent ( " TLogCommit " , logData - > logId ) . detail ( " Version " , req . version ) ; <nl> commitMessages ( self , logData , req . version , req . arena , req . messages ) ; <nl> <nl> logData - > knownCommittedVersion = std : : max ( logData - > knownCommittedVersion , req . knownCommittedVersion ) ; <nl> <nl> + TLogQueueEntryRef qe ; <nl> / / Log the changes to the persistent queue , to be committed by commitQueue ( ) <nl> qe . version = req . version ; <nl> qe . 
knownCommittedVersion = logData - > knownCommittedVersion ; <nl> ACTOR Future < Void > tLogCommit ( <nl> <nl> / / Notifies the commitQueue actor to commit persistentQueue , and also unblocks tLogPeekMessages actors <nl> logData - > version . set ( req . version ) ; <nl> - wait ( waitForAll ( playIgnoredPops ) ) ; <nl> <nl> if ( req . debugID . present ( ) ) <nl> g_traceBatch . addEvent ( " CommitDebug " , tlogDebugID . get ( ) . first ( ) , " TLog . tLogCommit . AfterTLogCommit " ) ; <nl> ACTOR Future < Void > tLogCommit ( <nl> state Future < Void > stopped = logData - > stopCommit . onTrigger ( ) ; <nl> wait ( timeoutWarning ( logData - > queueCommittedVersion . whenAtLeast ( req . version ) | | stopped , 0 . 1 , warningCollectorInput ) ) ; <nl> <nl> - if ( ( execVersion ! = invalidVersion ) & & <nl> - execVersion < = logData - > queueCommittedVersion . get ( ) ) { <nl> - wait ( tLogSnapHelper ( self , logData , & execArg , qe . version , execVersion , execCmd , execTags ) ) ; <nl> - } <nl> - if ( execVersion ! = invalidVersion & & logData - > execOpCommitInProgress ) { <nl> - ASSERT ( execOpLockTaken ) ; <nl> - logData - > execOpCommitInProgress = false ; <nl> - } <nl> - if ( execOpLockTaken ) { <nl> - logData - > execOpLock . release ( ) ; <nl> - execOpLockTaken = false ; <nl> - } <nl> - execVersion = invalidVersion ; <nl> - <nl> if ( stopped . isReady ( ) ) { <nl> ASSERT ( logData - > stopped ) ; <nl> req . reply . sendError ( tlog_stopped ( ) ) ; <nl> ACTOR Future < Void > tLogCommit ( <nl> g_traceBatch . addEvent ( " CommitDebug " , tlogDebugID . get ( ) . first ( ) , " TLog . tLogCommit . After " ) ; <nl> <nl> req . reply . send ( logData - > durableKnownCommittedVersion ) ; <nl> - if ( g_network - > isSimulated ( ) ) { <nl> - if ( snapFailKeySetters . size ( ) > 0 ) { <nl> - TraceEvent ( SevDebug , " SettingSnapFailKey " ) ; <nl> - wait ( waitForAll ( snapFailKeySetters ) ) ; <nl> - TraceEvent ( SevDebug , " SettingSnapFailKeyDone " ) ; <nl> - } <nl> - } <nl> return Void ( ) ; <nl> } <nl> <nl> tLogSnapCreate ( TLogSnapRequest snapReq , TLogData * self , Reference < LogData > logDa <nl> ExecCmdValueString snapArg ( snapReq . snapPayload ) ; <nl> try { <nl> Standalone < StringRef > role = LiteralStringRef ( " role = " ) . withSuffix ( snapReq . role ) ; <nl> - int err = wait ( execHelper ( & snapArg , self - > dataFolder , role . toString ( ) , 2 / * version * / ) ) ; <nl> + int err = wait ( execHelper ( & snapArg , self - > dataFolder , role . toString ( ) ) ) ; <nl> <nl> std : : string uidStr = snapReq . snapUID . toString ( ) ; <nl> TraceEvent ( " ExecTraceTLog " ) <nl> mmm a / fdbserver / TagPartitionedLogSystem . actor . cpp <nl> ppp b / fdbserver / TagPartitionedLogSystem . actor . cpp <nl> struct TagPartitionedLogSystem : ILogSystem , ReferenceCounted < TagPartitionedLogS <nl> vector < Future < Void > > tLogCommitResults ; <nl> for ( int loc = 0 ; loc < it - > logServers . size ( ) ; loc + + ) { <nl> Standalone < StringRef > msg = data . getMessages ( location ) ; <nl> - allReplies . push_back ( it - > logServers [ loc ] - > get ( ) . interf ( ) . commit . getReply ( TLogCommitRequest ( msg . arena ( ) , prevVersion , version , knownCommittedVersion , minKnownCommittedVersion , msg , data . getHasExecOp ( ) , debugID ) , TaskPriority : : TLogCommitReply ) ) ; <nl> + allReplies . push_back ( it - > logServers [ loc ] - > get ( ) . interf ( ) . commit . getReply ( TLogCommitRequest ( msg . 
arena ( ) , prevVersion , version , knownCommittedVersion , minKnownCommittedVersion , msg , debugID ) , TaskPriority : : TLogCommitReply ) ) ; <nl> Future < Void > commitSuccess = success ( allReplies . back ( ) ) ; <nl> addActor . get ( ) . send ( commitSuccess ) ; <nl> tLogCommitResults . push_back ( commitSuccess ) ; <nl> mmm a / fdbserver / fdbserver . actor . cpp <nl> ppp b / fdbserver / fdbserver . actor . cpp <nl> int main ( int argc , char * argv [ ] ) { <nl> std : : string absDataFolder = abspath ( dataFolder ) ; <nl> ini . LoadFile ( joinPath ( absDataFolder , " restartInfo . ini " ) . c_str ( ) ) ; <nl> int backupFailed = true ; <nl> - int backupVersion = 1 ; <nl> const char * isRestoringStr = ini . GetValue ( " RESTORE " , " isRestoring " , NULL ) ; <nl> if ( isRestoringStr ) { <nl> isRestoring = atoi ( isRestoringStr ) ; <nl> int main ( int argc , char * argv [ ] ) { <nl> if ( isRestoring & & backupFailedStr ) { <nl> backupFailed = atoi ( backupFailedStr ) ; <nl> } <nl> - const char * backupVersionStr = ini . GetValue ( " RESTORE " , " BackupVersion " , NULL ) ; <nl> - if ( isRestoring & & backupVersionStr ) { <nl> - backupVersion = atoi ( backupVersionStr ) ; <nl> - } <nl> } <nl> if ( isRestoring & & ! backupFailed ) { <nl> - if ( backupVersion = = 1 ) { <nl> - std : : vector < std : : string > returnList ; <nl> - std : : string ext = " " ; <nl> - returnList = platform : : listDirectories ( absDataFolder ) ; <nl> - std : : string snapStr = ini . GetValue ( " RESTORE " , " RestoreSnapUID " ) ; <nl> - <nl> - TraceEvent ( " RestoringDataFolder " ) . detail ( " DataFolder " , absDataFolder ) ; <nl> - TraceEvent ( " RestoreSnapUID " ) . detail ( " UID " , snapStr ) ; <nl> - <nl> - / / delete all files ( except fdb . cluster ) in non - snap directories <nl> - for ( int i = 0 ; i < returnList . size ( ) ; i + + ) { <nl> - if ( returnList [ i ] = = " . " | | returnList [ i ] = = " . . " ) { <nl> - continue ; <nl> - } <nl> - if ( returnList [ i ] . find ( snapStr ) ! = std : : string : : npos ) { <nl> - continue ; <nl> - } <nl> - <nl> - std : : string childf = absDataFolder + " / " + returnList [ i ] ; <nl> - std : : vector < std : : string > returnFiles = platform : : listFiles ( childf , ext ) ; <nl> - for ( int j = 0 ; j < returnFiles . size ( ) ; j + + ) { <nl> - if ( returnFiles [ j ] ! = " fdb . cluster " & & returnFiles [ j ] ! = " fitness " ) { <nl> - TraceEvent ( " DeletingNonSnapfiles " ) <nl> - . detail ( " FileBeingDeleted " , childf + " / " + returnFiles [ j ] ) ; <nl> - deleteFile ( childf + " / " + returnFiles [ j ] ) ; <nl> - } <nl> - } <nl> + std : : vector < std : : string > returnList ; <nl> + std : : string ext = " " ; <nl> + returnList = platform : : listDirectories ( absDataFolder ) ; <nl> + std : : string snapStr = ini . GetValue ( " RESTORE " , " RestoreSnapUID " ) ; <nl> + <nl> + TraceEvent ( " RestoringDataFolder " ) . detail ( " DataFolder " , absDataFolder ) ; <nl> + TraceEvent ( " RestoreSnapUID " ) . detail ( " UID " , snapStr ) ; <nl> + <nl> + / / delete all files ( except fdb . cluster ) in non - snap directories <nl> + for ( const auto & dirEntry : returnList ) { <nl> + if ( dirEntry = = " . " | | dirEntry = = " . . " ) { <nl> + continue ; <nl> } <nl> - / / move the contents from snap folder to the original folder , <nl> - / / delete snap folders <nl> - for ( int i = 0 ; i < returnList . size ( ) ; i + + ) { <nl> - if ( returnList [ i ] = = " . " | | returnList [ i ] = = " . . 
" ) { <nl> - continue ; <nl> - } <nl> - std : : string dirSrc = absDataFolder + " / " + returnList [ i ] ; <nl> - / / delete snap directories which are not part of restoreSnapUID <nl> - if ( returnList [ i ] . find ( snapStr ) = = std : : string : : npos ) { <nl> - if ( returnList [ i ] . find ( " snap " ) ! = std : : string : : npos ) { <nl> - platform : : eraseDirectoryRecursive ( dirSrc ) ; <nl> - } <nl> - continue ; <nl> - } <nl> - / / remove empty / partial snap directories <nl> - std : : vector < std : : string > childrenList = platform : : listFiles ( dirSrc ) ; <nl> - if ( childrenList . size ( ) = = 0 ) { <nl> - TraceEvent ( " RemovingEmptySnapDirectory " ) . detail ( " DirBeingDeleted " , dirSrc ) ; <nl> - platform : : eraseDirectoryRecursive ( dirSrc ) ; <nl> - continue ; <nl> - } <nl> - std : : string origDir = returnList [ i ] . substr ( 0 , 32 ) ; <nl> - std : : string dirToRemove = absDataFolder + " / " + origDir ; <nl> - TraceEvent ( " DeletingOriginalNonSnapDirectory " ) . detail ( " FileBeingDeleted " , dirToRemove ) ; <nl> - platform : : eraseDirectoryRecursive ( dirToRemove ) ; <nl> - renameFile ( dirSrc , dirToRemove ) ; <nl> - TraceEvent ( " RenamingSnapToOriginalDirectory " ) <nl> - . detail ( " Oldname " , dirSrc ) <nl> - . detail ( " Newname " , dirToRemove ) ; <nl> + if ( dirEntry . find ( snapStr ) ! = std : : string : : npos ) { <nl> + continue ; <nl> } <nl> - } else if ( backupVersion = = 2 ) { <nl> - std : : vector < std : : string > returnList ; <nl> - std : : string ext = " " ; <nl> - returnList = platform : : listDirectories ( absDataFolder ) ; <nl> - std : : string snapStr = ini . GetValue ( " RESTORE " , " RestoreSnapUID " ) ; <nl> - <nl> - TraceEvent ( " RestoringDataFolder " ) . detail ( " DataFolder " , absDataFolder ) ; <nl> - TraceEvent ( " RestoreSnapUID " ) . detail ( " UID " , snapStr ) ; <nl> - <nl> - / / delete all files ( except fdb . cluster ) in non - snap directories <nl> - for ( const auto & dirEntry : returnList ) { <nl> - if ( dirEntry = = " . " | | dirEntry = = " . . " ) { <nl> - continue ; <nl> - } <nl> - if ( dirEntry . find ( snapStr ) ! = std : : string : : npos ) { <nl> - continue ; <nl> - } <nl> <nl> - std : : string childf = absDataFolder + " / " + dirEntry ; <nl> - std : : vector < std : : string > returnFiles = platform : : listFiles ( childf , ext ) ; <nl> - for ( const auto & fileEntry : returnFiles ) { <nl> - if ( fileEntry ! = " fdb . cluster " & & fileEntry ! = " fitness " ) { <nl> - TraceEvent ( " DeletingNonSnapfiles " ) <nl> - . detail ( " FileBeingDeleted " , childf + " / " + fileEntry ) ; <nl> - deleteFile ( childf + " / " + fileEntry ) ; <nl> - } <nl> + std : : string childf = absDataFolder + " / " + dirEntry ; <nl> + std : : vector < std : : string > returnFiles = platform : : listFiles ( childf , ext ) ; <nl> + for ( const auto & fileEntry : returnFiles ) { <nl> + if ( fileEntry ! = " fdb . cluster " & & fileEntry ! = " fitness " ) { <nl> + TraceEvent ( " DeletingNonSnapfiles " ) <nl> + . detail ( " FileBeingDeleted " , childf + " / " + fileEntry ) ; <nl> + deleteFile ( childf + " / " + fileEntry ) ; <nl> } <nl> } <nl> - / / cleanup unwanted and partial directories <nl> - for ( const auto & dirEntry : returnList ) { <nl> - if ( dirEntry = = " . " | | dirEntry = = " . . " ) { <nl> - continue ; <nl> - } <nl> - std : : string dirSrc = absDataFolder + " / " + dirEntry ; <nl> - / / delete snap directories which are not part of restoreSnapUID <nl> - if ( dirEntry . 
find ( snapStr ) = = std : : string : : npos ) { <nl> - if ( dirEntry . find ( " snap " ) ! = std : : string : : npos ) { <nl> - platform : : eraseDirectoryRecursive ( dirSrc ) ; <nl> - } <nl> - continue ; <nl> - } <nl> - / / remove empty / partial snap directories <nl> - std : : vector < std : : string > childrenList = platform : : listFiles ( dirSrc ) ; <nl> - if ( childrenList . size ( ) = = 0 ) { <nl> - TraceEvent ( " RemovingEmptySnapDirectory " ) . detail ( " DirBeingDeleted " , dirSrc ) ; <nl> + } <nl> + / / cleanup unwanted and partial directories <nl> + for ( const auto & dirEntry : returnList ) { <nl> + if ( dirEntry = = " . " | | dirEntry = = " . . " ) { <nl> + continue ; <nl> + } <nl> + std : : string dirSrc = absDataFolder + " / " + dirEntry ; <nl> + / / delete snap directories which are not part of restoreSnapUID <nl> + if ( dirEntry . find ( snapStr ) = = std : : string : : npos ) { <nl> + if ( dirEntry . find ( " snap " ) ! = std : : string : : npos ) { <nl> platform : : eraseDirectoryRecursive ( dirSrc ) ; <nl> - continue ; <nl> } <nl> + continue ; <nl> } <nl> - / / move snapshotted files to appropriate locations <nl> - for ( const auto & dirEntry : returnList ) { <nl> - if ( dirEntry = = " . " | | dirEntry = = " . . " ) { <nl> - continue ; <nl> - } <nl> - std : : string dirSrc = absDataFolder + " / " + dirEntry ; <nl> - std : : string origDir = dirEntry . substr ( 0 , 32 ) ; <nl> - std : : string dirToMove = absDataFolder + " / " + origDir ; <nl> - if ( ( dirEntry . find ( " snap " ) ! = std : : string : : npos ) & & <nl> - ( dirEntry . find ( " tlog " ) ! = std : : string : : npos ) ) { <nl> - / / restore tlog files <nl> - restoreRoleFilesHelper ( dirSrc , dirToMove , " log " ) ; <nl> - } else if ( ( dirEntry . find ( " snap " ) ! = std : : string : : npos ) & & <nl> - ( dirEntry . find ( " storage " ) ! = std : : string : : npos ) ) { <nl> - / / restore storage files <nl> - restoreRoleFilesHelper ( dirSrc , dirToMove , " storage " ) ; <nl> - } else if ( ( dirEntry . find ( " snap " ) ! = std : : string : : npos ) & & <nl> - ( dirEntry . find ( " coord " ) ! = std : : string : : npos ) ) { <nl> - / / restore coordinator files <nl> - restoreRoleFilesHelper ( dirSrc , dirToMove , " coordination " ) ; <nl> - } <nl> + / / remove empty / partial snap directories <nl> + std : : vector < std : : string > childrenList = platform : : listFiles ( dirSrc ) ; <nl> + if ( childrenList . size ( ) = = 0 ) { <nl> + TraceEvent ( " RemovingEmptySnapDirectory " ) . detail ( " DirBeingDeleted " , dirSrc ) ; <nl> + platform : : eraseDirectoryRecursive ( dirSrc ) ; <nl> + continue ; <nl> + } <nl> + } <nl> + / / move snapshotted files to appropriate locations <nl> + for ( const auto & dirEntry : returnList ) { <nl> + if ( dirEntry = = " . " | | dirEntry = = " . . " ) { <nl> + continue ; <nl> + } <nl> + std : : string dirSrc = absDataFolder + " / " + dirEntry ; <nl> + std : : string origDir = dirEntry . substr ( 0 , 32 ) ; <nl> + std : : string dirToMove = absDataFolder + " / " + origDir ; <nl> + if ( ( dirEntry . find ( " snap " ) ! = std : : string : : npos ) & & <nl> + ( dirEntry . find ( " tlog " ) ! = std : : string : : npos ) ) { <nl> + / / restore tlog files <nl> + restoreRoleFilesHelper ( dirSrc , dirToMove , " log " ) ; <nl> + } else if ( ( dirEntry . find ( " snap " ) ! = std : : string : : npos ) & & <nl> + ( dirEntry . find ( " storage " ) ! 
= std : : string : : npos ) ) { <nl> + / / restore storage files <nl> + restoreRoleFilesHelper ( dirSrc , dirToMove , " storage " ) ; <nl> + } else if ( ( dirEntry . find ( " snap " ) ! = std : : string : : npos ) & & <nl> + ( dirEntry . find ( " coord " ) ! = std : : string : : npos ) ) { <nl> + / / restore coordinator files <nl> + restoreRoleFilesHelper ( dirSrc , dirToMove , " coordination " ) ; <nl> } <nl> - <nl> } <nl> } <nl> } <nl> mmm a / fdbserver / storageserver . actor . cpp <nl> ppp b / fdbserver / storageserver . actor . cpp <nl> <nl> # include " flow / SystemMonitor . h " <nl> # include " flow / Util . h " <nl> # include " fdbclient / Atomic . h " <nl> + # include " fdbclient / DatabaseContext . h " <nl> # include " fdbclient / KeyRangeMap . h " <nl> - # include " fdbclient / SystemData . h " <nl> + # include " fdbclient / MasterProxyInterface . h " <nl> # include " fdbclient / NativeAPI . actor . h " <nl> # include " fdbclient / Notified . h " <nl> # include " fdbclient / StatusClient . h " <nl> - # include " fdbclient / MasterProxyInterface . h " <nl> - # include " fdbclient / DatabaseContext . h " <nl> - # include " fdbserver / WorkerInterface . actor . h " <nl> - # include " fdbserver / TLogInterface . h " <nl> - # include " fdbserver / MoveKeys . actor . h " <nl> - # include " fdbserver / Knobs . h " <nl> - # include " fdbserver / WaitFailure . h " <nl> - # include " fdbserver / IKeyValueStore . h " <nl> + # include " fdbclient / SystemData . h " <nl> # include " fdbclient / VersionedMap . h " <nl> + # include " fdbserver / FDBExecHelper . actor . h " <nl> + # include " fdbserver / IKeyValueStore . h " <nl> + # include " fdbserver / Knobs . h " <nl> + # include " fdbserver / LatencyBandConfig . h " <nl> + # include " fdbserver / LogProtocolMessage . h " <nl> + # include " fdbserver / LogSystem . h " <nl> + # include " fdbserver / MoveKeys . actor . h " <nl> + # include " fdbserver / RecoveryState . h " <nl> # include " fdbserver / StorageMetrics . h " <nl> - # include " fdbrpc / sim_validation . h " <nl> # include " fdbserver / ServerDBInfo . h " <nl> + # include " fdbserver / TLogInterface . h " <nl> + # include " fdbserver / WaitFailure . h " <nl> + # include " fdbserver / WorkerInterface . actor . h " <nl> + # include " fdbrpc / sim_validation . h " <nl> # include " fdbrpc / Smoother . h " <nl> # include " flow / Stats . h " <nl> - # include " fdbserver / LogSystem . h " <nl> - # include " fdbserver / RecoveryState . h " <nl> - # include " fdbserver / LogProtocolMessage . h " <nl> - # include " fdbserver / LatencyBandConfig . h " <nl> - # include " fdbserver / FDBExecHelper . actor . h " <nl> # include " flow / TDMetric . actor . h " <nl> # include < type_traits > <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> void addMutation ( Reference < T > & target , Version version , MutationRef const & muta <nl> } <nl> <nl> template < class T > <nl> - void splitMutations ( StorageServer * data , KeyRangeMap < T > & map , VerUpdateRef const & update , vector < int > & execIndex ) { <nl> + void splitMutations ( StorageServer * data , KeyRangeMap < T > & map , VerUpdateRef const & update ) { <nl> for ( int i = 0 ; i < update . mutations . size ( ) ; i + + ) { <nl> splitMutation ( data , map , update . mutations [ i ] , update . version ) ; <nl> - if ( update . mutations [ i ] . type = = MutationRef : : Exec ) { <nl> - execIndex . 
push_back ( i ) ; <nl> - } <nl> } <nl> } <nl> <nl> void splitMutation ( StorageServer * data , KeyRangeMap < T > & map , MutationRef const & <nl> addMutation ( i - > value ( ) , ver , MutationRef ( ( MutationRef : : Type ) m . type , k . begin , k . end ) ) ; <nl> } <nl> } <nl> - } else if ( m . type = = MutationRef : : Exec ) { <nl> } else <nl> ASSERT ( false ) ; / / Unknown mutation type in splitMutations <nl> } <nl> <nl> - ACTOR Future < Void > <nl> - snapHelper ( StorageServer * data , MutationRef m , Version ver ) <nl> - { <nl> - state std : : string cmd = m . param1 . toString ( ) ; <nl> - if ( ( cmd = = execDisableTLogPop ) | | ( cmd = = execEnableTLogPop ) ) { <nl> - TraceEvent ( " IgnoreNonSnapCommands " ) . detail ( " ExecCommand " , cmd ) ; <nl> - return Void ( ) ; <nl> - } <nl> - <nl> - state ExecCmdValueString execArg ( m . param2 ) ; <nl> - state StringRef uidStr = execArg . getBinaryArgValue ( LiteralStringRef ( " uid " ) ) ; <nl> - state int err = 0 ; <nl> - state Future < int > cmdErr ; <nl> - state UID execUID = UID : : fromString ( uidStr . toString ( ) ) ; <nl> - state bool skip = false ; <nl> - if ( cmd = = execSnap & & isTLogInSameNode ( ) ) { <nl> - skip = true ; <nl> - } <nl> - / / other storage has initiated the exec , so we can skip <nl> - if ( ! skip & & isExecOpInProgress ( execUID ) ) { <nl> - skip = true ; <nl> - } <nl> - <nl> - if ( ! skip ) { <nl> - setExecOpInProgress ( execUID ) ; <nl> - int err = wait ( execHelper ( & execArg , data - > folder , " role = storage " , 1 / * version * / ) ) ; <nl> - clearExecOpInProgress ( execUID ) ; <nl> - } <nl> - TraceEvent te = TraceEvent ( " ExecTraceStorage " ) ; <nl> - te . detail ( " Uid " , uidStr . toString ( ) ) ; <nl> - te . detail ( " Status " , err ) ; <nl> - te . detail ( " Role " , " storage " ) ; <nl> - te . detail ( " Version " , ver ) ; <nl> - te . detail ( " Mutation " , m . toString ( ) ) ; <nl> - te . detail ( " Mid " , data - > thisServerID . toString ( ) ) ; <nl> - te . detail ( " DurableVersion " , data - > durableVersion . get ( ) ) ; <nl> - te . detail ( " DataVersion " , data - > version . get ( ) ) ; <nl> - te . detail ( " Tag " , data - > tag . toString ( ) ) ; <nl> - te . detail ( " SnapCreateSkipped " , skip ) ; <nl> - return Void ( ) ; <nl> - } <nl> - <nl> ACTOR Future < Void > fetchKeys ( StorageServer * data , AddingShard * shard ) { <nl> state TraceInterval interval ( " FetchKeys " ) ; <nl> state KeyRange keys = shard - > keys ; <nl> ACTOR Future < Void > fetchKeys ( StorageServer * data , AddingShard * shard ) { <nl> if ( this_block . more ) { <nl> Key nfk = this_block . readThrough . present ( ) ? this_block . readThrough . get ( ) : keyAfter ( this_block . end ( ) [ - 1 ] . key ) ; <nl> if ( nfk ! = keys . end ) { <nl> - state std : : deque < Standalone < VerUpdateRef > > updatesToSplit = std : : move ( shard - > updates ) ; <nl> + std : : deque < Standalone < VerUpdateRef > > updatesToSplit = std : : move ( shard - > updates ) ; <nl> <nl> / / This actor finishes committing the keys [ keys . begin , nfk ) that we already fetched . <nl> / / The remaining unfetched keys [ nfk , keys . end ) will become a separate AddingShard with its own fetchKeys . <nl> shard - > server - > addShard ( ShardInfo : : addingSplitLeft ( KeyRangeRef ( keys . begin , nfk ) , shard ) ) ; <nl> shard - > server - > addShard ( ShardInfo : : newAdding ( data , KeyRangeRef ( nfk , keys . end ) ) ) ; <nl> shard = data - > shards . rangeContaining ( keys . begin ) . 
value ( ) - > adding ; <nl> - state AddingShard * otherShard = data - > shards . rangeContaining ( nfk ) . value ( ) - > adding ; <nl> + AddingShard * otherShard = data - > shards . rangeContaining ( nfk ) . value ( ) - > adding ; <nl> keys = shard - > keys ; <nl> <nl> / / Split our prior updates . The ones that apply to our new , restricted key range will go back into shard - > updates , <nl> / / and the ones delivered to the new shard will be discarded because it is in WaitPrevious phase ( hasn ' t chosen a fetchVersion yet ) . <nl> / / What we are doing here is expensive and could get more expensive if we started having many more blocks per shard . May need optimization in the future . <nl> - state vector < int > execIdxVec ; <nl> - state std : : deque < Standalone < VerUpdateRef > > : : iterator u = updatesToSplit . begin ( ) ; <nl> + std : : deque < Standalone < VerUpdateRef > > : : iterator u = updatesToSplit . begin ( ) ; <nl> for ( ; u ! = updatesToSplit . end ( ) ; + + u ) { <nl> - ASSERT ( execIdxVec . size ( ) = = 0 ) ; <nl> - splitMutations ( data , data - > shards , * u , execIdxVec ) ; <nl> - for ( auto execIdx : execIdxVec ) { <nl> - wait ( snapHelper ( data , u - > mutations [ execIdx ] , u - > version ) ) ; <nl> - } <nl> - execIdxVec . clear ( ) ; <nl> + splitMutations ( data , data - > shards , * u ) ; <nl> } <nl> <nl> TEST ( true ) ; <nl> void ShardInfo : : addMutation ( Version version , MutationRef const & mutation ) { <nl> adding - > addMutation ( version , mutation ) ; <nl> else if ( readWrite ) <nl> readWrite - > addMutation ( version , mutation , this - > keys , readWrite - > updateEagerReads ) ; <nl> - else if ( ( mutation . type ! = MutationRef : : ClearRange ) <nl> - & & ( mutation . type ! = MutationRef : : Exec ) ) { <nl> + else if ( mutation . type ! = MutationRef : : ClearRange ) { <nl> TraceEvent ( SevError , " DeliveredToNotAssigned " ) . detail ( " Version " , version ) . detail ( " Mutation " , mutation . toString ( ) ) ; <nl> ASSERT ( false ) ; / / Mutation delivered to notAssigned shard ! <nl> } <nl> ACTOR Future < Void > update ( StorageServer * data , bool * pReceivedUpdate ) <nl> state VerUpdateRef * pUpdate = & fii . changes [ changeNum ] ; <nl> for ( ; mutationNum < pUpdate - > mutations . size ( ) ; mutationNum + + ) { <nl> updater . applyMutation ( data , pUpdate - > mutations [ mutationNum ] , pUpdate - > version ) ; <nl> - if ( pUpdate - > mutations [ mutationNum ] . type = = MutationRef : : Exec ) { <nl> - wait ( snapHelper ( data , pUpdate - > mutations [ mutationNum ] , pUpdate - > version ) ) ; <nl> - } <nl> mutationBytes + = pUpdate - > mutations [ mutationNum ] . totalSize ( ) ; <nl> injectedChanges = true ; <nl> if ( mutationBytes > SERVER_KNOBS - > DESIRED_UPDATE_BYTES ) { <nl> ACTOR Future < Void > update ( StorageServer * data , bool * pReceivedUpdate ) <nl> + + data - > counters . atomicMutations ; <nl> break ; <nl> } <nl> - if ( msg . type = = MutationRef : : Exec ) { <nl> - wait ( snapHelper ( data , msg , ver ) ) ; <nl> - } <nl> } <nl> else <nl> TraceEvent ( SevError , " DiscardingPeekedData " , data - > thisServerID ) . detail ( " Mutation " , msg . toString ( ) ) . detail ( " Version " , cloneCursor2 - > version ( ) . toString ( ) ) ; <nl> mmm a / fdbserver / worker . actor . cpp <nl> ppp b / fdbserver / worker . actor . cpp <nl> ACTOR Future < Void > workerSnapCreate ( WorkerSnapRequest snapReq , StringRef snapFol <nl> state ExecCmdValueString snapArg ( snapReq . 
snapPayload ) ; <nl> try { <nl> Standalone < StringRef > role = LiteralStringRef ( " role = " ) . withSuffix ( snapReq . role ) ; <nl> - int err = wait ( execHelper ( & snapArg , snapFolder . toString ( ) , role . toString ( ) , 2 / * version * / ) ) ; <nl> + int err = wait ( execHelper ( & snapArg , snapFolder . toString ( ) , role . toString ( ) ) ) ; <nl> std : : string uidStr = snapReq . snapUID . toString ( ) ; <nl> TraceEvent ( " ExecTraceWorker " ) <nl> . detail ( " Uid " , uidStr ) <nl> ACTOR Future < Void > workerServer ( <nl> systemMonitor ( ) ; <nl> loggingTrigger = delay ( loggingDelay , TaskPriority : : FlushTrace ) ; <nl> } <nl> - when ( state ExecuteRequest req = waitNext ( interf . execReq . getFuture ( ) ) ) { <nl> - state ExecCmdValueString execArg ( req . execPayload ) ; <nl> - try { <nl> - int err = wait ( execHelper ( & execArg , coordFolder , " role = coordinator " , 1 / * version * / ) ) ; <nl> - StringRef uidStr = execArg . getBinaryArgValue ( LiteralStringRef ( " uid " ) ) ; <nl> - auto tokenStr = " ExecTrace / Coordinators / " + uidStr . toString ( ) ; <nl> - auto te = TraceEvent ( " ExecTraceCoordinators " ) ; <nl> - te . detail ( " Uid " , uidStr . toString ( ) ) ; <nl> - te . detail ( " Status " , err ) ; <nl> - te . detail ( " Role " , " coordinator " ) ; <nl> - te . detail ( " Value " , coordFolder ) ; <nl> - te . detail ( " ExecPayload " , execArg . getCmdValueString ( ) . toString ( ) ) ; <nl> - te . trackLatest ( tokenStr . c_str ( ) ) ; <nl> - req . reply . send ( Void ( ) ) ; <nl> - } catch ( Error & e ) { <nl> - TraceEvent ( " ExecHelperError " ) . error ( e ) ; <nl> - req . reply . sendError ( broken_promise ( ) ) ; <nl> - } <nl> - } <nl> when ( state WorkerSnapRequest snapReq = waitNext ( interf . workerSnapReq . getFuture ( ) ) ) { <nl> Standalone < StringRef > snapFolder = StringRef ( folder ) ; <nl> if ( snapReq . role . toString ( ) = = " coord " ) { <nl> mmm a / fdbserver / workloads / SnapTest . actor . cpp <nl> ppp b / fdbserver / workloads / SnapTest . actor . cpp <nl> struct SnapTestWorkload : TestWorkload { <nl> std : : string restartInfoLocation ; / / file location to store the snap restore info <nl> int maxRetryCntToRetrieveMessage ; / / number of retires to do trackLatest <nl> bool skipCheck ; / / disable check if the exec fails <nl> - int snapVersion ; / / snapVersion to invoke <nl> <nl> public : / / ctor & dtor <nl> SnapTestWorkload ( WorkloadContext const & wcx ) <nl> struct SnapTestWorkload : TestWorkload { <nl> getOption ( options , LiteralStringRef ( " restartInfoLocation " ) , LiteralStringRef ( " simfdb / restartInfo . ini " ) ) <nl> . toString ( ) ; <nl> skipCheck = false ; <nl> - snapVersion = getOption ( options , LiteralStringRef ( " version " ) , 1 ) ; <nl> } <nl> <nl> public : / / workload functions <nl> struct SnapTestWorkload : TestWorkload { <nl> <nl> void getMetrics ( vector < PerfMetric > & m ) override { TraceEvent ( " SnapTestWorkloadGetMetrics " ) ; } <nl> <nl> - ACTOR Future < Void > snapExecHelper ( SnapTestWorkload * self , Database cx , StringRef keyRef , StringRef valueRef ) { <nl> - state Transaction tr ( cx ) ; <nl> - state int retry = 0 ; <nl> - loop { <nl> - try { <nl> - tr . execute ( keyRef , valueRef ) ; <nl> - wait ( tr . commit ( ) ) ; <nl> - break ; <nl> - } catch ( Error & e ) { <nl> - + + retry ; <nl> - if ( e . code ( ) = = error_code_txn_exec_log_anti_quorum ) { <nl> - self - > skipCheck = true ; <nl> - break ; <nl> - <nl> - } <nl> - if ( e . 
code ( ) = = error_code_cluster_not_fully_recovered ) { <nl> - TraceEvent ( SevWarnAlways , " ClusterNotFullyRecovered " ) <nl> - . error ( e ) ; <nl> - if ( retry > 10 ) { <nl> - self - > skipCheck = true ; <nl> - break ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - return Void ( ) ; <nl> - } <nl> - <nl> ACTOR Future < Void > _create_keys ( Database cx , std : : string prefix , bool even = true ) { <nl> state Transaction tr ( cx ) ; <nl> state vector < int64_t > keys ; <nl> struct SnapTestWorkload : TestWorkload { <nl> self - > snapUID = deterministicRandom ( ) - > randomUniqueID ( ) ; <nl> try { <nl> StringRef snapCmdRef = LiteralStringRef ( " / bin / snap_create . sh " ) ; <nl> - Future < Void > status = snapCreate ( cx , snapCmdRef , self - > snapUID , self - > snapVersion ) ; <nl> + Future < Void > status = snapCreate ( cx , snapCmdRef , self - > snapUID ) ; <nl> wait ( status ) ; <nl> break ; <nl> } catch ( Error & e ) { <nl> struct SnapTestWorkload : TestWorkload { <nl> snapFailed = true ; <nl> break ; <nl> } <nl> - if ( e . code ( ) = = error_code_cluster_not_fully_recovered ) { <nl> - + + retry ; <nl> - if ( retry > 10 ) { <nl> - snapFailed = true ; <nl> - break ; <nl> - } <nl> - } <nl> - if ( self - > snapVersion = = 2 ) { <nl> - + + retry ; <nl> - / / snap v2 can fail for many reasons , so retry for 5 times and then fail it <nl> - if ( retry > 5 ) { <nl> - snapFailed = true ; <nl> - break ; <nl> - } <nl> + + + retry ; <nl> + / / snap v2 can fail for many reasons , so retry for 5 times and then fail it <nl> + if ( retry > 5 ) { <nl> + snapFailed = true ; <nl> + break ; <nl> } <nl> } <nl> } <nl> struct SnapTestWorkload : TestWorkload { <nl> std : : string uidStr = self - > snapUID . toString ( ) ; <nl> ini . SetValue ( " RESTORE " , " RestoreSnapUID " , uidStr . c_str ( ) ) ; <nl> ini . SetValue ( " RESTORE " , " BackupFailed " , format ( " % d " , snapFailed ) . c_str ( ) ) ; <nl> - ini . SetValue ( " RESTORE " , " BackupVersion " , format ( " % d " , self - > snapVersion ) . c_str ( ) ) ; <nl> ini . SaveFile ( self - > restartInfoLocation . c_str ( ) ) ; <nl> / / write the snapUID to a file <nl> TraceEvent ( " SnapshotCreateStatus " ) . detail ( " Status " , ! snapFailed ? " Success " : " Failure " ) ; <nl> struct SnapTestWorkload : TestWorkload { <nl> throw operation_failed ( ) ; <nl> } <nl> } else if ( self - > testID = = 4 ) { <nl> - / / description : if disable of a TLog pop was not followed by a <nl> - / / corresponding enable , then TLog will automatically enable the <nl> - / / popping of TLogs . this test case validates that we auto <nl> - / / enable the popping of TLogs <nl> - state Standalone < StringRef > payLoadRef = LiteralStringRef ( " empty - binary : uid = a36b2ca0e8dab0452ac3e12b6b926f4b " ) ; <nl> - wait ( self - > snapExecHelper ( self , cx , execDisableTLogPop , payLoadRef ) ) ; <nl> - } else if ( self - > testID = = 5 ) { <nl> - / / snapshot create without disabling pop of the TLog <nl> - StringRef uidStr = LiteralStringRef ( " d78b08d47f341158e9a54d4baaf4a4dd " ) ; <nl> - self - > snapUID = UID : : fromString ( uidStr . toString ( ) ) ; <nl> - state Standalone < StringRef > snapPayload = LiteralStringRef ( " / bin / " <nl> - " snap_create . sh : uid = " ) . 
withSuffix ( uidStr ) ; <nl> - wait ( self - > snapExecHelper ( self , cx , execSnap , snapPayload ) ) ; <nl> - } else if ( self - > testID = = 6 ) { <nl> - / / disable popping of TLog and snapshot create with mis - matching <nl> - payLoadRef = LiteralStringRef ( " empty - binary : uid = f49d27ddf7a28b6549d930743e0ebdbe " ) ; <nl> - wait ( self - > snapExecHelper ( self , cx , execDisableTLogPop , payLoadRef ) ) ; <nl> - if ( self - > skipCheck ) { <nl> - return Void ( ) ; <nl> - } <nl> - <nl> - StringRef uidStr = LiteralStringRef ( " ba61e9612a561d60bd83ad83e1b63568 " ) ; <nl> - self - > snapUID = UID : : fromString ( uidStr . toString ( ) ) ; <nl> - snapPayload = LiteralStringRef ( " / bin / snap_create . sh : uid = " ) . withSuffix ( uidStr ) ; <nl> - wait ( self - > snapExecHelper ( self , cx , execSnap , snapPayload ) ) ; <nl> - } else if ( self - > testID = = 7 ) { <nl> / / create a snapshot with a non whitelisted binary path and operation <nl> / / should fail <nl> state bool testedFailure = false ; <nl> struct SnapTestWorkload : TestWorkload { <nl> self - > snapUID = deterministicRandom ( ) - > randomUniqueID ( ) ; <nl> try { <nl> StringRef snapCmdRef = LiteralStringRef ( " / bin / snap_create1 . sh " ) ; <nl> - Future < Void > status = snapCreate ( cx , snapCmdRef , self - > snapUID , self - > snapVersion ) ; <nl> + Future < Void > status = snapCreate ( cx , snapCmdRef , self - > snapUID ) ; <nl> wait ( status ) ; <nl> break ; <nl> } catch ( Error & e ) { <nl> mmm a / tests / CMakeLists . txt <nl> ppp b / tests / CMakeLists . txt <nl> add_fdb_test ( TEST_FILES fast / RandomUnitTests . txt ) <nl> add_fdb_test ( TEST_FILES fast / SelectorCorrectness . txt ) <nl> add_fdb_test ( TEST_FILES fast / Sideband . txt ) <nl> add_fdb_test ( TEST_FILES fast / SidebandWithStatus . txt ) <nl> - add_fdb_test ( TEST_FILES fast / SnapTestFailAndDisablePop . txt ) <nl> add_fdb_test ( TEST_FILES fast / SwizzledRollbackSideband . txt ) <nl> add_fdb_test ( TEST_FILES fast / SystemRebootTestCycle . txt ) <nl> add_fdb_test ( TEST_FILES fast / TaskBucketCorrectness . txt ) <nl> deleted file mode 100644 <nl> index 00676a78bb . . 0000000000 <nl> mmm a / tests / fast / SnapTestFailAndDisablePop . txt <nl> ppp / dev / null <nl> <nl> - ; verify that the TLog popping disable times out and switches to enable mode <nl> - ; automatically , if not enabled specifically <nl> - testTitle = SnapTLogPopDisableTimeout <nl> - testName = SnapTest <nl> - numSnaps = 1 <nl> - maxSnapDelay = 3 . 0 <nl> - testID = 4 <nl> - <nl> - ; snapCreate without TLogPopDisable <nl> - testTitle = SnapCreateWithNoDisablePop <nl> - testName = SnapTest <nl> - numSnaps = 1 <nl> - maxSnapDelay = 3 . 0 <nl> - testID = 5 <nl> - <nl> - ; snapCreate and tlogPopDisable with mis - matched UID <nl> - testTitle = SnapCreateDisableTLogPopMismatch <nl> - testName = SnapTest <nl> - numSnaps = 1 <nl> - maxSnapDelay = 3 . 0 <nl> - testID = 6 <nl> - <nl> - ; snapCreate with binary path that is not whitelisted <nl> - testTitle = SnapCreateNotWhitelistedBinaryPath <nl> - testName = SnapTest <nl> - numSnaps = 1 <nl> - maxSnapDelay = 3 . 0 <nl> - testID = 7 <nl> mmm a / tests / restarting / from_6 . 2 . 0 / SnapCycleRestart - 1 . txt <nl> ppp b / tests / restarting / from_6 . 2 . 0 / SnapCycleRestart - 1 . txt <nl> testTitle = SnapCyclePre <nl> maxSnapDelay = 10 . 0 <nl> testID = 1 <nl> clearAfterTest = false <nl> - version = 2 <nl> <nl> testTitle = SnapCycleShutdown <nl> ; save and shutdown <nl> mmm a / tests / restarting / from_6 . 2 . 
0 / SnapTestAttrition - 1 . txt <nl> ppp b / tests / restarting / from_6 . 2 . 0 / SnapTestAttrition - 1 . txt <nl> testTitle = SnapTestTakeSnap <nl> maxSnapDelay = 10 . 0 <nl> testID = 1 <nl> clearAfterTest = false <nl> - version = 2 <nl> <nl> testName = Attrition <nl> testDuration = 10 . 0 <nl> mmm a / tests / restarting / from_6 . 2 . 0 / SnapTestRestart - 1 . txt <nl> ppp b / tests / restarting / from_6 . 2 . 0 / SnapTestRestart - 1 . txt <nl> testTitle = SnapTestTakeSnap <nl> maxSnapDelay = 10 . 0 <nl> testID = 1 <nl> clearAfterTest = false <nl> - version = 2 <nl> <nl> testTitle = SnapTestPost <nl> ; write 1000 Keys ending with odd numbers <nl> mmm a / tests / restarting / from_6 . 2 . 0 / SnapTestSimpleRestart - 1 . txt <nl> ppp b / tests / restarting / from_6 . 2 . 0 / SnapTestSimpleRestart - 1 . txt <nl> testTitle = SnapSimpleTakeSnap <nl> maxSnapDelay = 5 . 0 <nl> testID = 1 <nl> clearAfterTest = false <nl> - version = 2 <nl> <nl> ; write 1000 Keys ending with odd number <nl> testTitle = SnapSimplePost <nl> testTitle = SnapSimplePost <nl> testID = 2 <nl> clearAfterTest = false <nl> <nl> - ; save and shutdown <nl> + ; snapCreate with binary path that is not whitelisted <nl> + testTitle = SnapCreateNotWhitelistedBinaryPath <nl> + testName = SnapTest <nl> + numSnaps = 1 <nl> + maxSnapDelay = 3 . 0 <nl> + testID = 4 <nl> + <nl> + ; save and shutdown <nl> testTitle = SnapSimpleShutdown <nl> testName = SaveAndKill <nl> restartInfoLocation = simfdb / restartInfo . ini <nl>
| remove snap v1 related code | apple/foundationdb | 9afd162e2f6478cb25d7156aa6db3b9d9ec03c4d | 2019-07-26T00:29:31Z |
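Across the two TLog implementations, the proxy-side commit request, the storage server, and the worker, the commit above deletes the whole snap v1 path, in which a snapshot rode through the commit stream as an Exec mutation that tLogCommit had to intercept under execOpLock. What remains is the v2 path: an explicit snapshot RPC per role, an execHelper call that no longer needs a version argument, and a single restore-time cleanup of the data folder in fdbserver.actor.cpp. That cleanup is the densest surviving logic, so here is a minimal sketch of its shape, under stated assumptions: restoreFromSnap and moveRoleFiles are invented names, and C++17 std::filesystem stands in for FDB's platform::listDirectories, eraseDirectoryRecursive, and renameFile helpers.

// Hypothetical sketch of the snap-v2 restore cleanup kept by this commit.
// Not FDB code: names are invented and std::filesystem substitutes for
// FDB's platform helpers so the sketch is self-contained (C++17).
#include <filesystem>
#include <string>
#include <vector>

namespace fs = std::filesystem;

// Move every regular file under src whose name contains `match` into dst
// (the role played by restoreRoleFilesHelper in the real code).
static void moveRoleFiles(const fs::path& src, const fs::path& dst,
                          const std::string& match) {
  fs::create_directories(dst);
  for (const auto& f : fs::directory_iterator(src)) {
    const std::string name = f.path().filename().string();
    if (f.is_regular_file() && name.find(match) != std::string::npos)
      fs::rename(f.path(), dst / name);
  }
}

void restoreFromSnap(const fs::path& dataFolder, const std::string& snapUID) {
  // Take the directory listing up front; the loops below mutate the tree.
  std::vector<fs::path> dirs;
  for (const auto& d : fs::directory_iterator(dataFolder))
    if (d.is_directory()) dirs.push_back(d.path());

  // Pass 1: in every non-snap role directory, delete all files except the
  // cluster file and the fitness marker.
  for (const auto& dir : dirs) {
    const std::string name = dir.filename().string();
    if (name.find(snapUID) != std::string::npos) continue;  // snap dir: keep
    std::vector<fs::path> files;
    for (const auto& f : fs::directory_iterator(dir)) files.push_back(f.path());
    for (const auto& f : files) {
      const std::string fname = f.filename().string();
      if (fs::is_regular_file(f) && fname != "fdb.cluster" && fname != "fitness")
        fs::remove(f);
    }
  }

  // Pass 2: drop snap directories left behind by other (aborted) snapshots,
  // plus any empty or partial snap directory from this one.
  for (const auto& dir : dirs) {
    const std::string name = dir.filename().string();
    if (name.find(snapUID) == std::string::npos) {
      if (name.find("snap") != std::string::npos) fs::remove_all(dir);
    } else if (fs::is_empty(dir)) {
      fs::remove_all(dir);
    }
  }

  // Pass 3: move the snapshotted files for each role back into the original
  // role directory; per the substr(0, 32) in the diff, the first 32
  // characters of a snap directory name are the original directory name.
  for (const auto& dir : dirs) {
    const std::string name = dir.filename().string();
    if (!fs::exists(dir) || name.find(snapUID) == std::string::npos) continue;
    const fs::path dst = dataFolder / name.substr(0, 32);
    if (name.find("tlog") != std::string::npos)
      moveRoleFiles(dir, dst, "log");
    else if (name.find("storage") != std::string::npos)
      moveRoleFiles(dir, dst, "storage");
    else if (name.find("coord") != std::string::npos)
      moveRoleFiles(dir, dst, "coordination");
  }
}

Listing the directories once before mutating anything mirrors the listDirectories-then-act shape of the original and sidesteps iterator invalidation while files and directories are being removed.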
mmm a / src / code - stub - assembler . cc <nl> ppp b / src / code - stub - assembler . cc <nl> void CodeStubAssembler : : HandleBreakOnNode ( ) { <nl> } <nl> <nl> void CodeStubAssembler : : Assert ( const NodeGenerator & condition_body , <nl> - const char * message , const char * file , <nl> - int line ) { <nl> + const char * message , const char * file , int line , <nl> + Node * extra_node1 , const char * extra_node1_name , <nl> + Node * extra_node2 , const char * extra_node2_name , <nl> + Node * extra_node3 , const char * extra_node3_name , <nl> + Node * extra_node4 , const char * extra_node4_name , <nl> + Node * extra_node5 , <nl> + const char * extra_node5_name ) { <nl> # if defined ( DEBUG ) <nl> if ( FLAG_debug_code ) { <nl> - Check ( condition_body , message , file , line ) ; <nl> + Check ( condition_body , message , file , line , extra_node1 , extra_node1_name , <nl> + extra_node2 , extra_node2_name , extra_node3 , extra_node3_name , <nl> + extra_node4 , extra_node4_name , extra_node5 , extra_node5_name ) ; <nl> } <nl> # endif <nl> } <nl> <nl> + # ifdef DEBUG <nl> + namespace { <nl> + void MaybePrintNodeWithName ( CodeStubAssembler * csa , Node * node , <nl> + const char * node_name ) { <nl> + if ( node ! = nullptr ) { <nl> + csa - > CallRuntime ( Runtime : : kPrintWithNameForAssert , csa - > SmiConstant ( 0 ) , <nl> + csa - > StringConstant ( node_name ) , node ) ; <nl> + } <nl> + } <nl> + } / / namespace <nl> + # endif <nl> + <nl> void CodeStubAssembler : : Check ( const NodeGenerator & condition_body , <nl> - const char * message , const char * file , int line ) { <nl> + const char * message , const char * file , int line , <nl> + Node * extra_node1 , const char * extra_node1_name , <nl> + Node * extra_node2 , const char * extra_node2_name , <nl> + Node * extra_node3 , const char * extra_node3_name , <nl> + Node * extra_node4 , const char * extra_node4_name , <nl> + Node * extra_node5 , const char * extra_node5_name ) { <nl> Label ok ( this ) ; <nl> Label not_ok ( this , Label : : kDeferred ) ; <nl> if ( message ! = nullptr & & FLAG_code_comments ) { <nl> void CodeStubAssembler : : Check ( const NodeGenerator & condition_body , <nl> SNPrintF ( buffer , " CSA_ASSERT failed : % s \ n " , message ) ; <nl> } <nl> CallRuntime ( Runtime : : kGlobalPrint , SmiConstant ( 0 ) , <nl> - HeapConstant ( factory ( ) - > InternalizeUtf8String ( & ( buffer [ 0 ] ) ) ) ) ; <nl> + StringConstant ( & ( buffer [ 0 ] ) ) ) ; <nl> } <nl> + <nl> + # ifdef DEBUG <nl> + / / Only print the extra nodes in debug builds . <nl> + MaybePrintNodeWithName ( this , extra_node1 , extra_node1_name ) ; <nl> + MaybePrintNodeWithName ( this , extra_node2 , extra_node2_name ) ; <nl> + MaybePrintNodeWithName ( this , extra_node3 , extra_node3_name ) ; <nl> + MaybePrintNodeWithName ( this , extra_node4 , extra_node4_name ) ; <nl> + MaybePrintNodeWithName ( this , extra_node5 , extra_node5_name ) ; <nl> + # endif <nl> + <nl> DebugBreak ( ) ; <nl> Goto ( & ok ) ; <nl> BIND ( & ok ) ; <nl> mmm a / src / code - stub - assembler . h <nl> ppp b / src / code - stub - assembler . 
h <nl> class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler : : CodeAssembler { <nl> <nl> typedef std : : function < Node * ( ) > NodeGenerator ; <nl> <nl> - void Assert ( const NodeGenerator & condition_body , const char * string = nullptr , <nl> - const char * file = nullptr , int line = 0 ) ; <nl> - void Check ( const NodeGenerator & condition_body , const char * string = nullptr , <nl> - const char * file = nullptr , int line = 0 ) ; <nl> + void Assert ( const NodeGenerator & condition_body , <nl> + const char * message = nullptr , const char * file = nullptr , <nl> + int line = 0 , Node * extra_node1 = nullptr , <nl> + const char * extra_node1_name = " " , Node * extra_node2 = nullptr , <nl> + const char * extra_node2_name = " " , Node * extra_node3 = nullptr , <nl> + const char * extra_node3_name = " " , Node * extra_node4 = nullptr , <nl> + const char * extra_node4_name = " " , Node * extra_node5 = nullptr , <nl> + const char * extra_node5_name = " " ) ; <nl> + void Check ( const NodeGenerator & condition_body , const char * message = nullptr , <nl> + const char * file = nullptr , int line = 0 , <nl> + Node * extra_node1 = nullptr , const char * extra_node1_name = " " , <nl> + Node * extra_node2 = nullptr , const char * extra_node2_name = " " , <nl> + Node * extra_node3 = nullptr , const char * extra_node3_name = " " , <nl> + Node * extra_node4 = nullptr , const char * extra_node4_name = " " , <nl> + Node * extra_node5 = nullptr , const char * extra_node5_name = " " ) ; <nl> <nl> Node * Select ( Node * condition , const NodeGenerator & true_body , <nl> const NodeGenerator & false_body , MachineRepresentation rep ) ; <nl> class ToDirectStringAssembler : public CodeStubAssembler { <nl> ( csa ) - > Check ( [ & ] { return ( x ) ; } , # x , __FILE__ , __LINE__ ) <nl> <nl> # ifdef DEBUG <nl> - # define CSA_ASSERT ( csa , x ) \ <nl> - ( csa ) - > Assert ( [ & ] { return ( x ) ; } , # x , __FILE__ , __LINE__ ) <nl> - # define CSA_ASSERT_JS_ARGC_OP ( csa , Op , op , expected ) \ <nl> - ( csa ) - > Assert ( \ <nl> - [ & ] { \ <nl> - compiler : : Node * const argc = \ <nl> - ( csa ) - > Parameter ( Descriptor : : kActualArgumentsCount ) ; \ <nl> - return ( csa ) - > Op ( argc , ( csa ) - > Int32Constant ( expected ) ) ; \ <nl> - } , \ <nl> - " argc " # op " " # expected , __FILE__ , __LINE__ ) <nl> + / / Add stringified versions to the given values , except the first . That is , <nl> + / / transform <nl> + / / x , a , b , c , d , e , f <nl> + / / to <nl> + / / a , " a " , b , " b " , c , " c " , d , " d " , e , " e " , f , " f " <nl> + / / <nl> + / / __VA_ARGS__ is ignored to allow the caller to pass through too many <nl> + / / parameters , and the first element is ignored to support having no extra <nl> + / / values without empty __VA_ARGS__ ( which cause all sorts of problems with <nl> + / / extra commas ) . <nl> + # define CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5 ( _ , v1 , v2 , v3 , v4 , v5 , . . . ) \ <nl> + v1 , # v1 , v2 , # v2 , v3 , # v3 , v4 , # v4 , v5 , # v5 <nl> + <nl> + / / Stringify the given variable number of arguments . The arguments are trimmed <nl> + / / to 5 if there are too many , and padded with nullptr if there are not enough . <nl> + # define CSA_ASSERT_STRINGIFY_EXTRA_VALUES ( . . . ) \ <nl> + CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5 ( __VA_ARGS__ , nullptr , nullptr , nullptr , \ <nl> + nullptr , nullptr ) <nl> + <nl> + # define CSA_ASSERT_GET_CONDITION ( x , . . . ) ( x ) <nl> + # define CSA_ASSERT_GET_CONDITION_STR ( x , . . . 
) # x <nl> + <nl> + / / CSA_ASSERT ( csa , < condition > , < extra values to print . . . > ) <nl> + <nl> + / / We have to jump through some hoops to allow < extra values to print . . . > to be <nl> + / / empty . <nl> + # define CSA_ASSERT ( csa , . . . ) \ <nl> + ( csa ) - > Assert ( [ & ] { return CSA_ASSERT_GET_CONDITION ( __VA_ARGS__ ) ; } , \ <nl> + CSA_ASSERT_GET_CONDITION_STR ( __VA_ARGS__ ) , __FILE__ , __LINE__ , \ <nl> + CSA_ASSERT_STRINGIFY_EXTRA_VALUES ( __VA_ARGS__ ) ) <nl> + <nl> + # define CSA_ASSERT_JS_ARGC_OP ( csa , Op , op , expected ) \ <nl> + ( csa ) - > Assert ( \ <nl> + [ & ] { \ <nl> + compiler : : Node * const argc = \ <nl> + ( csa ) - > Parameter ( Descriptor : : kActualArgumentsCount ) ; \ <nl> + return ( csa ) - > Op ( argc , ( csa ) - > Int32Constant ( expected ) ) ; \ <nl> + } , \ <nl> + " argc " # op " " # expected , __FILE__ , __LINE__ , \ <nl> + SmiFromWord32 ( ( csa ) - > Parameter ( Descriptor : : kActualArgumentsCount ) ) , \ <nl> + " argc " ) <nl> <nl> # define CSA_ASSERT_JS_ARGC_EQ ( csa , expected ) \ <nl> CSA_ASSERT_JS_ARGC_OP ( csa , Word32Equal , = = , expected ) <nl> class ToDirectStringAssembler : public CodeStubAssembler { <nl> Variable name ( this CSA_DEBUG_INFO ( name ) , __VA_ARGS__ ) ; <nl> <nl> # else / / DEBUG <nl> - # define CSA_ASSERT ( csa , x ) ( ( void ) 0 ) <nl> + # define CSA_ASSERT ( csa , . . . ) ( ( void ) 0 ) <nl> # define CSA_ASSERT_JS_ARGC_EQ ( csa , expected ) ( ( void ) 0 ) <nl> # define CSA_DEBUG_INFO ( name ) <nl> # define BIND ( label ) Bind ( label ) ; <nl> class ToDirectStringAssembler : public CodeStubAssembler { <nl> # endif / / DEBUG <nl> <nl> # ifdef ENABLE_SLOW_DCHECKS <nl> - # define CSA_SLOW_ASSERT ( csa , x ) \ <nl> + # define CSA_SLOW_ASSERT ( csa , . . . ) \ <nl> if ( FLAG_enable_slow_asserts ) { \ <nl> - CSA_ASSERT ( csa , x ) ; \ <nl> + CSA_ASSERT ( csa , __VA_ARGS__ ) ; \ <nl> } <nl> # else <nl> - # define CSA_SLOW_ASSERT ( csa , x ) ( ( void ) 0 ) <nl> + # define CSA_SLOW_ASSERT ( csa , . . . ) ( ( void ) 0 ) <nl> # endif <nl> <nl> DEFINE_OPERATORS_FOR_FLAGS ( CodeStubAssembler : : AllocationFlags ) ; <nl> mmm a / src / runtime / runtime - test . cc <nl> ppp b / src / runtime / runtime - test . cc <nl> RUNTIME_FUNCTION ( Runtime_DebugPrint ) { <nl> return args [ 0 ] ; / / return TOS <nl> } <nl> <nl> + RUNTIME_FUNCTION ( Runtime_PrintWithNameForAssert ) { <nl> + SealHandleScope shs ( isolate ) ; <nl> + DCHECK_EQ ( 2 , args . length ( ) ) ; <nl> + <nl> + CONVERT_ARG_CHECKED ( String , name , 0 ) ; <nl> + <nl> + PrintF ( " * " ) ; <nl> + StringCharacterStream stream ( name ) ; <nl> + while ( stream . HasMore ( ) ) { <nl> + uint16_t character = stream . GetNext ( ) ; <nl> + PrintF ( " % c " , character ) ; <nl> + } <nl> + PrintF ( " : " ) ; <nl> + args [ 1 ] - > ShortPrint ( ) ; <nl> + PrintF ( " \ n " ) ; <nl> + <nl> + return isolate - > heap ( ) - > undefined_value ( ) ; <nl> + } <nl> <nl> RUNTIME_FUNCTION ( Runtime_DebugTrace ) { <nl> SealHandleScope shs ( isolate ) ; <nl> mmm a / src / runtime / runtime . h <nl> ppp b / src / runtime / runtime . h <nl> namespace internal { <nl> F ( DebugPrint , 1 , 1 ) \ <nl> F ( DebugTrace , 0 , 1 ) \ <nl> F ( DebugTrackRetainingPath , 1 , 1 ) \ <nl> + F ( PrintWithNameForAssert , 2 , 1 ) \ <nl> F ( GetExceptionDetails , 1 , 1 ) \ <nl> F ( GlobalPrint , 1 , 1 ) \ <nl> F ( SystemBreak , 0 , 1 ) \ <nl>
|
[CSA] Allow Assert to print variables
|
v8/v8
|
eff31fd1eae0a57319109205afa5bfd025f8ccce
|
2017-07-25T15:37:55Z
|
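The commit above extends CSA_ASSERT so that a failing assertion can print up to five extra nodes next to the stringified condition. A minimal sketch of how a caller might use it inside a CodeStubAssembler-derived builtin, assuming a debug build with --debug-code; the parameter names and surrounding builtin are hypothetical, not taken from the patch:

```cpp
// Fragment from inside a hypothetical CodeStubAssembler builtin.
Node* receiver = Parameter(Descriptor::kReceiver);
Node* index = Parameter(Descriptor::kIndex);

// Before this change only the stringified condition was printed on
// failure; now each extra value is printed (roughly as "* index: <value>")
// via Runtime::kPrintWithNameForAssert before DebugBreak() is reached.
CSA_ASSERT(this, TaggedIsSmi(index), index, receiver);
```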
mmm a / tensorflow / python / compat / compat . py <nl> ppp b / tensorflow / python / compat / compat . py <nl> <nl> # This value changes every day with an automatic CL . It can be modified in code <nl> # via ` forward_compatibility_horizon ( ) ` or with the environment variable <nl> # TF_FORWARD_COMPATIBILITY_DELTA_DAYS , which is added to the compatibility date . <nl> - _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 3 , 6 ) <nl> + _FORWARD_COMPATIBILITY_HORIZON = datetime . date ( 2020 , 3 , 7 ) <nl> _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = " TF_FORWARD_COMPATIBILITY_DELTA_DAYS " <nl> _FORWARD_COMPATIBILITY_DATE_NUMBER = None <nl> <nl>
|
compat: Update forward compatibility horizon to 2020-03-07
|
tensorflow/tensorflow
|
81e316f16531a6a6dcd58a2f92bc9e8f0f9c3ef6
|
2020-03-07T09:07:09Z
|
mmm a / src / sqlitedb . cpp <nl> ppp b / src / sqlitedb . cpp <nl> bool DBBrowserDB : : updateRecord ( int wrow , int wcol , const QByteArray & wtext ) <nl> <nl> sqlite3_stmt * stmt ; <nl> int success = 1 ; <nl> - if ( sqlite3_prepare ( _db , sql . toUtf8 ( ) , - 1 , & stmt , 0 ) ! = SQLITE_OK ) <nl> + if ( sqlite3_prepare_v2 ( _db , sql . toUtf8 ( ) , - 1 , & stmt , 0 ) ! = SQLITE_OK ) <nl> success = 0 ; <nl> if ( success = = 1 & & sqlite3_bind_text ( stmt , 1 , wtext . constData ( ) , wtext . length ( ) , SQLITE_STATIC ) ! = SQLITE_OK ) <nl> success = - 1 ; <nl> void DBBrowserDB : : getTableRecords ( const QString & tablename , const QString & ord <nl> <nl> QString sql = QString ( " SELECT rowid , * FROM ` % 1 ` ORDER BY % 2 ; " ) . arg ( tablename ) . arg ( orderby ) ; <nl> logSQL ( sql , kLogMsg_App ) ; <nl> - if ( sqlite3_prepare ( _db , sql . toUtf8 ( ) , - 1 , & stmt , 0 ) ! = SQLITE_OK ) <nl> + if ( sqlite3_prepare_v2 ( _db , sql . toUtf8 ( ) , - 1 , & stmt , 0 ) ! = SQLITE_OK ) <nl> { <nl> lastErrorMessage = QObject : : tr ( " could not get fields " ) ; <nl> return ; <nl> resultMap DBBrowserDB : : getFindResults ( const QString & wstatement ) <nl> lastErrorMessage = QObject : : tr ( " no error " ) ; <nl> logSQL ( wstatement , kLogMsg_App ) ; <nl> QByteArray statementutf8 = wstatement . toUtf8 ( ) ; <nl> - err = sqlite3_prepare ( _db , statementutf8 , statementutf8 . length ( ) , <nl> + err = sqlite3_prepare_v2 ( _db , statementutf8 , statementutf8 . length ( ) , <nl> & vm , & tail ) ; <nl> if ( err = = SQLITE_OK ) { <nl> int rownum = 0 ; <nl> QString DBBrowserDB : : getTableSQL ( const QString & sTable ) <nl> QString statement = QString ( " SELECT sql FROM sqlite_master WHERE name = ' % 1 ' ; " ) . arg ( sTable ) ; <nl> <nl> QByteArray utf8Statement = statement . toUtf8 ( ) ; <nl> - err = sqlite3_prepare ( _db , utf8Statement , utf8Statement . length ( ) , <nl> + err = sqlite3_prepare_v2 ( _db , utf8Statement , utf8Statement . length ( ) , <nl> & vm , & tail ) ; <nl> if ( err = = SQLITE_OK ) { <nl> logSQL ( statement , kLogMsg_App ) ; <nl> void DBBrowserDB : : updateSchema ( ) <nl> QString statement = " SELECT type , name , sql FROM sqlite_master ; " ; <nl> <nl> QByteArray utf8Statement = statement . toUtf8 ( ) ; <nl> - err = sqlite3_prepare ( _db , utf8Statement , utf8Statement . length ( ) , <nl> + err = sqlite3_prepare_v2 ( _db , utf8Statement , utf8Statement . length ( ) , <nl> & vm , & tail ) ; <nl> if ( err = = SQLITE_OK ) { <nl> logSQL ( statement , kLogMsg_App ) ; <nl> void DBBrowserDB : : updateSchema ( ) <nl> { <nl> statement = QString ( " PRAGMA TABLE_INFO ( ` % 1 ` ) ; " ) . arg ( ( * it ) . getname ( ) ) ; <nl> logSQL ( statement , kLogMsg_App ) ; <nl> - err = sqlite3_prepare ( _db , statement . toUtf8 ( ) , statement . length ( ) , <nl> + err = sqlite3_prepare_v2 ( _db , statement . toUtf8 ( ) , statement . length ( ) , <nl> & vm , & tail ) ; <nl> if ( err = = SQLITE_OK ) { <nl> ( * it ) . fldmap . clear ( ) ; <nl> QString DBBrowserDB : : getPragma ( const QString & pragma ) <nl> QString retval = " " ; <nl> <nl> / / Get value from DB <nl> - int err = sqlite3_prepare ( _db , sql . toUtf8 ( ) , sql . toUtf8 ( ) . length ( ) , & vm , & tail ) ; <nl> + int err = sqlite3_prepare_v2 ( _db , sql . toUtf8 ( ) , sql . toUtf8 ( ) . length ( ) , & vm , & tail ) ; <nl> if ( err = = SQLITE_OK ) { <nl> logSQL ( sql , kLogMsg_App ) ; <nl> if ( sqlite3_step ( vm ) = = SQLITE_ROW ) <nl>
|
Replace sqlite3_prepare with _v2 calls
|
sqlitebrowser/sqlitebrowser
|
8f6d8213cbd9393fe0df807f4a2f4322574be573
|
2013-03-22T05:06:06Z
|
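sqlite3_prepare_v2() differs from the legacy sqlite3_prepare() in two user-visible ways: sqlite3_step() returns detailed error codes directly instead of a generic SQLITE_ERROR, and prepared statements survive schema changes by recompiling automatically. A minimal sketch of the prepare/step/finalize pattern the patch moves to; the helper and its query are illustrative, not code from sqlitebrowser:

```cpp
#include <sqlite3.h>
#include <cstdio>

// Count the rows of a table using the v2 prepare interface.
// The table name is assumed to be a trusted identifier here.
bool printRowCount(sqlite3* db, const char* table) {
  char sql[128];
  snprintf(sql, sizeof(sql), "SELECT COUNT(*) FROM `%s`;", table);
  sqlite3_stmt* stmt = nullptr;
  if (sqlite3_prepare_v2(db, sql, -1, &stmt, nullptr) != SQLITE_OK) {
    fprintf(stderr, "prepare failed: %s\n", sqlite3_errmsg(db));
    return false;
  }
  bool ok = false;
  if (sqlite3_step(stmt) == SQLITE_ROW) {  // detailed codes with _v2
    printf("%s: %d rows\n", table, sqlite3_column_int(stmt, 0));
    ok = true;
  }
  sqlite3_finalize(stmt);
  return ok;
}
```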
mmm a / src / runtime / base / util / http_client . cpp <nl> ppp b / src / runtime / base / util / http_client . cpp <nl> int HttpClient : : impl ( const char * url , const char * data , int size , <nl> ssl [ " capath " ] . toString ( ) . data ( ) ) ; <nl> } <nl> if ( ssl . exists ( " cafile " ) ) { <nl> - curl_easy_setopt ( cp , CURLOPT_CAPATH , <nl> + curl_easy_setopt ( cp , CURLOPT_CAINFO , <nl> ssl [ " cafile " ] . toString ( ) . data ( ) ) ; <nl> } <nl> if ( ssl . exists ( " local_cert " ) ) { <nl>
|
fix " cafile " stream context arg in hhvm
|
facebook/hhvm
|
552e2715fb2de55319562e0c0be3bb1f8d1ed2b5
|
2012-11-01T17:42:42Z
|
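The bug fixed above comes from two similarly named libcurl options with different semantics: CURLOPT_CAINFO takes the path of a single CA bundle file, while CURLOPT_CAPATH takes a directory of individual (c_rehash-processed) certificates. Passing a "cafile" value to CURLOPT_CAPATH, as the old code did, silently fails to load the bundle. A small sketch of the corrected mapping; the helper function is illustrative:

```cpp
#include <curl/curl.h>

// Map the stream-context SSL keys onto the matching libcurl options.
// "capath" is a directory of certificates; "cafile" is one PEM bundle file.
void applySslOptions(CURL* cp, const char* capath, const char* cafile) {
  if (capath != nullptr) {
    curl_easy_setopt(cp, CURLOPT_CAPATH, capath);  // directory of certs
  }
  if (cafile != nullptr) {
    curl_easy_setopt(cp, CURLOPT_CAINFO, cafile);  // single bundle file
  }
}
```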
mmm a / lib / SILPasses / IPO / GlobalOpt . cpp <nl> ppp b / lib / SILPasses / IPO / GlobalOpt . cpp <nl> SILGlobalVariable * SILGlobalOpt : : getVariableOfGlobalInit ( SILFunction * AddrF ) { <nl> } <nl> <nl> static bool canBeChangedExternally ( SILGlobalVariable * SILG ) { <nl> + <nl> + / / Don ' t assume anything about globals which are imported from other modules . <nl> + if ( isAvailableExternally ( SILG - > getLinkage ( ) ) ) <nl> + return true ; <nl> + <nl> / / Use access specifiers from the declarations , <nl> / / if possible . <nl> if ( auto * Decl = SILG - > getDecl ( ) ) { <nl>
|
GlobalOpt: Don't assume we see all accesses to a global if it is imported from another module.
|
apple/swift
|
a6aedecaca06819468dab0272882a19a55331ecd
|
2015-11-19T23:18:17Z
|
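The guard added above rests on a linkage property: a SILGlobalVariable whose linkage is available_externally is only a local copy of a global defined in another module, so the optimizer cannot see every store to it. A rough C++ analogy of the same situation (illustrative only, not compiler code):

```cpp
// The authoritative definition of `counter` lives in another translation
// unit, which may write to it at any time. An optimizer that sees only
// this file must not fold loads of `counter` to a constant - the same
// reasoning the GlobalOpt change applies to imported SIL globals.
extern int counter;

int readCounter() {
  return counter;  // must remain a real load
}
```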
mmm a / api / envoy / config / bootstrap / v3 / BUILD <nl> ppp b / api / envoy / config / bootstrap / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / trace / v3 : pkg " , <nl> " / / envoy / extensions / transport_sockets / tls / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / config / bootstrap / v3 / bootstrap . proto <nl> ppp b / api / envoy / config / bootstrap / v3 / bootstrap . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = ACTIVE ; <nl> / / < config_overview_bootstrap > ` for more detail . <nl> <nl> / / Bootstrap : ref : ` configuration overview < config_overview_bootstrap > ` . <nl> - / / [ # next - free - field : 22 ] <nl> + / / [ # next - free - field : 24 ] <nl> message Bootstrap { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . config . bootstrap . v2 . Bootstrap " ; <nl> message Bootstrap { <nl> repeated envoy . extensions . transport_sockets . tls . v3 . Secret secrets = 3 ; <nl> } <nl> <nl> + / / [ # next - free - field : 7 ] <nl> message DynamicResources { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . config . bootstrap . v2 . Bootstrap . DynamicResources " ; <nl> message Bootstrap { <nl> / / : ref : ` LDS < arch_overview_dynamic_config_lds > ` configuration source . <nl> core . v3 . ConfigSource lds_config = 1 ; <nl> <nl> + / / Resource locator for listener collection . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator lds_resources_locator = 5 ; <nl> + <nl> / / All post - bootstrap : ref : ` Cluster < envoy_api_msg_config . cluster . v3 . Cluster > ` definitions are <nl> / / provided by a single : ref : ` CDS < arch_overview_dynamic_config_cds > ` <nl> / / configuration source . <nl> core . v3 . ConfigSource cds_config = 2 ; <nl> <nl> + / / Resource locator for cluster collection . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator cds_resources_locator = 6 ; <nl> + <nl> / / A single : ref : ` ADS < config_overview_ads > ` source may be optionally <nl> / / specified . This must have : ref : ` api_type <nl> / / < envoy_api_field_config . core . v3 . ApiConfigSource . api_type > ` : ref : ` GRPC <nl> message Bootstrap { <nl> / / Specifies optional bootstrap extensions to be instantiated at startup time . <nl> / / Each item contains extension specific configuration . <nl> repeated core . v3 . TypedExtensionConfig bootstrap_extensions = 21 ; <nl> + <nl> + / / Configuration sources that will participate in <nl> + / / * udpa . core . v1 . ResourceLocator * authority resolution . The algorithm is as <nl> + / / follows : <nl> + / / 1 . The authority field is taken from the * udpa . core . v1 . ResourceLocator * , call <nl> + / / this * resource_authority * . <nl> + / / 2 . 
* resource_authority * is compared against the authorities in any peer <nl> + / / * ConfigSource * . The peer * ConfigSource * is the configuration source <nl> + / / message which would have been used unconditionally for resolution <nl> + / / with opaque resource names . If there is a match with an authority , the <nl> + / / peer * ConfigSource * message is used . <nl> + / / 3 . * resource_authority * is compared sequentially with the authorities in <nl> + / / each configuration source in * config_sources * . The first * ConfigSource * <nl> + / / to match wins . <nl> + / / 4 . As a fallback , if no configuration source matches , then <nl> + / / * default_config_source * is used . <nl> + / / 5 . If * default_config_source * is not specified , resolution fails . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated core . v3 . ConfigSource config_sources = 22 ; <nl> + <nl> + / / Default configuration source for * udpa . core . v1 . ResourceLocator * if all <nl> + / / other resolution fails . <nl> + / / [ # not - implemented - hide : ] <nl> + core . v3 . ConfigSource default_config_source = 23 ; <nl> } <nl> <nl> / / Administration interface : ref : ` operations documentation <nl> message RuntimeLayer { <nl> " envoy . config . bootstrap . v2 . RuntimeLayer . RtdsLayer " ; <nl> <nl> / / Resource to subscribe to at * rtds_config * for the RTDS layer . <nl> - string name = 1 ; <nl> + string name = 1 [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Resource locator for RTDS layer . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator rtds_resource_locator = 3 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> <nl> / / RTDS configuration source . <nl> core . v3 . ConfigSource rtds_config = 2 ; <nl> mmm a / api / envoy / config / bootstrap / v4alpha / BUILD <nl> ppp b / api / envoy / config / bootstrap / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / overload / v3 : pkg " , <nl> " / / envoy / extensions / transport_sockets / tls / v4alpha : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / config / bootstrap / v4alpha / bootstrap . proto <nl> ppp b / api / envoy / config / bootstrap / v4alpha / bootstrap . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = NEXT_MAJOR_VERSIO <nl> / / < config_overview_bootstrap > ` for more detail . <nl> <nl> / / Bootstrap : ref : ` configuration overview < config_overview_bootstrap > ` . <nl> - / / [ # next - free - field : 22 ] <nl> + / / [ # next - free - field : 24 ] <nl> message Bootstrap { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . config . bootstrap . v3 . Bootstrap " ; <nl> message Bootstrap { <nl> repeated envoy . extensions . transport_sockets . tls . v4alpha . 
Secret secrets = 3 ; <nl> } <nl> <nl> + / / [ # next - free - field : 7 ] <nl> message DynamicResources { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . config . bootstrap . v3 . Bootstrap . DynamicResources " ; <nl> message Bootstrap { <nl> / / : ref : ` LDS < arch_overview_dynamic_config_lds > ` configuration source . <nl> core . v4alpha . ConfigSource lds_config = 1 ; <nl> <nl> + / / Resource locator for listener collection . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator lds_resources_locator = 5 ; <nl> + <nl> / / All post - bootstrap : ref : ` Cluster < envoy_api_msg_config . cluster . v4alpha . Cluster > ` definitions are <nl> / / provided by a single : ref : ` CDS < arch_overview_dynamic_config_cds > ` <nl> / / configuration source . <nl> core . v4alpha . ConfigSource cds_config = 2 ; <nl> <nl> + / / Resource locator for cluster collection . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator cds_resources_locator = 6 ; <nl> + <nl> / / A single : ref : ` ADS < config_overview_ads > ` source may be optionally <nl> / / specified . This must have : ref : ` api_type <nl> / / < envoy_api_field_config . core . v4alpha . ApiConfigSource . api_type > ` : ref : ` GRPC <nl> message Bootstrap { <nl> / / Specifies optional bootstrap extensions to be instantiated at startup time . <nl> / / Each item contains extension specific configuration . <nl> repeated core . v4alpha . TypedExtensionConfig bootstrap_extensions = 21 ; <nl> + <nl> + / / Configuration sources that will participate in <nl> + / / * udpa . core . v1 . ResourceLocator * authority resolution . The algorithm is as <nl> + / / follows : <nl> + / / 1 . The authority field is taken from the * udpa . core . v1 . ResourceLocator * , call <nl> + / / this * resource_authority * . <nl> + / / 2 . * resource_authority * is compared against the authorities in any peer <nl> + / / * ConfigSource * . The peer * ConfigSource * is the configuration source <nl> + / / message which would have been used unconditionally for resolution <nl> + / / with opaque resource names . If there is a match with an authority , the <nl> + / / peer * ConfigSource * message is used . <nl> + / / 3 . * resource_authority * is compared sequentially with the authorities in <nl> + / / each configuration source in * config_sources * . The first * ConfigSource * <nl> + / / to match wins . <nl> + / / 4 . As a fallback , if no configuration source matches , then <nl> + / / * default_config_source * is used . <nl> + / / 5 . If * default_config_source * is not specified , resolution fails . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated core . v4alpha . ConfigSource config_sources = 22 ; <nl> + <nl> + / / Default configuration source for * udpa . core . v1 . ResourceLocator * if all <nl> + / / other resolution fails . <nl> + / / [ # not - implemented - hide : ] <nl> + core . v4alpha . ConfigSource default_config_source = 23 ; <nl> } <nl> <nl> / / Administration interface : ref : ` operations documentation <nl> message RuntimeLayer { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . config . bootstrap . v3 . RuntimeLayer . RtdsLayer " ; <nl> <nl> - / / Resource to subscribe to at * rtds_config * for the RTDS layer . <nl> - string name = 1 ; <nl> + oneof name_specifier { <nl> + / / Resource to subscribe to at * rtds_config * for the RTDS layer . 
<nl> + string name = 1 ; <nl> + <nl> + / / Resource locator for RTDS layer . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator rtds_resource_locator = 3 ; <nl> + } <nl> <nl> / / RTDS configuration source . <nl> core . v4alpha . ConfigSource rtds_config = 2 ; <nl> mmm a / api / envoy / config / cluster / v3 / BUILD <nl> ppp b / api / envoy / config / cluster / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / endpoint / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / config / cluster / v3 / cluster . proto <nl> ppp b / api / envoy / config / cluster / v3 / cluster . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / collection_entry . proto " ; <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = ACTIVE ; <nl> <nl> / / [ # protodoc - title : Cluster configuration ] <nl> <nl> + / / Cluster list collections . Entries are * Cluster * resources or references . <nl> + / / [ # not - implemented - hide : ] <nl> + message ClusterCollection { <nl> + udpa . core . v1 . CollectionEntry entries = 1 ; <nl> + } <nl> + <nl> / / Configuration for a single upstream cluster . <nl> / / [ # next - free - field : 49 ] <nl> message Cluster { <nl> message Cluster { <nl> / / Optional alternative to cluster name to present to EDS . This does not <nl> / / have the same restrictions as cluster name , i . e . it may be arbitrary <nl> / / length . <nl> - string service_name = 2 ; <nl> + string service_name = 2 [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Resource locator for EDS . This is mutually exclusive to * service_name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator eds_resource_locator = 3 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> } <nl> <nl> / / Optionally divide the endpoints in this cluster into subsets defined by <nl> mmm a / api / envoy / config / cluster / v4alpha / BUILD <nl> ppp b / api / envoy / config / cluster / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / endpoint / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / config / cluster / v4alpha / cluster . proto <nl> ppp b / api / envoy / config / cluster / v4alpha / cluster . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / collection_entry . proto " ; <nl> + import " udpa / core / v1 / resource_locator . 
proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = NEXT_MAJOR_VERSIO <nl> <nl> / / [ # protodoc - title : Cluster configuration ] <nl> <nl> + / / Cluster list collections . Entries are * Cluster * resources or references . <nl> + / / [ # not - implemented - hide : ] <nl> + message ClusterCollection { <nl> + option ( udpa . annotations . versioning ) . previous_message_type = <nl> + " envoy . config . cluster . v3 . ClusterCollection " ; <nl> + <nl> + udpa . core . v1 . CollectionEntry entries = 1 ; <nl> + } <nl> + <nl> / / Configuration for a single upstream cluster . <nl> / / [ # next - free - field : 49 ] <nl> message Cluster { <nl> message Cluster { <nl> / / Configuration for the source of EDS updates for this Cluster . <nl> core . v4alpha . ConfigSource eds_config = 1 ; <nl> <nl> - / / Optional alternative to cluster name to present to EDS . This does not <nl> - / / have the same restrictions as cluster name , i . e . it may be arbitrary <nl> - / / length . <nl> - string service_name = 2 ; <nl> + oneof name_specifier { <nl> + / / Optional alternative to cluster name to present to EDS . This does not <nl> + / / have the same restrictions as cluster name , i . e . it may be arbitrary <nl> + / / length . <nl> + string service_name = 2 ; <nl> + <nl> + / / Resource locator for EDS . This is mutually exclusive to * service_name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator eds_resource_locator = 3 ; <nl> + } <nl> } <nl> <nl> / / Optionally divide the endpoints in this cluster into subsets defined by <nl> mmm a / api / envoy / config / core / v3 / BUILD <nl> ppp b / api / envoy / config / core / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / type / matcher / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / config / core / v3 / config_source . proto <nl> ppp b / api / envoy / config / core / v3 / config_source . proto <nl> import " envoy / config / core / v3 / grpc_service . proto " ; <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / authority . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> message ApiConfigSource { <nl> / / the v2 protos is used . <nl> REST = 1 ; <nl> <nl> - / / gRPC v2 API . <nl> + / / SotW gRPC service . <nl> GRPC = 2 ; <nl> <nl> / / Using the delta xDS gRPC service , i . e . DeltaDiscovery { Request , Response } <nl> / / rather than Discovery { Request , Response } . Rather than sending Envoy the entire state <nl> / / with every update , the xDS server only sends what has changed since the last update . <nl> DELTA_GRPC = 3 ; <nl> + <nl> + / / SotW xDS gRPC with ADS . All resources which resolve to this configuration source will be <nl> + / / multiplexed on a single connection to an ADS endpoint . <nl> + / / [ # not - implemented - hide : ] <nl> + AGGREGATED_GRPC = 5 ; <nl> + <nl> + / / Delta xDS gRPC with ADS . 
All resources which resolve to this configuration source will be <nl> + / / multiplexed on a single connection to an ADS endpoint . <nl> + / / [ # not - implemented - hide : ] <nl> + AGGREGATED_DELTA_GRPC = 6 ; <nl> } <nl> <nl> / / API type ( gRPC , REST , delta gRPC ) <nl> message RateLimitSettings { <nl> / / < arch_overview_service_discovery > ` etc . may either be sourced from the <nl> / / filesystem or from an xDS API source . Filesystem configs are watched with <nl> / / inotify for updates . <nl> - / / [ # next - free - field : 7 ] <nl> + / / [ # next - free - field : 8 ] <nl> message ConfigSource { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . api . v2 . core . ConfigSource " ; <nl> <nl> + / / Authorities that this config source may be used for . An authority specified <nl> + / / in a * udpa . core . v1 . ResourceLocator * is resolved to a * ConfigSource * prior <nl> + / / to configuration fetch . This field provides the association between <nl> + / / authority name and configuration source . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated udpa . core . v1 . Authority authorities = 7 ; <nl> + <nl> oneof config_source_specifier { <nl> option ( validate . required ) = true ; <nl> <nl> mmm a / api / envoy / config / core / v4alpha / BUILD <nl> ppp b / api / envoy / config / core / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / type / matcher / v4alpha : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / config / core / v4alpha / config_source . proto <nl> ppp b / api / envoy / config / core / v4alpha / config_source . proto <nl> import " envoy / config / core / v4alpha / grpc_service . proto " ; <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / authority . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> message ApiConfigSource { <nl> / / the v2 protos is used . <nl> REST = 1 ; <nl> <nl> - / / gRPC v2 API . <nl> + / / SotW gRPC service . <nl> GRPC = 2 ; <nl> <nl> / / Using the delta xDS gRPC service , i . e . DeltaDiscovery { Request , Response } <nl> / / rather than Discovery { Request , Response } . Rather than sending Envoy the entire state <nl> / / with every update , the xDS server only sends what has changed since the last update . <nl> DELTA_GRPC = 3 ; <nl> + <nl> + / / SotW xDS gRPC with ADS . All resources which resolve to this configuration source will be <nl> + / / multiplexed on a single connection to an ADS endpoint . <nl> + / / [ # not - implemented - hide : ] <nl> + AGGREGATED_GRPC = 5 ; <nl> + <nl> + / / Delta xDS gRPC with ADS . All resources which resolve to this configuration source will be <nl> + / / multiplexed on a single connection to an ADS endpoint . <nl> + / / [ # not - implemented - hide : ] <nl> + AGGREGATED_DELTA_GRPC = 6 ; <nl> } <nl> <nl> / / API type ( gRPC , REST , delta gRPC ) <nl> message RateLimitSettings { <nl> / / < arch_overview_service_discovery > ` etc . may either be sourced from the <nl> / / filesystem or from an xDS API source . Filesystem configs are watched with <nl> / / inotify for updates . 
<nl> - / / [ # next - free - field : 7 ] <nl> + / / [ # next - free - field : 8 ] <nl> message ConfigSource { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . config . core . v3 . ConfigSource " ; <nl> <nl> + / / Authorities that this config source may be used for . An authority specified <nl> + / / in a * udpa . core . v1 . ResourceLocator * is resolved to a * ConfigSource * prior <nl> + / / to configuration fetch . This field provides the association between <nl> + / / authority name and configuration source . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated udpa . core . v1 . Authority authorities = 7 ; <nl> + <nl> oneof config_source_specifier { <nl> option ( validate . required ) = true ; <nl> <nl> mmm a / api / envoy / config / listener / v3 / BUILD <nl> ppp b / api / envoy / config / listener / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / listener / v2 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / config / listener / v3 / listener . proto <nl> ppp b / api / envoy / config / listener / v3 / listener . proto <nl> import " google / api / annotations . proto " ; <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / collection_entry . proto " ; <nl> + <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = ACTIVE ; <nl> / / [ # protodoc - title : Listener configuration ] <nl> / / Listener : ref : ` configuration overview < config_listeners > ` <nl> <nl> + / / Listener list collections . Entries are * Listener * resources or references . <nl> + / / [ # not - implemented - hide : ] <nl> + message ListenerCollection { <nl> + udpa . core . v1 . CollectionEntry entries = 1 ; <nl> + } <nl> + <nl> / / [ # next - free - field : 23 ] <nl> message Listener { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . api . v2 . Listener " ; <nl> mmm a / api / envoy / config / listener / v4alpha / BUILD <nl> ppp b / api / envoy / config / listener / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / listener / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / config / listener / v4alpha / listener . proto <nl> ppp b / api / envoy / config / listener / v4alpha / listener . proto <nl> import " google / api / annotations . proto " ; <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / collection_entry . proto " ; <nl> + <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = NEXT_MAJOR_VERSIO <nl> / / [ # protodoc - title : Listener configuration ] <nl> / / Listener : ref : ` configuration overview < config_listeners > ` <nl> <nl> + / / Listener list collections . 
Entries are * Listener * resources or references . <nl> + / / [ # not - implemented - hide : ] <nl> + message ListenerCollection { <nl> + option ( udpa . annotations . versioning ) . previous_message_type = <nl> + " envoy . config . listener . v3 . ListenerCollection " ; <nl> + <nl> + udpa . core . v1 . CollectionEntry entries = 1 ; <nl> + } <nl> + <nl> / / [ # next - free - field : 23 ] <nl> message Listener { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . config . listener . v3 . Listener " ; <nl> mmm a / api / envoy / extensions / common / tap / v3 / BUILD <nl> ppp b / api / envoy / extensions / common / tap / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / core / v3 : pkg " , <nl> " / / envoy / config / tap / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / extensions / common / tap / v3 / common . proto <nl> ppp b / api / envoy / extensions / common / tap / v3 / common . proto <nl> package envoy . extensions . common . tap . v3 ; <nl> import " envoy / config / core / v3 / config_source . proto " ; <nl> import " envoy / config / tap / v3 / common . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> import " validate / validate . proto " ; <nl> message CommonExtensionConfig { <nl> config . core . v3 . ConfigSource config_source = 1 [ ( validate . rules ) . message = { required : true } ] ; <nl> <nl> / / Tap config to request from XDS server . <nl> - string name = 2 [ ( validate . rules ) . string = { min_bytes : 1 } ] ; <nl> + string name = 2 [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Resource locator for TAP . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator tap_resource_locator = 3 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> } <nl> <nl> oneof config_type { <nl> mmm a / api / envoy / extensions / common / tap / v4alpha / BUILD <nl> ppp b / api / envoy / extensions / common / tap / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / tap / v4alpha : pkg " , <nl> " / / envoy / extensions / common / tap / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / extensions / common / tap / v4alpha / common . proto <nl> ppp b / api / envoy / extensions / common / tap / v4alpha / common . proto <nl> package envoy . extensions . common . tap . v4alpha ; <nl> import " envoy / config / core / v4alpha / config_source . proto " ; <nl> import " envoy / config / tap / v4alpha / common . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> import " validate / validate . proto " ; <nl> message CommonExtensionConfig { <nl> config . core . v4alpha . ConfigSource config_source = 1 <nl> [ ( validate . rules ) . message = { required : true } ] ; <nl> <nl> - / / Tap config to request from XDS server . <nl> - string name = 2 [ ( validate . rules ) . 
string = { min_bytes : 1 } ] ; <nl> + oneof name_specifier { <nl> + / / Tap config to request from XDS server . <nl> + string name = 2 ; <nl> + <nl> + / / Resource locator for TAP . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator tap_resource_locator = 3 ; <nl> + } <nl> } <nl> <nl> oneof config_type { <nl> mmm a / api / envoy / extensions / filters / network / http_connection_manager / v3 / BUILD <nl> ppp b / api / envoy / extensions / filters / network / http_connection_manager / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / type / tracing / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / extensions / filters / network / http_connection_manager / v3 / http_connection_manager . proto <nl> ppp b / api / envoy / extensions / filters / network / http_connection_manager / v3 / http_connection_manager . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> message Rds { <nl> / / API . This allows an Envoy configuration with multiple HTTP listeners ( and <nl> / / associated HTTP connection manager filters ) to use different route <nl> / / configurations . <nl> - string route_config_name = 2 [ ( validate . rules ) . string = { min_bytes : 1 } ] ; <nl> + string route_config_name = 2 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Resource locator for RDS . This is mutually exclusive to * route_config_name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator rds_resource_locator = 3 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> } <nl> <nl> / / This message is used to work around the limitations with ' oneof ' and repeated fields . <nl> mmm a / api / envoy / extensions / filters / network / http_connection_manager / v4alpha / BUILD <nl> ppp b / api / envoy / extensions / filters / network / http_connection_manager / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / type / tracing / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / extensions / filters / network / http_connection_manager / v4alpha / http_connection_manager . proto <nl> ppp b / api / envoy / extensions / filters / network / http_connection_manager / v4alpha / http_connection_manager . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . 
proto " ; <nl> message Rds { <nl> / / Configuration source specifier for RDS . <nl> config . core . v4alpha . ConfigSource config_source = 1 [ ( validate . rules ) . message = { required : true } ] ; <nl> <nl> - / / The name of the route configuration . This name will be passed to the RDS <nl> - / / API . This allows an Envoy configuration with multiple HTTP listeners ( and <nl> - / / associated HTTP connection manager filters ) to use different route <nl> - / / configurations . <nl> - string route_config_name = 2 [ ( validate . rules ) . string = { min_bytes : 1 } ] ; <nl> + oneof name_specifier { <nl> + / / The name of the route configuration . This name will be passed to the RDS <nl> + / / API . This allows an Envoy configuration with multiple HTTP listeners ( and <nl> + / / associated HTTP connection manager filters ) to use different route <nl> + / / configurations . <nl> + string route_config_name = 2 ; <nl> + <nl> + / / Resource locator for RDS . This is mutually exclusive to * route_config_name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator rds_resource_locator = 3 ; <nl> + } <nl> } <nl> <nl> / / This message is used to work around the limitations with ' oneof ' and repeated fields . <nl> mmm a / api / envoy / extensions / transport_sockets / tls / v3 / BUILD <nl> ppp b / api / envoy / extensions / transport_sockets / tls / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / core / v3 : pkg " , <nl> " / / envoy / type / matcher / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / extensions / transport_sockets / tls / v3 / secret . proto <nl> ppp b / api / envoy / extensions / transport_sockets / tls / v3 / secret . proto <nl> import " envoy / config / core / v3 / base . proto " ; <nl> import " envoy / config / core / v3 / config_source . proto " ; <nl> import " envoy / extensions / transport_sockets / tls / v3 / common . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / sensitive . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> message SdsSecretConfig { <nl> / / Name ( FQDN , UUID , SPKI , SHA256 , etc . ) by which the secret can be uniquely referred to . <nl> / / When both name and config are specified , then secret can be fetched and / or reloaded via <nl> / / SDS . When only name is specified , then secret will be loaded from static resources . <nl> - string name = 1 ; <nl> + string name = 1 [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Resource locator for SDS . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator sds_resource_locator = 3 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> <nl> config . core . v3 . 
ConfigSource sds_config = 2 ; <nl> } <nl> mmm a / api / envoy / extensions / transport_sockets / tls / v4alpha / BUILD <nl> ppp b / api / envoy / extensions / transport_sockets / tls / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / extensions / transport_sockets / tls / v3 : pkg " , <nl> " / / envoy / type / matcher / v4alpha : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / extensions / transport_sockets / tls / v4alpha / secret . proto <nl> ppp b / api / envoy / extensions / transport_sockets / tls / v4alpha / secret . proto <nl> import " envoy / config / core / v4alpha / base . proto " ; <nl> import " envoy / config / core / v4alpha / config_source . proto " ; <nl> import " envoy / extensions / transport_sockets / tls / v4alpha / common . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " udpa / annotations / sensitive . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> message SdsSecretConfig { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . extensions . transport_sockets . tls . v3 . SdsSecretConfig " ; <nl> <nl> - / / Name ( FQDN , UUID , SPKI , SHA256 , etc . ) by which the secret can be uniquely referred to . <nl> - / / When both name and config are specified , then secret can be fetched and / or reloaded via <nl> - / / SDS . When only name is specified , then secret will be loaded from static resources . <nl> - string name = 1 ; <nl> + oneof name_specifier { <nl> + / / Name ( FQDN , UUID , SPKI , SHA256 , etc . ) by which the secret can be uniquely referred to . <nl> + / / When both name and config are specified , then secret can be fetched and / or reloaded via <nl> + / / SDS . When only name is specified , then secret will be loaded from static resources . <nl> + string name = 1 ; <nl> + <nl> + / / Resource locator for SDS . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator sds_resource_locator = 3 ; <nl> + } <nl> <nl> config . core . v4alpha . ConfigSource sds_config = 2 ; <nl> } <nl> mmm a / api / envoy / service / discovery / v3 / BUILD <nl> ppp b / api / envoy / service / discovery / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / core / v3 : pkg " , <nl> " / / envoy / service / discovery / v2 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / api / envoy / service / discovery / v3 / discovery . proto <nl> ppp b / api / envoy / service / discovery / v3 / discovery . proto <nl> import " envoy / config / core / v3 / base . proto " ; <nl> import " google / protobuf / any . proto " ; <nl> import " google / rpc / status . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + import " udpa / core / v1 / resource_name . proto " ; <nl> + <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . 
proto " ; <nl> <nl> message DiscoveryResponse { <nl> / / In particular , initial_resource_versions being sent at the " start " of every <nl> / / gRPC stream actually entails a message for each type_url , each with its own <nl> / / initial_resource_versions . <nl> - / / [ # next - free - field : 8 ] <nl> + / / [ # next - free - field : 10 ] <nl> message DeltaDiscoveryRequest { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . api . v2 . DeltaDiscoveryRequest " ; <nl> <nl> message DeltaDiscoveryRequest { <nl> config . core . v3 . Node node = 1 ; <nl> <nl> / / Type of the resource that is being requested , e . g . <nl> - / / " type . googleapis . com / envoy . api . v2 . ClusterLoadAssignment " . <nl> + / / " type . googleapis . com / envoy . api . v2 . ClusterLoadAssignment " . This does not need to be set if <nl> + / / resources are only referenced via * udpa_resource_subscribe * and <nl> + / / * udpa_resources_unsubscribe * . <nl> string type_url = 2 ; <nl> <nl> / / DeltaDiscoveryRequests allow the client to add or remove individual <nl> message DeltaDiscoveryRequest { <nl> / / A list of Resource names to add to the list of tracked resources . <nl> repeated string resource_names_subscribe = 3 ; <nl> <nl> + / / As with * resource_names_subscribe * but used when subscribing to resources indicated <nl> + / / by a * udpa . core . v1 . ResourceLocator * . The directives in the resource locator <nl> + / / are ignored and the context parameters are matched with <nl> + / / * context_param_specifier * specific semantics . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated udpa . core . v1 . ResourceLocator udpa_resources_subscribe = 8 ; <nl> + <nl> / / A list of Resource names to remove from the list of tracked resources . <nl> repeated string resource_names_unsubscribe = 4 ; <nl> <nl> + / / As with * resource_names_unsubscribe * but used when unsubscribing to resources indicated by a <nl> + / / * udpa . core . v1 . ResourceLocator * . This must match a previously subscribed <nl> + / / resource locator provided in * udpa_resources_subscribe * . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated udpa . core . v1 . ResourceLocator udpa_resources_unsubscribe = 9 ; <nl> + <nl> / / Informs the server of the versions of the resources the xDS client knows of , to enable the <nl> / / client to continue the same logical xDS session even in the face of gRPC stream reconnection . <nl> / / It will not be populated : [ 1 ] in the very first stream of a session , since the client will <nl> message DeltaDiscoveryRequest { <nl> google . rpc . Status error_detail = 7 ; <nl> } <nl> <nl> - / / [ # next - free - field : 7 ] <nl> + / / [ # next - free - field : 8 ] <nl> message DeltaDiscoveryResponse { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . api . v2 . DeltaDiscoveryResponse " ; <nl> message DeltaDiscoveryResponse { <nl> <nl> / / Type URL for resources . Identifies the xDS API when muxing over ADS . <nl> / / Must be consistent with the type_url in the Any within ' resources ' if ' resources ' is non - empty . <nl> + / / This does not need to be set if * udpa_removed_resources * is used instead of <nl> + / / * removed_resources * . <nl> string type_url = 4 ; <nl> <nl> / / Resources names of resources that have be deleted and to be removed from the xDS Client . <nl> / / Removed resources for missing resources can be ignored . 
<nl> repeated string removed_resources = 6 ; <nl> <nl> + / / As with * removed_resources * but used when a removed resource was named in <nl> + / / its * Resource * s with a * udpa . core . v1 . ResourceName * . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated udpa . core . v1 . ResourceName udpa_removed_resources = 7 ; <nl> + <nl> / / The nonce provides a way for DeltaDiscoveryRequests to uniquely <nl> / / reference a DeltaDiscoveryResponse when ( N ) ACKing . The nonce is required . <nl> string nonce = 5 ; <nl> } <nl> <nl> + / / [ # next - free - field : 6 ] <nl> message Resource { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . api . v2 . Resource " ; <nl> <nl> / / The resource ' s name , to distinguish it from others of the same type of resource . <nl> - string name = 3 ; <nl> + string name = 3 [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Used instead of * name * when a resource with a * udpa . core . v1 . ResourceName * is delivered . <nl> + udpa . core . v1 . ResourceName udpa_resource_name = 5 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> <nl> / / The aliases are a list of other names that this resource can go by . <nl> repeated string aliases = 4 ; <nl> mmm a / generated_api_shadow / envoy / config / bootstrap / v3 / BUILD <nl> ppp b / generated_api_shadow / envoy / config / bootstrap / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / trace / v3 : pkg " , <nl> " / / envoy / extensions / transport_sockets / tls / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / config / bootstrap / v3 / bootstrap . proto <nl> ppp b / generated_api_shadow / envoy / config / bootstrap / v3 / bootstrap . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = ACTIVE ; <nl> / / < config_overview_bootstrap > ` for more detail . <nl> <nl> / / Bootstrap : ref : ` configuration overview < config_overview_bootstrap > ` . <nl> - / / [ # next - free - field : 22 ] <nl> + / / [ # next - free - field : 24 ] <nl> message Bootstrap { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . config . bootstrap . v2 . Bootstrap " ; <nl> message Bootstrap { <nl> repeated envoy . extensions . transport_sockets . tls . v3 . Secret secrets = 3 ; <nl> } <nl> <nl> + / / [ # next - free - field : 7 ] <nl> message DynamicResources { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . config . bootstrap . v2 . Bootstrap . DynamicResources " ; <nl> message Bootstrap { <nl> / / : ref : ` LDS < arch_overview_dynamic_config_lds > ` configuration source . <nl> core . v3 . ConfigSource lds_config = 1 ; <nl> <nl> + / / Resource locator for listener collection . 
<nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator lds_resources_locator = 5 ; <nl> + <nl> / / All post - bootstrap : ref : ` Cluster < envoy_api_msg_config . cluster . v3 . Cluster > ` definitions are <nl> / / provided by a single : ref : ` CDS < arch_overview_dynamic_config_cds > ` <nl> / / configuration source . <nl> core . v3 . ConfigSource cds_config = 2 ; <nl> <nl> + / / Resource locator for cluster collection . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator cds_resources_locator = 6 ; <nl> + <nl> / / A single : ref : ` ADS < config_overview_ads > ` source may be optionally <nl> / / specified . This must have : ref : ` api_type <nl> / / < envoy_api_field_config . core . v3 . ApiConfigSource . api_type > ` : ref : ` GRPC <nl> message Bootstrap { <nl> / / Each item contains extension specific configuration . <nl> repeated core . v3 . TypedExtensionConfig bootstrap_extensions = 21 ; <nl> <nl> + / / Configuration sources that will participate in <nl> + / / * udpa . core . v1 . ResourceLocator * authority resolution . The algorithm is as <nl> + / / follows : <nl> + / / 1 . The authority field is taken from the * udpa . core . v1 . ResourceLocator * , call <nl> + / / this * resource_authority * . <nl> + / / 2 . * resource_authority * is compared against the authorities in any peer <nl> + / / * ConfigSource * . The peer * ConfigSource * is the configuration source <nl> + / / message which would have been used unconditionally for resolution <nl> + / / with opaque resource names . If there is a match with an authority , the <nl> + / / peer * ConfigSource * message is used . <nl> + / / 3 . * resource_authority * is compared sequentially with the authorities in <nl> + / / each configuration source in * config_sources * . The first * ConfigSource * <nl> + / / to match wins . <nl> + / / 4 . As a fallback , if no configuration source matches , then <nl> + / / * default_config_source * is used . <nl> + / / 5 . If * default_config_source * is not specified , resolution fails . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated core . v3 . ConfigSource config_sources = 22 ; <nl> + <nl> + / / Default configuration source for * udpa . core . v1 . ResourceLocator * if all <nl> + / / other resolution fails . <nl> + / / [ # not - implemented - hide : ] <nl> + core . v3 . ConfigSource default_config_source = 23 ; <nl> + <nl> Runtime hidden_envoy_deprecated_runtime = 11 <nl> [ deprecated = true , ( envoy . annotations . disallowed_by_default ) = true ] ; <nl> } <nl> message RuntimeLayer { <nl> " envoy . config . bootstrap . v2 . RuntimeLayer . RtdsLayer " ; <nl> <nl> / / Resource to subscribe to at * rtds_config * for the RTDS layer . <nl> - string name = 1 ; <nl> + string name = 1 [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Resource locator for RTDS layer . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator rtds_resource_locator = 3 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> <nl> / / RTDS configuration source . <nl> core . v3 . 
ConfigSource rtds_config = 2 ; <nl> mmm a / generated_api_shadow / envoy / config / bootstrap / v4alpha / BUILD <nl> ppp b / generated_api_shadow / envoy / config / bootstrap / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / trace / v4alpha : pkg " , <nl> " / / envoy / extensions / transport_sockets / tls / v4alpha : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / config / bootstrap / v4alpha / bootstrap . proto <nl> ppp b / generated_api_shadow / envoy / config / bootstrap / v4alpha / bootstrap . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = NEXT_MAJOR_VERSIO <nl> / / < config_overview_bootstrap > ` for more detail . <nl> <nl> / / Bootstrap : ref : ` configuration overview < config_overview_bootstrap > ` . <nl> - / / [ # next - free - field : 22 ] <nl> + / / [ # next - free - field : 24 ] <nl> message Bootstrap { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . config . bootstrap . v3 . Bootstrap " ; <nl> message Bootstrap { <nl> repeated envoy . extensions . transport_sockets . tls . v4alpha . Secret secrets = 3 ; <nl> } <nl> <nl> + / / [ # next - free - field : 7 ] <nl> message DynamicResources { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . config . bootstrap . v3 . Bootstrap . DynamicResources " ; <nl> message Bootstrap { <nl> / / : ref : ` LDS < arch_overview_dynamic_config_lds > ` configuration source . <nl> core . v4alpha . ConfigSource lds_config = 1 ; <nl> <nl> + / / Resource locator for listener collection . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator lds_resources_locator = 5 ; <nl> + <nl> / / All post - bootstrap : ref : ` Cluster < envoy_api_msg_config . cluster . v4alpha . Cluster > ` definitions are <nl> / / provided by a single : ref : ` CDS < arch_overview_dynamic_config_cds > ` <nl> / / configuration source . <nl> core . v4alpha . ConfigSource cds_config = 2 ; <nl> <nl> + / / Resource locator for cluster collection . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator cds_resources_locator = 6 ; <nl> + <nl> / / A single : ref : ` ADS < config_overview_ads > ` source may be optionally <nl> / / specified . This must have : ref : ` api_type <nl> / / < envoy_api_field_config . core . v4alpha . ApiConfigSource . api_type > ` : ref : ` GRPC <nl> message Bootstrap { <nl> / / Specifies optional bootstrap extensions to be instantiated at startup time . <nl> / / Each item contains extension specific configuration . <nl> repeated core . v4alpha . TypedExtensionConfig bootstrap_extensions = 21 ; <nl> + <nl> + / / Configuration sources that will participate in <nl> + / / * udpa . core . v1 . ResourceLocator * authority resolution . The algorithm is as <nl> + / / follows : <nl> + / / 1 . The authority field is taken from the * udpa . core . v1 . ResourceLocator * , call <nl> + / / this * resource_authority * . <nl> + / / 2 . 
* resource_authority * is compared against the authorities in any peer <nl> + / / * ConfigSource * . The peer * ConfigSource * is the configuration source <nl> + / / message which would have been used unconditionally for resolution <nl> + / / with opaque resource names . If there is a match with an authority , the <nl> + / / peer * ConfigSource * message is used . <nl> + / / 3 . * resource_authority * is compared sequentially with the authorities in <nl> + / / each configuration source in * config_sources * . The first * ConfigSource * <nl> + / / to match wins . <nl> + / / 4 . As a fallback , if no configuration source matches , then <nl> + / / * default_config_source * is used . <nl> + / / 5 . If * default_config_source * is not specified , resolution fails . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated core . v4alpha . ConfigSource config_sources = 22 ; <nl> + <nl> + / / Default configuration source for * udpa . core . v1 . ResourceLocator * if all <nl> + / / other resolution fails . <nl> + / / [ # not - implemented - hide : ] <nl> + core . v4alpha . ConfigSource default_config_source = 23 ; <nl> } <nl> <nl> / / Administration interface : ref : ` operations documentation <nl> message RuntimeLayer { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . config . bootstrap . v3 . RuntimeLayer . RtdsLayer " ; <nl> <nl> - / / Resource to subscribe to at * rtds_config * for the RTDS layer . <nl> - string name = 1 ; <nl> + oneof name_specifier { <nl> + / / Resource to subscribe to at * rtds_config * for the RTDS layer . <nl> + string name = 1 ; <nl> + <nl> + / / Resource locator for RTDS layer . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator rtds_resource_locator = 3 ; <nl> + } <nl> <nl> / / RTDS configuration source . <nl> core . v4alpha . ConfigSource rtds_config = 2 ; <nl> mmm a / generated_api_shadow / envoy / config / cluster / v3 / BUILD <nl> ppp b / generated_api_shadow / envoy / config / cluster / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / extensions / transport_sockets / tls / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / config / cluster / v3 / cluster . proto <nl> ppp b / generated_api_shadow / envoy / config / cluster / v3 / cluster . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / collection_entry . proto " ; <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = ACTIVE ; <nl> <nl> / / [ # protodoc - title : Cluster configuration ] <nl> <nl> + / / Cluster list collections . Entries are * Cluster * resources or references . <nl> + / / [ # not - implemented - hide : ] <nl> + message ClusterCollection { <nl> + udpa . core . v1 . CollectionEntry entries = 1 ; <nl> + } <nl> + <nl> / / Configuration for a single upstream cluster . 
<nl> / / [ # next - free - field : 49 ] <nl> message Cluster { <nl> message Cluster { <nl> / / Optional alternative to cluster name to present to EDS . This does not <nl> / / have the same restrictions as cluster name , i . e . it may be arbitrary <nl> / / length . <nl> - string service_name = 2 ; <nl> + string service_name = 2 [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Resource locator for EDS . This is mutually exclusive to * service_name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator eds_resource_locator = 3 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> } <nl> <nl> / / Optionally divide the endpoints in this cluster into subsets defined by <nl> mmm a / generated_api_shadow / envoy / config / cluster / v4alpha / BUILD <nl> ppp b / generated_api_shadow / envoy / config / cluster / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / endpoint / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / config / cluster / v4alpha / cluster . proto <nl> ppp b / generated_api_shadow / envoy / config / cluster / v4alpha / cluster . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / collection_entry . proto " ; <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = NEXT_MAJOR_VERSIO <nl> <nl> / / [ # protodoc - title : Cluster configuration ] <nl> <nl> + / / Cluster list collections . Entries are * Cluster * resources or references . <nl> + / / [ # not - implemented - hide : ] <nl> + message ClusterCollection { <nl> + option ( udpa . annotations . versioning ) . previous_message_type = <nl> + " envoy . config . cluster . v3 . ClusterCollection " ; <nl> + <nl> + udpa . core . v1 . CollectionEntry entries = 1 ; <nl> + } <nl> + <nl> / / Configuration for a single upstream cluster . <nl> / / [ # next - free - field : 49 ] <nl> message Cluster { <nl> message Cluster { <nl> / / Configuration for the source of EDS updates for this Cluster . <nl> core . v4alpha . ConfigSource eds_config = 1 ; <nl> <nl> - / / Optional alternative to cluster name to present to EDS . This does not <nl> - / / have the same restrictions as cluster name , i . e . it may be arbitrary <nl> - / / length . <nl> - string service_name = 2 ; <nl> + oneof name_specifier { <nl> + / / Optional alternative to cluster name to present to EDS . This does not <nl> + / / have the same restrictions as cluster name , i . e . it may be arbitrary <nl> + / / length . <nl> + string service_name = 2 ; <nl> + <nl> + / / Resource locator for EDS . This is mutually exclusive to * service_name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . 
ResourceLocator eds_resource_locator = 3 ; <nl> + } <nl> } <nl> <nl> / / Optionally divide the endpoints in this cluster into subsets defined by <nl> mmm a / generated_api_shadow / envoy / config / core / v3 / BUILD <nl> ppp b / generated_api_shadow / envoy / config / core / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / type / matcher / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / config / core / v3 / config_source . proto <nl> ppp b / generated_api_shadow / envoy / config / core / v3 / config_source . proto <nl> import " envoy / config / core / v3 / grpc_service . proto " ; <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / authority . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> message ApiConfigSource { <nl> / / the v2 protos is used . <nl> REST = 1 ; <nl> <nl> - / / gRPC v2 API . <nl> + / / SotW gRPC service . <nl> GRPC = 2 ; <nl> <nl> / / Using the delta xDS gRPC service , i . e . DeltaDiscovery { Request , Response } <nl> / / rather than Discovery { Request , Response } . Rather than sending Envoy the entire state <nl> / / with every update , the xDS server only sends what has changed since the last update . <nl> DELTA_GRPC = 3 ; <nl> + <nl> + / / SotW xDS gRPC with ADS . All resources which resolve to this configuration source will be <nl> + / / multiplexed on a single connection to an ADS endpoint . <nl> + / / [ # not - implemented - hide : ] <nl> + AGGREGATED_GRPC = 5 ; <nl> + <nl> + / / Delta xDS gRPC with ADS . All resources which resolve to this configuration source will be <nl> + / / multiplexed on a single connection to an ADS endpoint . <nl> + / / [ # not - implemented - hide : ] <nl> + AGGREGATED_DELTA_GRPC = 6 ; <nl> } <nl> <nl> / / API type ( gRPC , REST , delta gRPC ) <nl> message RateLimitSettings { <nl> / / < arch_overview_service_discovery > ` etc . may either be sourced from the <nl> / / filesystem or from an xDS API source . Filesystem configs are watched with <nl> / / inotify for updates . <nl> - / / [ # next - free - field : 7 ] <nl> + / / [ # next - free - field : 8 ] <nl> message ConfigSource { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . api . v2 . core . ConfigSource " ; <nl> <nl> + / / Authorities that this config source may be used for . An authority specified <nl> + / / in a * udpa . core . v1 . ResourceLocator * is resolved to a * ConfigSource * prior <nl> + / / to configuration fetch . This field provides the association between <nl> + / / authority name and configuration source . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated udpa . core . v1 . Authority authorities = 7 ; <nl> + <nl> oneof config_source_specifier { <nl> option ( validate . 
required ) = true ; <nl> <nl> mmm a / generated_api_shadow / envoy / config / core / v4alpha / BUILD <nl> ppp b / generated_api_shadow / envoy / config / core / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / type / matcher / v4alpha : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / config / core / v4alpha / config_source . proto <nl> ppp b / generated_api_shadow / envoy / config / core / v4alpha / config_source . proto <nl> import " envoy / config / core / v4alpha / grpc_service . proto " ; <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / authority . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> message ApiConfigSource { <nl> / / the v2 protos is used . <nl> REST = 1 ; <nl> <nl> - / / gRPC v2 API . <nl> + / / SotW gRPC service . <nl> GRPC = 2 ; <nl> <nl> / / Using the delta xDS gRPC service , i . e . DeltaDiscovery { Request , Response } <nl> / / rather than Discovery { Request , Response } . Rather than sending Envoy the entire state <nl> / / with every update , the xDS server only sends what has changed since the last update . <nl> DELTA_GRPC = 3 ; <nl> + <nl> + / / SotW xDS gRPC with ADS . All resources which resolve to this configuration source will be <nl> + / / multiplexed on a single connection to an ADS endpoint . <nl> + / / [ # not - implemented - hide : ] <nl> + AGGREGATED_GRPC = 5 ; <nl> + <nl> + / / Delta xDS gRPC with ADS . All resources which resolve to this configuration source will be <nl> + / / multiplexed on a single connection to an ADS endpoint . <nl> + / / [ # not - implemented - hide : ] <nl> + AGGREGATED_DELTA_GRPC = 6 ; <nl> } <nl> <nl> / / API type ( gRPC , REST , delta gRPC ) <nl> message RateLimitSettings { <nl> / / < arch_overview_service_discovery > ` etc . may either be sourced from the <nl> / / filesystem or from an xDS API source . Filesystem configs are watched with <nl> / / inotify for updates . <nl> - / / [ # next - free - field : 7 ] <nl> + / / [ # next - free - field : 8 ] <nl> message ConfigSource { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . config . core . v3 . ConfigSource " ; <nl> <nl> + / / Authorities that this config source may be used for . An authority specified <nl> + / / in a * udpa . core . v1 . ResourceLocator * is resolved to a * ConfigSource * prior <nl> + / / to configuration fetch . This field provides the association between <nl> + / / authority name and configuration source . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated udpa . core . v1 . Authority authorities = 7 ; <nl> + <nl> oneof config_source_specifier { <nl> option ( validate . 
required ) = true ; <nl> <nl> mmm a / generated_api_shadow / envoy / config / listener / v3 / BUILD <nl> ppp b / generated_api_shadow / envoy / config / listener / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / extensions / transport_sockets / tls / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / config / listener / v3 / listener . proto <nl> ppp b / generated_api_shadow / envoy / config / listener / v3 / listener . proto <nl> import " google / api / annotations . proto " ; <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / collection_entry . proto " ; <nl> + <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = ACTIVE ; <nl> / / [ # protodoc - title : Listener configuration ] <nl> / / Listener : ref : ` configuration overview < config_listeners > ` <nl> <nl> + / / Listener list collections . Entries are * Listener * resources or references . <nl> + / / [ # not - implemented - hide : ] <nl> + message ListenerCollection { <nl> + udpa . core . v1 . CollectionEntry entries = 1 ; <nl> + } <nl> + <nl> / / [ # next - free - field : 23 ] <nl> message Listener { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . api . v2 . Listener " ; <nl> mmm a / generated_api_shadow / envoy / config / listener / v4alpha / BUILD <nl> ppp b / generated_api_shadow / envoy / config / listener / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / listener / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / config / listener / v4alpha / listener . proto <nl> ppp b / generated_api_shadow / envoy / config / listener / v4alpha / listener . proto <nl> import " google / api / annotations . proto " ; <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / collection_entry . proto " ; <nl> + <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> option ( udpa . annotations . file_status ) . package_version_status = NEXT_MAJOR_VERSIO <nl> / / [ # protodoc - title : Listener configuration ] <nl> / / Listener : ref : ` configuration overview < config_listeners > ` <nl> <nl> + / / Listener list collections . Entries are * Listener * resources or references . <nl> + / / [ # not - implemented - hide : ] <nl> + message ListenerCollection { <nl> + option ( udpa . annotations . versioning ) . previous_message_type = <nl> + " envoy . config . listener . v3 . ListenerCollection " ; <nl> + <nl> + udpa . core . v1 . CollectionEntry entries = 1 ; <nl> + } <nl> + <nl> / / [ # next - free - field : 23 ] <nl> message Listener { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . config . listener . v3 . 
Listener " ; <nl> mmm a / generated_api_shadow / envoy / extensions / common / tap / v3 / BUILD <nl> ppp b / generated_api_shadow / envoy / extensions / common / tap / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / core / v3 : pkg " , <nl> " / / envoy / config / tap / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / extensions / common / tap / v3 / common . proto <nl> ppp b / generated_api_shadow / envoy / extensions / common / tap / v3 / common . proto <nl> package envoy . extensions . common . tap . v3 ; <nl> import " envoy / config / core / v3 / config_source . proto " ; <nl> import " envoy / config / tap / v3 / common . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> import " validate / validate . proto " ; <nl> message CommonExtensionConfig { <nl> config . core . v3 . ConfigSource config_source = 1 [ ( validate . rules ) . message = { required : true } ] ; <nl> <nl> / / Tap config to request from XDS server . <nl> - string name = 2 [ ( validate . rules ) . string = { min_bytes : 1 } ] ; <nl> + string name = 2 [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Resource locator for TAP . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator tap_resource_locator = 3 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> } <nl> <nl> oneof config_type { <nl> mmm a / generated_api_shadow / envoy / extensions / common / tap / v4alpha / BUILD <nl> ppp b / generated_api_shadow / envoy / extensions / common / tap / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / tap / v4alpha : pkg " , <nl> " / / envoy / extensions / common / tap / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / extensions / common / tap / v4alpha / common . proto <nl> ppp b / generated_api_shadow / envoy / extensions / common / tap / v4alpha / common . proto <nl> package envoy . extensions . common . tap . v4alpha ; <nl> import " envoy / config / core / v4alpha / config_source . proto " ; <nl> import " envoy / config / tap / v4alpha / common . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> import " validate / validate . proto " ; <nl> message CommonExtensionConfig { <nl> config . core . v4alpha . ConfigSource config_source = 1 <nl> [ ( validate . rules ) . message = { required : true } ] ; <nl> <nl> - / / Tap config to request from XDS server . <nl> - string name = 2 [ ( validate . rules ) . string = { min_bytes : 1 } ] ; <nl> + oneof name_specifier { <nl> + / / Tap config to request from XDS server . <nl> + string name = 2 ; <nl> + <nl> + / / Resource locator for TAP . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . 
ResourceLocator tap_resource_locator = 3 ; <nl> + } <nl> } <nl> <nl> oneof config_type { <nl> mmm a / generated_api_shadow / envoy / extensions / filters / network / http_connection_manager / v3 / BUILD <nl> ppp b / generated_api_shadow / envoy / extensions / filters / network / http_connection_manager / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / type / tracing / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / extensions / filters / network / http_connection_manager / v3 / http_connection_manager . proto <nl> ppp b / generated_api_shadow / envoy / extensions / filters / network / http_connection_manager / v3 / http_connection_manager . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> message Rds { <nl> / / API . This allows an Envoy configuration with multiple HTTP listeners ( and <nl> / / associated HTTP connection manager filters ) to use different route <nl> / / configurations . <nl> - string route_config_name = 2 [ ( validate . rules ) . string = { min_bytes : 1 } ] ; <nl> + string route_config_name = 2 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Resource locator for RDS . This is mutually exclusive to * route_config_name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator rds_resource_locator = 3 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> } <nl> <nl> / / This message is used to work around the limitations with ' oneof ' and repeated fields . <nl> mmm a / generated_api_shadow / envoy / extensions / filters / network / http_connection_manager / v4alpha / BUILD <nl> ppp b / generated_api_shadow / envoy / extensions / filters / network / http_connection_manager / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / type / tracing / v3 : pkg " , <nl> " / / envoy / type / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / extensions / filters / network / http_connection_manager / v4alpha / http_connection_manager . proto <nl> ppp b / generated_api_shadow / envoy / extensions / filters / network / http_connection_manager / v4alpha / http_connection_manager . proto <nl> import " google / protobuf / duration . proto " ; <nl> import " google / protobuf / struct . proto " ; <nl> import " google / protobuf / wrappers . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " envoy / annotations / deprecation . proto " ; <nl> import " udpa / annotations / security . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> message Rds { <nl> / / Configuration source specifier for RDS . <nl> config . core . v4alpha . ConfigSource config_source = 1 [ ( validate . 
rules ) . message = { required : true } ] ; <nl> <nl> - / / The name of the route configuration . This name will be passed to the RDS <nl> - / / API . This allows an Envoy configuration with multiple HTTP listeners ( and <nl> - / / associated HTTP connection manager filters ) to use different route <nl> - / / configurations . <nl> - string route_config_name = 2 [ ( validate . rules ) . string = { min_bytes : 1 } ] ; <nl> + oneof name_specifier { <nl> + / / The name of the route configuration . This name will be passed to the RDS <nl> + / / API . This allows an Envoy configuration with multiple HTTP listeners ( and <nl> + / / associated HTTP connection manager filters ) to use different route <nl> + / / configurations . <nl> + string route_config_name = 2 ; <nl> + <nl> + / / Resource locator for RDS . This is mutually exclusive to * route_config_name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator rds_resource_locator = 3 ; <nl> + } <nl> } <nl> <nl> / / This message is used to work around the limitations with ' oneof ' and repeated fields . <nl> mmm a / generated_api_shadow / envoy / extensions / transport_sockets / tls / v3 / BUILD <nl> ppp b / generated_api_shadow / envoy / extensions / transport_sockets / tls / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / core / v3 : pkg " , <nl> " / / envoy / type / matcher / v3 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / extensions / transport_sockets / tls / v3 / secret . proto <nl> ppp b / generated_api_shadow / envoy / extensions / transport_sockets / tls / v3 / secret . proto <nl> import " envoy / config / core / v3 / base . proto " ; <nl> import " envoy / config / core / v3 / config_source . proto " ; <nl> import " envoy / extensions / transport_sockets / tls / v3 / common . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / sensitive . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> message SdsSecretConfig { <nl> / / Name ( FQDN , UUID , SPKI , SHA256 , etc . ) by which the secret can be uniquely referred to . <nl> / / When both name and config are specified , then secret can be fetched and / or reloaded via <nl> / / SDS . When only name is specified , then secret will be loaded from static resources . <nl> - string name = 1 ; <nl> + string name = 1 [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Resource locator for SDS . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator sds_resource_locator = 3 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> <nl> config . core . v3 . 
ConfigSource sds_config = 2 ; <nl> } <nl> mmm a / generated_api_shadow / envoy / extensions / transport_sockets / tls / v4alpha / BUILD <nl> ppp b / generated_api_shadow / envoy / extensions / transport_sockets / tls / v4alpha / BUILD <nl> api_proto_package ( <nl> " / / envoy / extensions / transport_sockets / tls / v3 : pkg " , <nl> " / / envoy / type / matcher / v4alpha : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / extensions / transport_sockets / tls / v4alpha / secret . proto <nl> ppp b / generated_api_shadow / envoy / extensions / transport_sockets / tls / v4alpha / secret . proto <nl> import " envoy / config / core / v4alpha / base . proto " ; <nl> import " envoy / config / core / v4alpha / config_source . proto " ; <nl> import " envoy / extensions / transport_sockets / tls / v4alpha / common . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + <nl> import " udpa / annotations / sensitive . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . proto " ; <nl> message SdsSecretConfig { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . extensions . transport_sockets . tls . v3 . SdsSecretConfig " ; <nl> <nl> - / / Name ( FQDN , UUID , SPKI , SHA256 , etc . ) by which the secret can be uniquely referred to . <nl> - / / When both name and config are specified , then secret can be fetched and / or reloaded via <nl> - / / SDS . When only name is specified , then secret will be loaded from static resources . <nl> - string name = 1 ; <nl> + oneof name_specifier { <nl> + / / Name ( FQDN , UUID , SPKI , SHA256 , etc . ) by which the secret can be uniquely referred to . <nl> + / / When both name and config are specified , then secret can be fetched and / or reloaded via <nl> + / / SDS . When only name is specified , then secret will be loaded from static resources . <nl> + string name = 1 ; <nl> + <nl> + / / Resource locator for SDS . This is mutually exclusive to * name * . <nl> + / / [ # not - implemented - hide : ] <nl> + udpa . core . v1 . ResourceLocator sds_resource_locator = 3 ; <nl> + } <nl> <nl> config . core . v4alpha . ConfigSource sds_config = 2 ; <nl> } <nl> mmm a / generated_api_shadow / envoy / service / discovery / v3 / BUILD <nl> ppp b / generated_api_shadow / envoy / service / discovery / v3 / BUILD <nl> api_proto_package ( <nl> " / / envoy / config / core / v3 : pkg " , <nl> " / / envoy / service / discovery / v2 : pkg " , <nl> " @ com_github_cncf_udpa / / udpa / annotations : pkg " , <nl> + " @ com_github_cncf_udpa / / udpa / core / v1 : pkg " , <nl> ] , <nl> ) <nl> mmm a / generated_api_shadow / envoy / service / discovery / v3 / discovery . proto <nl> ppp b / generated_api_shadow / envoy / service / discovery / v3 / discovery . proto <nl> import " envoy / config / core / v3 / base . proto " ; <nl> import " google / protobuf / any . proto " ; <nl> import " google / rpc / status . proto " ; <nl> <nl> + import " udpa / core / v1 / resource_locator . proto " ; <nl> + import " udpa / core / v1 / resource_name . proto " ; <nl> + <nl> + import " udpa / annotations / migrate . proto " ; <nl> import " udpa / annotations / status . proto " ; <nl> import " udpa / annotations / versioning . 
proto " ; <nl> <nl> message DiscoveryResponse { <nl> / / In particular , initial_resource_versions being sent at the " start " of every <nl> / / gRPC stream actually entails a message for each type_url , each with its own <nl> / / initial_resource_versions . <nl> - / / [ # next - free - field : 8 ] <nl> + / / [ # next - free - field : 10 ] <nl> message DeltaDiscoveryRequest { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . api . v2 . DeltaDiscoveryRequest " ; <nl> <nl> message DeltaDiscoveryRequest { <nl> config . core . v3 . Node node = 1 ; <nl> <nl> / / Type of the resource that is being requested , e . g . <nl> - / / " type . googleapis . com / envoy . api . v2 . ClusterLoadAssignment " . <nl> + / / " type . googleapis . com / envoy . api . v2 . ClusterLoadAssignment " . This does not need to be set if <nl> + / / resources are only referenced via * udpa_resource_subscribe * and <nl> + / / * udpa_resources_unsubscribe * . <nl> string type_url = 2 ; <nl> <nl> / / DeltaDiscoveryRequests allow the client to add or remove individual <nl> message DeltaDiscoveryRequest { <nl> / / A list of Resource names to add to the list of tracked resources . <nl> repeated string resource_names_subscribe = 3 ; <nl> <nl> + / / As with * resource_names_subscribe * but used when subscribing to resources indicated <nl> + / / by a * udpa . core . v1 . ResourceLocator * . The directives in the resource locator <nl> + / / are ignored and the context parameters are matched with <nl> + / / * context_param_specifier * specific semantics . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated udpa . core . v1 . ResourceLocator udpa_resources_subscribe = 8 ; <nl> + <nl> / / A list of Resource names to remove from the list of tracked resources . <nl> repeated string resource_names_unsubscribe = 4 ; <nl> <nl> + / / As with * resource_names_unsubscribe * but used when unsubscribing to resources indicated by a <nl> + / / * udpa . core . v1 . ResourceLocator * . This must match a previously subscribed <nl> + / / resource locator provided in * udpa_resources_subscribe * . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated udpa . core . v1 . ResourceLocator udpa_resources_unsubscribe = 9 ; <nl> + <nl> / / Informs the server of the versions of the resources the xDS client knows of , to enable the <nl> / / client to continue the same logical xDS session even in the face of gRPC stream reconnection . <nl> / / It will not be populated : [ 1 ] in the very first stream of a session , since the client will <nl> message DeltaDiscoveryRequest { <nl> google . rpc . Status error_detail = 7 ; <nl> } <nl> <nl> - / / [ # next - free - field : 7 ] <nl> + / / [ # next - free - field : 8 ] <nl> message DeltaDiscoveryResponse { <nl> option ( udpa . annotations . versioning ) . previous_message_type = <nl> " envoy . api . v2 . DeltaDiscoveryResponse " ; <nl> message DeltaDiscoveryResponse { <nl> <nl> / / Type URL for resources . Identifies the xDS API when muxing over ADS . <nl> / / Must be consistent with the type_url in the Any within ' resources ' if ' resources ' is non - empty . <nl> + / / This does not need to be set if * udpa_removed_resources * is used instead of <nl> + / / * removed_resources * . <nl> string type_url = 4 ; <nl> <nl> / / Resources names of resources that have be deleted and to be removed from the xDS Client . <nl> / / Removed resources for missing resources can be ignored . 
<nl> repeated string removed_resources = 6 ; <nl> <nl> + / / As with * removed_resources * but used when a removed resource was named in <nl> + / / its * Resource * s with a * udpa . core . v1 . ResourceName * . <nl> + / / [ # not - implemented - hide : ] <nl> + repeated udpa . core . v1 . ResourceName udpa_removed_resources = 7 ; <nl> + <nl> / / The nonce provides a way for DeltaDiscoveryRequests to uniquely <nl> / / reference a DeltaDiscoveryResponse when ( N ) ACKing . The nonce is required . <nl> string nonce = 5 ; <nl> } <nl> <nl> + / / [ # next - free - field : 6 ] <nl> message Resource { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . api . v2 . Resource " ; <nl> <nl> / / The resource ' s name , to distinguish it from others of the same type of resource . <nl> - string name = 3 ; <nl> + string name = 3 [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> + <nl> + / / Used instead of * name * when a resource with a * udpa . core . v1 . ResourceName * is delivered . <nl> + udpa . core . v1 . ResourceName udpa_resource_name = 5 <nl> + [ ( udpa . annotations . field_migrate ) . oneof_promotion = " name_specifier " ] ; <nl> <nl> / / The aliases are a list of other names that this resource can go by . <nl> repeated string aliases = 4 ; <nl> mmm a / tools / proto_format / proto_sync . py <nl> ppp b / tools / proto_format / proto_sync . py <nl> def GetImportDeps ( proto_path ) : <nl> if import_path . startswith ( ' udpa / annotations / ' ) : <nl> imports . append ( ' @ com_github_cncf_udpa / / udpa / annotations : pkg ' ) <nl> continue <nl> + # Special case handling for UDPA core . <nl> + if import_path . startswith ( ' udpa / core / v1 / ' ) : <nl> + imports . append ( ' @ com_github_cncf_udpa / / udpa / core / v1 : pkg ' ) <nl> + continue <nl> # Explicit remapping for external deps , compute paths for envoy / * . <nl> if import_path in external_proto_deps . EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP : <nl> imports . append ( external_proto_deps . EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP [ import_path ] ) <nl>
|
udpa: plumb udpa:// resource names/locators. ()
|
envoyproxy/envoy
|
99471fd8f4ef7406f50cf41cf1cfa22bbdfeacc7
|
2020-07-01T18:45:22Z
|
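The five-step authority resolution documented on *config_sources* in the diff above maps directly to a few lines of code. The following is a minimal C++ sketch under stated assumptions: *Authority*, *ConfigSource*, and *ResolveAuthority* are hypothetical simplifications for illustration, not Envoy's generated proto classes or an actual Envoy API.

#include <optional>
#include <string>
#include <vector>

// Hypothetical stand-ins for udpa.core.v1.Authority and
// envoy.config.core.v3.ConfigSource (assumption: the real code uses the
// proto-generated classes).
struct Authority { std::string name; };
struct ConfigSource { std::vector<Authority> authorities; };

// Follows the five steps documented on Bootstrap::config_sources:
// peer ConfigSource -> config_sources list (first match wins) ->
// default_config_source -> failure.
std::optional<ConfigSource> ResolveAuthority(
    const std::string& resource_authority,            // step 1
    const ConfigSource* peer,                         // step 2
    const std::vector<ConfigSource>& config_sources,  // step 3
    const ConfigSource* default_source) {             // steps 4-5
  auto matches = [&](const ConfigSource& cs) {
    for (const auto& a : cs.authorities)
      if (a.name == resource_authority) return true;
    return false;
  };
  if (peer != nullptr && matches(*peer)) return *peer;
  for (const auto& cs : config_sources)
    if (matches(cs)) return cs;  // step 3: first match wins
  if (default_source != nullptr) return *default_source;
  return std::nullopt;  // step 5: resolution fails
}

The ordering is the point of the design: the peer source (the one that would have been used unconditionally for an opaque resource name) wins over the bootstrap-level *config_sources* list, which in turn wins over *default_config_source*.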
mmm a / xbmc / cores / dvdplayer / DVDCodecs / Video / DXVA . cpp <nl> ppp b / xbmc / cores / dvdplayer / DVDCodecs / Video / DXVA . cpp <nl> <nl> # include " boost / shared_ptr . hpp " <nl> # include " AutoPtrHandle . h " <nl> <nl> + # define ALLOW_ADDING_SURFACES 1 <nl> + <nl> using namespace DXVA ; <nl> using namespace boost ; <nl> using namespace AUTOPTR ; <nl> CDecoder : : CDecoder ( ) <nl> m_decoder = NULL ; <nl> m_processor = NULL ; <nl> m_buffer_count = 0 ; <nl> + m_buffer_age = 0 ; <nl> m_refs = 0 ; <nl> memset ( & m_format , 0 , sizeof ( m_format ) ) ; <nl> m_context = ( dxva_context * ) calloc ( 1 , sizeof ( dxva_context ) ) ; <nl> bool CDecoder : : Open ( AVCodecContext * avctx , enum PixelFormat fmt ) <nl> m_format . UABProtectionLevel = FALSE ; <nl> m_format . Reserved = 0 ; <nl> <nl> - m_refs = avctx - > refs ; <nl> + if ( avctx - > refs > m_refs ) <nl> + m_refs = avctx - > refs ; <nl> + <nl> if ( m_refs = = 0 ) <nl> { <nl> if ( avctx - > codec_id = = CODEC_ID_H264 ) <nl> bool CDecoder : : Open ( AVCodecContext * avctx , enum PixelFormat fmt ) <nl> else <nl> m_refs = 2 ; <nl> } <nl> + CLog : : Log ( LOGDEBUG , " DXVA - source requires % d references " , avctx - > refs ) ; <nl> + <nl> + / / find what decode configs are available <nl> + UINT cfg_count = 0 ; <nl> + DXVA2_ConfigPictureDecode * cfg_list = NULL ; <nl> + CHECK ( m_service - > GetDecoderConfigurations ( m_input <nl> + , & m_format <nl> + , NULL <nl> + , & cfg_count <nl> + , & cfg_list ) ) <nl> + SCOPE ( DXVA2_ConfigPictureDecode , cfg_list ) ; <nl> <nl> - if ( ! OpenDecoder ( avctx ) ) <nl> + DXVA2_ConfigPictureDecode config = { } ; <nl> + <nl> + unsigned bitstream = 1 ; / / ConfigBitstreamRaw = 2 seems to be broken in current ffmpeg , so prefer mode 1 for now <nl> + for ( unsigned i = 0 ; i < cfg_count ; i + + ) <nl> + { <nl> + CLog : : Log ( LOGDEBUG , " DXVA - bitstream type % d " , cfg_list [ i ] . ConfigBitstreamRaw ) ; <nl> + <nl> + / / select first available <nl> + if ( config . ConfigBitstreamRaw = = 0 & & cfg_list [ i ] . ConfigBitstreamRaw ! = 0 ) <nl> + config = cfg_list [ i ] ; <nl> + <nl> + / / overide with preferred if found <nl> + if ( config . ConfigBitstreamRaw ! = bitstream & & cfg_list [ i ] . ConfigBitstreamRaw = = bitstream ) <nl> + config = cfg_list [ i ] ; <nl> + } <nl> + <nl> + if ( ! config . ConfigBitstreamRaw ) <nl> + { <nl> + CLog : : Log ( LOGDEBUG , " DXVA - failed to find a raw input bitstream " ) ; <nl> + return false ; <nl> + } <nl> + * const_cast < DXVA2_ConfigPictureDecode * > ( m_context - > cfg ) = config ; <nl> + <nl> + if ( ! OpenProcessor ( ) ) <nl> + return false ; <nl> + <nl> + if ( ! OpenDecoder ( ) ) <nl> return false ; <nl> <nl> + avctx - > get_buffer = GetBufferS ; <nl> + avctx - > release_buffer = RelBufferS ; <nl> + avctx - > hwaccel_context = m_context ; <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + bool CDecoder : : OpenProcessor ( ) <nl> + { <nl> m_state = DXVA_OPEN ; <nl> <nl> { CSingleExit leave ( m_section ) ; <nl> bool CDecoder : : Open ( AVCodecContext * avctx , enum PixelFormat fmt ) <nl> m_processor = processor ; <nl> } <nl> <nl> - if ( m_state = = DXVA_RESET ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " DXVA - decoder was reset while trying to create a processor , retrying " ) ; <nl> - if ( ! Open ( avctx , fmt ) ) <nl> - return false ; <nl> - } <nl> - <nl> - if ( m_state = = DXVA_LOST ) <nl> + if ( m_state ! 
= DXVA_OPEN ) <nl> { <nl> CLog : : Log ( LOGERROR , " DXVA - device was lost while trying to create a processor " ) ; <nl> return false ; <nl> } <nl> <nl> - if ( ! m_processor - > Open ( m_format , m_decoder ) ) <nl> + if ( ! m_processor - > Open ( m_format ) ) <nl> return false ; <nl> <nl> - avctx - > get_buffer = GetBufferS ; <nl> - avctx - > release_buffer = RelBufferS ; <nl> - avctx - > hwaccel_context = m_context ; <nl> - <nl> return true ; <nl> } <nl> <nl> int CDecoder : : Check ( AVCodecContext * avctx ) <nl> <nl> if ( avctx - > refs > m_refs ) <nl> { <nl> - CLog : : Log ( LOGWARNING , " CDecoder : : Check - number of required reference frames increased , resetting device " ) ; <nl> + CLog : : Log ( LOGWARNING , " CDecoder : : Check - number of required reference frames increased , recreating decoder " ) ; <nl> + # if ALLOW_ADDING_SURFACES <nl> + if ( ! OpenDecoder ( ) ) <nl> + return VC_ERROR ; <nl> + # else <nl> Close ( ) ; <nl> + return VC_FLUSHED ; <nl> + # endif <nl> } <nl> <nl> if ( m_format . SampleWidth = = 0 <nl> bool CDecoder : : OpenTarget ( const GUID & guid ) <nl> return false ; <nl> } <nl> <nl> - bool CDecoder : : OpenDecoder ( AVCodecContext * avctx ) <nl> + bool CDecoder : : OpenDecoder ( ) <nl> { <nl> - m_context - > surface_count = m_refs + 1 + 1 ; / / refs + 1 decode + 1 libavcodec safety <nl> - CLog : : Log ( LOGDEBUG , " DXVA - allocating % d surfaces for given % d references " , m_context - > surface_count , avctx - > refs ) ; <nl> - <nl> - CHECK ( m_service - > CreateSurface ( ( m_format . SampleWidth + 15 ) & ~ 15 <nl> - , ( m_format . SampleHeight + 15 ) & ~ 15 <nl> - , m_context - > surface_count - 1 <nl> - , m_format . Format <nl> - , D3DPOOL_DEFAULT <nl> - , 0 <nl> - , DXVA2_VideoDecoderRenderTarget <nl> - , m_context - > surface , NULL ) ) ; <nl> - <nl> - m_buffer_count = m_context - > surface_count ; <nl> - m_buffer_age = 0 ; <nl> - for ( unsigned i = 0 ; i < m_buffer_count ; i + + ) <nl> - m_buffer [ i ] . surface = m_context - > surface [ i ] ; <nl> - <nl> - UINT cfg_count = 0 ; <nl> - DXVA2_ConfigPictureDecode * cfg_list = NULL ; <nl> - CHECK ( m_service - > GetDecoderConfigurations ( m_input <nl> - , & m_format <nl> - , NULL <nl> - , & cfg_count <nl> - , & cfg_list ) ) <nl> - SCOPE ( DXVA2_ConfigPictureDecode , cfg_list ) ; <nl> + SAFE_RELEASE ( m_decoder ) <nl> <nl> - DXVA2_ConfigPictureDecode config = { } ; <nl> + m_context - > surface_count = m_refs + 1 + 1 + m_processor - > Size ( ) ; / / refs + 1 decode + 1 libavcodec safety + processor buffer <nl> <nl> - unsigned bitstream = 1 ; / / ConfigBitstreamRaw = 2 seems to be broken in current ffmpeg , so prefer mode 1 for now <nl> - for ( unsigned i = 0 ; i < cfg_count ; i + + ) <nl> + if ( m_context - > surface_count > m_buffer_count ) <nl> { <nl> - CLog : : Log ( LOGDEBUG , " DXVA - bitstream type % d " , cfg_list [ i ] . ConfigBitstreamRaw ) ; <nl> + CLog : : Log ( LOGDEBUG , " DXVA - allocating % d surfaces " , m_context - > surface_count - m_buffer_count ) ; <nl> <nl> - / / select first available <nl> - if ( config . ConfigBitstreamRaw = = 0 & & cfg_list [ i ] . ConfigBitstreamRaw ! = 0 ) <nl> - config = cfg_list [ i ] ; <nl> + CHECK ( m_service - > CreateSurface ( ( m_format . SampleWidth + 15 ) & ~ 15 <nl> + , ( m_format . SampleHeight + 15 ) & ~ 15 <nl> + , m_context - > surface_count - 1 - m_buffer_count <nl> + , m_format . 
Format <nl> + , D3DPOOL_DEFAULT <nl> + , 0 <nl> + , DXVA2_VideoDecoderRenderTarget <nl> + , m_context - > surface + m_buffer_count , NULL ) ) ; <nl> <nl> - / / overide with preferred if found <nl> - if ( config . ConfigBitstreamRaw ! = bitstream & & cfg_list [ i ] . ConfigBitstreamRaw = = bitstream ) <nl> - config = cfg_list [ i ] ; <nl> - } <nl> + for ( unsigned i = m_buffer_count ; i < m_context - > surface_count ; i + + ) <nl> + m_buffer [ i ] . surface = m_context - > surface [ i ] ; <nl> <nl> - if ( ! config . ConfigBitstreamRaw ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " DXVA - failed to find a raw input bitstream " ) ; <nl> - return false ; <nl> + m_buffer_count = m_context - > surface_count ; <nl> } <nl> <nl> - CHECK ( m_service - > CreateVideoDecoder ( m_input , & m_format , & config <nl> + CHECK ( m_service - > CreateVideoDecoder ( m_input , & m_format <nl> + , m_context - > cfg <nl> , m_context - > surface <nl> , m_context - > surface_count <nl> , & m_decoder ) ) <nl> <nl> - * const_cast < DXVA2_ConfigPictureDecode * > ( m_context - > cfg ) = config ; <nl> + / / CreateVideoDecoder will not addref the surfaces , but will release them when released <nl> + for ( unsigned i = 0 ; i < m_buffer_count ; i + + ) <nl> + m_buffer [ i ] . surface - > AddRef ( ) ; <nl> + <nl> m_context - > decoder = m_decoder ; <nl> <nl> return true ; <nl> int CDecoder : : GetBuffer ( AVCodecContext * avctx , AVFrame * pic ) <nl> } <nl> } <nl> <nl> - SVideoBuffer * buf_old = NULL ; <nl> - SVideoBuffer * buf = NULL ; <nl> + int count = 0 ; <nl> + SVideoBuffer * buf = NULL ; <nl> for ( unsigned i = 0 ; i < m_buffer_count ; i + + ) <nl> { <nl> - if ( ! m_buffer [ i ] . used ) <nl> + if ( m_buffer [ i ] . used ) <nl> + count + + ; <nl> + else <nl> { <nl> if ( ! buf | | buf - > age > m_buffer [ i ] . age ) <nl> buf = m_buffer + i ; <nl> } <nl> - <nl> - if ( ! buf_old | | buf_old - > age > m_buffer [ i ] . age ) <nl> - buf_old = m_buffer + i ; <nl> } <nl> <nl> - if ( ! buf ) <nl> + if ( count > = m_refs + 2 ) <nl> { <nl> - if ( buf_old ) <nl> - CLog : : Log ( LOGERROR , " DXVA - unable to find new unused buffer " ) ; <nl> - else <nl> - { <nl> - CLog : : Log ( LOGERROR , " DXVA - unable to find any buffer " ) ; <nl> + m_refs + + ; <nl> + # if ALLOW_ADDING_SURFACES <nl> + if ( ! OpenDecoder ( ) ) <nl> return - 1 ; <nl> - } <nl> - buf = buf_old ; <nl> + return GetBuffer ( avctx , pic ) ; <nl> + # else <nl> + Close ( ) ; <nl> + return - 1 ; <nl> + # endif <nl> } <nl> <nl> + if ( ! buf ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " DXVA - unable to find new unused buffer " ) ; <nl> + return - 1 ; <nl> + } <nl> <nl> pic - > reordered_opaque = avctx - > reordered_opaque ; <nl> pic - > type = FF_BUFFER_TYPE_USER ; <nl> void CProcessor : : Close ( ) <nl> CSingleLock lock ( m_section ) ; <nl> SAFE_RELEASE ( m_process ) ; <nl> SAFE_RELEASE ( m_service ) ; <nl> - SAFE_RELEASE ( m_decoder ) ; <nl> for ( unsigned i = 0 ; i < m_sample . size ( ) ; i + + ) <nl> SAFE_RELEASE ( m_sample [ i ] . SrcSurface ) ; <nl> m_sample . clear ( ) ; <nl> } <nl> <nl> <nl> - bool CProcessor : : Open ( const DXVA2_VideoDesc & dsc , IDirectXVideoDecoder * decoder ) <nl> + bool CProcessor : : Open ( const DXVA2_VideoDesc & dsc ) <nl> { <nl> if ( ! 
LoadDXVA ( ) ) <nl> return false ; <nl> bool CProcessor : : Open ( const DXVA2_VideoDesc & dsc , IDirectXVideoDecoder * decoder ) <nl> CHECK ( m_service - > GetProcAmpRange ( m_device , & m_desc , output , DXVA2_ProcAmp_Saturation , & m_saturation ) ) ; <nl> <nl> m_time = 0 ; <nl> - decoder - > AddRef ( ) ; <nl> - m_decoder = decoder ; <nl> return true ; <nl> } <nl> <nl> mmm a / xbmc / cores / dvdplayer / DVDCodecs / Video / DXVA . h <nl> ppp b / xbmc / cores / dvdplayer / DVDCodecs / Video / DXVA . h <nl> class CDecoder <nl> virtual void Close ( ) ; <nl> virtual const std : : string Name ( ) { return " dxva2 " ; } <nl> <nl> + bool OpenProcessor ( ) ; <nl> bool OpenTarget ( const GUID & guid ) ; <nl> - bool OpenDecoder ( AVCodecContext * avctx ) ; <nl> + bool OpenDecoder ( ) ; <nl> int GetBuffer ( AVCodecContext * avctx , AVFrame * pic ) ; <nl> void RelBuffer ( AVCodecContext * avctx , AVFrame * pic ) ; <nl> <nl> class CProcessor <nl> CProcessor ( ) ; <nl> ~ CProcessor ( ) ; <nl> <nl> - bool Open ( const DXVA2_VideoDesc & dsc , IDirectXVideoDecoder * decoder ) ; <nl> + bool Open ( const DXVA2_VideoDesc & dsc ) ; <nl> void Close ( ) ; <nl> REFERENCE_TIME Add ( IDirect3DSurface9 * source ) ; <nl> bool Render ( const RECT & dst , IDirect3DSurface9 * target , const REFERENCE_TIME time ) ; <nl> void CropSource ( RECT & dst , RECT & src , const D3DSURFACE_DESC & desc ) ; <nl> + int Size ( ) { return m_size ; } <nl> <nl> CProcessor * Acquire ( ) ; <nl> long Release ( ) ; <nl> class CProcessor <nl> long m_references ; <nl> <nl> protected : <nl> - IDirectXVideoDecoder * m_decoder ; <nl> } ; <nl> <nl> } ; <nl>
|
[DXVA] attempt to grow available surfaces if file is flagged invalid
|
xbmc/xbmc
|
ca6dc13c07970b6d30ca888e5e71176b519935f5
|
2010-05-01T15:20:18Z
|
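The behavioral change in this commit is concentrated in GetBuffer: instead of recycling the oldest in-use surface when no free one exists (which can corrupt frames the decoder still references), the patched code counts in-use surfaces and grows the pool once the count reaches refs + 2 (refs + 1 decode + 1 libavcodec safety), guarded by ALLOW_ADDING_SURFACES. Below is a minimal C++ sketch of that loop, assuming simplified stand-ins: VideoBuffer and Decoder are not XBMC's SVideoBuffer/CDecoder, and real surface allocation goes through the DXVA service rather than emplace_back.

#include <vector>

// Hypothetical simplification of the decoder's surface bookkeeping.
struct VideoBuffer { bool used = false; unsigned age = 0; };

struct Decoder {
  std::vector<VideoBuffer> buffers;
  unsigned refs = 2;

  VideoBuffer* Acquire() {
    unsigned used_count = 0;
    VideoBuffer* best = nullptr;
    for (auto& b : buffers) {
      if (b.used) { ++used_count; continue; }
      if (best == nullptr || best->age > b.age)
        best = &b;  // lowest age == least recently released free slot
    }
    if (used_count >= refs + 2) {  // pool exhausted for this ref count
      ++refs;
      buffers.emplace_back();      // stand-in for OpenDecoder() adding a surface
      return Acquire();            // retry with the enlarged pool
    }
    return best;  // may be nullptr if no surface exists at all
  }
};

Growing in place avoids the old reset path (Close() followed by VC_FLUSHED), which tore down the whole decoder whenever a file under-reported its reference-frame count.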
mmm a / Source / CNTK / NetworkDescriptionLanguage . cpp <nl> ppp b / Source / CNTK / NetworkDescriptionLanguage . cpp <nl> NDLPass & operator + + ( NDLPass & ndlPass ) <nl> / / string1 - [ in , out ] string to compare , if comparision is equal and at least half the full node name will replace with full node name <nl> / / allowUndeterminedVariable - [ out ] set to true if undetermined variables ( symbols yet to be defined ) are allowed here <nl> / / return - true if function name found <nl> - template < typename ElemType > <nl> bool CheckFunction ( std : : string & p_nodeType , bool * allowUndeterminedVariable ) <nl> { <nl> if ( allowUndeterminedVariable ) <nl> mmm a / Source / CNTK / NetworkDescriptionLanguage . h <nl> ppp b / Source / CNTK / NetworkDescriptionLanguage . h <nl> bool EqualInsensitive ( std : : wstring & string1 , const std : : wstring & string2 , const <nl> / / string1 - [ in , out ] string to compare , if comparision is equal and at least half the full node name will replace with full node name <nl> / / allowUndeterminedVariable - [ out ] set to true if undetermined variables ( symbols yet to be defined ) are allowed here <nl> / / return - true if function name found <nl> - template < typename ElemType > <nl> bool CheckFunction ( std : : string & p_nodeType , bool * allowUndeterminedVariable = nullptr ) ; <nl> <nl> / / NDLType - Network Description Language node type <nl> class NDLScript : public ConfigParser <nl> std : : string functionName = param ; <nl> / / check for function name , a function may have two valid names <nl> / / in which case ' functionName ' will get the default node name returned <nl> - if ( CheckFunction < ElemType > ( functionName ) ) <nl> + if ( CheckFunction ( functionName ) ) <nl> { <nl> RuntimeError ( " NDLScript : Macro % s includes a parameter % s , which is also the name of a function . Parameter names may not be the same as function names . " , macroName . c_str ( ) , param . c_str ( ) ) ; <nl> } <nl> class NDLScript : public ConfigParser <nl> std : : string functionName = name ; <nl> / / check for function name , a function may have two valid names <nl> / / in which case ' functionName ' will get the default node name returned <nl> - if ( CheckFunction < ElemType > ( functionName ) ) <nl> + if ( CheckFunction ( functionName ) ) <nl> { <nl> NDLNode < ElemType > * ndlNode = new NDLNode < ElemType > ( " " , functionName , this , ndlTypeFunction ) ; <nl> return ndlNode ; <nl> class NDLScript : public ConfigParser <nl> <nl> / / check to make sure variable name isn ' t a valid function name as well <nl> string strTemp = key ; <nl> - if ( CheckFunction < ElemType > ( strTemp ) ) <nl> + if ( CheckFunction ( strTemp ) ) <nl> RuntimeError ( " variable % s is invalid , it is reserved because it is also the name of a function " , key . c_str ( ) ) ; <nl> <nl> tokenStart = keyEnd ; <nl> mmm a / Source / Readers / ImageReader / ImageDataDeserializer . cpp <nl> ppp b / Source / Readers / ImageReader / ImageDataDeserializer . cpp <nl> ImageDataDeserializer : : ImageDataDeserializer ( const ConfigParameters & config ) <nl> } <nl> else <nl> { <nl> - RuntimeError ( " Unsupported label element type ' % d ' . " , label - > m_elementType ) ; <nl> + RuntimeError ( " Unsupported label element type ' % d ' . " , ( int ) label - > m_elementType ) ; <nl> } <nl> <nl> CreateSequenceDescriptions ( configHelper . GetMapPath ( ) , labelDimension ) ; <nl> mmm a / Source / Readers / ReaderLib / ReaderShim . cpp <nl> ppp b / Source / Readers / ReaderLib / ReaderShim . 
cpp <nl> void ReaderShim < ElemType > : : Init ( const ConfigParameters & config ) <nl> } <nl> <nl> template < class ElemType > <nl> - void ReaderShim < ElemType > : : StartMinibatchLoop ( size_t mbSize , size_t epoch , size_t requestedEpochSamples = requestDataSize ) <nl> + void ReaderShim < ElemType > : : StartMinibatchLoop ( size_t mbSize , size_t epoch , size_t requestedEpochSamples ) <nl> { <nl> return StartDistributedMinibatchLoop ( mbSize , epoch , 0 , 1 , requestedEpochSamples ) ; <nl> } <nl> mmm a / Source / Readers / ReaderLib / ReaderShim . h <nl> ppp b / Source / Readers / ReaderLib / ReaderShim . h <nl> class ReaderShim : public IDataReader < ElemType > <nl> delete this ; <nl> } <nl> <nl> - virtual void StartMinibatchLoop ( size_t mbSize , size_t epoch , size_t requestedEpochSamples ) override ; <nl> + virtual void StartMinibatchLoop ( size_t mbSize , size_t epoch , size_t requestedEpochSamples = requestDataSize ) override ; <nl> virtual void StartDistributedMinibatchLoop ( size_t requestedMBSize , size_t epoch , size_t subsetNum , size_t numSubsets , size_t requestedEpochSamples ) override ; <nl> <nl> virtual bool SupportsDistributedMBRead ( ) const override <nl> mmm a / Source / Readers / ReaderLib / SampleModePacker . cpp <nl> ppp b / Source / Readers / ReaderLib / SampleModePacker . cpp <nl> void SampleModePacker : : CopySequenceToBuffer ( size_t sampleIndex , size_t streamInd <nl> } <nl> else <nl> { <nl> - RuntimeError ( " Storage type % d is not supported . " , m_inputStreams [ streamIndex ] - > m_storageType ) ; <nl> + RuntimeError ( " Storage type % d is not supported . " , ( int ) m_inputStreams [ streamIndex ] - > m_storageType ) ; <nl> } <nl> } <nl> <nl>
|
Integrate 5925ae722a949e0d52d2ae82dd9c85f7f753ea99 into master
|
microsoft/CNTK
|
afbdff16b7b216d2956c526aa490d9dcf62bd730
|
2016-02-23T01:25:32Z
|
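Beyond the reader changes, two small C++ fixes recur in this diff: CheckFunction never used its ElemType parameter, so the template is removed; and enum values handed to a printf-style '%d' are now cast to int. The cast matters because scoped enums do not promote to int through varargs. A short, self-contained sketch of the problem and the fix follows, where ElementType is a hypothetical stand-in for CNTK's element-type enum (assumed here to be a scoped enum). The StartMinibatchLoop change is related hygiene: C++ requires default arguments on the declaration, not on the out-of-line definition, so the diff moves `= requestDataSize` accordingly.

#include <cstdio>

// Hypothetical stand-in; assumed to be a scoped enum like CNTK's.
enum class ElementType { Float = 0, Double = 1 };

int main() {
  ElementType t = ElementType::Double;
  // std::printf("Unsupported label element type '%d'.\n", t);
  //   ^ -Wformat: '%d' expects int, but the argument has type 'ElementType'
  //     (and the call is undefined behavior if the underlying type is not int)
  std::printf("Unsupported label element type '%d'.\n", static_cast<int>(t));
  return 0;
}

The same reasoning applies to RuntimeError in the diff, since it forwards its arguments printf-style.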
mmm a / lib / IDE / SwiftSourceDocInfo . cpp <nl> ppp b / lib / IDE / SwiftSourceDocInfo . cpp <nl> <nl> # include " swift / IDE / CommentConversion . h " <nl> # include " swift / IDE / Utils . h " <nl> # include " swift / Markup / XMLUtils . h " <nl> + # include " swift / Subsystems . h " <nl> <nl> # include " clang / AST / ASTContext . h " <nl> # include " clang / AST / DeclObjC . h " <nl> void ResolvedRangeInfo : : print ( llvm : : raw_ostream & OS ) { <nl> OS < < " < / Type > \ n " ; <nl> } <nl> <nl> - OS < < " < Context > " ; <nl> - printContext ( OS , RangeContext ) ; <nl> - OS < < " < / Context > \ n " ; <nl> + if ( RangeContext ) { <nl> + OS < < " < Context > " ; <nl> + printContext ( OS , RangeContext ) ; <nl> + OS < < " < / Context > \ n " ; <nl> + } <nl> <nl> if ( ! HasSingleEntry ) { <nl> OS < < " < Entry > Multi < / Entry > \ n " ; <nl> struct RangeResolver : : Implementation { <nl> ContainedInRange ( ContainedInRange ) { } <nl> } ; <nl> <nl> + std : : vector < Token > AllTokens ; <nl> + Token & StartTok ; <nl> + Token & EndTok ; <nl> SourceLoc Start ; <nl> SourceLoc End ; <nl> StringRef Content ; <nl> struct RangeResolver : : Implementation { <nl> return true ; <nl> } <nl> <nl> - static SourceLoc getNonwhitespaceLocBefore ( SourceManager & SM , <nl> - unsigned BufferID , <nl> - unsigned Offset ) { <nl> - CharSourceRange entireRange = SM . getRangeForBuffer ( BufferID ) ; <nl> - StringRef Buffer = SM . extractText ( entireRange ) ; <nl> - <nl> - const char * BufStart = Buffer . data ( ) ; <nl> - if ( Offset > = Buffer . size ( ) ) <nl> - return SourceLoc ( ) ; <nl> - <nl> - for ( unsigned Off = Offset ; Off ! = 0 ; Off - - ) { <nl> - if ( ! clang : : isWhitespace ( * ( BufStart + Off ) ) ) { <nl> - return SM . getLocForOffset ( BufferID , Off ) ; <nl> - } <nl> - } <nl> - return clang : : isWhitespace ( * BufStart ) ? SourceLoc ( ) : <nl> - SM . getLocForOffset ( BufferID , 0 ) ; <nl> - } <nl> - <nl> - static SourceLoc getNonwhitespaceLocAfter ( SourceManager & SM , <nl> - unsigned BufferID , <nl> - unsigned Offset ) { <nl> - CharSourceRange entireRange = SM . getRangeForBuffer ( BufferID ) ; <nl> - StringRef Buffer = SM . extractText ( entireRange ) ; <nl> - <nl> - const char * BufStart = Buffer . data ( ) ; <nl> - if ( Offset > = Buffer . size ( ) ) <nl> - return SourceLoc ( ) ; <nl> - <nl> - for ( unsigned Off = Offset ; Off < Buffer . size ( ) ; Off + + ) { <nl> - if ( ! clang : : isWhitespace ( * ( BufStart + Off ) ) ) { <nl> - return SM . getLocForOffset ( BufferID , Off ) ; <nl> - } <nl> - } <nl> - return SourceLoc ( ) ; <nl> - } <nl> - <nl> DeclContext * getImmediateContext ( ) { <nl> for ( auto It = ContextStack . rbegin ( ) ; It ! = ContextStack . rend ( ) ; It + + ) { <nl> if ( auto * DC = It - > Parent . getAsDeclContext ( ) ) <nl> struct RangeResolver : : Implementation { <nl> return static_cast < DeclContext * > ( & File ) ; <nl> } <nl> <nl> - Implementation ( SourceFile & File , SourceLoc Start , SourceLoc End ) : <nl> - File ( File ) , Ctx ( File . getASTContext ( ) ) , SM ( Ctx . SourceMgr ) , Start ( Start ) , <nl> - End ( End ) , Content ( getContent ( ) ) { } <nl> + Implementation ( SourceFile & File , std : : vector < Token > AllTokens , <nl> + unsigned StartIdx , unsigned EndIdx ) : <nl> + File ( File ) , Ctx ( File . getASTContext ( ) ) , SM ( Ctx . SourceMgr ) , <nl> + AllTokens ( AllTokens ) , StartTok ( AllTokens [ StartIdx ] ) , EndTok ( AllTokens [ EndIdx ] ) , <nl> + Start ( StartTok . getLoc ( ) ) , End ( EndTok . 
getLoc ( ) ) , <nl> + Content ( getContent ( ) ) { <nl> + assert ( Start . isValid ( ) & & End . isValid ( ) ) ; <nl> + } <nl> <nl> public : <nl> bool hasResult ( ) { return Result . hasValue ( ) ; } <nl> struct RangeResolver : : Implementation { <nl> unsigned Length ) { <nl> SourceManager & SM = File . getASTContext ( ) . SourceMgr ; <nl> unsigned BufferId = File . getBufferID ( ) . getValue ( ) ; <nl> - SourceLoc StartLoc = Implementation : : getNonwhitespaceLocAfter ( SM , BufferId , <nl> - StartOff ) ; <nl> - SourceLoc EndLoc = Implementation : : getNonwhitespaceLocBefore ( SM , BufferId , <nl> - StartOff + Length - 1 ) ; <nl> - StartLoc = Lexer : : getLocForStartOfToken ( SM , StartLoc ) ; <nl> - EndLoc = Lexer : : getLocForStartOfToken ( SM , EndLoc ) ; <nl> - return StartLoc . isInvalid ( ) | | EndLoc . isInvalid ( ) ? nullptr : <nl> - new Implementation ( File , StartLoc , EndLoc ) ; <nl> + <nl> + LangOptions Opts = File . getASTContext ( ) . LangOpts ; <nl> + Opts . AttachCommentsToDecls = true ; <nl> + std : : vector < Token > AllTokens = tokenize ( Opts , SM , BufferId , 0 , 0 , false ) ; <nl> + auto TokenComp = [ & ] ( Token & LHS , SourceLoc Loc ) { <nl> + return SM . isBeforeInBuffer ( LHS . getLoc ( ) , Loc ) ; <nl> + } ; <nl> + <nl> + SourceLoc StartRaw = SM . getLocForOffset ( BufferId , StartOff ) ; <nl> + SourceLoc EndRaw = SM . getLocForOffset ( BufferId , StartOff + Length ) ; <nl> + <nl> + / / This points to the first token after or on the start loc . <nl> + auto StartIt = std : : lower_bound ( AllTokens . begin ( ) , AllTokens . end ( ) , StartRaw , <nl> + TokenComp ) ; <nl> + / / This points to the first token after or on the end loc ; <nl> + auto EndIt = std : : lower_bound ( AllTokens . begin ( ) , AllTokens . end ( ) , EndRaw , <nl> + TokenComp ) ; <nl> + / / Erroneous case . <nl> + if ( StartIt = = AllTokens . end ( ) | | EndIt = = AllTokens . begin ( ) ) <nl> + return nullptr ; <nl> + <nl> + / / The start token is inclusive . <nl> + unsigned StartIdx = StartIt - AllTokens . begin ( ) ; <nl> + <nl> + / / The end token is exclusive . <nl> + unsigned EndIdx = EndIt - 1 - AllTokens . begin ( ) ; <nl> + return new Implementation ( File , std : : move ( AllTokens ) , StartIdx , EndIdx ) ; <nl> } <nl> <nl> static Implementation * createInstance ( SourceFile & File , SourceLoc Start , <nl> struct RangeResolver : : Implementation { <nl> <nl> StringRef getContent ( ) { <nl> SourceManager & SM = File . getASTContext ( ) . SourceMgr ; <nl> - return CharSourceRange ( SM , Start , Lexer : : getLocForEndOfToken ( SM , End ) ) . str ( ) ; <nl> + return CharSourceRange ( SM , StartTok . hasComment ( ) ? <nl> + StartTok . getCommentStart ( ) : StartTok . getLoc ( ) , <nl> + Lexer : : getLocForEndOfToken ( SM , End ) ) . str ( ) ; <nl> } <nl> } ; <nl> <nl> new file mode 100644 <nl> index 000000000000 . . 3129847fff8f <nl> mmm / dev / null <nl> ppp b / test / IDE / range_info_comments . 
swift <nl> <nl> + func foo ( ) - > Int { <nl> + / / some comments <nl> + var aaa = 1 + 2 <nl> + aaa = aaa + 3 <nl> + if aaa = = 3 { aaa = 4 } <nl> + / / some comments <nl> + return aaa <nl> + } <nl> + <nl> + func foo1 ( ) - > Int { <nl> + / / / some comments <nl> + var aaa = 1 + 2 <nl> + aaa = aaa + 3 <nl> + if aaa = = 3 { aaa = 4 } <nl> + / / / some comments <nl> + return aaa <nl> + } <nl> + <nl> + func foo2 ( ) - > Int { <nl> + / * some comments * / <nl> + var aaa = 1 + 2 <nl> + aaa = aaa + 3 <nl> + if aaa = = 3 { aaa = 4 } <nl> + / * some comments * / <nl> + return aaa <nl> + } <nl> + <nl> + / / RUN : % target - swift - ide - test - range - pos = 2 : 1 - end - pos 6 : 19 - source - filename % s | % FileCheck % s - check - prefix = CHECK1 <nl> + / / RUN : % target - swift - ide - test - range - pos = 11 : 1 - end - pos 15 : 20 - source - filename % s | % FileCheck % s - check - prefix = CHECK - KIND <nl> + / / RUN : % target - swift - ide - test - range - pos = 20 : 1 - end - pos 24 : 21 - source - filename % s | % FileCheck % s - check - prefix = CHECK - KIND <nl> + / / RUN : % target - swift - ide - test - range - pos = 1 : 1 - end - pos 15 : 20 - source - filename % s | % FileCheck % s - check - prefix = CHECK - INVALID <nl> + <nl> + / / CHECK1 : < Kind > MultiStatement < / Kind > <nl> + / / CHECK1 - NEXT : < Content > / / some comments <nl> + / / CHECK1 - NEXT : var aaa = 1 + 2 <nl> + / / CHECK1 - NEXT : aaa = aaa + 3 <nl> + / / CHECK1 - NEXT : if aaa = = 3 { aaa = 4 } < / Content > <nl> + / / CHECK1 - NEXT : < Type > Void < / Type > <nl> + / / CHECK1 - NEXT : < Context > swift_ide_test . ( file ) . foo ( ) < / Context > <nl> + / / CHECK1 - NEXT : < Declared > aaa < / Declared > < OutscopeReference > true < / OutscopeReference > <nl> + / / CHECK1 - NEXT : < Referenced > aaa < / Referenced > < Type > @ lvalue Int < / Type > <nl> + / / CHECK1 - NEXT : < ASTNodes > 3 < / ASTNodes > <nl> + / / CHECK1 - NEXT : < end > <nl> + <nl> + / / CHECK - KIND : < Kind > MultiStatement < / Kind > <nl> + / / CHECK - INVALID : < Kind > Invalid < / Kind > <nl> mmm a / test / SourceKit / RangeInfo / basic . swift <nl> ppp b / test / SourceKit / RangeInfo / basic . 
swift <nl> struct S { func foo ( ) { } } <nl> / / RUN : % sourcekitd - test - req = range - pos = 4 : 1 - length 25 % s - - % s | % FileCheck % s - check - prefix = CHECK3 <nl> / / RUN : % sourcekitd - test - req = range - pos = 4 : 1 - length 26 % s - - % s | % FileCheck % s - check - prefix = CHECK3 <nl> / / RUN : % sourcekitd - test - req = range - pos = 4 : 1 - length 27 % s - - % s | % FileCheck % s - check - prefix = CHECK3 <nl> - / / RUN : % sourcekitd - test - req = range - pos = 4 : 4 - length 22 % s - - % s | % FileCheck % s - check - prefix = CHECK3 <nl> <nl> / / RUN : % sourcekitd - test - req = range - pos = 5 : 1 - length 12 % s - - % s | % FileCheck % s - check - prefix = CHECK4 <nl> / / RUN : % sourcekitd - test - req = range - pos = 5 : 2 - length 11 % s - - % s | % FileCheck % s - check - prefix = CHECK4 <nl> - / / RUN : % sourcekitd - test - req = range - pos = 5 : 5 - length 8 % s - - % s | % FileCheck % s - check - prefix = CHECK4 <nl> - / / RUN : % sourcekitd - test - req = range - pos = 5 : 5 - length 9 % s - - % s | % FileCheck % s - check - prefix = CHECK4 <nl> <nl> / / RUN : % sourcekitd - test - req = range - pos = 8 : 1 - length 31 % s - - % s | % FileCheck % s - check - prefix = CHECK5 <nl> / / RUN : % sourcekitd - test - req = range - pos = 9 : 1 - length 25 % s - - % s | % FileCheck % s - check - prefix = CHECK6 <nl>
|
Merge remote - tracking branch ' origin / master ' into master - next
|
apple/swift
|
8e822619d8950a6eacb6df7d74301b8bf44d6753
|
2017-03-13T21:48:53Z
|
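For context on the RangeResolver change above: the commit replaces whitespace scanning with a token window computed over the whole buffer, which is what lets leading comments land inside <Content>. Below is a minimal C++ sketch of that window selection, assuming a hypothetical Token type keyed by byte offset (the real code compares SourceLocs via SM.isBeforeInBuffer).

```cpp
#include <algorithm>
#include <vector>

// Hypothetical stand-in for a lexed token; Offset is the byte position of
// its first character in the buffer.
struct Token { unsigned Offset; };

// Map a raw byte range [StartOff, StartOff + Length) onto token indices:
// the first token at or after the start (inclusive) and the last token
// before the end (the commit computes an exclusive iterator and steps back
// by one). Returns false for the erroneous cases the commit guards against.
bool selectTokenWindow(const std::vector<Token>& AllTokens, unsigned StartOff,
                       unsigned Length, unsigned& StartIdx, unsigned& EndIdx) {
  auto TokenComp = [](const Token& LHS, unsigned Off) {
    return LHS.Offset < Off;  // mirrors SM.isBeforeInBuffer(LHS.getLoc(), Loc)
  };
  auto StartIt = std::lower_bound(AllTokens.begin(), AllTokens.end(),
                                  StartOff, TokenComp);
  auto EndIt = std::lower_bound(AllTokens.begin(), AllTokens.end(),
                                StartOff + Length, TokenComp);
  if (StartIt == AllTokens.end() || EndIt == AllTokens.begin())
    return false;  // range lies before the first or after the last token
  StartIdx = static_cast<unsigned>(StartIt - AllTokens.begin());
  EndIdx = static_cast<unsigned>(EndIt - 1 - AllTokens.begin());
  return true;
}
```

Because tokenization runs with AttachCommentsToDecls, getContent() can start at StartTok.getCommentStart(), which is why the new range_info_comments.swift test sees "// some comments" inside <Content>.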
mmm a / src / compiler / js - call - reducer . cc <nl> ppp b / src / compiler / js - call - reducer . cc <nl> Reduction JSCallReducer : : ReduceJSConstruct ( Node * node ) { <nl> DCHECK_EQ ( IrOpcode : : kJSConstruct , node - > opcode ( ) ) ; <nl> ConstructParameters const & p = ConstructParametersOf ( node - > op ( ) ) ; <nl> DCHECK_LE ( 2u , p . arity ( ) ) ; <nl> - int const arity = static_cast < int > ( p . arity ( ) - 2 ) ; <nl> + int arity = static_cast < int > ( p . arity ( ) - 2 ) ; <nl> Node * target = NodeProperties : : GetValueInput ( node , 0 ) ; <nl> Node * new_target = NodeProperties : : GetValueInput ( node , arity + 1 ) ; <nl> Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> Reduction JSCallReducer : : ReduceJSConstruct ( Node * node ) { <nl> / / Try to specialize JSConstruct { node } s with constant { target } s . <nl> HeapObjectMatcher m ( target ) ; <nl> if ( m . HasValue ( ) ) { <nl> + / / Raise a TypeError if the { target } is not a constructor . <nl> + if ( ! m . Value ( ) - > IsConstructor ( ) ) { <nl> + NodeProperties : : ReplaceValueInputs ( node , target ) ; <nl> + NodeProperties : : ChangeOp ( node , <nl> + javascript ( ) - > CallRuntime ( <nl> + Runtime : : kThrowConstructedNonConstructable ) ) ; <nl> + return Changed ( node ) ; <nl> + } <nl> + <nl> if ( m . Value ( ) - > IsJSFunction ( ) ) { <nl> Handle < JSFunction > function = Handle < JSFunction > : : cast ( m . Value ( ) ) ; <nl> <nl> - / / Raise a TypeError if the { target } is not a constructor . <nl> - if ( ! function - > IsConstructor ( ) ) { <nl> - NodeProperties : : ReplaceValueInputs ( node , target ) ; <nl> - NodeProperties : : ChangeOp ( <nl> - node , javascript ( ) - > CallRuntime ( <nl> - Runtime : : kThrowConstructedNonConstructable ) ) ; <nl> - return Changed ( node ) ; <nl> - } <nl> - <nl> / / Don ' t inline cross native context . <nl> if ( function - > native_context ( ) ! = * native_context ( ) ) return NoChange ( ) ; <nl> <nl> Reduction JSCallReducer : : ReduceJSConstruct ( Node * node ) { <nl> return Changed ( node ) ; <nl> } <nl> } <nl> + } else if ( m . Value ( ) - > IsJSBoundFunction ( ) ) { <nl> + Handle < JSBoundFunction > function = <nl> + Handle < JSBoundFunction > : : cast ( m . Value ( ) ) ; <nl> + Handle < JSReceiver > bound_target_function ( <nl> + function - > bound_target_function ( ) , isolate ( ) ) ; <nl> + Handle < FixedArray > bound_arguments ( function - > bound_arguments ( ) , <nl> + isolate ( ) ) ; <nl> + <nl> + / / Patch { node } to use [ [ BoundTargetFunction ] ] . <nl> + NodeProperties : : ReplaceValueInput ( <nl> + node , jsgraph ( ) - > Constant ( bound_target_function ) , 0 ) ; <nl> + <nl> + / / Patch { node } to use [ [ BoundTargetFunction ] ] <nl> + / / as new . target if { new_target } equals { target } . <nl> + NodeProperties : : ReplaceValueInput ( <nl> + node , <nl> + graph ( ) - > NewNode ( common ( ) - > Select ( MachineRepresentation : : kTagged ) , <nl> + graph ( ) - > NewNode ( simplified ( ) - > ReferenceEqual ( ) , <nl> + target , new_target ) , <nl> + jsgraph ( ) - > Constant ( bound_target_function ) , <nl> + new_target ) , <nl> + arity + 1 ) ; <nl> + <nl> + / / Insert the [ [ BoundArguments ] ] for { node } . <nl> + for ( int i = 0 ; i < bound_arguments - > length ( ) ; + + i ) { <nl> + node - > InsertInput ( <nl> + graph ( ) - > zone ( ) , i + 1 , <nl> + jsgraph ( ) - > Constant ( handle ( bound_arguments - > get ( i ) , isolate ( ) ) ) ) ; <nl> + arity + + ; <nl> + } <nl> + <nl> + / / Update the JSConstruct operator on { node } . 
<nl> + NodeProperties : : ChangeOp ( <nl> + node , <nl> + javascript ( ) - > Construct ( arity + 2 , p . frequency ( ) , VectorSlotPair ( ) ) ) ; <nl> + <nl> + / / Try to further reduce the JSConstruct { node } . <nl> + Reduction const reduction = ReduceJSConstruct ( node ) ; <nl> + return reduction . Changed ( ) ? reduction : Changed ( node ) ; <nl> + } <nl> + <nl> + / / TODO ( bmeurer ) : Also support optimizing proxies here . <nl> + } <nl> + <nl> + / / If { target } is the result of a JSCreateBoundFunction operation , <nl> + / / we can just fold the construction and construct the bound target <nl> + / / function directly instead . <nl> + if ( target - > opcode ( ) = = IrOpcode : : kJSCreateBoundFunction ) { <nl> + Node * bound_target_function = NodeProperties : : GetValueInput ( target , 0 ) ; <nl> + int const bound_arguments_length = <nl> + static_cast < int > ( CreateBoundFunctionParametersOf ( target - > op ( ) ) . arity ( ) ) ; <nl> + <nl> + / / Patch the { node } to use [ [ BoundTargetFunction ] ] . <nl> + NodeProperties : : ReplaceValueInput ( node , bound_target_function , 0 ) ; <nl> + <nl> + / / Patch { node } to use [ [ BoundTargetFunction ] ] <nl> + / / as new . target if { new_target } equals { target } . <nl> + NodeProperties : : ReplaceValueInput ( <nl> + node , <nl> + graph ( ) - > NewNode ( common ( ) - > Select ( MachineRepresentation : : kTagged ) , <nl> + graph ( ) - > NewNode ( simplified ( ) - > ReferenceEqual ( ) , <nl> + target , new_target ) , <nl> + bound_target_function , new_target ) , <nl> + arity + 1 ) ; <nl> + <nl> + / / Insert the [ [ BoundArguments ] ] for { node } . <nl> + for ( int i = 0 ; i < bound_arguments_length ; + + i ) { <nl> + Node * value = NodeProperties : : GetValueInput ( target , 2 + i ) ; <nl> + node - > InsertInput ( graph ( ) - > zone ( ) , 1 + i , value ) ; <nl> + arity + + ; <nl> } <nl> <nl> - / / TODO ( bmeurer ) : Also support optimizing bound functions and proxies here . <nl> + / / Update the JSConstruct operator on { node } . <nl> + NodeProperties : : ChangeOp ( <nl> + node , <nl> + javascript ( ) - > Construct ( arity + 2 , p . frequency ( ) , VectorSlotPair ( ) ) ) ; <nl> + <nl> + / / Try to further reduce the JSConstruct { node } . <nl> + Reduction const reduction = ReduceJSConstruct ( node ) ; <nl> + return reduction . Changed ( ) ? reduction : Changed ( node ) ; <nl> } <nl> <nl> return NoChange ( ) ; <nl> mmm a / test / mjsunit / compiler / function - bind . js <nl> ppp b / test / mjsunit / compiler / function - bind . js <nl> <nl> % OptimizeFunctionOnNextCall ( foo ) ; <nl> assertEquals ( 2 , foo ( inc ) ) ; <nl> } ) ( ) ; <nl> + <nl> + ( function ( ) { <nl> + const A = class A { } ; <nl> + const B = A . bind ( ) ; <nl> + <nl> + function foo ( ) { return new B ; } <nl> + <nl> + assertInstanceof ( foo ( ) , A ) ; <nl> + assertInstanceof ( foo ( ) , B ) ; <nl> + % OptimizeFunctionOnNextCall ( foo ) ; <nl> + assertInstanceof ( foo ( ) , A ) ; <nl> + assertInstanceof ( foo ( ) , B ) ; <nl> + } ) ( ) ; <nl> + <nl> + ( function ( ) { <nl> + const A = class A { <nl> + constructor ( x , y , z ) { <nl> + this . x = x ; <nl> + this . y = y ; <nl> + this . z = z ; <nl> + } <nl> + } ; <nl> + const B = A . bind ( null , 1 , 2 ) ; <nl> + <nl> + function foo ( z ) { return new B ( z ) ; } <nl> + <nl> + assertEquals ( 1 , foo ( 3 ) . x ) ; <nl> + assertEquals ( 2 , foo ( 3 ) . y ) ; <nl> + assertEquals ( 3 , foo ( 3 ) . z ) ; <nl> + % OptimizeFunctionOnNextCall ( foo ) ; <nl> + assertEquals ( 1 , foo ( 3 ) . 
x ) ; <nl> + assertEquals ( 2 , foo ( 3 ) . y ) ; <nl> + assertEquals ( 3 , foo ( 3 ) . z ) ; <nl> + } ) ( ) ; <nl> + <nl> + ( function ( ) { <nl> + const A = class A { } ; <nl> + <nl> + function foo ( ) { <nl> + const B = A . bind ( ) ; <nl> + return new B ; <nl> + } <nl> + <nl> + assertInstanceof ( foo ( ) , A ) ; <nl> + assertInstanceof ( foo ( ) , A ) ; <nl> + % OptimizeFunctionOnNextCall ( foo ) ; <nl> + assertInstanceof ( foo ( ) , A ) ; <nl> + } ) ( ) ; <nl> + <nl> + ( function ( ) { <nl> + const A = class A { <nl> + constructor ( x , y , z ) { <nl> + this . x = x ; <nl> + this . y = y ; <nl> + this . z = z ; <nl> + } <nl> + } ; <nl> + <nl> + function foo ( z ) { <nl> + const B = A . bind ( null , 1 , 2 ) ; <nl> + return new B ( z ) ; <nl> + } <nl> + <nl> + assertEquals ( 1 , foo ( 3 ) . x ) ; <nl> + assertEquals ( 2 , foo ( 3 ) . y ) ; <nl> + assertEquals ( 3 , foo ( 3 ) . z ) ; <nl> + % OptimizeFunctionOnNextCall ( foo ) ; <nl> + assertEquals ( 1 , foo ( 3 ) . x ) ; <nl> + assertEquals ( 2 , foo ( 3 ) . y ) ; <nl> + assertEquals ( 3 , foo ( 3 ) . z ) ; <nl> + } ) ( ) ; <nl>
|
[ turbofan ] Handle JSBoundFunction targets for JSConstruct .
|
v8/v8
|
301bc628d6bb18d2355a8e9af7eb6acb65eda4f5
|
2017-11-28T11:52:55Z
|
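For the TurboFan change above: when the construct target is a JSBoundFunction (or a JSCreateBoundFunction node), the reducer rewrites the JSConstruct in place — the target becomes [[BoundTargetFunction]], new.target is redirected when it equaled the bound function, and [[BoundArguments]] are prepended. The C++ below is only an illustrative model of that rewrite with hypothetical Value/BoundFunction types, not V8 API; the real reducer emits a Select node because target == new.target is generally only known at runtime.

```cpp
#include <string>
#include <vector>

struct Value { std::string name; };    // hypothetical JS value handle

struct BoundFunction {
  Value bound_target_function;         // [[BoundTargetFunction]]
  std::vector<Value> bound_arguments;  // [[BoundArguments]]
};

struct ConstructSite {                 // models `new target(args...)`
  Value target;
  Value new_target;
  std::vector<Value> args;
};

// Fold `new B(z)` where B = A.bind(null, x, y) into `new A(x, y, z)`.
void reduceConstruct(ConstructSite& site, const BoundFunction& fn,
                     bool new_target_equals_target) {
  site.target = fn.bound_target_function;
  if (new_target_equals_target)        // a Select node in the real reducer
    site.new_target = fn.bound_target_function;
  site.args.insert(site.args.begin(), fn.bound_arguments.begin(),
                   fn.bound_arguments.end());
}
```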
mmm a / include / swift / Remote / MetadataReader . h <nl> ppp b / include / swift / Remote / MetadataReader . h <nl> class MetadataReader { <nl> if ( ! Reader - > readBytes ( RemoteAddress ( ExistentialAddress ) , <nl> ( uint8_t * ) & Container , sizeof ( Container ) ) ) <nl> return None ; <nl> - auto MetadataAddress = reinterpret_cast < StoredPointer > ( Container . Type ) ; <nl> + auto MetadataAddress = static_cast < StoredPointer > ( Container . Type ) ; <nl> auto Metadata = readMetadata ( MetadataAddress ) ; <nl> if ( ! Metadata ) <nl> return None ; <nl>
|
Remote : loosen a cast
|
apple/swift
|
40c3655838aadf1974e55aedde8bef7ef279deb5
|
2019-02-25T19:19:47Z
|
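On the "loosen a cast" change above: reinterpret_cast cannot convert between two distinct integral types, so when the remotely-read field and StoredPointer differ in width (e.g., inspecting a 32-bit target), only static_cast's ordinary value conversion compiles. A minimal sketch, with illustrative type widths rather than Swift's actual ones:

```cpp
#include <cstdint>

using StoredPointer = std::uint32_t;  // assumed: the target's pointer width

struct ExistentialContainer {
  std::uint64_t Type;  // remote metadata pointer read into a wider slot
};

StoredPointer metadataAddress(const ExistentialContainer& c) {
  // OK: static_cast performs an ordinary integer value conversion.
  // Ill-formed alternative: reinterpret_cast<StoredPointer>(c.Type) —
  // reinterpret_cast has no integral-to-integral conversion between
  // distinct types, only conversions involving pointers.
  return static_cast<StoredPointer>(c.Type);
}
```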
mmm a / doc / classes / RigidBody . xml <nl> ppp b / doc / classes / RigidBody . xml <nl> <nl> < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> < class name = " RigidBody " inherits = " PhysicsBody " category = " Core " version = " 3 . 0 . alpha . custom_build " > <nl> < brief_description > <nl> - Rigid body node . <nl> + Physics Body whose position is determined through physics simulation in 3D space . <nl> < / brief_description > <nl> < description > <nl> - Rigid body node . This node is used for placing rigid bodies in the scene . It can contain a number of shapes , and also shift mode between regular Rigid body , Kinematic , Character or Static . <nl> + This is the node that implements full 3D physics . This means that you do not control a RigidBody directly . Instead you can apply forces to it ( gravity , impulses , etc . ) , and the physics simulation will calculate the resulting movement , collision , bouncing , rotating , etc . <nl> + This node can use custom force integration , for writing complex physics motion behavior per node . <nl> + This node can shift state between regular Rigid body , Kinematic , Character or Static . <nl> + Character mode forbids this node from being rotated . <nl> + As a warning , don ' t change RigidBody ' s position every frame or very often . Sporadic changes work fine , but physics runs at a different granularity ( fixed hz ) than usual rendering ( process callback ) and maybe even in a separate thread , so changing this from a process loop will yield strange behavior . <nl> < / description > <nl> < tutorials > <nl> < / tutorials > <nl> <nl> < / methods > <nl> < members > <nl> < member name = " angular_damp " type = " float " setter = " set_angular_damp " getter = " get_angular_damp " > <nl> - Dampens rotational forces of the Rigid body by the ' angular_damp ' rate . <nl> + Damps RigidBody ' s rotational forces . <nl> < / member > <nl> < member name = " angular_velocity " type = " Vector3 " setter = " set_angular_velocity " getter = " get_angular_velocity " > <nl> - The current rotational velocity of the Rigid body <nl> + RigidBody ' s rotational velocity . <nl> < / member > <nl> < member name = " axis_lock " type = " int " setter = " set_axis_lock " getter = " get_axis_lock " enum = " RigidBody . AxisLock " > <nl> Locks the rotational forces to a particular axis , preventing rotations on other axes . <nl> < / member > <nl> < member name = " bounce " type = " float " setter = " set_bounce " getter = " get_bounce " > <nl> - Bounciness of the Rigid body . <nl> + RigidBody ' s bounciness . <nl> < / member > <nl> < member name = " can_sleep " type = " bool " setter = " set_can_sleep " getter = " is_able_to_sleep " > <nl> - If true , the Rigid body will no longer calculate forces when there is no movement and will act as a static body . It will wake up when other forces are applied through other collisions or when the ' apply_impulse ' method is used . <nl> + If [ code ] true [ / code ] the RigidBody will not calculate forces and will act as a static body while there is no movement . It will wake up when forces are applied through other collisions or when the [ code ] apply_impulse [ / code ] method is used . <nl> < / member > <nl> < member name = " contact_monitor " type = " bool " setter = " set_contact_monitor " getter = " is_contact_monitor_enabled " > <nl> - If true , the Rigid body will emit signals when it collides with another Rigid body . <nl> + If true , the RigidBody will emit signals when it collides with another RigidBody . 
<nl> < / member > <nl> < member name = " contacts_reported " type = " int " setter = " set_max_contacts_reported " getter = " get_max_contacts_reported " > <nl> The maximum contacts to report . Bodies can keep a log of the contacts with other bodies , this is enabled by setting the maximum amount of contacts reported to a number greater than 0 . <nl> < / member > <nl> < member name = " continuous_cd " type = " bool " setter = " set_use_continuous_collision_detection " getter = " is_using_continuous_collision_detection " > <nl> - Continuous collision detection tries to predict where a moving body will collide , instead of moving it and correcting its movement if it collided . The first is more precise , and misses less impacts by small , fast - moving objects . The second is faster to compute , but can miss small , fast - moving objects . <nl> + If [ code ] true [ / code ] continuous collision detection is used . <nl> + Continuous collision detection tries to predict where a moving body will collide , instead of moving it and correcting its movement if it collided . Continuous collision detection is more precise , and misses less impacts by small , fast - moving objects . Not using continuous collision detection is faster to compute , but can miss small , fast - moving objects . <nl> < / member > <nl> < member name = " custom_integrator " type = " bool " setter = " set_use_custom_integrator " getter = " is_using_custom_integrator " > <nl> - If true , internal force integration will be disabled ( like gravity or air friction ) for this body . Other than collision response , the body will only move as determined by the [ method _integrate_forces ] function , if defined . <nl> + If [ code ] true [ / code ] internal force integration will be disabled ( like gravity or air friction ) for this body . Other than collision response , the body will only move as determined by the [ method _integrate_forces ] function , if defined . <nl> < / member > <nl> < member name = " friction " type = " float " setter = " set_friction " getter = " get_friction " > <nl> The body friction , from 0 ( frictionless ) to 1 ( max friction ) . <nl> < / member > <nl> < member name = " gravity_scale " type = " float " setter = " set_gravity_scale " getter = " get_gravity_scale " > <nl> - The ' gravity_scale ' for this Rigid body will be multiplied by the global 3d gravity setting found in " Project & gt ; Project Settings & gt ; Physics & gt ; 3d " . A value of 1 will be normal gravity , 2 will apply double gravity , and 0 . 5 will apply half gravity to this object . <nl> + This is multiplied by the global 3D gravity setting found in " Project & gt ; Project Settings & gt ; Physics & gt ; 3d " to produce RigidBody ' s gravity . E . g . a value of 1 will be normal gravity , 2 will apply double gravity , and 0 . 5 will apply half gravity to this object . <nl> < / member > <nl> < member name = " linear_damp " type = " float " setter = " set_linear_damp " getter = " get_linear_damp " > <nl> - The linear damp for this body . Default of - 1 , cannot be less than - 1 . If this value is different from - 1 , any linear damp derived from the world or areas will be overridden . <nl> + RigidBody ' s linear damp . Default value : - 1 , cannot be less than - 1 . If this value is different from - 1 , any linear damp derived from the world or areas will be overridden . 
<nl> < / member > <nl> < member name = " linear_velocity " type = " Vector3 " setter = " set_linear_velocity " getter = " get_linear_velocity " > <nl> - The body linear velocity . Can be used sporadically , but [ b ] DON ' T SET THIS IN EVERY FRAME [ / b ] , because physics may run in another thread and runs at a different granularity . Use [ method _integrate_forces ] as your process loop for precise control of the body state . <nl> + RigidBody ' s linear velocity . Can be used sporadically , but [ b ] DON ' T SET THIS IN EVERY FRAME [ / b ] , because physics may run in another thread and runs at a different granularity . Use [ method _integrate_forces ] as your process loop for precise control of the body state . <nl> < / member > <nl> < member name = " mass " type = " float " setter = " set_mass " getter = " get_mass " > <nl> - The body mass . <nl> + RigidBody ' s mass . <nl> < / member > <nl> < member name = " mode " type = " int " setter = " set_mode " getter = " get_mode " enum = " RigidBody . Mode " > <nl> The body mode from the MODE_ * enum . Modes include : MODE_STATIC , MODE_KINEMATIC , MODE_RIGID , and MODE_CHARACTER . <nl> < / member > <nl> < member name = " sleeping " type = " bool " setter = " set_sleeping " getter = " is_sleeping " > <nl> - The current ' sleeping ' state of the Rigid body . <nl> + If [ code ] true [ / code ] RigidBody is sleeping and will not calculate forces until woken up by a collision or the [ code ] apply_impulse [ / code ] method . <nl> < / member > <nl> < member name = " weight " type = " float " setter = " set_weight " getter = " get_weight " > <nl> - The body weight given standard earth - weight ( gravity 9 . 8 ) . <nl> + RigidBody ' s weight based on its mass and the global 3D gravity . Global values are set in " Project & gt ; Project Settings & gt ; Physics & gt ; 3d " . <nl> < / member > <nl> < / members > <nl> < signals > <nl> mmm a / doc / classes / RigidBody2D . xml <nl> ppp b / doc / classes / RigidBody2D . xml <nl> <nl> < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> < class name = " RigidBody2D " inherits = " PhysicsBody2D " category = " Core " version = " 3 . 0 . alpha . custom_build " > <nl> < brief_description > <nl> - Rigid body 2D node . <nl> + Physics Body whose position is determined through physics simulation in 2D space . <nl> < / brief_description > <nl> < description > <nl> - Rigid body 2D node . This node is used for placing rigid bodies in the scene . It can contain a number of shapes , and also shift state between regular Rigid body , Kinematic , Character or Static . <nl> - Character mode forbids the node from being rotated . This node can have a custom force integrator function , for writing complex physics motion behavior per node . <nl> - As a warning , don ' t change this node position every frame or very often . Sporadic changes work fine , but physics runs at a different granularity ( fixed hz ) than usual rendering ( process callback ) and maybe even in a separate thread , so changing this from a process loop will yield strange behavior . <nl> + This is the node that implements full 2D physics . This means that you do not control a RigidBody2D directly . Instead you can apply forces to it ( gravity , impulses , etc . ) , and the physics simulation will calculate the resulting movement , collision , bouncing , rotating , etc . <nl> + This node can use custom force integration , for writing complex physics motion behavior per node . 
<nl> + This node can shift state between regular Rigid body , Kinematic , Character or Static . <nl> + Character mode forbids this node from being rotated . <nl> + As a warning , don ' t change RigidBody2D ' s position every frame or very often . Sporadic changes work fine , but physics runs at a different granularity ( fixed hz ) than usual rendering ( process callback ) and maybe even in a separate thread , so changing this from a process loop will yield strange behavior . <nl> < / description > <nl> < tutorials > <nl> < / tutorials > <nl> <nl> < / methods > <nl> < members > <nl> < member name = " angular_damp " type = " float " setter = " set_angular_damp " getter = " get_angular_damp " > <nl> + Damps RigidBody2D ' s rotational forces . <nl> < / member > <nl> < member name = " angular_velocity " type = " float " setter = " set_angular_velocity " getter = " get_angular_velocity " > <nl> + RigidBody2D ' s rotational velocity . <nl> < / member > <nl> < member name = " bounce " type = " float " setter = " set_bounce " getter = " get_bounce " > <nl> + RigidBody2D ' s bounciness . <nl> < / member > <nl> < member name = " can_sleep " type = " bool " setter = " set_can_sleep " getter = " is_able_to_sleep " > <nl> + If [ code ] true [ / code ] RigidBody2D will not calculate forces and will act as a static body while there is no movement . It will wake up when other forces are applied through other collisions or when the [ code ] apply_impulse [ / code ] method is used . Default value : [ code ] true [ / code ] <nl> < / member > <nl> < member name = " contact_monitor " type = " bool " setter = " set_contact_monitor " getter = " is_contact_monitor_enabled " > <nl> + If [ code ] true [ / code ] RigidBody2D will emit signals when it collides with another RigidBody2D . <nl> < / member > <nl> < member name = " contacts_reported " type = " int " setter = " set_max_contacts_reported " getter = " get_max_contacts_reported " > <nl> + The maximum contacts to report . Bodies can keep a log of the contacts with other bodies , this is enabled by setting the maximum amount of contacts reported to a number greater than 0 . <nl> < / member > <nl> < member name = " continuous_cd " type = " int " setter = " set_continuous_collision_detection_mode " getter = " get_continuous_collision_detection_mode " enum = " RigidBody2D . CCDMode " > <nl> + If [ code ] true [ / code ] continuous collision detection is used . Default value : [ code ] false [ / code ] <nl> + Continuous collision detection tries to predict where a moving body will collide , instead of moving it and correcting its movement if it collided . Continuous collision detection is more precise , and misses less impacts by small , fast - moving objects . Not using continuous collision detection is faster to compute , but can miss small , fast - moving objects . <nl> < / member > <nl> < member name = " custom_integrator " type = " bool " setter = " set_use_custom_integrator " getter = " is_using_custom_integrator " > <nl> + If [ code ] true [ / code ] internal force integration will be disabled ( like gravity or air friction ) for this body . Other than collision response , the body will only move as determined by the [ method _integrate_forces ] function , if defined . <nl> < / member > <nl> < member name = " friction " type = " float " setter = " set_friction " getter = " get_friction " > <nl> + The body friction , from 0 ( frictionless ) to 1 ( max friction ) . 
<nl> < / member > <nl> < member name = " gravity_scale " type = " float " setter = " set_gravity_scale " getter = " get_gravity_scale " > <nl> + This is multiplied by the global 2D gravity setting found in " Project & gt ; Project Settings & gt ; Physics & gt ; 2d " to produce RigidBody2D ' s gravity . E . g . a value of 1 will be normal gravity , 2 will apply double gravity , and 0 . 5 will apply half gravity to this object . <nl> < / member > <nl> < member name = " linear_damp " type = " float " setter = " set_linear_damp " getter = " get_linear_damp " > <nl> + RigidBody2D ' s linear damp . Default of - 1 , cannot be less than - 1 . If this value is different from - 1 , any linear damp derived from the world or areas will be overridden . <nl> < / member > <nl> < member name = " linear_velocity " type = " Vector2 " setter = " set_linear_velocity " getter = " get_linear_velocity " > <nl> + RigidBody2D ' s linear velocity . Can be used sporadically , but [ b ] DON ' T SET THIS IN EVERY FRAME [ / b ] , because physics may run in another thread and runs at a different granularity . Use [ method _integrate_forces ] as your process loop for precise control of the body state . <nl> < / member > <nl> < member name = " mass " type = " float " setter = " set_mass " getter = " get_mass " > <nl> + RigidBody2D ' s mass . <nl> < / member > <nl> < member name = " mode " type = " int " setter = " set_mode " getter = " get_mode " enum = " RigidBody2D . Mode " > <nl> + The body mode from the MODE_ * enum . Modes include : MODE_STATIC , MODE_KINEMATIC , MODE_RIGID , and MODE_CHARACTER . <nl> < / member > <nl> < member name = " sleeping " type = " bool " setter = " set_sleeping " getter = " is_sleeping " > <nl> + If [ code ] true [ / code ] RigidBody2D is sleeping and will not calculate forces until woken up by a collision or the [ code ] apply_impulse [ / code ] method . <nl> < / member > <nl> < member name = " weight " type = " float " setter = " set_weight " getter = " get_weight " > <nl> + RigidBody2D ' s weight based on its mass and the global 2D gravity . Global values are set in " Project & gt ; Project Settings & gt ; Physics & gt ; 2d " . <nl> < / member > <nl> < / members > <nl> < signals > <nl>
|
Modified / Added documentation for RigidBody and RigidBody2D
|
godotengine/godot
|
ba568456c6bef386c8b0a7be4a5fda56661efcf4
|
2017-09-18T17:18:05Z
|
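The rewritten RigidBody descriptions above hinge on one practical point: physics integrates at a fixed tick (possibly on another thread), while the process callback runs once per rendered frame, so writing the position every frame fights the solver. An engine-agnostic C++ sketch of the two loops, with hypothetical types:

```cpp
struct Body {
  float position = 0.0f;
  float velocity = 0.0f;
  float pending_impulse = 0.0f;
  // Safe interface: queued here, consumed by the fixed-rate solver.
  void apply_impulse(float i) { pending_impulse += i; }
};

// Fixed-rate physics tick — the only place position should change.
void physics_tick(Body& b, float fixed_dt) {
  b.velocity += b.pending_impulse;
  b.pending_impulse = 0.0f;
  b.position += b.velocity * fixed_dt;
}

// Variable-rate process callback (runs once per rendered frame).
void process_frame(Body& b) {
  // b.position = 5.0f;   // BAD: overwritten/raced by the solver -> jitter
  b.apply_impulse(0.1f);  // GOOD: let the simulation move the body
}
```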
mmm a / include / nlohmann / json . hpp <nl> ppp b / include / nlohmann / json . hpp <nl> class basic_json <nl> not std : : is_same < ValueType , typename string_t : : value_type > : : value and <nl> not detail : : is_basic_json < ValueType > : : value <nl> and not std : : is_same < ValueType , std : : initializer_list < typename string_t : : value_type > > : : value <nl> - # if defined ( JSON_HAS_CPP_17 ) & & ( defined ( __GNUC__ ) | | ( defined ( _MSC_VER ) and _MSC_VER < = 1914 ) ) <nl> + # if defined ( JSON_HAS_CPP_17 ) & & ( defined ( __GNUC__ ) | | ( defined ( _MSC_VER ) and _MSC_VER > = 1910 and _MSC_VER < = 1914 ) ) <nl> and not std : : is_same < ValueType , typename std : : string_view > : : value <nl> # endif <nl> and detail : : is_detected < detail : : get_template_function , const basic_json_t & , ValueType > : : value <nl> mmm a / single_include / nlohmann / json . hpp <nl> ppp b / single_include / nlohmann / json . hpp <nl> class basic_json <nl> not std : : is_same < ValueType , typename string_t : : value_type > : : value and <nl> not detail : : is_basic_json < ValueType > : : value <nl> and not std : : is_same < ValueType , std : : initializer_list < typename string_t : : value_type > > : : value <nl> - # if defined ( JSON_HAS_CPP_17 ) & & ( defined ( __GNUC__ ) | | ( defined ( _MSC_VER ) and _MSC_VER < = 1914 ) ) <nl> + # if defined ( JSON_HAS_CPP_17 ) & & ( defined ( __GNUC__ ) | | ( defined ( _MSC_VER ) and _MSC_VER > = 1910 and _MSC_VER < = 1914 ) ) <nl> and not std : : is_same < ValueType , typename std : : string_view > : : value <nl> # endif <nl> and detail : : is_detected < detail : : get_template_function , const basic_json_t & , ValueType > : : value <nl>
|
Merge pull request from dota17 / MSC_VER
|
nlohmann/json
|
d70d06ae41d7b3241e16d366b3b67b5aebd39e9e
|
2020-05-27T06:49:19Z
|
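The guard change above narrows the std::string_view exclusion from "any MSVC up to 19.14" to MSVC 1910–1914 only (VS 2017 15.0 through 15.7), so older toolchains are no longer caught by a C++17 check they cannot satisfy. The exclusion only disables the implicit-conversion SFINAE path; explicit retrieval is unaffected, as in this small example against the real library:

```cpp
#include <nlohmann/json.hpp>
#include <iostream>
#include <string>

int main() {
  nlohmann::json j = "hello";
  // Explicit get<>() through std::string works on every supported compiler;
  // only implicit conversion to std::string_view is opted out under the guard.
  auto s = j.get<std::string>();
  std::cout << s << " (" << j.type_name() << ")\n";  // hello (string)
  return 0;
}
```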
mmm a / test / Index / index_keypath_member_lookup . swift <nl> ppp b / test / Index / index_keypath_member_lookup . swift <nl> func testExplicit ( r : Lens < Rectangle > , a : Lens < [ Int ] > ) { <nl> / / CHECK : [ [ EA_LINE ] ] : 8 | instance - property / subscript / Swift | subscript ( dynamicMember : ) | [ [ SUB_USR ] ] | Ref , Read , RelCont | rel : 1 <nl> / / CHECK : [ [ EA_LINE ] ] : 26 | instance - property / subscript / Swift | subscript ( _ : ) | s : SayxSicip | Ref , Read , RelCont | rel : 1 <nl> } <nl> + <nl> + / / Don ' t crash : rdar63558609 <nl> + / / <nl> + @ dynamicMemberLookup <nl> + protocol Foo { <nl> + var prop : Bar { get } <nl> + / / CHECK : [ [ @ LINE - 1 ] ] : 7 | instance - property / Swift | prop | [ [ PROP_USR : . * ] ] | Def , RelChild | rel : 1 <nl> + } <nl> + struct Bar { <nl> + let enabled = false <nl> + } <nl> + extension Foo { <nl> + subscript < T > ( dynamicMember keyPath : KeyPath < Bar , T > ) - > T { <nl> + / / CHECK : [ [ @ LINE - 1 ] ] : 3 | instance - property / subscript / Swift | subscript ( dynamicMember : ) | [ [ SUB2_USR : . * ] ] | Def , RelChild | rel : 1 <nl> + / / CHECK : [ [ @ LINE - 2 ] ] : 60 | instance - method / acc - get / Swift | getter : subscript ( dynamicMember : ) | { { . * } } | Def , Dyn , RelChild , RelAcc | rel : 1 <nl> + / / CHECK - NEXT : RelChild , RelAcc | instance - property / subscript / Swift | subscript ( dynamicMember : ) | [ [ SUB2_USR ] ] <nl> + <nl> + prop [ keyPath : keyPath ] <nl> + / / CHECK : [ [ @ LINE - 1 ] ] : 5 | instance - property / Swift | prop | [ [ PROP_USR ] ] | Ref , Read , RelCont | rel : 1 <nl> + } <nl> + } <nl>
|
[ Index ] Add regression test case for @ dynamicMemberLookup on a protocol .
|
apple/swift
|
1f01103cc1500a4ea50255922be9f1675687d385
|
2020-06-03T20:39:11Z
|
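For the regression test above: the extension's subscript(dynamicMember:) takes a KeyPath<Bar, T> and forwards it through the protocol's prop requirement. As a rough C++ analogy only (Swift key paths are richer than this), a pointer-to-member plays the role of the key path:

```cpp
#include <iostream>

struct Bar {
  bool enabled = false;
};

struct Foo {
  Bar prop;
  // Analogue of `subscript<T>(dynamicMember keyPath: KeyPath<Bar, T>) -> T`:
  // forward a member pointer through the stored `prop`.
  template <typename T>
  T get(T Bar::*keyPath) const {
    return prop.*keyPath;  // mirrors `prop[keyPath: keyPath]`
  }
};

int main() {
  Foo f;
  std::cout << std::boolalpha << f.get(&Bar::enabled) << '\n';  // false
}
```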
mmm a / DEPRECATED . md <nl> ppp b / DEPRECATED . md <nl> The following features have been DEPRECATED and will be removed in the specified <nl> * Admin mutations should be sent as POSTs rather than GETs . HTTP GETs will result in an error <nl> status code and will not have their intended effect . Prior to 1 . 7 , GETs can be used for <nl> admin mutations , but a warning is logged . <nl> - * Rate limit service configuration via the ` cluster_name ` field is deprecated . Use ` grpc_service ` <nl> - instead . <nl> - * gRPC service configuration via the ` cluster_names ` field in ` ApiConfigSource ` is deprecated . Use <nl> - ` grpc_services ` instead . Prior to 1 . 7 , a warning is logged . <nl> <nl> # # Version 1 . 6 . 0 ( March 20 , 2018 ) <nl> <nl> * DOWNSTREAM_ADDRESS log formatter is deprecated . Use DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT <nl> instead . <nl> * CLIENT_IP header formatter is deprecated . Use DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT instead . <nl> + * Rate limit service configuration via the ` cluster_name ` field is deprecated . Use ` grpc_service ` <nl> + instead . <nl> + * gRPC service configuration via the ` cluster_names ` field in ` ApiConfigSource ` is deprecated . Use <nl> + ` grpc_services ` instead . <nl> * ' use_original_dst ' field in the v2 LDS API is deprecated . Use listener filters and filter chain <nl> matching instead . <nl> * ` value ` and ` regex ` fields in the ` HeaderMatcher ` message is deprecated . Use the ` exact_match ` <nl> mmm a / source / common / config / subscription_factory . h <nl> ppp b / source / common / config / subscription_factory . h <nl> class SubscriptionFactory { <nl> case envoy : : api : : v2 : : core : : ConfigSource : : kApiConfigSource : { <nl> const envoy : : api : : v2 : : core : : ApiConfigSource & api_config_source = config . api_config_source ( ) ; <nl> Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cm . clusters ( ) , api_config_source ) ; <nl> + const std : : string & cluster_name = api_config_source . cluster_names ( ) [ 0 ] ; <nl> switch ( api_config_source . api_type ( ) ) { <nl> case envoy : : api : : v2 : : core : : ApiConfigSource : : REST_LEGACY : <nl> result . reset ( rest_legacy_constructor ( ) ) ; <nl> break ; <nl> case envoy : : api : : v2 : : core : : ApiConfigSource : : REST : <nl> result . reset ( new HttpSubscriptionImpl < ResourceType > ( <nl> - node , cm , api_config_source . cluster_names ( ) [ 0 ] , dispatcher , random , <nl> + node , cm , cluster_name , dispatcher , random , <nl> Utility : : apiConfigSourceRefreshDelay ( api_config_source ) , <nl> * Protobuf : : DescriptorPool : : generated_pool ( ) - > FindMethodByName ( rest_method ) , stats ) ) ; <nl> break ; <nl> case envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC : { <nl> result . reset ( new GrpcSubscriptionImpl < ResourceType > ( <nl> node , <nl> - Config : : Utility : : factoryForGrpcApiConfigSource ( cm . grpcAsyncClientManager ( ) , <nl> - config . api_config_source ( ) , scope ) <nl> + Config : : Utility : : factoryForApiConfigSource ( cm . grpcAsyncClientManager ( ) , <nl> + config . api_config_source ( ) , scope ) <nl> - > create ( ) , <nl> dispatcher , * Protobuf : : DescriptorPool : : generated_pool ( ) - > FindMethodByName ( grpc_method ) , <nl> stats ) ) ; <nl> mmm a / source / common / config / utility . cc <nl> ppp b / source / common / config / utility . 
cc <nl> void Utility : : checkFilesystemSubscriptionBackingPath ( const std : : string & path ) { <nl> } <nl> } <nl> <nl> - void Utility : : checkApiConfigSourceNames ( <nl> + void Utility : : checkApiConfigSourceSubscriptionBackingCluster ( <nl> + const Upstream : : ClusterManager : : ClusterInfoMap & clusters , <nl> const envoy : : api : : v2 : : core : : ApiConfigSource & api_config_source ) { <nl> - const bool is_grpc = <nl> - ( api_config_source . api_type ( ) = = envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> - <nl> - if ( api_config_source . cluster_names ( ) . size ( ) = = 0 & & <nl> - api_config_source . grpc_services ( ) . size ( ) = = 0 ) { <nl> - throw EnvoyException ( " API configs must have either a gRPC service or a cluster name defined " ) ; <nl> + if ( api_config_source . cluster_names ( ) . size ( ) ! = 1 ) { <nl> + / / TODO ( htuch ) : Add support for multiple clusters , # 1170 . <nl> + throw EnvoyException ( <nl> + " envoy : : api : : v2 : : core : : ConfigSource must have a singleton cluster name specified " ) ; <nl> } <nl> <nl> - if ( is_grpc ) { <nl> - if ( api_config_source . cluster_names ( ) . size ( ) ! = 0 ) { <nl> - ENVOY_LOG_MISC ( warn , " Setting a cluster name for API config source type " <nl> - " envoy : : api : : v2 : : core : : ConfigSource : : GRPC is deprecated " ) ; <nl> - } <nl> - if ( api_config_source . cluster_names ( ) . size ( ) > 1 ) { <nl> - throw EnvoyException ( <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a singleton cluster name specified " ) ; <nl> - } <nl> - if ( api_config_source . grpc_services ( ) . size ( ) > 1 ) { <nl> - throw EnvoyException ( <nl> - " envoy : : api : : v2 : : core : : ConfigSource : : GRPC must have a single gRPC service specified " ) ; <nl> - } <nl> - } else { <nl> - if ( api_config_source . grpc_services ( ) . size ( ) ! = 0 ) { <nl> - throw EnvoyException ( " envoy : : api : : v2 : : core : : ConfigSource , if not of type gRPC , must not have " <nl> - " a gRPC service specified " ) ; <nl> - } <nl> - if ( api_config_source . cluster_names ( ) . size ( ) ! = 1 ) { <nl> - throw EnvoyException ( <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a singleton cluster name specified " ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void Utility : : validateClusterName ( const Upstream : : ClusterManager : : ClusterInfoMap & clusters , <nl> - const std : : string & cluster_name ) { <nl> + const auto & cluster_name = api_config_source . cluster_names ( ) [ 0 ] ; <nl> const auto & it = clusters . find ( cluster_name ) ; <nl> if ( it = = clusters . end ( ) | | it - > second . get ( ) . info ( ) - > addedViaApi ( ) | | <nl> it - > second . get ( ) . info ( ) - > type ( ) = = envoy : : api : : v2 : : Cluster : : EDS ) { <nl> void Utility : : validateClusterName ( const Upstream : : ClusterManager : : ClusterInfoMap <nl> } <nl> } <nl> <nl> - void Utility : : checkApiConfigSourceSubscriptionBackingCluster ( <nl> - const Upstream : : ClusterManager : : ClusterInfoMap & clusters , <nl> - const envoy : : api : : v2 : : core : : ApiConfigSource & api_config_source ) { <nl> - Utility : : checkApiConfigSourceNames ( api_config_source ) ; <nl> - <nl> - const bool is_grpc = <nl> - ( api_config_source . api_type ( ) = = envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> - <nl> - if ( ! api_config_source . cluster_names ( ) . empty ( ) ) { <nl> - / / All API configs of type REST and REST_LEGACY should have cluster names . 
<nl> - / / Additionally , some gRPC API configs might have a cluster name set instead <nl> - / / of an envoy gRPC . <nl> - Utility : : validateClusterName ( clusters , api_config_source . cluster_names ( ) [ 0 ] ) ; <nl> - } else if ( is_grpc ) { <nl> - / / Some ApiConfigSources of type GRPC won ' t have a cluster name , such as if <nl> - / / they ' ve been configured with google_grpc . <nl> - if ( api_config_source . grpc_services ( ) [ 0 ] . has_envoy_grpc ( ) ) { <nl> - / / If an Envoy gRPC exists , we take its cluster name . <nl> - Utility : : validateClusterName ( <nl> - clusters , api_config_source . grpc_services ( ) [ 0 ] . envoy_grpc ( ) . cluster_name ( ) ) ; <nl> - } <nl> - } <nl> - / / Otherwise , there is no cluster name to validate . <nl> - } <nl> - <nl> std : : chrono : : milliseconds Utility : : apiConfigSourceRefreshDelay ( <nl> const envoy : : api : : v2 : : core : : ApiConfigSource & api_config_source ) { <nl> if ( ! api_config_source . has_refresh_delay ( ) ) { <nl> void Utility : : checkObjNameLength ( const std : : string & error_prefix , const std : : str <nl> } <nl> } <nl> <nl> - Grpc : : AsyncClientFactoryPtr Utility : : factoryForGrpcApiConfigSource ( <nl> - Grpc : : AsyncClientManager & async_client_manager , <nl> - const envoy : : api : : v2 : : core : : ApiConfigSource & api_config_source , Stats : : Scope & scope ) { <nl> - Utility : : checkApiConfigSourceNames ( api_config_source ) ; <nl> + Grpc : : AsyncClientFactoryPtr <nl> + Utility : : factoryForApiConfigSource ( Grpc : : AsyncClientManager & async_client_manager , <nl> + const envoy : : api : : v2 : : core : : ApiConfigSource & api_config_source , <nl> + Stats : : Scope & scope ) { <nl> + ASSERT ( api_config_source . api_type ( ) = = envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> + envoy : : api : : v2 : : core : : GrpcService grpc_service ; <nl> + if ( api_config_source . cluster_names ( ) . empty ( ) ) { <nl> + if ( api_config_source . grpc_services ( ) . empty ( ) ) { <nl> + throw EnvoyException ( <nl> + fmt : : format ( " Missing gRPC services in envoy : : api : : v2 : : core : : ApiConfigSource : { } " , <nl> + api_config_source . DebugString ( ) ) ) ; <nl> + } <nl> + / / TODO ( htuch ) : Implement multiple gRPC services . <nl> + if ( api_config_source . grpc_services ( ) . size ( ) ! = 1 ) { <nl> + throw EnvoyException ( fmt : : format ( " Only singleton gRPC service lists supported in " <nl> + " envoy : : api : : v2 : : core : : ApiConfigSource : { } " , <nl> + api_config_source . DebugString ( ) ) ) ; <nl> + } <nl> + grpc_service . MergeFrom ( api_config_source . grpc_services ( 0 ) ) ; <nl> + } else { <nl> + / / TODO ( htuch ) : cluster_names is deprecated , remove after 1 . 6 . 0 . <nl> + if ( api_config_source . cluster_names ( ) . size ( ) ! = 1 ) { <nl> + throw EnvoyException ( fmt : : format ( " Only singleton cluster name lists supported in " <nl> + " envoy : : api : : v2 : : core : : ApiConfigSource : { } " , <nl> + api_config_source . DebugString ( ) ) ) ; <nl> + } <nl> + grpc_service . mutable_envoy_grpc ( ) - > set_cluster_name ( api_config_source . cluster_names ( 0 ) ) ; <nl> + } <nl> <nl> - return async_client_manager . factoryForGrpcService ( api_config_source . grpc_services ( 0 ) , scope , <nl> - false ) ; <nl> + return async_client_manager . factoryForGrpcService ( grpc_service , scope , false ) ; <nl> } <nl> <nl> } / / namespace Config <nl> mmm a / source / common / config / utility . h <nl> ppp b / source / common / config / utility . 
h <nl> class Utility { <nl> * / <nl> static void checkFilesystemSubscriptionBackingPath ( const std : : string & path ) ; <nl> <nl> - / * * <nl> - * Check the grpc_services and cluster_names for API config sanity . Throws on error . <nl> - * @ param api_config_source the config source to validate . <nl> - * @ throws EnvoyException when an API config has the wrong number of gRPC <nl> - * services or cluster names , depending on expectations set by its API type . <nl> - * / <nl> - static void <nl> - checkApiConfigSourceNames ( const envoy : : api : : v2 : : core : : ApiConfigSource & api_config_source ) ; <nl> - <nl> / * * <nl> * Check the validity of a cluster backing an api config source . Throws on error . <nl> * @ param clusters the clusters currently loaded in the cluster manager . <nl> * @ param api_config_source the config source to validate . <nl> - * @ throws EnvoyException when an API config doesn ' t have a statically defined non - EDS cluster . <nl> - * / <nl> - static void validateClusterName ( const Upstream : : ClusterManager : : ClusterInfoMap & clusters , <nl> - const std : : string & cluster_name ) ; <nl> - <nl> - / * * <nl> - * Potentially calls Utility : : validateClusterName , if a cluster name can be found . <nl> - * @ param clusters the clusters currently loaded in the cluster manager . <nl> - * @ param api_config_source the config source to validate . <nl> - * @ throws EnvoyException when an API config doesn ' t have a statically defined non - EDS cluster . <nl> * / <nl> static void checkApiConfigSourceSubscriptionBackingCluster ( <nl> const Upstream : : ClusterManager : : ClusterInfoMap & clusters , <nl> class Utility { <nl> * @ return Grpc : : AsyncClientFactoryPtr gRPC async client factory . <nl> * / <nl> static Grpc : : AsyncClientFactoryPtr <nl> - factoryForGrpcApiConfigSource ( Grpc : : AsyncClientManager & async_client_manager , <nl> - const envoy : : api : : v2 : : core : : ApiConfigSource & api_config_source , <nl> - Stats : : Scope & scope ) ; <nl> + factoryForApiConfigSource ( Grpc : : AsyncClientManager & async_client_manager , <nl> + const envoy : : api : : v2 : : core : : ApiConfigSource & api_config_source , <nl> + Stats : : Scope & scope ) ; <nl> } ; <nl> <nl> } / / namespace Config <nl> mmm a / source / common / upstream / cluster_manager_impl . cc <nl> ppp b / source / common / upstream / cluster_manager_impl . cc <nl> ClusterManagerImpl : : ClusterManagerImpl ( const envoy : : config : : bootstrap : : v2 : : Boots <nl> if ( bootstrap . dynamic_resources ( ) . has_ads_config ( ) ) { <nl> ads_mux_ . reset ( new Config : : GrpcMuxImpl ( <nl> bootstrap . node ( ) , <nl> - Config : : Utility : : factoryForGrpcApiConfigSource ( <nl> + Config : : Utility : : factoryForApiConfigSource ( <nl> * async_client_manager_ , bootstrap . dynamic_resources ( ) . ads_config ( ) , stats ) <nl> - > create ( ) , <nl> main_thread_dispatcher , <nl> ClusterManagerImpl : : ClusterManagerImpl ( const envoy : : config : : bootstrap : : v2 : : Boots <nl> <nl> if ( cm_config . has_load_stats_config ( ) ) { <nl> const auto & load_stats_config = cm_config . load_stats_config ( ) ; <nl> - load_stats_reporter_ . reset ( <nl> - new LoadStatsReporter ( bootstrap . node ( ) , * this , stats , <nl> - Config : : Utility : : factoryForGrpcApiConfigSource ( <nl> - * async_client_manager_ , load_stats_config , stats ) <nl> - - > create ( ) , <nl> - main_thread_dispatcher ) ) ; <nl> + load_stats_reporter_ . reset ( new LoadStatsReporter ( <nl> + bootstrap . 
node ( ) , * this , stats , <nl> + Config : : Utility : : factoryForApiConfigSource ( * async_client_manager_ , load_stats_config , stats ) <nl> + - > create ( ) , <nl> + main_thread_dispatcher ) ) ; <nl> } <nl> } <nl> <nl> mmm a / test / common / config / subscription_factory_test . cc <nl> ppp b / test / common / config / subscription_factory_test . cc <nl> TEST_F ( SubscriptionFactoryTest , NoConfigSpecifier ) { <nl> " Missing config source specifier in envoy : : api : : v2 : : core : : ConfigSource " ) ; <nl> } <nl> <nl> - TEST_F ( SubscriptionFactoryTest , RestClusterEmpty ) { <nl> + TEST_F ( SubscriptionFactoryTest , WrongClusterNameLength ) { <nl> envoy : : api : : v2 : : core : : ConfigSource config ; <nl> - Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> - <nl> - config . mutable_api_config_source ( ) - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : REST ) ; <nl> - <nl> - EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - subscriptionFromConfigSource ( config ) , EnvoyException , <nl> - " API configs must have either a gRPC service or a cluster name defined " ) ; <nl> - } <nl> - <nl> - TEST_F ( SubscriptionFactoryTest , GrpcClusterEmpty ) { <nl> - envoy : : api : : v2 : : core : : ConfigSource config ; <nl> - Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> - <nl> - config . mutable_api_config_source ( ) - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> - <nl> - EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - subscriptionFromConfigSource ( config ) , EnvoyException , <nl> - " API configs must have either a gRPC service or a cluster name defined " ) ; <nl> - } <nl> - <nl> - TEST_F ( SubscriptionFactoryTest , RestClusterSingleton ) { <nl> - envoy : : api : : v2 : : core : : ConfigSource config ; <nl> - Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> - NiceMock < Upstream : : MockCluster > cluster ; <nl> - <nl> - config . mutable_api_config_source ( ) - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : REST ) ; <nl> - config . mutable_api_config_source ( ) - > mutable_refresh_delay ( ) - > set_seconds ( 1 ) ; <nl> - config . mutable_api_config_source ( ) - > add_cluster_names ( " static_cluster " ) ; <nl> - cluster_map . emplace ( " static_cluster " , cluster ) ; <nl> - <nl> - EXPECT_CALL ( dispatcher_ , createTimer_ ( _ ) ) ; <nl> - EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) . WillOnce ( Return ( false ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , type ( ) ) . WillOnce ( Return ( envoy : : api : : v2 : : Cluster : : STATIC ) ) ; <nl> - subscriptionFromConfigSource ( config ) ; <nl> - } <nl> - <nl> - TEST_F ( SubscriptionFactoryTest , GrpcClusterSingleton ) { <nl> - envoy : : api : : v2 : : core : : ConfigSource config ; <nl> - Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> - NiceMock < Upstream : : MockCluster > cluster ; <nl> - <nl> - config . mutable_api_config_source ( ) - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> - config . mutable_api_config_source ( ) - > mutable_refresh_delay ( ) - > set_seconds ( 1 ) ; <nl> - config . 
mutable_api_config_source ( ) - > add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( <nl> - " static_cluster " ) ; <nl> - cluster_map . emplace ( " static_cluster " , cluster ) ; <nl> - <nl> - envoy : : api : : v2 : : core : : GrpcService expected_grpc_service ; <nl> - expected_grpc_service . mutable_envoy_grpc ( ) - > set_cluster_name ( " static_cluster " ) ; <nl> - <nl> - EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> - EXPECT_CALL ( cm_ , grpcAsyncClientManager ( ) ) . WillOnce ( ReturnRef ( cm_ . async_client_manager_ ) ) ; <nl> - EXPECT_CALL ( cm_ . async_client_manager_ , <nl> - factoryForGrpcService ( ProtoEq ( expected_grpc_service ) , _ , _ ) ) <nl> - . WillOnce ( Invoke ( [ ] ( const envoy : : api : : v2 : : core : : GrpcService & , Stats : : Scope & , bool ) { <nl> - auto async_client_factory = std : : make_unique < Grpc : : MockAsyncClientFactory > ( ) ; <nl> - EXPECT_CALL ( * async_client_factory , create ( ) ) . WillOnce ( Invoke ( [ ] { <nl> - return std : : make_unique < NiceMock < Grpc : : MockAsyncClient > > ( ) ; <nl> - } ) ) ; <nl> - return async_client_factory ; <nl> - } ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) . WillOnce ( Return ( false ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , type ( ) ) . WillOnce ( Return ( envoy : : api : : v2 : : Cluster : : STATIC ) ) ; <nl> - EXPECT_CALL ( dispatcher_ , createTimer_ ( _ ) ) ; <nl> - <nl> - subscriptionFromConfigSource ( config ) ; <nl> - } <nl> - <nl> - TEST_F ( SubscriptionFactoryTest , RestClusterMultiton ) { <nl> - envoy : : api : : v2 : : core : : ConfigSource config ; <nl> - Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> - NiceMock < Upstream : : MockCluster > cluster ; <nl> - <nl> config . mutable_api_config_source ( ) - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : REST ) ; <nl> - <nl> - config . mutable_api_config_source ( ) - > add_cluster_names ( " static_cluster_foo " ) ; <nl> - cluster_map . emplace ( " static_cluster_foo " , cluster ) ; <nl> - <nl> - config . mutable_api_config_source ( ) - > add_cluster_names ( " static_cluster_bar " ) ; <nl> - cluster_map . emplace ( " static_cluster_bar " , cluster ) ; <nl> - <nl> - EXPECT_CALL ( cm_ , clusters ( ) ) . WillRepeatedly ( Return ( cluster_map ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) . WillRepeatedly ( Return ( false ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , type ( ) ) . WillRepeatedly ( Return ( envoy : : api : : v2 : : Cluster : : STATIC ) ) ; <nl> + EXPECT_CALL ( cm_ , clusters ( ) ) ; <nl> EXPECT_THROW_WITH_MESSAGE ( <nl> subscriptionFromConfigSource ( config ) , EnvoyException , <nl> " envoy : : api : : v2 : : core : : ConfigSource must have a singleton cluster name specified " ) ; <nl> - } <nl> - <nl> - TEST_F ( SubscriptionFactoryTest , GrpcClusterMultiton ) { <nl> - envoy : : api : : v2 : : core : : ConfigSource config ; <nl> - Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> - NiceMock < Upstream : : MockCluster > cluster ; <nl> - <nl> - config . mutable_api_config_source ( ) - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> - <nl> - config . mutable_api_config_source ( ) - > add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( <nl> - " static_cluster_foo " ) ; <nl> - cluster_map . emplace ( " static_cluster_foo " , cluster ) ; <nl> - config . 
mutable_api_config_source ( ) - > add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( <nl> - " static_cluster_bar " ) ; <nl> - cluster_map . emplace ( " static_cluster_bar " , cluster ) ; <nl> - <nl> - EXPECT_CALL ( cm_ , clusters ( ) ) . WillRepeatedly ( Return ( cluster_map ) ) ; <nl> - EXPECT_CALL ( cm_ , grpcAsyncClientManager ( ) ) . WillRepeatedly ( ReturnRef ( cm_ . async_client_manager_ ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) . WillRepeatedly ( Return ( false ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , type ( ) ) . WillRepeatedly ( Return ( envoy : : api : : v2 : : Cluster : : STATIC ) ) ; <nl> - <nl> + config . mutable_api_config_source ( ) - > add_cluster_names ( " foo " ) ; <nl> + config . mutable_api_config_source ( ) - > add_cluster_names ( " bar " ) ; <nl> + EXPECT_CALL ( cm_ , clusters ( ) ) ; <nl> EXPECT_THROW_WITH_MESSAGE ( <nl> subscriptionFromConfigSource ( config ) , EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource : : GRPC must have a single gRPC service specified " ) ; <nl> + " envoy : : api : : v2 : : core : : ConfigSource must have a singleton cluster name specified " ) ; <nl> } <nl> <nl> TEST_F ( SubscriptionFactoryTest , FilesystemSubscription ) { <nl> TEST_F ( SubscriptionFactoryTest , LegacySubscription ) { <nl> envoy : : api : : v2 : : core : : ConfigSource config ; <nl> auto * api_config_source = config . mutable_api_config_source ( ) ; <nl> api_config_source - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : REST_LEGACY ) ; <nl> - api_config_source - > add_cluster_names ( " static_cluster " ) ; <nl> + api_config_source - > add_cluster_names ( " eds_cluster " ) ; <nl> Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> Upstream : : MockCluster cluster ; <nl> - cluster_map . emplace ( " static_cluster " , cluster ) ; <nl> + cluster_map . emplace ( " eds_cluster " , cluster ) ; <nl> EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) ; <nl> EXPECT_CALL ( * legacy_subscription_ , start ( _ , _ ) ) ; <nl> - subscriptionFromConfigSource ( config ) - > start ( { " static_cluster " } , callbacks_ ) ; <nl> + subscriptionFromConfigSource ( config ) - > start ( { " foo " } , callbacks_ ) ; <nl> } <nl> <nl> TEST_F ( SubscriptionFactoryTest , HttpSubscription ) { <nl> envoy : : api : : v2 : : core : : ConfigSource config ; <nl> auto * api_config_source = config . mutable_api_config_source ( ) ; <nl> api_config_source - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : REST ) ; <nl> - api_config_source - > add_cluster_names ( " static_cluster " ) ; <nl> + api_config_source - > add_cluster_names ( " eds_cluster " ) ; <nl> api_config_source - > mutable_refresh_delay ( ) - > set_seconds ( 1 ) ; <nl> Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> Upstream : : MockCluster cluster ; <nl> - cluster_map . emplace ( " static_cluster " , cluster ) ; <nl> + cluster_map . emplace ( " eds_cluster " , cluster ) ; <nl> EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> EXPECT_CALL ( * cluster . 
info_ , addedViaApi ( ) ) ; <nl> EXPECT_CALL ( dispatcher_ , createTimer_ ( _ ) ) ; <nl> - EXPECT_CALL ( cm_ , httpAsyncClientForCluster ( " static_cluster " ) ) ; <nl> + EXPECT_CALL ( cm_ , httpAsyncClientForCluster ( " eds_cluster " ) ) ; <nl> EXPECT_CALL ( cm_ . async_client_ , send_ ( _ , _ , _ ) ) <nl> . WillOnce ( Invoke ( [ this ] ( Http : : MessagePtr & request , Http : : AsyncClient : : Callbacks & callbacks , <nl> const absl : : optional < std : : chrono : : milliseconds > & timeout ) { <nl> UNREFERENCED_PARAMETER ( callbacks ) ; <nl> UNREFERENCED_PARAMETER ( timeout ) ; <nl> EXPECT_EQ ( " POST " , std : : string ( request - > headers ( ) . Method ( ) - > value ( ) . c_str ( ) ) ) ; <nl> - EXPECT_EQ ( " static_cluster " , std : : string ( request - > headers ( ) . Host ( ) - > value ( ) . c_str ( ) ) ) ; <nl> + EXPECT_EQ ( " eds_cluster " , std : : string ( request - > headers ( ) . Host ( ) - > value ( ) . c_str ( ) ) ) ; <nl> EXPECT_EQ ( " / v2 / discovery : endpoints " , <nl> std : : string ( request - > headers ( ) . Path ( ) - > value ( ) . c_str ( ) ) ) ; <nl> return & http_request_ ; <nl> } ) ) ; <nl> EXPECT_CALL ( http_request_ , cancel ( ) ) ; <nl> - subscriptionFromConfigSource ( config ) - > start ( { " static_cluster " } , callbacks_ ) ; <nl> + subscriptionFromConfigSource ( config ) - > start ( { " foo " } , callbacks_ ) ; <nl> } <nl> <nl> / / Confirm error when no refresh delay is set ( not checked by schema ) . <nl> TEST_F ( SubscriptionFactoryTest , HttpSubscriptionNoRefreshDelay ) { <nl> envoy : : api : : v2 : : core : : ConfigSource config ; <nl> auto * api_config_source = config . mutable_api_config_source ( ) ; <nl> api_config_source - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : REST ) ; <nl> - api_config_source - > add_cluster_names ( " static_cluster " ) ; <nl> + api_config_source - > add_cluster_names ( " eds_cluster " ) ; <nl> Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> Upstream : : MockCluster cluster ; <nl> - cluster_map . emplace ( " static_cluster " , cluster ) ; <nl> + cluster_map . emplace ( " eds_cluster " , cluster ) ; <nl> EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - subscriptionFromConfigSource ( config ) - > start ( { " static_cluster " } , callbacks_ ) , EnvoyException , <nl> - " refresh_delay is required for REST API configuration sources " ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( subscriptionFromConfigSource ( config ) - > start ( { " foo " } , callbacks_ ) , <nl> + EnvoyException , <nl> + " refresh_delay is required for REST API configuration sources " ) ; <nl> } <nl> <nl> TEST_F ( SubscriptionFactoryTest , GrpcSubscription ) { <nl> envoy : : api : : v2 : : core : : ConfigSource config ; <nl> auto * api_config_source = config . mutable_api_config_source ( ) ; <nl> api_config_source - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> - api_config_source - > add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( " static_cluster " ) ; <nl> + api_config_source - > add_cluster_names ( " eds_cluster " ) ; <nl> envoy : : api : : v2 : : core : : GrpcService expected_grpc_service ; <nl> - expected_grpc_service . mutable_envoy_grpc ( ) - > set_cluster_name ( " static_cluster " ) ; <nl> + expected_grpc_service . 
mutable_envoy_grpc ( ) - > set_cluster_name ( " eds_cluster " ) ; <nl> Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> - NiceMock < Upstream : : MockCluster > cluster ; <nl> - cluster_map . emplace ( " static_cluster " , cluster ) ; <nl> + Upstream : : MockCluster cluster ; <nl> + cluster_map . emplace ( " eds_cluster " , cluster ) ; <nl> EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> + EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> + EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) ; <nl> EXPECT_CALL ( cm_ , grpcAsyncClientManager ( ) ) . WillOnce ( ReturnRef ( cm_ . async_client_manager_ ) ) ; <nl> EXPECT_CALL ( cm_ . async_client_manager_ , <nl> factoryForGrpcService ( ProtoEq ( expected_grpc_service ) , _ , _ ) ) <nl> TEST_F ( SubscriptionFactoryTest , GrpcSubscription ) { <nl> } ) ) ; <nl> EXPECT_CALL ( dispatcher_ , createTimer_ ( _ ) ) ; <nl> EXPECT_CALL ( callbacks_ , onConfigUpdateFailed ( _ ) ) ; <nl> - subscriptionFromConfigSource ( config ) - > start ( { " static_cluster " } , callbacks_ ) ; <nl> + subscriptionFromConfigSource ( config ) - > start ( { " foo " } , callbacks_ ) ; <nl> } <nl> <nl> INSTANTIATE_TEST_CASE_P ( SubscriptionFactoryTestApiConfigSource , <nl> TEST_P ( SubscriptionFactoryTestApiConfigSource , NonExistentCluster ) { <nl> envoy : : api : : v2 : : core : : ConfigSource config ; <nl> auto * api_config_source = config . mutable_api_config_source ( ) ; <nl> api_config_source - > set_api_type ( GetParam ( ) ) ; <nl> - if ( api_config_source - > api_type ( ) = = envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) { <nl> - api_config_source - > add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( <nl> - " static_cluster " ) ; <nl> - } else { <nl> - api_config_source - > add_cluster_names ( " static_cluster " ) ; <nl> - } <nl> + api_config_source - > add_cluster_names ( " eds_cluster " ) ; <nl> Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - subscriptionFromConfigSource ( config ) - > start ( { " static_cluster " } , callbacks_ ) , EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined " <nl> - " non - EDS cluster : ' static_cluster ' does not exist , was added via api , or is an EDS cluster " ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( subscriptionFromConfigSource ( config ) - > start ( { " foo " } , callbacks_ ) , <nl> + EnvoyException , <nl> + " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined " <nl> + " non - EDS cluster : ' eds_cluster ' " <nl> + " does not exist , was added via api , or is an EDS cluster " ) ; <nl> } <nl> <nl> TEST_P ( SubscriptionFactoryTestApiConfigSource , DynamicCluster ) { <nl> envoy : : api : : v2 : : core : : ConfigSource config ; <nl> auto * api_config_source = config . 
mutable_api_config_source ( ) ; <nl> api_config_source - > set_api_type ( GetParam ( ) ) ; <nl> - if ( api_config_source - > api_type ( ) = = envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) { <nl> - api_config_source - > add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( <nl> - " static_cluster " ) ; <nl> - } else { <nl> - api_config_source - > add_cluster_names ( " static_cluster " ) ; <nl> - } <nl> + api_config_source - > add_cluster_names ( " eds_cluster " ) ; <nl> Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> Upstream : : MockCluster cluster ; <nl> - cluster_map . emplace ( " static_cluster " , cluster ) ; <nl> + cluster_map . emplace ( " eds_cluster " , cluster ) ; <nl> EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> EXPECT_CALL ( cluster , info ( ) ) ; <nl> EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) . WillOnce ( Return ( true ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - subscriptionFromConfigSource ( config ) - > start ( { " static_cluster " } , callbacks_ ) , EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined " <nl> - " non - EDS cluster : ' static_cluster ' does not exist , was added via api , or is an EDS cluster " ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( subscriptionFromConfigSource ( config ) - > start ( { " foo " } , callbacks_ ) , <nl> + EnvoyException , <nl> + " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined " <nl> + " non - EDS cluster : ' eds_cluster ' " <nl> + " does not exist , was added via api , or is an EDS cluster " ) ; <nl> } <nl> <nl> TEST_P ( SubscriptionFactoryTestApiConfigSource , EDSClusterBackingEDSCluster ) { <nl> envoy : : api : : v2 : : core : : ConfigSource config ; <nl> auto * api_config_source = config . mutable_api_config_source ( ) ; <nl> api_config_source - > set_api_type ( GetParam ( ) ) ; <nl> - if ( api_config_source - > api_type ( ) = = envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) { <nl> - api_config_source - > add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( <nl> - " static_cluster " ) ; <nl> - } else { <nl> - api_config_source - > add_cluster_names ( " static_cluster " ) ; <nl> - } <nl> + api_config_source - > add_cluster_names ( " eds_cluster " ) ; <nl> Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> Upstream : : MockCluster cluster ; <nl> - cluster_map . emplace ( " static_cluster " , cluster ) ; <nl> + cluster_map . emplace ( " eds_cluster " , cluster ) ; <nl> EXPECT_CALL ( cm_ , clusters ( ) ) . WillOnce ( Return ( cluster_map ) ) ; <nl> EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) ; <nl> EXPECT_CALL ( * cluster . info_ , type ( ) ) . 
WillOnce ( Return ( envoy : : api : : v2 : : Cluster : : EDS ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - subscriptionFromConfigSource ( config ) - > start ( { " static_cluster " } , callbacks_ ) , EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined " <nl> - " non - EDS cluster : ' static_cluster ' does not exist , was added via api , or is an EDS cluster " ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( subscriptionFromConfigSource ( config ) - > start ( { " foo " } , callbacks_ ) , <nl> + EnvoyException , <nl> + " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined " <nl> + " non - EDS cluster : ' eds_cluster ' " <nl> + " does not exist , was added via api , or is an EDS cluster " ) ; <nl> } <nl> <nl> } / / namespace Config <nl> mmm a / test / common / config / utility_test . cc <nl> ppp b / test / common / config / utility_test . cc <nl> TEST ( UtilityTest , CheckFilesystemSubscriptionBackingPath ) { <nl> Utility : : checkFilesystemSubscriptionBackingPath ( test_path ) ; <nl> } <nl> <nl> - / / TEST ( UtilityTest , FactoryForGrpcApiConfigSource ) should catch misconfigured <nl> - / / API configs along the dimension of ApiConfigSource type . <nl> - TEST ( UtilityTest , FactoryForGrpcApiConfigSource ) { <nl> + TEST ( UtilityTest , CheckApiConfigSourceSubscriptionBackingCluster ) { <nl> + envoy : : api : : v2 : : core : : ConfigSource config ; <nl> + auto * api_config_source = config . mutable_api_config_source ( ) ; <nl> + api_config_source - > add_cluster_names ( " foo_cluster " ) ; <nl> + Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> + <nl> + / / Non - existent cluster . <nl> + EXPECT_THROW_WITH_MESSAGE ( <nl> + Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) , <nl> + EnvoyException , <nl> + " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined non - EDS cluster : " <nl> + " ' foo_cluster ' " <nl> + " does not exist , was added via api , or is an EDS cluster " ) ; <nl> + <nl> + / / Dynamic Cluster . <nl> + Upstream : : MockCluster cluster ; <nl> + cluster_map . emplace ( " foo_cluster " , cluster ) ; <nl> + EXPECT_CALL ( cluster , info ( ) ) ; <nl> + EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) . WillOnce ( Return ( true ) ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( <nl> + Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) , <nl> + EnvoyException , <nl> + " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined non - EDS cluster : " <nl> + " ' foo_cluster ' " <nl> + " does not exist , was added via api , or is an EDS cluster " ) ; <nl> + <nl> + / / EDS Cluster backing EDS Cluster . <nl> + EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> + EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) ; <nl> + EXPECT_CALL ( * cluster . info_ , type ( ) ) . WillOnce ( Return ( envoy : : api : : v2 : : Cluster : : EDS ) ) ; <nl> + EXPECT_THROW_WITH_MESSAGE ( <nl> + Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) , <nl> + EnvoyException , <nl> + " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined non - EDS cluster : " <nl> + " ' foo_cluster ' " <nl> + " does not exist , was added via api , or is an EDS cluster " ) ; <nl> + <nl> + / / All ok . <nl> + EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> + EXPECT_CALL ( * cluster . 
info_ , addedViaApi ( ) ) ; <nl> + EXPECT_CALL ( * cluster . info_ , type ( ) ) ; <nl> + Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) ; <nl> + } <nl> + <nl> + TEST ( UtilityTest , FactoryForApiConfigSource ) { <nl> Grpc : : MockAsyncClientManager async_client_manager ; <nl> Stats : : MockStore scope ; <nl> <nl> TEST ( UtilityTest , FactoryForGrpcApiConfigSource ) { <nl> envoy : : api : : v2 : : core : : ApiConfigSource api_config_source ; <nl> api_config_source . set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> EXPECT_THROW_WITH_REGEX ( <nl> - Utility : : factoryForGrpcApiConfigSource ( async_client_manager , api_config_source , scope ) , <nl> - EnvoyException , " API configs must have either a gRPC service or a cluster name defined " ) ; <nl> + Utility : : factoryForApiConfigSource ( async_client_manager , api_config_source , scope ) , <nl> + EnvoyException , " Missing gRPC services in envoy : : api : : v2 : : core : : ApiConfigSource : " ) ; <nl> } <nl> <nl> { <nl> TEST ( UtilityTest , FactoryForGrpcApiConfigSource ) { <nl> api_config_source . add_grpc_services ( ) ; <nl> api_config_source . add_grpc_services ( ) ; <nl> EXPECT_THROW_WITH_REGEX ( <nl> - Utility : : factoryForGrpcApiConfigSource ( async_client_manager , api_config_source , scope ) , <nl> + Utility : : factoryForApiConfigSource ( async_client_manager , api_config_source , scope ) , <nl> EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource : : GRPC must have a single gRPC service specified " ) ; <nl> + " Only singleton gRPC service lists supported in envoy : : api : : v2 : : core : : ApiConfigSource : " ) ; <nl> } <nl> <nl> { <nl> TEST ( UtilityTest , FactoryForGrpcApiConfigSource ) { <nl> api_config_source . set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> api_config_source . add_cluster_names ( ) ; <nl> api_config_source . add_cluster_names ( ) ; <nl> - / / this also logs a warning for setting REST cluster names for a gRPC API config . <nl> - EXPECT_THROW_WITH_REGEX ( <nl> - Utility : : factoryForGrpcApiConfigSource ( async_client_manager , api_config_source , scope ) , <nl> - EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a singleton cluster name specified " ) ; <nl> - } <nl> - <nl> - { <nl> - envoy : : api : : v2 : : core : : ApiConfigSource api_config_source ; <nl> - api_config_source . set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : REST ) ; <nl> - api_config_source . add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( " foo " ) ; <nl> - / / this also logs a warning for configuring gRPC clusters for a REST API config . <nl> EXPECT_THROW_WITH_REGEX ( <nl> - Utility : : factoryForGrpcApiConfigSource ( async_client_manager , api_config_source , scope ) , <nl> + Utility : : factoryForApiConfigSource ( async_client_manager , api_config_source , scope ) , <nl> EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource , if not of type gRPC , must not have a gRPC service " <nl> - " specified " ) ; <nl> + " Only singleton cluster name lists supported in envoy : : api : : v2 : : core : : ApiConfigSource : " ) ; <nl> } <nl> <nl> { <nl> envoy : : api : : v2 : : core : : ApiConfigSource api_config_source ; <nl> api_config_source . set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> - api_config_source . 
add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( " foo " ) ; <nl> + api_config_source . add_cluster_names ( " foo " ) ; <nl> envoy : : api : : v2 : : core : : GrpcService expected_grpc_service ; <nl> expected_grpc_service . mutable_envoy_grpc ( ) - > set_cluster_name ( " foo " ) ; <nl> EXPECT_CALL ( async_client_manager , <nl> factoryForGrpcService ( ProtoEq ( expected_grpc_service ) , Ref ( scope ) , _ ) ) ; <nl> - Utility : : factoryForGrpcApiConfigSource ( async_client_manager , api_config_source , scope ) ; <nl> + Utility : : factoryForApiConfigSource ( async_client_manager , api_config_source , scope ) ; <nl> } <nl> <nl> { <nl> TEST ( UtilityTest , FactoryForGrpcApiConfigSource ) { <nl> api_config_source . add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( " foo " ) ; <nl> EXPECT_CALL ( async_client_manager , <nl> factoryForGrpcService ( ProtoEq ( api_config_source . grpc_services ( 0 ) ) , Ref ( scope ) , _ ) ) ; <nl> - Utility : : factoryForGrpcApiConfigSource ( async_client_manager , api_config_source , scope ) ; <nl> + Utility : : factoryForApiConfigSource ( async_client_manager , api_config_source , scope ) ; <nl> } <nl> } <nl> <nl> - TEST ( CheckApiConfigSourceSubscriptionBackingClusterTest , GrpcClusterTestAcrossTypes ) { <nl> - envoy : : api : : v2 : : core : : ConfigSource config ; <nl> - auto * api_config_source = config . mutable_api_config_source ( ) ; <nl> - Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> - <nl> - / / API of type GRPC <nl> - api_config_source - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> - api_config_source - > add_cluster_names ( " foo_cluster " ) ; <nl> - <nl> - / / GRPC cluster without GRPC services . <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) , <nl> - EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined non - EDS cluster : " <nl> - " ' foo_cluster ' does not exist , was added via api , or is an EDS cluster " ) ; <nl> - <nl> - / / Non - existent cluster . <nl> - api_config_source - > add_grpc_services ( ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) , <nl> - EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined non - EDS cluster : " <nl> - " ' foo_cluster ' does not exist , was added via api , or is an EDS cluster " ) ; <nl> - <nl> - / / Dynamic Cluster . <nl> - Upstream : : MockCluster cluster ; <nl> - cluster_map . emplace ( " foo_cluster " , cluster ) ; <nl> - EXPECT_CALL ( cluster , info ( ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) . WillOnce ( Return ( true ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) , <nl> - EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined non - EDS cluster : " <nl> - " ' foo_cluster ' does not exist , was added via api , or is an EDS cluster " ) ; <nl> - <nl> - / / EDS Cluster backing EDS Cluster . <nl> - EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> - EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , type ( ) ) . 
WillOnce ( Return ( envoy : : api : : v2 : : Cluster : : EDS ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) , <nl> - EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined non - EDS cluster : " <nl> - " ' foo_cluster ' does not exist , was added via api , or is an EDS cluster " ) ; <nl> - <nl> - / / All ok . <nl> - EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> - EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , type ( ) ) ; <nl> - Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) ; <nl> - } <nl> - <nl> - TEST ( CheckApiConfigSourceSubscriptionBackingClusterTest , RestClusterTestAcrossTypes ) { <nl> - envoy : : api : : v2 : : core : : ConfigSource config ; <nl> - auto * api_config_source = config . mutable_api_config_source ( ) ; <nl> - Upstream : : ClusterManager : : ClusterInfoMap cluster_map ; <nl> - api_config_source - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : REST ) ; <nl> - <nl> - / / Non - existent cluster . <nl> - api_config_source - > add_cluster_names ( " foo_cluster " ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) , <nl> - EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined non - EDS cluster : " <nl> - " ' foo_cluster ' does not exist , was added via api , or is an EDS cluster " ) ; <nl> - <nl> - / / Dynamic Cluster . <nl> - Upstream : : MockCluster cluster ; <nl> - cluster_map . emplace ( " foo_cluster " , cluster ) ; <nl> - EXPECT_CALL ( cluster , info ( ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) . WillOnce ( Return ( true ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) , <nl> - EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined non - EDS cluster : " <nl> - " ' foo_cluster ' does not exist , was added via api , or is an EDS cluster " ) ; <nl> - <nl> - / / EDS Cluster backing EDS Cluster . <nl> - EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> - EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , type ( ) ) . WillOnce ( Return ( envoy : : api : : v2 : : Cluster : : EDS ) ) ; <nl> - EXPECT_THROW_WITH_MESSAGE ( <nl> - Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) , <nl> - EnvoyException , <nl> - " envoy : : api : : v2 : : core : : ConfigSource must have a statically defined non - EDS cluster : " <nl> - " ' foo_cluster ' does not exist , was added via api , or is an EDS cluster " ) ; <nl> - <nl> - / / All ok . <nl> - EXPECT_CALL ( cluster , info ( ) ) . Times ( 2 ) ; <nl> - EXPECT_CALL ( * cluster . info_ , addedViaApi ( ) ) ; <nl> - EXPECT_CALL ( * cluster . info_ , type ( ) ) ; <nl> - Utility : : checkApiConfigSourceSubscriptionBackingCluster ( cluster_map , * api_config_source ) ; <nl> - } <nl> - <nl> } / / namespace Config <nl> } / / namespace Envoy <nl> mmm a / test / integration / header_integration_test . cc <nl> ppp b / test / integration / header_integration_test . 
cc <nl> class HeaderIntegrationTest : public HttpIntegrationTest , <nl> eds_cluster_config : <nl> eds_config : <nl> api_config_source : <nl> + cluster_names : " eds - cluster " <nl> api_type : GRPC <nl> - grpc_services : <nl> - envoy_grpc : <nl> - cluster_name : " eds - cluster " <nl> ) EOF " ) ) ; <nl> <nl> / / TODO ( zuercher ) : Make ConfigHelper EDS - aware and get rid of this hack : <nl> mmm a / test / integration / load_stats_integration_test . cc <nl> ppp b / test / integration / load_stats_integration_test . cc <nl> class LoadStatsIntegrationTest : public HttpIntegrationTest , <nl> / / Setup load reporting and corresponding gRPC cluster . <nl> auto * loadstats_config = bootstrap . mutable_cluster_manager ( ) - > mutable_load_stats_config ( ) ; <nl> loadstats_config - > set_api_type ( envoy : : api : : v2 : : core : : ApiConfigSource : : GRPC ) ; <nl> - loadstats_config - > add_grpc_services ( ) - > mutable_envoy_grpc ( ) - > set_cluster_name ( " load_report " ) ; <nl> + loadstats_config - > add_cluster_names ( " load_report " ) ; <nl> auto * load_report_cluster = bootstrap . mutable_static_resources ( ) - > add_clusters ( ) ; <nl> load_report_cluster - > MergeFrom ( bootstrap . static_resources ( ) . clusters ( ) [ 0 ] ) ; <nl> load_report_cluster - > mutable_circuit_breakers ( ) - > Clear ( ) ; <nl>
|
Revert " Support api config source without cluster names ( ) " ( )
|
envoyproxy/envoy
|
8b4c2d6119a5163d4f781b57a5b14b7130ddc2a2
|
2018-04-12T17:45:03Z
|
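
For context on the revert above: a minimal sketch, in plain C++ against the generated Envoy protos, contrasting the two ApiConfigSource shapes the revert toggles between. The include path and the cluster name "xds_cluster" are illustrative, not taken from the commit. Before the revert, a GRPC source named its management server through a grpc_services entry; the reverted code returns to the REST-style singleton cluster_names list, which is why the tests in the patch swap add_grpc_services() calls back to add_cluster_names().

#include "envoy/api/v2/core/config_source.pb.h"  // illustrative include path

void buildConfigSources() {
  // Shape used prior to the revert: the gRPC target is an explicit service.
  envoy::api::v2::core::ConfigSource grpc_style;
  auto* grpc_api = grpc_style.mutable_api_config_source();
  grpc_api->set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC);
  grpc_api->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name(
      "xds_cluster");

  // Shape restored by the revert: a singleton cluster name, shared with REST
  // sources; the factory resolves it against statically defined clusters.
  envoy::api::v2::core::ConfigSource cluster_style;
  auto* cluster_api = cluster_style.mutable_api_config_source();
  cluster_api->set_api_type(envoy::api::v2::core::ApiConfigSource::GRPC);
  cluster_api->add_cluster_names("xds_cluster");
}
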
mmm a / osquery / tables / utils / yara . cpp <nl> ppp b / osquery / tables / utils / yara . cpp <nl> QueryData genYara ( QueryContext & context ) { <nl> <nl> std : : string full_path ; <nl> if ( file [ 0 ] ! = ' / ' ) { <nl> - full_path = std : : string ( " / var / osquery / " ) + file ; <nl> + full_path = std : : string ( " / etc / osquery / yara / " ) + file ; <nl> } else { <nl> full_path = file ; <nl> } <nl> mmm a / osquery / tables / utils / yara_utils . cpp <nl> ppp b / osquery / tables / utils / yara_utils . cpp <nl> Status handleRuleFiles ( const std : : string & category , <nl> const auto rule = item . second . get ( " " , " " ) ; <nl> VLOG ( 1 ) < < " Loading " < < rule ; <nl> <nl> + std : : string full_path ; <nl> + if ( rule [ 0 ] ! = ' / ' ) { <nl> + full_path = std : : string ( " / etc / osquery / yara / " ) + rule ; <nl> + } else { <nl> + full_path = rule ; <nl> + } <nl> + <nl> / / First attempt to load the file , in case it is saved ( pre - compiled ) <nl> / / rules . Sadly there is no way to load multiple compiled rules in <nl> / / succession . This means that : <nl> Status handleRuleFiles ( const std : : string & category , <nl> / / <nl> / / If you want to use saved rule files you must have them all in a single <nl> / / file . This is easy to accomplish with yarac ( 1 ) . <nl> - result = yr_rules_load ( rule . c_str ( ) , & tmp_rules ) ; <nl> + result = yr_rules_load ( full_path . c_str ( ) , & tmp_rules ) ; <nl> if ( result ! = ERROR_SUCCESS & & result ! = ERROR_INVALID_FILE ) { <nl> yr_compiler_destroy ( compiler ) ; <nl> return Status ( 1 , " Error loading YARA rules : " + std : : to_string ( result ) ) ; <nl> Status handleRuleFiles ( const std : : string & category , <nl> } else { <nl> compiled = true ; <nl> / / Try to compile the rules . <nl> - FILE * rule_file = fopen ( rule . c_str ( ) , " r " ) ; <nl> + FILE * rule_file = fopen ( full_path . c_str ( ) , " r " ) ; <nl> <nl> if ( rule_file = = nullptr ) { <nl> yr_compiler_destroy ( compiler ) ; <nl> - return Status ( 1 , " Could not open file : " + rule ) ; <nl> + return Status ( 1 , " Could not open file : " + full_path ) ; <nl> } <nl> <nl> int errors = yr_compiler_add_file ( compiler , <nl> rule_file , <nl> NULL , <nl> - rule . c_str ( ) ) ; <nl> + full_path . c_str ( ) ) ; <nl> <nl> fclose ( rule_file ) ; <nl> rule_file = nullptr ; <nl>
|
Merge pull request from wxsBSD / yara_relative
|
osquery/osquery
|
c012d1c1d3656b569dfd3ccdbcd1e4d785ff78b4
|
2015-04-29T22:56:17Z
|
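
To make the YARA path handling above concrete: a minimal sketch of the relative-path resolution the patch applies in both yara.cpp and yara_utils.cpp. The helper name resolveRulePath and the constant kYaraBaseDir are illustrative; the patch itself inlines the "/etc/osquery/yara/" literal at each call site.

#include <string>

static const std::string kYaraBaseDir = "/etc/osquery/yara/";

std::string resolveRulePath(const std::string& rule) {
  // Absolute paths are used verbatim; anything else is treated as
  // relative to the osquery YARA rule directory.
  if (!rule.empty() && rule[0] == '/') {
    return rule;
  }
  return kYaraBaseDir + rule;
}
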
mmm a / tensorflow / core / common_runtime / function_test . cc <nl> ppp b / tensorflow / core / common_runtime / function_test . cc <nl> TEST_F ( FunctionLibraryRuntimeTest , Gradient_AddSum ) { <nl> <nl> GraphDef actual ; <nl> g - > ToGraphDef ( & actual ) ; <nl> - TF_EXPECT_GRAPH_EQ ( expected , actual ) ; <nl> + / / The optimizer is non - deterministic , so we only check that the number of <nl> + / / nodes is not greater than expected . <nl> + EXPECT_LE ( actual . node_size ( ) , expected . node_size ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / core / graph / edgeset . cc <nl> ppp b / tensorflow / core / graph / edgeset . cc <nl> std : : pair < EdgeSet : : const_iterator , bool > EdgeSet : : insert ( value_type value ) { <nl> } <nl> } <nl> / / array is full . convert to set . <nl> - s = new std : : set < const Edge * > ; <nl> + s = new gtl : : FlatSet < const Edge * > ; <nl> for ( int i = 0 ; i < kInline ; i + + ) { <nl> s - > insert ( static_cast < const Edge * > ( ptrs_ [ i ] ) ) ; <nl> } <nl> mmm a / tensorflow / core / graph / edgeset . h <nl> ppp b / tensorflow / core / graph / edgeset . h <nl> limitations under the License . <nl> # define TENSORFLOW_GRAPH_EDGESET_H_ <nl> <nl> # include < stddef . h > <nl> - # include < set > <nl> - # include " tensorflow / core / platform / macros . h " <nl> - # include " tensorflow / core / platform / types . h " <nl> <nl> + # include " tensorflow / core / lib / gtl / flatset . h " <nl> # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / macros . h " <nl> + # include " tensorflow / core / platform / types . h " <nl> namespace tensorflow { <nl> <nl> class Edge ; <nl> <nl> / / An unordered set of edges . Uses very little memory for small sets . <nl> - / / Unlike std : : set , EdgeSet does NOT allow mutations during iteration . <nl> + / / Unlike gtl : : FlatSet , EdgeSet does NOT allow mutations during <nl> + / / iteration . <nl> class EdgeSet { <nl> public : <nl> EdgeSet ( ) ; <nl> class EdgeSet { <nl> private : <nl> / / Up to kInline elements are stored directly in ptrs_ ( nullptr means none ) . <nl> / / If ptrs_ [ 0 ] = = this then ptrs_ [ 1 ] points to a set < const Edge * > . <nl> - static const int kInline = 4 ; / / Must be > = 2 . <nl> + / / kInline must be > = 2 , and is chosen such that ptrs_ fills a 64 byte <nl> + / / cacheline . <nl> + static constexpr int kInline = 64 / sizeof ( const void * ) ; <nl> const void * ptrs_ [ kInline ] ; <nl> <nl> - std : : set < const Edge * > * get_set ( ) const { <nl> + gtl : : FlatSet < const Edge * > * get_set ( ) const { <nl> if ( ptrs_ [ 0 ] = = this ) { <nl> - return static_cast < std : : set < const Edge * > * > ( const_cast < void * > ( ptrs_ [ 1 ] ) ) ; <nl> + return static_cast < gtl : : FlatSet < const Edge * > * > ( <nl> + const_cast < void * > ( ptrs_ [ 1 ] ) ) ; <nl> } else { <nl> return nullptr ; <nl> } <nl> class EdgeSet : : const_iterator { <nl> friend class EdgeSet ; <nl> <nl> void const * const * array_iter_ = nullptr ; <nl> - typename std : : set < const Edge * > : : const_iterator tree_iter_ ; <nl> + typename gtl : : FlatSet < const Edge * > : : const_iterator tree_iter_ ; <nl> <nl> # ifdef NDEBUG <nl> inline void Init ( const EdgeSet * e ) { } <nl> mmm a / tensorflow / core / graph / edgeset_test . cc <nl> ppp b / tensorflow / core / graph / edgeset_test . cc <nl> limitations under the License . 
<nl> namespace tensorflow { <nl> class EdgeSetTest : public : : testing : : Test { <nl> public : <nl> - EdgeSetTest ( ) : edges_ ( nullptr ) , eset_ ( nullptr ) { } <nl> - <nl> - ~ EdgeSetTest ( ) override { <nl> - delete eset_ ; <nl> - delete [ ] edges_ ; <nl> - } <nl> + EdgeSetTest ( ) : edges_ ( nullptr ) { } <nl> + ~ EdgeSetTest ( ) override { delete [ ] edges_ ; } <nl> <nl> void MakeEdgeSet ( int n ) { <nl> - delete eset_ ; <nl> - delete [ ] edges_ ; <nl> + if ( edges_ ) { <nl> + delete [ ] edges_ ; <nl> + } <nl> edges_ = new Edge [ n ] ; <nl> - eset_ = new EdgeSet ; <nl> + eset_ . clear ( ) ; <nl> model_ . clear ( ) ; <nl> for ( int i = 0 ; i < n ; i + + ) { <nl> - eset_ - > insert ( & edges_ [ i ] ) ; <nl> + eset_ . insert ( & edges_ [ i ] ) ; <nl> model_ . insert ( & edges_ [ i ] ) ; <nl> } <nl> } <nl> <nl> void CheckSame ( ) { <nl> - EXPECT_EQ ( model_ . size ( ) , eset_ - > size ( ) ) ; <nl> - EXPECT_EQ ( model_ . empty ( ) , eset_ - > empty ( ) ) ; <nl> + EXPECT_EQ ( model_ . size ( ) , eset_ . size ( ) ) ; <nl> + EXPECT_EQ ( model_ . empty ( ) , eset_ . empty ( ) ) ; <nl> std : : vector < const Edge * > modelv ( model_ . begin ( ) , model_ . end ( ) ) ; <nl> - std : : vector < const Edge * > esetv ( eset_ - > begin ( ) , eset_ - > end ( ) ) ; <nl> + std : : vector < const Edge * > esetv ( eset_ . begin ( ) , eset_ . end ( ) ) ; <nl> std : : sort ( modelv . begin ( ) , modelv . end ( ) ) ; <nl> std : : sort ( esetv . begin ( ) , esetv . end ( ) ) ; <nl> EXPECT_EQ ( modelv . size ( ) , esetv . size ( ) ) ; <nl> class EdgeSetTest : public : : testing : : Test { <nl> } <nl> } <nl> <nl> + static constexpr int kInline = 64 / sizeof ( const void * ) ; <nl> Edge nonexistent_ ; <nl> Edge * edges_ ; <nl> - EdgeSet * eset_ ; <nl> - std : : set < const Edge * > model_ ; <nl> + EdgeSet eset_ ; <nl> + gtl : : FlatSet < const Edge * > model_ ; <nl> } ; <nl> <nl> namespace { <nl> <nl> TEST_F ( EdgeSetTest , Ops ) { <nl> - for ( int n : { 0 , 1 , 2 , 3 , 4 , 10 } ) { <nl> + for ( int n : { 0 , 1 , 2 , kInline + 1 } ) { <nl> MakeEdgeSet ( n ) ; <nl> CheckSame ( ) ; <nl> - EXPECT_EQ ( ( n = = 0 ) , eset_ - > empty ( ) ) ; <nl> - EXPECT_EQ ( n , eset_ - > size ( ) ) ; <nl> + EXPECT_EQ ( ( n = = 0 ) , eset_ . empty ( ) ) ; <nl> + EXPECT_EQ ( n , eset_ . size ( ) ) ; <nl> <nl> - eset_ - > clear ( ) ; <nl> + eset_ . clear ( ) ; <nl> model_ . clear ( ) ; <nl> CheckSame ( ) ; <nl> <nl> - eset_ - > insert ( & edges_ [ 0 ] ) ; <nl> + eset_ . insert ( & edges_ [ 0 ] ) ; <nl> model_ . insert ( & edges_ [ 0 ] ) ; <nl> CheckSame ( ) ; <nl> } <nl> TEST_F ( EdgeSetTest , Ops ) { <nl> <nl> / / Try insert / erase of existing elements at different positions . <nl> TEST_F ( EdgeSetTest , Exists ) { <nl> - for ( int n : { 0 , 1 , 2 , 3 , 4 , 10 } ) { <nl> + for ( int n : { 0 , 1 , 2 , kInline + 1 } ) { <nl> MakeEdgeSet ( n ) ; <nl> for ( int pos = 0 ; pos < n ; pos + + ) { <nl> - MakeEdgeSet ( n ) ; <nl> - auto p = eset_ - > insert ( & edges_ [ pos ] ) ; <nl> + auto p = eset_ . insert ( & edges_ [ pos ] ) ; <nl> EXPECT_FALSE ( p . second ) ; <nl> EXPECT_EQ ( & edges_ [ pos ] , * p . first ) ; <nl> <nl> - EXPECT_EQ ( 1 , eset_ - > erase ( & edges_ [ pos ] ) ) ; <nl> + EXPECT_EQ ( 1 , eset_ . erase ( & edges_ [ pos ] ) ) ; <nl> model_ . erase ( & edges_ [ pos ] ) ; <nl> CheckSame ( ) ; <nl> } <nl> TEST_F ( EdgeSetTest , Exists ) { <nl> <nl> / / Try insert / erase of non - existent element . 
<nl> TEST_F ( EdgeSetTest , DoesNotExist ) { <nl> - for ( int n : { 0 , 1 , 2 , 3 , 4 , 10 } ) { <nl> + for ( int n : { 0 , 1 , 2 , kInline + 1 } ) { <nl> MakeEdgeSet ( n ) ; <nl> - EXPECT_EQ ( 0 , eset_ - > erase ( & nonexistent_ ) ) ; <nl> - auto p = eset_ - > insert ( & nonexistent_ ) ; <nl> + EXPECT_EQ ( 0 , eset_ . erase ( & nonexistent_ ) ) ; <nl> + auto p = eset_ . insert ( & nonexistent_ ) ; <nl> EXPECT_TRUE ( p . second ) ; <nl> EXPECT_EQ ( & nonexistent_ , * p . first ) ; <nl> } <nl> mmm a / tensorflow / core / graph / graph_test . cc <nl> ppp b / tensorflow / core / graph / graph_test . cc <nl> BENCHMARK ( BM_GraphCreation ) - > ArgPair ( 1 < < 9 , 16 ) ; <nl> BENCHMARK ( BM_GraphCreation ) - > ArgPair ( 1 < < 12 , 16 ) ; <nl> BENCHMARK ( BM_GraphCreation ) - > ArgPair ( 1 < < 15 , 16 ) ; <nl> <nl> + static void BM_ToGraphDef ( int iters , int num_nodes , int num_edges_per_node ) { <nl> + testing : : StopTiming ( ) ; <nl> + const GraphDef graph_def = CreateGraphDef ( num_nodes , num_edges_per_node ) ; <nl> + const auto registry = OpRegistry : : Global ( ) ; <nl> + GraphConstructorOptions opts ; <nl> + / / Warmup step . <nl> + Graph graph ( registry ) ; <nl> + TF_CHECK_OK ( ConvertGraphDefToGraph ( opts , graph_def , & graph ) ) ; <nl> + int64 sum = 0 ; <nl> + testing : : StartTiming ( ) ; <nl> + for ( int i = 0 ; i < iters ; + + i ) { <nl> + GraphDef graph_def ; <nl> + graph . ToGraphDef ( & graph_def ) ; <nl> + sum + = graph_def . node_size ( ) ; <nl> + } <nl> + VLOG ( 1 ) < < sum ; <nl> + testing : : StopTiming ( ) ; <nl> + } <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 10 , 2 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 6 , 2 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 9 , 2 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 12 , 2 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 15 , 2 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 10 , 4 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 6 , 4 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 9 , 4 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 12 , 4 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 15 , 4 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 10 , 8 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 6 , 8 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 9 , 8 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 12 , 8 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 15 , 8 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 10 , 16 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 6 , 16 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 9 , 16 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 12 , 16 ) ; <nl> + BENCHMARK ( BM_ToGraphDef ) - > ArgPair ( 1 < < 15 , 16 ) ; <nl> + <nl> } / / namespace <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / graph / optimizer_cse_test . cc <nl> ppp b / tensorflow / core / graph / optimizer_cse_test . cc <nl> TEST_F ( OptimizerCSETest , Constant_Dedup ) { <nl> EXPECT_EQ ( OriginalGraph ( ) , <nl> " n / _0 ( Const ) ; n / _1 ( Const ) ; n / _2 ( Const ) ; n / _3 ( Const ) ; " <nl> " n / _4 ( Const ) ; n / _5 ( Const ) ; n / _6 ( Const ) ; n / _7 ( Const ) | " ) ; <nl> - / / In theory , there are 2 ^ 4 possible correct output of CSE . In this <nl> - / / test , it happens to eliminate the last 4 nodes . 
<nl> - EXPECT_EQ ( DoCSE ( ) , " n / _0 ( Const ) ; n / _1 ( Const ) ; n / _2 ( Const ) ; n / _3 ( Const ) | " ) ; <nl> + std : : vector < string > nodes = str_util : : Split ( DoCSE ( ) , " ; | " ) ; <nl> + std : : set < string > node_set ( nodes . begin ( ) , nodes . end ( ) ) ; <nl> + / / Expect exactly one of each type of node to be retained after CSE . <nl> + EXPECT_EQ ( node_set . count ( " n / _0 ( Const ) " ) + node_set . count ( " n / _7 ( Const ) " ) , 1 ) ; <nl> + EXPECT_EQ ( node_set . count ( " n / _1 ( Const ) " ) + node_set . count ( " n / _6 ( Const ) " ) , 1 ) ; <nl> + EXPECT_EQ ( node_set . count ( " n / _2 ( Const ) " ) + node_set . count ( " n / _5 ( Const ) " ) , 1 ) ; <nl> + EXPECT_EQ ( node_set . count ( " n / _3 ( Const ) " ) + node_set . count ( " n / _4 ( Const ) " ) , 1 ) ; <nl> } <nl> <nl> static void BM_CSE ( int iters , int op_nodes ) { <nl>
|
Optimize EdgeSet data structure :
|
tensorflow/tensorflow
|
0c1eb8861624d6d17c797b70d25330711df5eb2f
|
2018-10-31T18:32:16Z
|
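
The EdgeSet change above boils down to a small-buffer optimization: keep up to kInline pointers in a fixed array sized to one cache line, and only spill to a heap-allocated set when that fills. Below is a simplified standalone sketch of the pattern; the real EdgeSet instead tags ptrs_[0] == this to mark overflow and spills into gtl::FlatSet, so the names and the explicit size counter here are illustrative, not the TensorFlow implementation.

#include <cstddef>
#include <set>

class SmallPtrSet {
 public:
  // Chosen so the inline array fills a 64-byte cache line, matching the
  // patch: 64 / sizeof(const void*) == 8 on typical 64-bit targets.
  static constexpr int kInline = 64 / sizeof(const void*);

  ~SmallPtrSet() { delete overflow_; }

  bool insert(const void* p) {
    if (overflow_ != nullptr) {
      return overflow_->insert(p).second;
    }
    for (int i = 0; i < size_; ++i) {
      if (ptrs_[i] == p) return false;  // already present inline
    }
    if (size_ < kInline) {
      ptrs_[size_++] = p;  // fast path: no heap allocation
      return true;
    }
    // Inline storage is full: spill everything into a heap-allocated set.
    overflow_ = new std::set<const void*>(ptrs_, ptrs_ + kInline);
    return overflow_->insert(p).second;
  }

  size_t size() const { return overflow_ ? overflow_->size() : size_; }

 private:
  const void* ptrs_[kInline] = {};
  int size_ = 0;
  std::set<const void*>* overflow_ = nullptr;  // real code uses gtl::FlatSet
};
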
mmm a / tests / unistd / links . c <nl> ppp b / tests / unistd / links . c <nl> int main ( ) { <nl> printf ( " result : % s \ n \ n " , buffer ) ; <nl> errno = 0 ; <nl> <nl> + buffer [ 0 ] = buffer [ 1 ] = buffer [ 2 ] = buffer [ 3 ] = buffer [ 4 ] = buffer [ 5 ] = ' * ' ; <nl> printf ( " readlink ( short buffer ) \ n " ) ; <nl> printf ( " ret : % d \ n " , readlink ( " link " , buffer , 4 ) ) ; <nl> printf ( " errno : % d \ n " , errno ) ; <nl> mmm a / tests / unistd / links . out <nl> ppp b / tests / unistd / links . out <nl> errno : 0 <nl> result : / working / folder / new - nonexistent - path <nl> <nl> readlink ( short buffer ) <nl> - ret : 3 <nl> + ret : 4 <nl> errno : 0 <nl> - result : / thrking / folder / new - nonexistent - path <nl> + result : / the * * ng / folder / new - nonexistent - path <nl>
|
fix test_unistd_links to match musl and glibc behavior , looks like we were wrong earlier
fix test_unistd_links to match musl and glibc behavior , looks like we were wrong earlier
|
emscripten-core/emscripten
|
f37e37f4cd2cb6d2758b40efe70785fffdd19b91
|
2015-05-28T04:03:34Z
|
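
The updated expectations above encode the POSIX readlink() contract that musl and glibc follow: with a short buffer, readlink() truncates, returns the number of bytes actually written, and does not NUL-terminate. The '*' filler written into the buffer beforehand survives past the truncation point, which is why the expected output shows the "/the**ng/..." remnants. A small sketch, with an illustrative link target:

#include <cstdio>
#include <cstring>
#include <unistd.h>

int main() {
  unlink("link");                        // ignore failure if absent
  symlink("/the/target", "link");        // target longer than the buffer slice
  char buf[32];
  memset(buf, '*', sizeof(buf));
  ssize_t n = readlink("link", buf, 4);  // at most 4 bytes are written
  // n == 4; buf now holds "/the" followed by untouched '*' filler and no NUL.
  printf("ret: %zd, prefix: %.4s\n", n, buf);
  return 0;
}
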
mmm a / src / mongo / SConscript <nl> ppp b / src / mongo / SConscript <nl> serverOnlyFiles = [ " db / curop . cpp " , <nl> " db / index / 2d_access_method . cpp " , <nl> " db / index / 2d_index_cursor . cpp " , <nl> " db / index / btree_access_method . cpp " , <nl> + " db / index / btree_based_builder . cpp " , <nl> " db / index / btree_index_cursor . cpp " , <nl> " db / index / btree_interface . cpp " , <nl> " db / index / btree_key_generator . cpp " , <nl> mmm a / src / mongo / db / btree . cpp <nl> ppp b / src / mongo / db / btree . cpp <nl> <nl> # include " mongo / db / db . h " <nl> # include " mongo / db / dbhelpers . h " <nl> # include " mongo / db / dur_commitjob . h " <nl> - # include " mongo / db / index_insertion_continuation . h " <nl> # include " mongo / db / json . h " <nl> # include " mongo / db / kill_current_op . h " <nl> # include " mongo / db / pdfile . h " <nl> namespace mongo { <nl> } <nl> } <nl> <nl> - / * * @ thisLoc disk location of * this * / <nl> - template < class V > <nl> - void BtreeBucket < V > : : insertStepOne ( DiskLoc thisLoc , <nl> - IndexInsertionContinuationImpl < V > & c , <nl> - bool dupsAllowed ) const { <nl> - dassert ( c . key . dataSize ( ) < = this - > KeyMax ) ; <nl> - verify ( c . key . dataSize ( ) > 0 ) ; <nl> - <nl> - int pos ; <nl> - bool found = find ( c . idx , c . key , c . recordLoc , c . order , pos , ! dupsAllowed ) ; <nl> - <nl> - if ( found ) { <nl> - const _KeyNode & kn = k ( pos ) ; <nl> - if ( kn . isUnused ( ) ) { <nl> - LOG ( 4 ) < < " btree _insert : reusing unused key " < < endl ; <nl> - c . b = this ; <nl> - c . pos = pos ; <nl> - c . op = IndexInsertionContinuation : : SetUsed ; <nl> - return ; <nl> - } <nl> - <nl> - DEV { <nl> - log ( ) < < " _insert ( ) : key already exists in index ( ok for background : true ) \ n " ; <nl> - log ( ) < < " " < < c . idx . indexNamespace ( ) < < " thisLoc : " < < thisLoc . toString ( ) < < ' \ n ' ; <nl> - log ( ) < < " " < < c . key . toString ( ) < < ' \ n ' ; <nl> - log ( ) < < " " < < " recordLoc : " < < c . recordLoc . toString ( ) < < " pos : " < < pos < < endl ; <nl> - log ( ) < < " old l r : " < < this - > childForPos ( pos ) . toString ( ) < < ' ' < < this - > childForPos ( pos + 1 ) . toString ( ) < < endl ; <nl> - } <nl> - alreadyInIndex ( ) ; <nl> - } <nl> - <nl> - Loc ch = this - > childForPos ( pos ) ; <nl> - DiskLoc child = ch ; <nl> - <nl> - if ( child . isNull ( ) ) { <nl> - / / A this - > new key will be inserted at the same tree height as an adjacent existing key . <nl> - c . bLoc = thisLoc ; <nl> - c . b = this ; <nl> - c . pos = pos ; <nl> - c . op = IndexInsertionContinuation : : InsertHere ; <nl> - return ; <nl> - } <nl> - <nl> - child . btree < V > ( ) - > insertStepOne ( child , c , dupsAllowed ) ; <nl> - } <nl> - <nl> / * * @ thisLoc disk location of * this * / <nl> template < class V > <nl> int BtreeBucket < V > : : _insert ( const DiskLoc thisLoc , const DiskLoc recordLoc , <nl> namespace mongo { <nl> _log ( ) < < " \ n " < < indent < < " " < < hex < < this - > nextChild . getOfs ( ) < < dec < < endl ; <nl> } <nl> <nl> - template < class V > <nl> - void BtreeBucket < V > : : twoStepInsert ( DiskLoc thisLoc , IndexInsertionContinuationImpl < V > & c , <nl> - bool dupsAllowed ) const <nl> - { <nl> - <nl> - if ( c . key . dataSize ( ) > this - > KeyMax ) { <nl> - problem ( ) < < " ERROR : key too large len : " < < c . key . dataSize ( ) < < " max : " < < this - > KeyMax < < ' ' < < c . key . dataSize ( ) < < ' ' < < c . idx . 
indexNamespace ( ) < < endl ; <nl> - return ; / / op = Nothing <nl> - } <nl> - insertStepOne ( thisLoc , c , dupsAllowed ) ; <nl> - } <nl> - <nl> / * * todo : meaning of return code unclear clean up * / <nl> template < class V > <nl> int BtreeBucket < V > : : bt_insert ( const DiskLoc thisLoc , const DiskLoc recordLoc , <nl> namespace mongo { <nl> } <nl> } <nl> } btunittest ; <nl> - <nl> - <nl> - IndexInsertionContinuation : : ~ IndexInsertionContinuation ( ) { } <nl> } <nl> mmm a / src / mongo / db / btree . h <nl> ppp b / src / mongo / db / btree . h <nl> namespace mongo { <nl> } ; <nl> <nl> class IndexDetails ; <nl> - class IndexInsertionContinuation ; <nl> - template < class V > <nl> - struct IndexInsertionContinuationImpl ; <nl> <nl> / * * <nl> * This class adds functionality for manipulating buckets that are assembled <nl> namespace mongo { <nl> template < class V > <nl> class BtreeBucket : public BucketBasics < V > { <nl> friend class BtreeCursor ; <nl> - friend struct IndexInsertionContinuationImpl < V > ; <nl> public : <nl> / / make compiler happy : <nl> typedef typename V : : Key Key ; <nl> namespace mongo { <nl> const BSONObj & key , const Ordering & order , bool dupsAllowed , <nl> IndexDetails & idx , bool toplevel = true ) const ; <nl> <nl> - / * * does the insert in two steps - can then use an upgradable lock for step 1 , which <nl> - is the part which may have page faults . also that step is most of the computational work . <nl> - * / <nl> - void twoStepInsert ( DiskLoc thisLoc , IndexInsertionContinuationImpl < V > & c , bool dupsAllowed ) const ; <nl> - <nl> / * * <nl> * Preconditions : <nl> * - ' key ' has a valid schema for this index , and may have objsize ( ) > KeyMax . <nl> namespace mongo { <nl> const Key & key , const Ordering & order , bool dupsAllowed , <nl> const DiskLoc lChild , const DiskLoc rChild , IndexDetails & idx ) const ; <nl> <nl> - void insertStepOne ( <nl> - DiskLoc thisLoc , IndexInsertionContinuationImpl < V > & c , bool dupsAllowed ) const ; <nl> - <nl> bool find ( const IndexDetails & idx , const Key & key , const DiskLoc & recordLoc , const Ordering & order , int & pos , bool assertIfDup ) const ; <nl> static bool customFind ( int l , int h , const BSONObj & keyBegin , int keyBeginLen , bool afterKey , const vector < const BSONElement * > & keyEnd , const vector < bool > & keyEndInclusive , const Ordering & order , int direction , DiskLoc & thisLoc , int & keyOfs , pair < DiskLoc , int > & bestParent ) ; <nl> static void findLargestKey ( const DiskLoc & thisLoc , DiskLoc & largestLoc , int & largestKey ) ; <nl> mmm a / src / mongo / db / cloner . cpp <nl> ppp b / src / mongo / db / cloner . cpp <nl> <nl> # include " mongo / db / namespacestring . h " <nl> # include " mongo / db / repl / oplog . h " <nl> # include " mongo / db / pdfile . h " <nl> - # include " mongo / db / sort_phase_one . h " <nl> <nl> namespace mongo { <nl> <nl> namespace mongo { <nl> Cloner : : Cloner ( ) { } <nl> <nl> struct Cloner : : Fun { <nl> - Fun ( ) : lastLog ( 0 ) , _sortersForIndex ( NULL ) { } <nl> + Fun ( ) : lastLog ( 0 ) { } <nl> time_t lastLog ; <nl> void operator ( ) ( DBClientCursorBatchIterator & i ) { <nl> Lock : : GlobalWrite lk ; <nl> namespace mongo { <nl> } <nl> <nl> try { <nl> - / / add keys for presorting <nl> DiskLoc loc = theDataFileMgr . insertWithObjMod ( to_collection , js ) ; <nl> loc . assertOk ( ) ; <nl> - if ( _sortersForIndex ! 
= NULL ) { <nl> - / / add key to SortersForNS <nl> - for ( SortersForIndex : : iterator iSorter = _sortersForIndex - > begin ( ) ; <nl> - iSorter ! = _sortersForIndex - > end ( ) ; <nl> - + + iSorter ) { <nl> - iSorter - > second . preSortPhase . addKeys ( iSorter - > second . spec , js , <nl> - loc , false ) ; <nl> - } <nl> - } <nl> if ( logForRepl ) <nl> logOp ( " i " , to_collection , js ) ; <nl> <nl> namespace mongo { <nl> Client : : Context * context ; <nl> bool _mayYield ; <nl> bool _mayBeInterrupted ; <nl> - SortersForIndex * _sortersForIndex ; / / sorters that build index keys during query <nl> } ; <nl> <nl> / * copy the specified collection <nl> namespace mongo { <nl> f . _mayYield = mayYield ; <nl> f . _mayBeInterrupted = mayBeInterrupted ; <nl> <nl> - if ( ! isindex ) { <nl> - SortersForNS : : iterator it = _sortersForNS . find ( to_collection ) ; <nl> - if ( it ! = _sortersForNS . end ( ) ) <nl> - f . _sortersForIndex = & it - > second ; <nl> - } <nl> - <nl> int options = QueryOption_NoCursorTimeout | ( slaveOk ? QueryOption_SlaveOk : 0 ) ; <nl> { <nl> f . context = cc ( ) . getContext ( ) ; <nl> namespace mongo { <nl> BSONObj js = * i ; <nl> scoped_lock precalcLock ( theDataFileMgr . _precalcedMutex ) ; <nl> try { <nl> - / / set the ' precalculated ' index data and add the index <nl> - SortersForNS : : iterator sortIter = _sortersForNS . find ( js [ " ns " ] . String ( ) ) ; <nl> - if ( sortIter ! = _sortersForNS . end ( ) ) { <nl> - SortersForIndex : : iterator it = sortIter - > second . find ( js [ " name " ] . String ( ) ) ; <nl> - if ( it ! = sortIter - > second . end ( ) ) { <nl> - theDataFileMgr . setPrecalced ( & it - > second . preSortPhase ) ; <nl> - } <nl> - } <nl> theDataFileMgr . insertWithObjMod ( to_collection , js ) ; <nl> theDataFileMgr . setPrecalced ( NULL ) ; <nl> <nl> namespace mongo { <nl> mayInterrupt ( opts . mayBeInterrupted ) ; <nl> dbtempreleaseif r ( opts . mayYield ) ; <nl> <nl> - # if 0 <nl> - / / fetch index info <nl> - auto_ptr < DBClientCursor > cur = _conn - > query ( idxns . c_str ( ) , BSONObj ( ) , 0 , 0 , 0 , <nl> - opts . slaveOk ? QueryOption_SlaveOk : 0 ) ; <nl> - if ( ! validateQueryResults ( cur , errCode , errmsg ) ) { <nl> - errmsg = " index query on ns " + ns + " failed : " + errmsg ; <nl> - return false ; <nl> - } <nl> - while ( cur - > more ( ) ) { <nl> - BSONObj idxEntry = cur - > next ( ) ; <nl> - massert ( 16536 , " sync source has invalid index data " , <nl> - idxEntry . hasField ( " key " ) & & <nl> - idxEntry . hasField ( " ns " ) & & <nl> - idxEntry . hasField ( " name " ) ) ; <nl> - <nl> - / / validate index version ( similar to fixIndexVersion ( ) ) <nl> - SortPhaseOne initialSort ; <nl> - IndexInterface * interface = & IndexInterface : : defaultVersion ( ) ; <nl> - <nl> - / / initialize sorter for this index <nl> - PreSortDetails details ; <nl> - details . preSortPhase . sorter . reset ( <nl> - new BSONObjExternalSorter ( * interface , idxEntry [ " key " ] . Obj ( ) . copy ( ) ) ) ; <nl> - details . spec = IndexSpec ( idxEntry [ " key " ] . Obj ( ) . copy ( ) , idxEntry . copy ( ) ) ; <nl> - _sortersForNS [ idxEntry [ " ns " ] . String ( ) ] . insert ( make_pair ( idxEntry [ " name " ] . String ( ) , <nl> - details ) ) ; <nl> - } <nl> - # endif <nl> / / just using exhaust for collection copying right now <nl> <nl> / / todo : if snapshot ( bool param to this func ) is true , we need to snapshot this query ? <nl> mmm a / src / mongo / db / cloner . h <nl> ppp b / src / mongo / db / cloner . 
h <nl> <nl> # pragma once <nl> <nl> # include " mongo / db / jsobj . h " <nl> - # include " mongo / db / sort_phase_one . h " <nl> <nl> namespace mongo { <nl> <nl> namespace mongo { <nl> bool masterSameProcess , bool slaveOk , bool mayYield , bool mayBeInterrupted , <nl> Query q ) ; <nl> <nl> - / / index presort info <nl> - typedef struct { <nl> - IndexSpec spec ; <nl> - SortPhaseOne preSortPhase ; <nl> - } PreSortDetails ; <nl> - <nl> - typedef map < string , PreSortDetails > SortersForIndex ; / / map from index name to presorter <nl> - typedef map < string , SortersForIndex > SortersForNS ; / / map from ns to indices / sorters <nl> - <nl> struct Fun ; <nl> auto_ptr < DBClientBase > _conn ; <nl> - SortersForNS _sortersForNS ; <nl> } ; <nl> <nl> struct CloneOptions { <nl> mmm a / src / mongo / db / compact . cpp <nl> ppp b / src / mongo / db / compact . cpp <nl> <nl> # include " mongo / db / jsobj . h " <nl> # include " mongo / db / kill_current_op . h " <nl> # include " mongo / db / pdfile . h " <nl> - # include " mongo / db / sort_phase_one . h " <nl> # include " mongo / util / concurrency / task . h " <nl> # include " mongo / util / timer . h " <nl> # include " mongo / util / touch_pages . h " <nl> namespace mongo { <nl> <nl> / * * @ return number of skipped ( invalid ) documents * / <nl> unsigned compactExtent ( const char * ns , NamespaceDetails * d , const DiskLoc diskloc , int n , <nl> - const scoped_array < IndexSpec > & indexSpecs , <nl> - scoped_array < SortPhaseOne > & phase1 , int nidx , bool validate , <nl> - double pf , int pb ) <nl> - { <nl> + int nidx , bool validate , double pf , int pb ) { <nl> + <nl> log ( ) < < " compact begin extent # " < < n < < " for namespace " < < ns < < endl ; <nl> unsigned oldObjSize = 0 ; / / we ' ll report what the old padding was <nl> unsigned oldObjSizeWithPadding = 0 ; <nl> namespace mongo { <nl> recNew = ( Record * ) getDur ( ) . writingPtr ( recNew , lenWHdr ) ; <nl> addRecordToRecListInExtent ( recNew , loc ) ; <nl> memcpy ( recNew - > data ( ) , objOld . objdata ( ) , sz ) ; <nl> - <nl> - { <nl> - / / extract keys for all indexes we will be rebuilding <nl> - for ( int x = 0 ; x < nidx ; x + + ) { <nl> - phase1 [ x ] . addKeys ( indexSpecs [ x ] , objOld , loc , false ) ; <nl> - } <nl> - } <nl> } <nl> else { <nl> if ( + + skipped < = 10 ) <nl> namespace mongo { <nl> NamespaceDetailsTransient : : get ( ns ) . clearQueryCache ( ) ; <nl> <nl> int nidx = d - > nIndexes ; <nl> - scoped_array < IndexSpec > indexSpecs ( new IndexSpec [ nidx ] ) ; <nl> - scoped_array < SortPhaseOne > phase1 ( new SortPhaseOne [ nidx ] ) ; <nl> + scoped_array < BSONObj > indexSpecs ( new BSONObj [ nidx ] ) ; <nl> { <nl> NamespaceDetails : : IndexIterator ii = d - > ii ( ) ; <nl> / / For each existing index . . . <nl> namespace mongo { <nl> / / Pass the element through to the new index spec . <nl> b . append ( e ) ; <nl> } <nl> - / / Add the new index spec to ' indexSpecs ' . <nl> - BSONObj o = b . obj ( ) . getOwned ( ) ; <nl> - indexSpecs [ idxNo ] . reset ( o ) ; <nl> - / / Create an external sorter . <nl> - phase1 [ idxNo ] . sorter . reset <nl> - ( new BSONObjExternalSorter <nl> - / / Use the default index interface , since the new index will be created <nl> - / / with the default index version . <nl> - ( IndexInterface : : defaultVersion ( ) , <nl> - o . getObjectField ( " key " ) ) ) ; <nl> - phase1 [ idxNo ] . sorter - > hintNumObjects ( d - > stats . nrecords ) ; <nl> + indexSpecs [ idxNo ] = b . obj ( ) . 
getOwned ( ) ; <nl> } <nl> } <nl> <nl> namespace mongo { <nl> d - > deletedList [ i ] . writing ( ) . Null ( ) ; <nl> } <nl> <nl> - <nl> - <nl> / / Start over from scratch with our extent sizing and growth <nl> d - > lastExtentSize = 0 ; <nl> <nl> namespace mongo { <nl> } <nl> <nl> for ( list < DiskLoc > : : iterator i = extents . begin ( ) ; i ! = extents . end ( ) ; i + + ) { <nl> - skipped + = compactExtent ( ns , d , * i , n + + , indexSpecs , phase1 , nidx , validate , pf , pb ) ; <nl> + skipped + = compactExtent ( ns , d , * i , n + + , nidx , validate , pf , pb ) ; <nl> pm . hit ( ) ; <nl> } <nl> <nl> namespace mongo { <nl> string si = s . db + " . system . indexes " ; <nl> for ( int i = 0 ; i < nidx ; i + + ) { <nl> killCurrentOp . checkForInterrupt ( false ) ; <nl> - BSONObj info = indexSpecs [ i ] . info ; <nl> + BSONObj info = indexSpecs [ i ] ; <nl> log ( ) < < " compact create index " < < info [ " key " ] . Obj ( ) . toString ( ) < < endl ; <nl> - scoped_lock precalcLock ( theDataFileMgr . _precalcedMutex ) ; <nl> - try { <nl> - theDataFileMgr . setPrecalced ( & phase1 [ i ] ) ; <nl> - theDataFileMgr . insert ( si . c_str ( ) , info . objdata ( ) , info . objsize ( ) ) ; <nl> - } <nl> - catch ( . . . ) { <nl> - theDataFileMgr . setPrecalced ( NULL ) ; <nl> - throw ; <nl> - } <nl> - theDataFileMgr . setPrecalced ( NULL ) ; <nl> + theDataFileMgr . insert ( si . c_str ( ) , info . objdata ( ) , info . objsize ( ) ) ; <nl> } <nl> <nl> return true ; <nl> mmm a / src / mongo / db / index . cpp <nl> ppp b / src / mongo / db / index . cpp <nl> <nl> <nl> namespace mongo { <nl> <nl> - IndexInterface : : IndexInserter : : IndexInserter ( ) { } <nl> - IndexInterface : : IndexInserter : : ~ IndexInserter ( ) { <nl> - for ( size_t i = 0 ; i < _continuations . size ( ) ; + + i ) <nl> - delete _continuations [ i ] ; <nl> - } <nl> - <nl> - void IndexInterface : : IndexInserter : : addInsertionContinuation ( IndexInsertionContinuation * c ) { <nl> - _continuations . push_back ( c ) ; <nl> - } <nl> - <nl> - void IndexInterface : : IndexInserter : : finishAllInsertions ( ) { <nl> - for ( size_t i = 0 ; i < _continuations . size ( ) ; + + i ) { <nl> - _continuations [ i ] - > doIndexInsertionWrites ( ) ; <nl> - } <nl> - } <nl> - <nl> IndexInterface & IndexInterface : : defaultVersion ( ) { <nl> return * IndexDetails : : iis [ DefaultIndexVersionNumber ] ; <nl> } <nl> <nl> - <nl> template < class V > <nl> class IndexInterfaceImpl : public IndexInterface { <nl> public : <nl> namespace mongo { <nl> virtual int keyCompare ( const BSONObj & l , const BSONObj & r , const Ordering & ordering ) ; <nl> <nl> public : <nl> - IndexInsertionContinuation * beginInsertIntoIndex ( <nl> - int idxNo , IndexDetails & _idx , <nl> - DiskLoc _recordLoc , const BSONObj & _key , <nl> - const Ordering & _order , bool dupsAllowed ) { <nl> - <nl> - IndexInsertionContinuationImpl < V > * continuation = new IndexInsertionContinuationImpl < V > ( <nl> - _idx . head , _recordLoc , _key , _order , _idx ) ; <nl> - ScopeGuard allocGuard = MakeGuard ( boost : : checked_delete < IndexInsertionContinuation > , <nl> - continuation ) ; <nl> - _idx . head . btree < V > ( ) - > twoStepInsert ( _idx . head , * continuation , dupsAllowed ) ; <nl> - allocGuard . Dismiss ( ) ; <nl> - return continuation ; <nl> - } <nl> - <nl> virtual long long fullValidate ( const DiskLoc & thisLoc , const BSONObj & order ) { <nl> return thisLoc . 
btree < V > ( ) - > fullValidate ( thisLoc , order ) ; <nl> } <nl> namespace mongo { <nl> } <nl> } <nl> <nl> - void IndexDetails : : getKeysFromObject ( const BSONObj & obj , BSONObjSet & keys ) const { <nl> - getSpec ( ) . getKeys ( obj , keys ) ; <nl> - } <nl> - <nl> - void setDifference ( BSONObjSet & l , BSONObjSet & r , vector < BSONObj * > & diff ) { <nl> - / / l and r must use the same ordering spec . <nl> - verify ( l . key_comp ( ) . order ( ) = = r . key_comp ( ) . order ( ) ) ; <nl> - BSONObjSet : : iterator i = l . begin ( ) ; <nl> - BSONObjSet : : iterator j = r . begin ( ) ; <nl> - while ( 1 ) { <nl> - if ( i = = l . end ( ) ) <nl> - break ; <nl> - while ( j ! = r . end ( ) & & j - > woCompare ( * i ) < 0 ) <nl> - j + + ; <nl> - if ( j = = r . end ( ) | | i - > woCompare ( * j ) ! = 0 ) { <nl> - const BSONObj * jo = & * i ; <nl> - diff . push_back ( ( BSONObj * ) jo ) ; <nl> - } <nl> - i + + ; <nl> - } <nl> - } <nl> - <nl> / / should be { < something > : < simpletype [ 1 | - 1 ] > , . keyp . . } <nl> static bool validKeyPattern ( BSONObj kp ) { <nl> BSONObjIterator i ( kp ) ; <nl> mmm a / src / mongo / db / index . h <nl> ppp b / src / mongo / db / index . h <nl> <nl> # include < vector > <nl> <nl> # include " mongo / db / diskloc . h " <nl> - # include " mongo / db / index_insertion_continuation . h " <nl> # include " mongo / db / indexkey . h " <nl> # include " mongo / db / jsobj . h " <nl> # include " mongo / db / key . h " <nl> namespace mongo { <nl> protected : <nl> virtual ~ IndexInterface ( ) { } <nl> public : <nl> - class IndexInserter : private boost : : noncopyable { <nl> - public : <nl> - IndexInserter ( ) ; <nl> - ~ IndexInserter ( ) ; <nl> - <nl> - void addInsertionContinuation ( IndexInsertionContinuation * c ) ; <nl> - void finishAllInsertions ( ) ; <nl> - <nl> - private : <nl> - std : : vector < IndexInsertionContinuation * > _continuations ; <nl> - } ; <nl> - <nl> - virtual IndexInsertionContinuation * beginInsertIntoIndex ( <nl> - int idxNo , <nl> - IndexDetails & _idx , DiskLoc _recordLoc , const BSONObj & _key , <nl> - const Ordering & _order , bool dupsAllowed ) = 0 ; <nl> <nl> virtual int keyCompare ( const BSONObj & l , const BSONObj & r , const Ordering & ordering ) = 0 ; <nl> virtual long long fullValidate ( const DiskLoc & thisLoc , const BSONObj & order ) = 0 ; <nl> namespace mongo { <nl> return res ; <nl> } <nl> <nl> - / * pull out the relevant key objects from obj , so we <nl> - can index them . Note that the set is multiple elements <nl> - only when it ' s a " multikey " array . <nl> - keys will be left empty if key not found in the object . <nl> - * / <nl> - void getKeysFromObject ( const BSONObj & obj , BSONObjSet & keys ) const ; <nl> - <nl> / * get the key pattern for this object . <nl> e . g . , { lastname : 1 , firstname : 1 } <nl> * / <nl> mmm a / src / mongo / db / index / btree_access_method . cpp <nl> ppp b / src / mongo / db / index / btree_access_method . cpp <nl> <nl> # include " mongo / db / index / btree_index_cursor . h " <nl> # include " mongo / db / index / btree_interface . h " <nl> # include " mongo / db / jsobj . h " <nl> + # include " mongo / db / keypattern . h " <nl> # include " mongo / db / pdfile . h " <nl> # include " mongo / db / pdfile_private . h " <nl> <nl> namespace mongo { <nl> setDifference ( data - > oldKeys , data - > newKeys , & data - > removed ) ; <nl> setDifference ( data - > newKeys , data - > oldKeys , & data - > added ) ; <nl> <nl> - / / Check for dups . <nl> - if ( ! data - > added . 
empty ( ) & & _descriptor - > unique ( ) & & ! options . dupsAllowed ) { <nl> + bool checkForDups = ! data - > added . empty ( ) <nl> + & & ( KeyPattern : : isIdKeyPattern ( _descriptor - > keyPattern ( ) ) | | _descriptor - > unique ( ) ) <nl> + & & ! options . dupsAllowed ; <nl> + <nl> + if ( checkForDups ) { <nl> for ( vector < BSONObj * > : : iterator i = data - > added . begin ( ) ; i ! = data - > added . end ( ) ; i + + ) { <nl> if ( _interface - > wouldCreateDup ( _descriptor - > getOnDisk ( ) , _descriptor - > getHead ( ) , <nl> * * i , _ordering , record ) ) { <nl> mmm a / src / mongo / db / index / btree_access_method_internal . h <nl> ppp b / src / mongo / db / index / btree_access_method_internal . h <nl> namespace mongo { <nl> <nl> protected : <nl> / / Friends who need getKeys . <nl> - / / TODO : uncomment when builder is in . <nl> - / / template < class K > friend class BtreeBasedIndexBuilder ; <nl> + friend class BtreeBasedBuilder ; <nl> <nl> / / See below for body . <nl> class BtreeBasedPrivateUpdateData ; <nl> new file mode 100644 <nl> index 000000000000 . . 331d5e45500e <nl> mmm / dev / null <nl> ppp b / src / mongo / db / index / btree_based_builder . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2013 10gen Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU Affero General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * / <nl> + <nl> + # include " mongo / db / index / btree_based_builder . h " <nl> + <nl> + # include " mongo / db / btreebuilder . h " <nl> + # include " mongo / db / index / catalog_hack . h " <nl> + # include " mongo / db / index / index_descriptor . h " <nl> + # include " mongo / db / index / index_access_method . h " <nl> + # include " mongo / db / kill_current_op . h " <nl> + # include " mongo / db / repl / is_master . h " <nl> + # include " mongo / db / repl / rs . h " <nl> + # include " mongo / db / sort_phase_one . h " <nl> + # include " mongo / util / processinfo . h " <nl> + # include " mongo / db / pdfile_private . h " <nl> + <nl> + namespace mongo { <nl> + <nl> + template < class V > <nl> + void buildBottomUpPhases2And3 ( bool dupsAllowed , <nl> + IndexDetails & idx , <nl> + BSONObjExternalSorter & sorter , <nl> + bool dropDups , <nl> + set < DiskLoc > & dupsToDrop , <nl> + CurOp * op , <nl> + SortPhaseOne * phase1 , <nl> + ProgressMeterHolder & pm , <nl> + Timer & t , <nl> + bool mayInterrupt ) { <nl> + BtreeBuilder < V > btBuilder ( dupsAllowed , idx ) ; <nl> + BSONObj keyLast ; <nl> + auto_ptr < BSONObjExternalSorter : : Iterator > i = sorter . iterator ( ) ; <nl> + / / verifies that pm and op refer to the same ProgressMeter <nl> + verify ( pm = = op - > setMessage ( " index : ( 2 / 3 ) btree bottom up " , <nl> + " Index : ( 2 / 3 ) BTree Bottom Up Progress " , <nl> + phase1 - > nkeys , <nl> + 10 ) ) ; <nl> + while ( i - > more ( ) ) { <nl> + RARELY killCurrentOp . checkForInterrupt ( ! 
mayInterrupt ) ; <nl> + BSONObjExternalSorter : : Data d = i - > next ( ) ; <nl> + <nl> + try { <nl> + if ( ! dupsAllowed & & dropDups ) { <nl> + LastError : : Disabled led ( lastError . get ( ) ) ; <nl> + btBuilder . addKey ( d . first , d . second ) ; <nl> + } <nl> + else { <nl> + btBuilder . addKey ( d . first , d . second ) ; <nl> + } <nl> + } <nl> + catch ( AssertionException & e ) { <nl> + if ( dupsAllowed ) { <nl> + / / unknown exception ? ? <nl> + throw ; <nl> + } <nl> + <nl> + if ( e . interrupted ( ) ) { <nl> + killCurrentOp . checkForInterrupt ( ) ; <nl> + } <nl> + <nl> + if ( ! dropDups ) <nl> + throw ; <nl> + <nl> + / * we could queue these on disk , but normally there are very few dups , so instead we <nl> + keep in ram and have a limit . <nl> + * / <nl> + dupsToDrop . insert ( d . second ) ; <nl> + uassert ( 10092 , " too many dups on index build with dropDups = true " , dupsToDrop . size ( ) < 1000000 ) ; <nl> + } <nl> + pm . hit ( ) ; <nl> + } <nl> + pm . finished ( ) ; <nl> + op - > setMessage ( " index : ( 3 / 3 ) btree - middle " , " Index : ( 3 / 3 ) BTree Middle Progress " ) ; <nl> + LOG ( t . seconds ( ) > 10 ? 0 : 1 ) < < " \ t done building bottom layer , going to commit " < < endl ; <nl> + btBuilder . commit ( mayInterrupt ) ; <nl> + if ( btBuilder . getn ( ) ! = phase1 - > nkeys & & ! dropDups ) { <nl> + warning ( ) < < " not all entries were added to the index , probably some keys were too large " < < endl ; <nl> + } <nl> + } <nl> + <nl> + void BtreeBasedBuilder : : addKeysToPhaseOne ( NamespaceDetails * d , const char * ns , <nl> + const IndexDetails & idx , <nl> + const BSONObj & order , <nl> + SortPhaseOne * phaseOne , <nl> + int64_t nrecords , <nl> + ProgressMeter * progressMeter , <nl> + bool mayInterrupt , int idxNo ) { <nl> + shared_ptr < Cursor > cursor = theDataFileMgr . findAll ( ns ) ; <nl> + phaseOne - > sorter . reset ( new BSONObjExternalSorter ( idx . idxInterface ( ) , order ) ) ; <nl> + phaseOne - > sorter - > hintNumObjects ( nrecords ) ; <nl> + auto_ptr < IndexDescriptor > desc ( CatalogHack : : getDescriptor ( d , idxNo ) ) ; <nl> + auto_ptr < BtreeBasedAccessMethod > iam ( CatalogHack : : getBtreeBasedIndex ( desc . get ( ) ) ) ; <nl> + while ( cursor - > ok ( ) ) { <nl> + RARELY killCurrentOp . checkForInterrupt ( ! mayInterrupt ) ; <nl> + BSONObj o = cursor - > current ( ) ; <nl> + DiskLoc loc = cursor - > currLoc ( ) ; <nl> + BSONObjSet keys ; <nl> + iam - > getKeys ( o , & keys ) ; <nl> + phaseOne - > addKeys ( keys , loc , mayInterrupt ) ; <nl> + cursor - > advance ( ) ; <nl> + progressMeter - > hit ( ) ; <nl> + if ( logLevel > 1 & & phaseOne - > n % 10000 = = 0 ) { <nl> + printMemInfo ( " \ t iterating objects " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + uint64_t BtreeBasedBuilder : : fastBuildIndex ( const char * ns , NamespaceDetails * d , <nl> + IndexDetails & idx , bool mayInterrupt , <nl> + int idxNo ) { <nl> + CurOp * op = cc ( ) . curop ( ) ; <nl> + <nl> + Timer t ; <nl> + <nl> + tlog ( 1 ) < < " fastBuildIndex " < < ns < < ' ' < < idx . info . obj ( ) . toString ( ) < < endl ; <nl> + <nl> + bool dupsAllowed = ! idx . unique ( ) | | ignoreUniqueIndex ( idx ) ; <nl> + bool dropDups = idx . dropDups ( ) | | inDBRepair ; <nl> + BSONObj order = idx . keyPattern ( ) ; <nl> + <nl> + getDur ( ) . writingDiskLoc ( idx . head ) . 
Null ( ) ; <nl> + <nl> + if ( logLevel > 1 ) printMemInfo ( " before index start " ) ; <nl> + <nl> + / * get and sort all the keys mmm - - * / <nl> + ProgressMeterHolder pm ( op - > setMessage ( " index : ( 1 / 3 ) external sort " , <nl> + " Index : ( 1 / 3 ) External Sort Progress " , <nl> + d - > stats . nrecords , <nl> + 10 ) ) ; <nl> + SortPhaseOne phase1 ; <nl> + addKeysToPhaseOne ( d , ns , idx , order , & phase1 , d - > stats . nrecords , pm . get ( ) , <nl> + mayInterrupt , idxNo ) ; <nl> + pm . finished ( ) ; <nl> + <nl> + BSONObjExternalSorter & sorter = * ( phase1 . sorter ) ; <nl> + / / Ensure the index and external sorter have a consistent index interface ( and sort order ) . <nl> + fassert ( 16408 , & idx . idxInterface ( ) = = & sorter . getIndexInterface ( ) ) ; <nl> + <nl> + if ( phase1 . multi ) { <nl> + d - > setIndexIsMultikey ( ns , idxNo ) ; <nl> + } <nl> + <nl> + if ( logLevel > 1 ) printMemInfo ( " before final sort " ) ; <nl> + phase1 . sorter - > sort ( mayInterrupt ) ; <nl> + if ( logLevel > 1 ) printMemInfo ( " after final sort " ) ; <nl> + <nl> + LOG ( t . seconds ( ) > 5 ? 0 : 1 ) < < " \ t external sort used : " < < sorter . numFiles ( ) < < " files " < < " in " < < t . seconds ( ) < < " secs " < < endl ; <nl> + <nl> + set < DiskLoc > dupsToDrop ; <nl> + <nl> + / * build index mmm * / <nl> + if ( idx . version ( ) = = 0 ) <nl> + buildBottomUpPhases2And3 < V0 > ( dupsAllowed , <nl> + idx , <nl> + sorter , <nl> + dropDups , <nl> + dupsToDrop , <nl> + op , <nl> + & phase1 , <nl> + pm , <nl> + t , <nl> + mayInterrupt ) ; <nl> + else if ( idx . version ( ) = = 1 ) <nl> + buildBottomUpPhases2And3 < V1 > ( dupsAllowed , <nl> + idx , <nl> + sorter , <nl> + dropDups , <nl> + dupsToDrop , <nl> + op , <nl> + & phase1 , <nl> + pm , <nl> + t , <nl> + mayInterrupt ) ; <nl> + else <nl> + verify ( false ) ; <nl> + <nl> + if ( dropDups ) <nl> + log ( ) < < " \ t fastBuildIndex dupsToDrop : " < < dupsToDrop . size ( ) < < endl ; <nl> + <nl> + BtreeBasedBuilder : : doDropDups ( ns , d , dupsToDrop , mayInterrupt ) ; <nl> + <nl> + return phase1 . n ; <nl> + } <nl> + <nl> + void BtreeBasedBuilder : : doDropDups ( const char * ns , NamespaceDetails * d , <nl> + const set < DiskLoc > & dupsToDrop , bool mayInterrupt ) { <nl> + <nl> + for ( set < DiskLoc > : : const_iterator i = dupsToDrop . begin ( ) ; i ! = dupsToDrop . end ( ) ; + + i ) { <nl> + RARELY killCurrentOp . checkForInterrupt ( ! mayInterrupt ) ; <nl> + theDataFileMgr . deleteRecord ( d , <nl> + ns , <nl> + i - > rec ( ) , <nl> + * i , <nl> + false / * cappedOk * / , <nl> + true / * noWarn * / , <nl> + isMaster ( ns ) / * logOp * / ) ; <nl> + getDur ( ) . commitIfNeeded ( ) ; <nl> + } <nl> + } <nl> + <nl> + } / / namespace mongo <nl> new file mode 100644 <nl> index 000000000000 . . 4f9c35fb1a13 <nl> mmm / dev / null <nl> ppp b / src / mongo / db / index / btree_based_builder . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2013 10gen Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU Affero General Public License for more details . 
<nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < set > <nl> + <nl> + # include " mongo / db / jsobj . h " <nl> + # include " mongo / db / pdfile . h " <nl> + <nl> + namespace IndexUpdateTests { <nl> + class AddKeysToPhaseOne ; <nl> + class InterruptAddKeysToPhaseOne ; <nl> + class DoDropDups ; <nl> + class InterruptDoDropDups ; <nl> + } <nl> + <nl> + namespace mongo { <nl> + <nl> + class BSONObjExternalSorter ; <nl> + class IndexDetails ; <nl> + class NamespaceDetails ; <nl> + class ProgressMeter ; <nl> + class ProgressMeterHolder ; <nl> + class SortPhaseOne ; <nl> + <nl> + class BtreeBasedBuilder { <nl> + public : <nl> + / * * <nl> + * Want to build an index ? Call this . Throws DBException . <nl> + * / <nl> + static uint64_t fastBuildIndex ( const char * ns , NamespaceDetails * d , IndexDetails & idx , <nl> + bool mayInterrupt , int idxNo ) ; <nl> + private : <nl> + friend class IndexUpdateTests : : AddKeysToPhaseOne ; <nl> + friend class IndexUpdateTests : : InterruptAddKeysToPhaseOne ; <nl> + friend class IndexUpdateTests : : DoDropDups ; <nl> + friend class IndexUpdateTests : : InterruptDoDropDups ; <nl> + <nl> + static void addKeysToPhaseOne ( NamespaceDetails * d , const char * ns , const IndexDetails & idx , <nl> + const BSONObj & order , SortPhaseOne * phaseOne , <nl> + int64_t nrecords , ProgressMeter * progressMeter , bool mayInterrupt , <nl> + int idxNo ) ; <nl> + <nl> + static void doDropDups ( const char * ns , NamespaceDetails * d , const set < DiskLoc > & dupsToDrop , <nl> + bool mayInterrupt ) ; <nl> + } ; <nl> + <nl> + / / Exposed for testing purposes . <nl> + template < class V > <nl> + void buildBottomUpPhases2And3 ( bool dupsAllowed , <nl> + IndexDetails & idx , <nl> + BSONObjExternalSorter & sorter , <nl> + bool dropDups , <nl> + set < DiskLoc > & dupsToDrop , <nl> + CurOp * op , <nl> + SortPhaseOne * phase1 , <nl> + ProgressMeterHolder & pm , <nl> + Timer & t , <nl> + bool mayInterrupt ) ; <nl> + <nl> + } / / namespace mongo <nl> mmm a / src / mongo / db / index / catalog_hack . h <nl> ppp b / src / mongo / db / index / catalog_hack . h <nl> <nl> * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> * / <nl> <nl> + # pragma once <nl> + <nl> # include " mongo / db / index / 2d_access_method . h " <nl> # include " mongo / db / index / btree_access_method . h " <nl> + # include " mongo / db / index / btree_access_method_internal . h " <nl> # include " mongo / db / index / fts_access_method . h " <nl> # include " mongo / db / index / hash_access_method . h " <nl> # include " mongo / db / index / haystack_access_method . h " <nl> namespace mongo { <nl> return new IndexDescriptor ( nsd , idxNo , & id , id . info . 
obj ( ) ) ; <nl> } <nl> <nl> + static BtreeBasedAccessMethod * getBtreeBasedIndex ( IndexDescriptor * desc ) { <nl> + string type = KeyPattern : : findPluginName ( desc - > keyPattern ( ) ) ; <nl> + if ( " hashed " = = type ) { <nl> + return new HashAccessMethod ( desc ) ; <nl> + } else if ( " 2dsphere " = = type ) { <nl> + return new S2AccessMethod ( desc ) ; <nl> + } else if ( " text " = = type | | " _fts " = = type ) { <nl> + return new FTSAccessMethod ( desc ) ; <nl> + } else if ( " geoHaystack " = = type ) { <nl> + return new HaystackAccessMethod ( desc ) ; <nl> + } else if ( " " = = type ) { <nl> + return new BtreeAccessMethod ( desc ) ; <nl> + } else if ( " 2d " = = type ) { <nl> + return new TwoDAccessMethod ( desc ) ; <nl> + } else { <nl> + cout < < " Can ' t find index for keypattern " < < desc - > keyPattern ( ) < < endl ; <nl> + verify ( 0 ) ; <nl> + return NULL ; <nl> + } <nl> + } <nl> + <nl> static IndexAccessMethod * getIndex ( IndexDescriptor * desc ) { <nl> string type = KeyPattern : : findPluginName ( desc - > keyPattern ( ) ) ; <nl> if ( " hashed " = = type ) { <nl> deleted file mode 100644 <nl> index 674931382964 . . 000000000000 <nl> mmm a / src / mongo / db / index_insertion_continuation . h <nl> ppp / dev / null <nl> <nl> - / * * <nl> - * Copyright ( C ) 2008 10gen Inc . <nl> - * <nl> - * This program is free software : you can redistribute it and / or modify <nl> - * it under the terms of the GNU Affero General Public License , version 3 , <nl> - * as published by the Free Software Foundation . <nl> - * <nl> - * This program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * GNU Affero General Public License for more details . <nl> - * <nl> - * You should have received a copy of the GNU Affero General Public License <nl> - * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> - * / <nl> - # pragma once <nl> - <nl> - # include " mongo / db / diskloc . h " <nl> - # include " mongo / db / jsobj . h " <nl> - <nl> - namespace mongo { <nl> - <nl> - class IndexDetails ; <nl> - template < typename V > class BtreeBucket ; <nl> - <nl> - / * * <nl> - * This class represents the write phase of the two - phase index insertion . <nl> - * / <nl> - class IndexInsertionContinuation : private boost : : noncopyable { <nl> - public : <nl> - enum Op { Nothing , SetUsed , InsertHere } ; <nl> - <nl> - virtual ~ IndexInsertionContinuation ( ) ; <nl> - virtual void doIndexInsertionWrites ( ) const = 0 ; <nl> - } ; <nl> - <nl> - template < class V > <nl> - struct IndexInsertionContinuationImpl : public IndexInsertionContinuation { <nl> - <nl> - IndexInsertionContinuationImpl ( DiskLoc thisLoc , DiskLoc _recordLoc , const BSONObj & _key , <nl> - Ordering _order , IndexDetails & _idx ) : <nl> - bLoc ( thisLoc ) , recordLoc ( _recordLoc ) , key ( _key ) , order ( _order ) , idx ( _idx ) { <nl> - op = Nothing ; <nl> - } <nl> - <nl> - DiskLoc bLoc ; <nl> - DiskLoc recordLoc ; <nl> - typename V : : KeyOwned key ; <nl> - const Ordering order ; <nl> - IndexDetails & idx ; <nl> - Op op ; <nl> - <nl> - int pos ; <nl> - const BtreeBucket < V > * b ; <nl> - <nl> - void doIndexInsertionWrites ( ) const { <nl> - if ( op = = Nothing ) <nl> - return ; <nl> - else if ( op = = SetUsed ) { <nl> - const typename V : : _KeyNode & kn = b - > k ( pos ) ; <nl> - kn . writing ( ) . 
setUsed ( ) ; <nl> - } <nl> - else { <nl> - b - > insertHere ( bLoc , pos , recordLoc , key , order , DiskLoc ( ) , DiskLoc ( ) , idx ) ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - <nl> - } / / namespace mongo <nl> mmm a / src / mongo / db / index_update . cpp <nl> ppp b / src / mongo / db / index_update . cpp <nl> <nl> # include " mongo / db / clientcursor . h " <nl> # include " mongo / db / extsort . h " <nl> # include " mongo / db / index . h " <nl> + # include " mongo / db / index / btree_based_builder . h " <nl> # include " mongo / db / index / catalog_hack . h " <nl> # include " mongo / db / kill_current_op . h " <nl> # include " mongo / db / namespace_details . h " <nl> # include " mongo / db / pdfile_private . h " <nl> # include " mongo / db / repl / is_master . h " <nl> # include " mongo / db / repl / rs . h " <nl> - # include " mongo / db / sort_phase_one . h " <nl> # include " mongo / util / processinfo . h " <nl> # include " mongo / util / startup_test . h " <nl> <nl> namespace mongo { <nl> / / Bulk index building <nl> / / <nl> <nl> - void addKeysToPhaseOne ( const char * ns , <nl> - const IndexDetails & idx , <nl> - const BSONObj & order , <nl> - SortPhaseOne * phaseOne , <nl> - int64_t nrecords , <nl> - ProgressMeter * progressMeter , <nl> - bool mayInterrupt ) { <nl> - shared_ptr < Cursor > cursor = theDataFileMgr . findAll ( ns ) ; <nl> - phaseOne - > sorter . reset ( new BSONObjExternalSorter ( idx . idxInterface ( ) , order ) ) ; <nl> - phaseOne - > sorter - > hintNumObjects ( nrecords ) ; <nl> - const IndexSpec & spec = idx . getSpec ( ) ; <nl> - while ( cursor - > ok ( ) ) { <nl> - RARELY killCurrentOp . checkForInterrupt ( ! mayInterrupt ) ; <nl> - BSONObj o = cursor - > current ( ) ; <nl> - DiskLoc loc = cursor - > currLoc ( ) ; <nl> - phaseOne - > addKeys ( spec , o , loc , mayInterrupt ) ; <nl> - cursor - > advance ( ) ; <nl> - progressMeter - > hit ( ) ; <nl> - if ( logLevel > 1 & & phaseOne - > n % 10000 = = 0 ) { <nl> - printMemInfo ( " \ t iterating objects " ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - template < class V > <nl> - void buildBottomUpPhases2And3 ( bool dupsAllowed , <nl> - IndexDetails & idx , <nl> - BSONObjExternalSorter & sorter , <nl> - bool dropDups , <nl> - set < DiskLoc > & dupsToDrop , <nl> - CurOp * op , <nl> - SortPhaseOne * phase1 , <nl> - ProgressMeterHolder & pm , <nl> - Timer & t , <nl> - bool mayInterrupt ) { <nl> - BtreeBuilder < V > btBuilder ( dupsAllowed , idx ) ; <nl> - BSONObj keyLast ; <nl> - auto_ptr < BSONObjExternalSorter : : Iterator > i = sorter . iterator ( ) ; <nl> - / / verifies that pm and op refer to the same ProgressMeter <nl> - verify ( pm = = op - > setMessage ( " index : ( 2 / 3 ) btree bottom up " , <nl> - " Index : ( 2 / 3 ) BTree Bottom Up Progress " , <nl> - phase1 - > nkeys , <nl> - 10 ) ) ; <nl> - while ( i - > more ( ) ) { <nl> - RARELY killCurrentOp . checkForInterrupt ( ! mayInterrupt ) ; <nl> - BSONObjExternalSorter : : Data d = i - > next ( ) ; <nl> - <nl> - try { <nl> - if ( ! dupsAllowed & & dropDups ) { <nl> - LastError : : Disabled led ( lastError . get ( ) ) ; <nl> - btBuilder . addKey ( d . first , d . second ) ; <nl> - } <nl> - else { <nl> - btBuilder . addKey ( d . first , d . second ) ; <nl> - } <nl> - } <nl> - catch ( AssertionException & e ) { <nl> - if ( dupsAllowed ) { <nl> - / / unknown exception ? ? <nl> - throw ; <nl> - } <nl> - <nl> - if ( e . interrupted ( ) ) { <nl> - killCurrentOp . checkForInterrupt ( ) ; <nl> - } <nl> - <nl> - if ( ! 
dropDups ) <nl> - throw ; <nl> - <nl> - / * we could queue these on disk , but normally there are very few dups , so instead we <nl> - keep in ram and have a limit . <nl> - * / <nl> - dupsToDrop . insert ( d . second ) ; <nl> - uassert ( 10092 , " too many dups on index build with dropDups = true " , dupsToDrop . size ( ) < 1000000 ) ; <nl> - } <nl> - pm . hit ( ) ; <nl> - } <nl> - pm . finished ( ) ; <nl> - op - > setMessage ( " index : ( 3 / 3 ) btree - middle " , " Index : ( 3 / 3 ) BTree Middle Progress " ) ; <nl> - LOG ( t . seconds ( ) > 10 ? 0 : 1 ) < < " \ t done building bottom layer , going to commit " < < endl ; <nl> - btBuilder . commit ( mayInterrupt ) ; <nl> - if ( btBuilder . getn ( ) ! = phase1 - > nkeys & & ! dropDups ) { <nl> - warning ( ) < < " not all entries were added to the index , probably some keys were too large " < < endl ; <nl> - } <nl> - } <nl> - <nl> - void doDropDups ( const char * ns , <nl> - NamespaceDetails * d , <nl> - const set < DiskLoc > & dupsToDrop , <nl> - bool mayInterrupt ) { <nl> - for ( set < DiskLoc > : : const_iterator i = dupsToDrop . begin ( ) ; i ! = dupsToDrop . end ( ) ; + + i ) { <nl> - RARELY killCurrentOp . checkForInterrupt ( ! mayInterrupt ) ; <nl> - theDataFileMgr . deleteRecord ( d , <nl> - ns , <nl> - i - > rec ( ) , <nl> - * i , <nl> - false / * cappedOk * / , <nl> - true / * noWarn * / , <nl> - isMaster ( ns ) / * logOp * / ) ; <nl> - getDur ( ) . commitIfNeeded ( ) ; <nl> - } <nl> - } <nl> - <nl> - / / throws DBException <nl> - uint64_t fastBuildIndex ( const char * ns , <nl> - NamespaceDetails * d , <nl> - IndexDetails & idx , <nl> - bool mayInterrupt ) { <nl> - CurOp * op = cc ( ) . curop ( ) ; <nl> - <nl> - Timer t ; <nl> - <nl> - tlog ( 1 ) < < " fastBuildIndex " < < ns < < ' ' < < idx . info . obj ( ) . toString ( ) < < endl ; <nl> - <nl> - bool dupsAllowed = ! idx . unique ( ) | | ignoreUniqueIndex ( idx ) ; <nl> - bool dropDups = idx . dropDups ( ) | | inDBRepair ; <nl> - BSONObj order = idx . keyPattern ( ) ; <nl> - <nl> - getDur ( ) . writingDiskLoc ( idx . head ) . Null ( ) ; <nl> - <nl> - if ( logLevel > 1 ) printMemInfo ( " before index start " ) ; <nl> - <nl> - / * get and sort all the keys mmm - - * / <nl> - ProgressMeterHolder pm ( op - > setMessage ( " index : ( 1 / 3 ) external sort " , <nl> - " Index : ( 1 / 3 ) External Sort Progress " , <nl> - d - > stats . nrecords , <nl> - 10 ) ) ; <nl> - SortPhaseOne _ours ; <nl> - SortPhaseOne * phase1 = theDataFileMgr . getPrecalced ( ) ; <nl> - if ( phase1 = = 0 ) { <nl> - phase1 = & _ours ; <nl> - addKeysToPhaseOne ( ns , idx , order , phase1 , d - > stats . nrecords , pm . get ( ) , mayInterrupt ) ; <nl> - } <nl> - pm . finished ( ) ; <nl> - <nl> - BSONObjExternalSorter & sorter = * ( phase1 - > sorter ) ; <nl> - / / Ensure the index and external sorter have a consistent index interface ( and sort order ) . <nl> - fassert ( 16408 , & idx . idxInterface ( ) = = & sorter . getIndexInterface ( ) ) ; <nl> - <nl> - if ( phase1 - > multi ) { <nl> - int idxNo = IndexBuildsInProgress : : get ( ns , idx . info . obj ( ) [ " name " ] . valuestr ( ) ) ; <nl> - d - > setIndexIsMultikey ( ns , idxNo ) ; <nl> - } <nl> - <nl> - if ( logLevel > 1 ) printMemInfo ( " before final sort " ) ; <nl> - phase1 - > sorter - > sort ( mayInterrupt ) ; <nl> - if ( logLevel > 1 ) printMemInfo ( " after final sort " ) ; <nl> - <nl> - LOG ( t . seconds ( ) > 5 ? 0 : 1 ) < < " \ t external sort used : " < < sorter . numFiles ( ) < < " files " < < " in " < < t . 
seconds ( ) < < " secs " < < endl ; <nl> - <nl> - set < DiskLoc > dupsToDrop ; <nl> - <nl> - / * build index mmm * / <nl> - if ( idx . version ( ) = = 0 ) <nl> - buildBottomUpPhases2And3 < V0 > ( dupsAllowed , <nl> - idx , <nl> - sorter , <nl> - dropDups , <nl> - dupsToDrop , <nl> - op , <nl> - phase1 , <nl> - pm , <nl> - t , <nl> - mayInterrupt ) ; <nl> - else if ( idx . version ( ) = = 1 ) <nl> - buildBottomUpPhases2And3 < V1 > ( dupsAllowed , <nl> - idx , <nl> - sorter , <nl> - dropDups , <nl> - dupsToDrop , <nl> - op , <nl> - phase1 , <nl> - pm , <nl> - t , <nl> - mayInterrupt ) ; <nl> - else <nl> - verify ( false ) ; <nl> - <nl> - if ( dropDups ) <nl> - log ( ) < < " \ t fastBuildIndex dupsToDrop : " < < dupsToDrop . size ( ) < < endl ; <nl> - <nl> - doDropDups ( ns , d , dupsToDrop , mayInterrupt ) ; <nl> - <nl> - return phase1 - > n ; <nl> - } <nl> - <nl> class BackgroundIndexBuildJob : public BackgroundOperation { <nl> <nl> unsigned long long addExistingToIndex ( const char * ns , NamespaceDetails * d , <nl> namespace mongo { <nl> <nl> verify ( Lock : : isWriteLocked ( ns ) ) ; <nl> <nl> - / / Build index spec here in case the collection is empty and the index details are invalid <nl> - idx . getSpec ( ) ; <nl> - <nl> if ( inDBRepair | | ! background ) { <nl> - n = fastBuildIndex ( ns . c_str ( ) , d , idx , mayInterrupt ) ; <nl> + int idxNo = IndexBuildsInProgress : : get ( ns . c_str ( ) , idx . info . obj ( ) [ " name " ] . valuestr ( ) ) ; <nl> + n = BtreeBasedBuilder : : fastBuildIndex ( ns . c_str ( ) , d , idx , mayInterrupt , idxNo ) ; <nl> verify ( ! idx . head . isNull ( ) ) ; <nl> } <nl> else { <nl> namespace mongo { <nl> return true ; <nl> } <nl> <nl> - / * * <nl> - * DEPRECATED - - only used by prefetch . cpp <nl> - * step one of adding keys to index idxNo for a new record <nl> - * / <nl> - void fetchIndexInserters ( BSONObjSet & / * out * / keys , <nl> - IndexInterface : : IndexInserter & inserter , <nl> - NamespaceDetails * d , <nl> - int idxNo , <nl> - const BSONObj & obj , <nl> - DiskLoc recordLoc , <nl> - const bool allowDups ) { <nl> - IndexDetails & idx = d - > idx ( idxNo ) ; <nl> - idx . getKeysFromObject ( obj , keys ) ; <nl> - if ( keys . empty ( ) ) <nl> - return ; <nl> - bool dupsAllowed = ! idx . unique ( ) | | allowDups ; <nl> - Ordering ordering = Ordering : : make ( idx . keyPattern ( ) ) ; <nl> - <nl> - try { <nl> - / / we can ' t do the two step method with multi keys as insertion of one key changes the indexes <nl> - / / structure . however we can do the first key of the set so we go ahead and do that FWIW <nl> - inserter . addInsertionContinuation ( <nl> - idx . idxInterface ( ) . beginInsertIntoIndex ( <nl> - idxNo , idx , recordLoc , * keys . begin ( ) , ordering , dupsAllowed ) ) ; <nl> - } <nl> - catch ( AssertionException & e ) { <nl> - if ( e . getCode ( ) = = 10287 & & idxNo > = d - > nIndexes ) { <nl> - DEV log ( ) < < " info : caught key already in index on bg indexing ( ok ) " < < endl ; <nl> - } <nl> - else { <nl> - throw ; <nl> - } <nl> - } <nl> - } <nl> - <nl> class IndexUpdateTest : public StartupTest { <nl> public : <nl> void run ( ) { <nl> mmm a / src / mongo / db / index_update . h <nl> ppp b / src / mongo / db / index_update . h <nl> namespace mongo { <nl> class Record ; <nl> <nl> / / unindex all keys in index for this record . 
<nl> - void unindexRecord ( NamespaceDetails * d , Record * todelete , const DiskLoc & dl , bool noWarn = false ) ; <nl> + void unindexRecord ( NamespaceDetails * d , Record * todelete , const DiskLoc & dl , <nl> + bool noWarn = false ) ; <nl> <nl> / / Build an index in the foreground <nl> / / If background is false , uses fast index builder <nl> namespace mongo { <nl> / / add index keys for a newly inserted record <nl> void indexRecord ( const char * ns , NamespaceDetails * d , const BSONObj & obj , const DiskLoc & loc ) ; <nl> <nl> - / / Given an object , populate " inserter " with information necessary to update indexes . <nl> - void fetchIndexInserters ( BSONObjSet & / * out * / keys , <nl> - IndexInterface : : IndexInserter & inserter , <nl> - NamespaceDetails * d , <nl> - int idxNo , <nl> - const BSONObj & obj , <nl> - DiskLoc recordLoc , <nl> - const bool allowDups = false ) ; <nl> - <nl> - bool dropIndexes ( NamespaceDetails * d , const char * ns , const char * name , string & errmsg , BSONObjBuilder & anObjBuilder , bool maydeleteIdIndex ) ; <nl> + bool dropIndexes ( NamespaceDetails * d , const char * ns , const char * name , string & errmsg , <nl> + BSONObjBuilder & anObjBuilder , bool maydeleteIdIndex ) ; <nl> <nl> / * * <nl> * Add an _id index to namespace @ param ' ns ' if not already present . <nl> namespace mongo { <nl> struct SortPhaseOne ; <nl> class Timer ; <nl> <nl> - / * * Extract index keys from the @ param ' ns ' to the external sorter in @ param ' phaseOne ' . * / <nl> - void addKeysToPhaseOne ( const char * ns , <nl> - const IndexDetails & idx , <nl> - const BSONObj & order , <nl> - SortPhaseOne * phaseOne , <nl> - int64_t nrecords , <nl> - ProgressMeter * progressMeter , <nl> - bool mayInterrupt ) ; <nl> - <nl> - / * * Populate the index @ param ' idx ' using the keys contained in @ param ' sorter ' . * / <nl> - template < class V > <nl> - void buildBottomUpPhases2And3 ( bool dupsAllowed , <nl> - IndexDetails & idx , <nl> - BSONObjExternalSorter & sorter , <nl> - bool dropDups , <nl> - set < DiskLoc > & dupsToDrop , <nl> - CurOp * op , <nl> - SortPhaseOne * phase1 , <nl> - ProgressMeterHolder & pm , <nl> - Timer & t , <nl> - bool mayInterrupt ) ; <nl> - <nl> - / * * Drop duplicate documents from the set @ param ' dupsToDrop ' . * / <nl> - void doDropDups ( const char * ns , <nl> - NamespaceDetails * d , <nl> - const set < DiskLoc > & dupsToDrop , <nl> - bool mayInterrupt ) ; <nl> - <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / pdfile . cpp <nl> ppp b / src / mongo / db / pdfile . cpp <nl> namespace mongo { <nl> IndexDetails & idx = d - > idx ( idxNo ) ; <nl> if ( ignoreUniqueIndex ( idx ) ) <nl> continue ; <nl> - BSONObjSet keys ; <nl> - idx . getKeysFromObject ( obj , keys ) ; <nl> - BSONObj order = idx . keyPattern ( ) ; <nl> - IndexInterface & ii = idx . idxInterface ( ) ; <nl> - for ( BSONObjSet : : iterator i = keys . begin ( ) ; i ! = keys . end ( ) ; i + + ) { <nl> - / / WARNING : findSingle may not be compound index safe . this may need to change . see notes in <nl> - / / findSingle code . <nl> - uassert ( 12582 , " duplicate key insert for unique index of capped collection " , <nl> - ii . findSingle ( idx , idx . head , * i ) . isNull ( ) ) ; <nl> + auto_ptr < IndexDescriptor > descriptor ( CatalogHack : : getDescriptor ( d , idxNo ) ) ; <nl> + auto_ptr < IndexAccessMethod > iam ( CatalogHack : : getIndex ( descriptor . get ( ) ) ) ; <nl> + InsertDeleteOptions options ; <nl> + options . logIfError = false ; <nl> + options . 
dupsAllowed = false ; <nl> + UpdateTicket ticket ; <nl> + Status ret = iam - > validateUpdate ( BSONObj ( ) , obj , DiskLoc ( ) , options , & ticket ) ; <nl> + if ( ret ! = Status : : OK ( ) ) { <nl> + uasserted ( 12582 , " duplicate key insert for unique index of capped collection " ) ; <nl> } <nl> } <nl> } <nl> mmm a / src / mongo / db / prefetch . cpp <nl> ppp b / src / mongo / db / prefetch . cpp <nl> <nl> <nl> # include " mongo / db / dbhelpers . h " <nl> # include " mongo / db / diskloc . h " <nl> + # include " mongo / db / index / catalog_hack . h " <nl> # include " mongo / db / index . h " <nl> # include " mongo / db / index_update . h " <nl> # include " mongo / db / jsobj . h " <nl> namespace mongo { <nl> <nl> void prefetchIndexPages ( NamespaceDetails * nsd , const BSONObj & obj ) { <nl> DiskLoc unusedDl ; / / unused <nl> - IndexInterface : : IndexInserter inserter ; <nl> BSONObjSet unusedKeys ; <nl> ReplSetImpl : : IndexPrefetchConfig prefetchConfig = theReplSet - > getIndexPrefetchConfig ( ) ; <nl> <nl> namespace mongo { <nl> int indexNo = nsd - > findIdIndex ( ) ; <nl> if ( indexNo = = - 1 ) return ; <nl> try { <nl> - fetchIndexInserters ( / * out * / unusedKeys , <nl> - inserter , <nl> - nsd , <nl> - indexNo , <nl> - obj , <nl> - unusedDl , <nl> - / * allowDups * / true ) ; <nl> + auto_ptr < IndexDescriptor > desc ( CatalogHack : : getDescriptor ( nsd , indexNo ) ) ; <nl> + auto_ptr < IndexAccessMethod > iam ( CatalogHack : : getIndex ( desc . get ( ) ) ) ; <nl> + iam - > touch ( obj ) ; <nl> } <nl> catch ( const DBException & e ) { <nl> LOG ( 2 ) < < " ignoring exception in prefetchIndexPages ( ) : " < < e . what ( ) < < endl ; <nl> namespace mongo { <nl> TimerHolder timer ( & prefetchIndexStats ) ; <nl> / / This will page in all index pages for the given object . <nl> try { <nl> - fetchIndexInserters ( / * out * / unusedKeys , <nl> - inserter , <nl> - nsd , <nl> - indexNo , <nl> - obj , <nl> - unusedDl , <nl> - / * allowDups * / true ) ; <nl> + auto_ptr < IndexDescriptor > desc ( CatalogHack : : getDescriptor ( nsd , indexNo ) ) ; <nl> + auto_ptr < IndexAccessMethod > iam ( CatalogHack : : getIndex ( desc . get ( ) ) ) ; <nl> + iam - > touch ( obj ) ; <nl> } <nl> catch ( const DBException & e ) { <nl> LOG ( 2 ) < < " ignoring exception in prefetchIndexPages ( ) : " < < e . what ( ) < < endl ; <nl> mmm a / src / mongo / db / sort_phase_one . h <nl> ppp b / src / mongo / db / sort_phase_one . h <nl> namespace mongo { <nl> unsigned long long nkeys ; <nl> bool multi ; / / multikey index <nl> <nl> - void addKeys ( const IndexSpec & spec , const BSONObj & o , DiskLoc loc , bool mayInterrupt ) { <nl> - BSONObjSet keys ; <nl> - spec . getKeys ( o , keys ) ; <nl> - int k = 0 ; <nl> - for ( BSONObjSet : : iterator i = keys . begin ( ) ; i ! = keys . end ( ) ; i + + ) { <nl> - if ( + + k = = 2 ) { <nl> - multi = true ; <nl> - } <nl> - sorter - > add ( * i , loc , mayInterrupt ) ; <nl> - nkeys + + ; <nl> + void addKeys ( const BSONObjSet & keys , const DiskLoc & loc , bool mayInterrupt ) { <nl> + multi = multi | | ( keys . size ( ) > 1 ) ; <nl> + for ( BSONObjSet : : iterator it = keys . begin ( ) ; it ! = keys . end ( ) ; + + it ) { <nl> + sorter - > add ( * it , loc , mayInterrupt ) ; <nl> + + + nkeys ; <nl> } <nl> - n + + ; <nl> + + + n ; <nl> } <nl> } ; <nl> <nl> - } <nl> + } / / namespace mongo <nl> mmm a / src / mongo / dbtests / btreetests . cpp <nl> ppp b / src / mongo / dbtests / btreetests . 
cpp <nl> <nl> # define BtreeBucket BtreeBucket < V0 > <nl> # define btree btree < V0 > <nl> # define btreemod btreemod < V0 > <nl> - # define Continuation IndexInsertionContinuationImpl < V0 > <nl> # define testName " btree " <nl> # define BTVERSION 0 <nl> namespace BtreeTests0 { <nl> namespace BtreeTests0 { <nl> # undef BtreeBucket <nl> # undef btree <nl> # undef btreemod <nl> - # undef Continuation <nl> # define BtreeBucket BtreeBucket < V1 > <nl> # define btree btree < V1 > <nl> # define btreemod btreemod < V1 > <nl> - # define Continuation IndexInsertionContinuationImpl < V1 > <nl> # undef testName <nl> # define testName " btree1 " <nl> # undef BTVERSION <nl> namespace BtreeTests0 { <nl> namespace BtreeTests1 { <nl> # include " btreetests . inl " <nl> } <nl> - <nl> - # undef testName <nl> - # define testName " btree1_twostep " <nl> - # define TESTTWOSTEP 1 <nl> - <nl> - namespace BtreeTests2 { <nl> - # include " btreetests . inl " <nl> - } <nl> mmm a / src / mongo / dbtests / btreetests . inl <nl> ppp b / src / mongo / dbtests / btreetests . inl <nl> <nl> } <nl> void insert ( BSONObj & key ) { <nl> const BtreeBucket * b = bt ( ) ; <nl> - <nl> - # if defined ( TESTTWOSTEP ) <nl> - { <nl> - Continuation c ( dl ( ) , recordLoc ( ) , key , Ordering : : make ( order ( ) ) , id ( ) ) ; <nl> - b - > twoStepInsert ( dl ( ) , c , true ) ; <nl> - c . doIndexInsertionWrites ( ) ; <nl> - } <nl> - # else <nl> - { <nl> - b - > bt_insert ( dl ( ) , recordLoc ( ) , key , Ordering : : make ( order ( ) ) , true , id ( ) , true ) ; <nl> - } <nl> - # endif <nl> + b - > bt_insert ( dl ( ) , recordLoc ( ) , key , Ordering : : make ( order ( ) ) , true , id ( ) , true ) ; <nl> getDur ( ) . commitIfNeeded ( ) ; <nl> } <nl> bool unindex ( BSONObj & key ) { <nl> mmm a / src / mongo / dbtests / indexupdatetests . cpp <nl> ppp b / src / mongo / dbtests / indexupdatetests . cpp <nl> <nl> # include " mongo / db / btree . h " <nl> # include " mongo / db / btreecursor . h " <nl> # include " mongo / db / dbhelpers . h " <nl> + # include " mongo / db / index / btree_based_builder . h " <nl> # include " mongo / db / kill_current_op . h " <nl> # include " mongo / db / pdfile . h " <nl> # include " mongo / db / sort_phase_one . h " <nl> namespace IndexUpdateTests { <nl> nDocs , <nl> nDocs ) ) ; <nl> / / Add keys to phaseOne . <nl> - addKeysToPhaseOne ( _ns , id , BSON ( " a " < < 1 ) , & phaseOne , nDocs , pm . get ( ) , true ) ; <nl> + BtreeBasedBuilder : : addKeysToPhaseOne ( nsdetails ( _ns ) , _ns , id , BSON ( " a " < < 1 ) , & phaseOne , nDocs , pm . get ( ) , true , <nl> + nsdetails ( _ns ) - > idxNo ( id ) ) ; <nl> / / Keys for all documents were added to phaseOne . <nl> ASSERT_EQUALS ( static_cast < uint64_t > ( nDocs ) , phaseOne . n ) ; <nl> } <nl> namespace IndexUpdateTests { <nl> cc ( ) . curop ( ) - > kill ( ) ; <nl> if ( _mayInterrupt ) { <nl> / / Add keys to phaseOne . <nl> - ASSERT_THROWS ( addKeysToPhaseOne ( _ns , <nl> + ASSERT_THROWS ( BtreeBasedBuilder : : addKeysToPhaseOne ( nsdetails ( _ns ) , _ns , <nl> id , <nl> BSON ( " a " < < 1 ) , <nl> & phaseOne , <nl> nDocs , <nl> pm . get ( ) , <nl> - _mayInterrupt ) , <nl> + _mayInterrupt , <nl> + nsdetails ( _ns ) - > idxNo ( id ) ) , <nl> UserException ) ; <nl> / / Not all keys were added to phaseOne due to the interrupt . <nl> ASSERT ( static_cast < uint64_t > ( nDocs ) > phaseOne . n ) ; <nl> } <nl> else { <nl> / / Add keys to phaseOne . 
<nl> - addKeysToPhaseOne ( _ns , <nl> + BtreeBasedBuilder : : addKeysToPhaseOne ( nsdetails ( _ns ) , _ns , <nl> id , <nl> BSON ( " a " < < 1 ) , <nl> & phaseOne , <nl> nDocs , <nl> pm . get ( ) , <nl> - _mayInterrupt ) ; <nl> + _mayInterrupt , nsdetails ( _ns ) - > idxNo ( id ) ) ; <nl> / / All keys were added to phaseOne despite the kill request , because <nl> / / mayInterrupt = = false . <nl> ASSERT_EQUALS ( static_cast < uint64_t > ( nDocs ) , phaseOne . n ) ; <nl> } <nl> namespace IndexUpdateTests { <nl> / / Check the expected number of dups . <nl> ASSERT_EQUALS ( static_cast < uint32_t > ( nDocs / 4 * 3 ) , dups . size ( ) ) ; <nl> / / Drop the dups . <nl> - doDropDups ( _ns , nsdetails ( _ns ) , dups , true ) ; <nl> + BtreeBasedBuilder : : doDropDups ( _ns , nsdetails ( _ns ) , dups , true ) ; <nl> / / Check that the expected number of documents remain . <nl> ASSERT_EQUALS ( static_cast < uint32_t > ( nDocs / 4 ) , _client . count ( _ns ) ) ; <nl> } <nl> namespace IndexUpdateTests { <nl> cc ( ) . curop ( ) - > kill ( ) ; <nl> if ( _mayInterrupt ) { <nl> / / doDropDups ( ) aborts . <nl> - ASSERT_THROWS ( doDropDups ( _ns , nsdetails ( _ns ) , dups , _mayInterrupt ) , <nl> + ASSERT_THROWS ( BtreeBasedBuilder : : doDropDups ( _ns , nsdetails ( _ns ) , dups , _mayInterrupt ) , <nl> UserException ) ; <nl> / / Not all dups are dropped . <nl> ASSERT ( static_cast < uint32_t > ( nDocs / 4 ) < _client . count ( _ns ) ) ; <nl> } <nl> else { <nl> / / doDropDups ( ) succeeds . <nl> - doDropDups ( _ns , nsdetails ( _ns ) , dups , _mayInterrupt ) ; <nl> + BtreeBasedBuilder : : doDropDups ( _ns , nsdetails ( _ns ) , dups , _mayInterrupt ) ; <nl> / / The expected number of documents were dropped . <nl> ASSERT_EQUALS ( static_cast < uint32_t > ( nDocs / 4 ) , _client . count ( _ns ) ) ; <nl> } <nl> mmm a / src / mongo / dbtests / namespacetests . cpp <nl> ppp b / src / mongo / dbtests / namespacetests . cpp <nl> <nl> # include " . . / db / db . h " <nl> # include " . . / db / json . h " <nl> # include " mongo / db / hashindex . h " <nl> + # include " mongo / db / index / btree_key_generator . h " <nl> # include " mongo / db / queryutil . h " <nl> <nl> # include " dbtests . h " <nl> namespace NamespaceTests { <nl> id_ . info = theDataFileMgr . insert ( ns ( ) , bobj . objdata ( ) , bobj . objsize ( ) ) ; <nl> / / head not needed for current tests <nl> / / idx_ . head = BtreeBucket : : addHead ( id_ ) ; <nl> + <nl> + _keyPattern = key ( ) . getOwned ( ) ; <nl> + / / The key generation wants these values . <nl> + vector < const char * > fieldNames ; <nl> + vector < BSONElement > fixed ; <nl> + <nl> + BSONObjIterator it ( _keyPattern ) ; <nl> + while ( it . more ( ) ) { <nl> + BSONElement elt = it . next ( ) ; <nl> + fieldNames . push_back ( elt . fieldName ( ) ) ; <nl> + fixed . push_back ( BSONElement ( ) ) ; <nl> + } <nl> + <nl> + _keyGen . reset ( new BtreeKeyGeneratorV1 ( fieldNames , fixed , sparse ) ) ; <nl> + } <nl> + <nl> + scoped_ptr < BtreeKeyGenerator > _keyGen ; <nl> + BSONObj _keyPattern ; <nl> + <nl> static const char * ns ( ) { <nl> return " unittests . indexdetailstests " ; <nl> } <nl> + <nl> IndexDetails & id ( ) { <nl> return id_ ; <nl> } <nl> + <nl> + / / TODO : This is testing Btree key creation , not IndexDetails . <nl> + void getKeysFromObject ( const BSONObj & obj , BSONObjSet & out ) { <nl> + _keyGen - > getKeys ( obj , & out ) ; <nl> + } <nl> + <nl> virtual BSONObj key ( ) const { <nl> BSONObjBuilder k ; <nl> k . append ( " a " , 1 ) ; <nl> namespace NamespaceTests { <nl> b . 
append ( " a " , 5 ) ; <nl> e . append ( " " , 5 ) ; <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( b . done ( ) , keys ) ; <nl> + getKeysFromObject ( b . done ( ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> assertEquals ( e . obj ( ) , * keys . begin ( ) ) ; <nl> } <nl> namespace NamespaceTests { <nl> a . append ( " c " , " foo " ) ; <nl> e . append ( " " , 4 ) ; <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( a . done ( ) , keys ) ; <nl> + getKeysFromObject ( a . done ( ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( e . obj ( ) , * keys . begin ( ) ) ; <nl> } <nl> namespace NamespaceTests { <nl> b . append ( " a " , shortArray ( ) ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( b . done ( ) , keys ) ; <nl> + getKeysFromObject ( b . done ( ) , keys ) ; <nl> checkSize ( 3 , keys ) ; <nl> int j = 1 ; <nl> for ( BSONObjSet : : iterator i = keys . begin ( ) ; i ! = keys . end ( ) ; + + i , + + j ) { <nl> namespace NamespaceTests { <nl> b . append ( " b " , 2 ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( b . done ( ) , keys ) ; <nl> + getKeysFromObject ( b . done ( ) , keys ) ; <nl> checkSize ( 3 , keys ) ; <nl> int j = 1 ; <nl> for ( BSONObjSet : : iterator i = keys . begin ( ) ; i ! = keys . end ( ) ; + + i , + + j ) { <nl> namespace NamespaceTests { <nl> b . append ( " a " , shortArray ( ) ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( b . done ( ) , keys ) ; <nl> + getKeysFromObject ( b . done ( ) , keys ) ; <nl> checkSize ( 3 , keys ) ; <nl> int j = 1 ; <nl> for ( BSONObjSet : : iterator i = keys . begin ( ) ; i ! = keys . end ( ) ; + + i , + + j ) { <nl> namespace NamespaceTests { <nl> a . append ( " a " , b . done ( ) ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( a . done ( ) , keys ) ; <nl> + getKeysFromObject ( a . done ( ) , keys ) ; <nl> checkSize ( 3 , keys ) ; <nl> int j = 1 ; <nl> for ( BSONObjSet : : iterator i = keys . begin ( ) ; i ! = keys . end ( ) ; + + i , + + j ) { <nl> namespace NamespaceTests { <nl> b . append ( " b " , shortArray ( ) ) ; <nl> <nl> BSONObjSet keys ; <nl> - ASSERT_THROWS ( id ( ) . getKeysFromObject ( b . done ( ) , keys ) , <nl> + ASSERT_THROWS ( getKeysFromObject ( b . done ( ) , keys ) , <nl> UserException ) ; <nl> } <nl> private : <nl> namespace NamespaceTests { <nl> b . append ( " a " , elts ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( b . done ( ) , keys ) ; <nl> + getKeysFromObject ( b . done ( ) , keys ) ; <nl> checkSize ( 3 , keys ) ; <nl> int j = 1 ; <nl> for ( BSONObjSet : : iterator i = keys . begin ( ) ; i ! = keys . end ( ) ; + + i , + + j ) { <nl> namespace NamespaceTests { <nl> b . append ( " d " , 99 ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( b . done ( ) , keys ) ; <nl> + getKeysFromObject ( b . done ( ) , keys ) ; <nl> checkSize ( 3 , keys ) ; <nl> int j = 1 ; <nl> for ( BSONObjSet : : iterator i = keys . begin ( ) ; i ! = keys . end ( ) ; + + i , + + j ) { <nl> namespace NamespaceTests { <nl> BSONObj obj = b . obj ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( obj , keys ) ; <nl> + getKeysFromObject ( obj , keys ) ; <nl> checkSize ( 4 , keys ) ; <nl> BSONObjSet : : iterator i = keys . begin ( ) ; <nl> assertEquals ( nullObj ( ) , * i + + ) ; / / see SERVER - 3377 <nl> namespace NamespaceTests { <nl> b . append ( " a " , elts ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( b . 
done ( ) , keys ) ; <nl> + getKeysFromObject ( b . done ( ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> assertEquals ( nullObj ( ) , * keys . begin ( ) ) ; <nl> } <nl> namespace NamespaceTests { <nl> void run ( ) { <nl> create ( ) ; <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( BSON ( " b " < < 1 ) , keys ) ; <nl> + getKeysFromObject ( BSON ( " b " < < 1 ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> assertEquals ( nullObj ( ) , * keys . begin ( ) ) ; <nl> } <nl> namespace NamespaceTests { <nl> void run ( ) { <nl> create ( ) ; <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ 1 , 2 ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ 1 , 2 ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> assertEquals ( nullObj ( ) , * keys . begin ( ) ) ; <nl> } <nl> namespace NamespaceTests { <nl> <nl> { <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { x : ' a ' , y : ' b ' } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { x : ' a ' , y : ' b ' } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> assertEquals ( BSON ( " " < < " a " < < " " < < " b " ) , * keys . begin ( ) ) ; <nl> } <nl> <nl> { <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { x : ' a ' } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { x : ' a ' } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> BSONObjBuilder b ; <nl> b . append ( " " , " a " ) ; <nl> namespace NamespaceTests { <nl> void run ( ) { <nl> create ( ) ; <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ { b : [ 2 ] } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { b : [ 2 ] } ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> assertEquals ( BSON ( " " < < 2 ) , * keys . begin ( ) ) ; <nl> } <nl> namespace NamespaceTests { <nl> void run ( ) { <nl> create ( ) ; <nl> BSONObjSet keys ; <nl> - ASSERT_THROWS ( id ( ) . getKeysFromObject ( fromjson ( " { a : [ { b : [ 1 ] , c : [ 2 ] } ] } " ) , keys ) , <nl> + ASSERT_THROWS ( getKeysFromObject ( fromjson ( " { a : [ { b : [ 1 ] , c : [ 2 ] } ] } " ) , keys ) , <nl> UserException ) ; <nl> } <nl> private : <nl> namespace NamespaceTests { <nl> void run ( ) { <nl> create ( ) ; <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ { b : 1 } , { c : 2 } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { b : 1 } , { c : 2 } ] } " ) , keys ) ; <nl> checkSize ( 2 , keys ) ; <nl> BSONObjSet : : iterator i = keys . begin ( ) ; <nl> { <nl> namespace NamespaceTests { <nl> void run ( ) { <nl> create ( ) ; <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ { b : 1 } , { b : [ 1 , 2 , 3 ] } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { b : 1 } , { b : [ 1 , 2 , 3 ] } ] } " ) , keys ) ; <nl> checkSize ( 3 , keys ) ; <nl> } <nl> private : <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ 1 , 2 ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ 1 , 2 ] } " ) , keys ) ; <nl> checkSize ( 2 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ 1 ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ 1 ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . 
getKeysFromObject ( fromjson ( " { a : null } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : null } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( Undefined , keys . begin ( ) - > firstElement ( ) . type ( ) ) ; <nl> keys . clear ( ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ 1 , 2 ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ 1 , 2 ] } " ) , keys ) ; <nl> checkSize ( 2 , keys ) ; <nl> BSONObjSet : : const_iterator i = keys . begin ( ) ; <nl> ASSERT_EQUALS ( BSON ( " " < < 1 < < " " < < 1 ) , * i ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : undefined , ' ' : undefined } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : 1 , b : [ 1 , 2 ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : 1 , b : [ 1 , 2 ] } " ) , keys ) ; <nl> checkSize ( 2 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : 1 , b : [ 1 ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : 1 , b : [ 1 ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : 1 , b : null } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : 1 , b : null } " ) , keys ) ; <nl> / / cout < < " YO : " < < * ( keys . begin ( ) ) < < endl ; <nl> checkSize ( 1 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : 1 , b : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : 1 , b : [ ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> / / cout < < " YO : " < < * ( keys . begin ( ) ) < < endl ; <nl> BSONObjIterator i ( * keys . begin ( ) ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : null } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : null , ' ' : null } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : undefined , ' ' : null } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . 
getKeysFromObject ( fromjson ( " { a : [ { b : 1 } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { b : 1 } ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : { b : 1 } , ' ' : 1 } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ { b : [ ] } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { b : [ ] } ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : { b : [ ] } , ' ' : undefined } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : null , ' ' : undefined } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> namespace NamespaceTests { <nl> create ( true ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : null , ' ' : undefined } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> namespace NamespaceTests { <nl> create ( true ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : 1 } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : 1 } " ) , keys ) ; <nl> checkSize ( 0 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 0 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ { c : 1 } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { c : 1 } ] } " ) , keys ) ; <nl> checkSize ( 0 , keys ) ; <nl> keys . clear ( ) ; <nl> } <nl> namespace NamespaceTests { <nl> create ( true ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : 1 } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : 1 } " ) , keys ) ; <nl> checkSize ( 0 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 0 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ { c : 1 } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { c : 1 } ] } " ) , keys ) ; <nl> checkSize ( 0 , keys ) ; <nl> keys . clear ( ) ; <nl> } <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : null } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ 1 ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ 1 ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : null } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . 
getKeysFromObject ( fromjson ( " { a : [ 1 , { b : 1 } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ 1 , { b : 1 } ] } " ) , keys ) ; <nl> checkSize ( 2 , keys ) ; <nl> BSONObjSet : : const_iterator c = keys . begin ( ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : null } " ) , * c ) ; <nl> namespace NamespaceTests { <nl> create ( true ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 0 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ 1 ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ 1 ] } " ) , keys ) ; <nl> checkSize ( 0 , keys ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ 1 , { b : 1 } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ 1 , { b : 1 } ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : 1 } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ 1 ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ 1 ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( BSON ( " " < < 1 ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ [ 1 ] ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ [ 1 ] ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : [ 1 ] } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ [ ] ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ [ ] ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : undefined } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : { ' 0 ' : 1 } } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : { ' 0 ' : 1 } } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( BSON ( " " < < 1 ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - ASSERT_THROWS ( id ( ) . getKeysFromObject ( fromjson ( " { a : [ { ' 0 ' : 1 } ] } " ) , keys ) , UserException ) ; <nl> + ASSERT_THROWS ( getKeysFromObject ( fromjson ( " { a : [ { ' 0 ' : 1 } ] } " ) , keys ) , UserException ) ; <nl> <nl> - ASSERT_THROWS ( id ( ) . getKeysFromObject ( fromjson ( " { a : [ 1 , { ' 0 ' : 2 } ] } " ) , keys ) , UserException ) ; <nl> + ASSERT_THROWS ( getKeysFromObject ( fromjson ( " { a : [ 1 , { ' 0 ' : 2 } ] } " ) , keys ) , UserException ) ; <nl> } <nl> protected : <nl> BSONObj key ( ) const { return BSON ( " a . 0 " < < 1 ) ; } <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ [ 1 ] ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ [ 1 ] ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : 1 } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ [ ] ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ [ ] ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : null } " ) , * keys . begin ( ) ) ; <nl> keys . 
clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : null } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ [ [ ] ] ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ [ [ ] ] ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : undefined } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ { b : 1 } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { b : 1 } ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : 1 } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ { b : [ 1 ] } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { b : [ 1 ] } ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : 1 } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ { b : [ [ 1 ] ] } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { b : [ [ 1 ] ] } ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : [ 1 ] } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ [ { b : 1 } ] ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ [ { b : 1 } ] ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : 1 } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ [ { b : [ 1 ] } ] ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ [ { b : [ 1 ] } ] ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : 1 } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ [ { b : [ [ 1 ] ] } ] ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ [ { b : [ [ 1 ] ] } ] ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : [ 1 ] } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ [ { b : [ ] } ] ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ [ { b : [ ] } ] ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : undefined } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl> namespace NamespaceTests { <nl> create ( ) ; <nl> <nl> BSONObjSet keys ; <nl> - id ( ) . getKeysFromObject ( fromjson ( " { a : [ { b : [ 1 ] } ] } " ) , keys ) ; <nl> + getKeysFromObject ( fromjson ( " { a : [ { b : [ 1 ] } ] } " ) , keys ) ; <nl> checkSize ( 1 , keys ) ; <nl> ASSERT_EQUALS ( fromjson ( " { ' ' : 1 } " ) , * keys . begin ( ) ) ; <nl> keys . clear ( ) ; <nl>
|
SERVER-8791 SERVER-9165 SERVER-9212 move build into own class, clean up getKeys calls
|
mongodb/mongo
|
f75c238b3363048f91ed22d9db0cb83383b1ebd4
|
2013-04-19T16:02:28Z
|
mmm a / src / compiler / heap - refs . h <nl> ppp b / src / compiler / heap - refs . h <nl> enum class OddballType : uint8_t { <nl> / * Subtypes of FixedArray * / \ <nl> V ( ObjectBoilerplateDescription ) \ <nl> V ( ScopeInfo ) \ <nl> + / * Subtypes of String * / \ <nl> + V ( InternalizedString ) \ <nl> / * Subtypes of Name * / \ <nl> V ( Symbol ) \ <nl> / * Subtypes of HeapObject * / \ <nl> enum class OddballType : uint8_t { <nl> V ( FixedArray ) \ <nl> V ( FixedDoubleArray ) \ <nl> / * Subtypes of Name * / \ <nl> - V ( InternalizedString ) \ <nl> V ( String ) \ <nl> / * Subtypes of JSReceiver * / \ <nl> V ( JSObject ) \ <nl> mmm a / src / compiler / js - heap - broker . cc <nl> ppp b / src / compiler / js - heap - broker . cc <nl> StringData : : StringData ( JSHeapBroker * broker , ObjectData * * storage , <nl> class InternalizedStringData : public StringData { <nl> public : <nl> InternalizedStringData ( JSHeapBroker * broker , ObjectData * * storage , <nl> - Handle < InternalizedString > object ) ; <nl> - <nl> - uint32_t array_index ( ) const { return array_index_ ; } <nl> - <nl> - private : <nl> - uint32_t array_index_ ; <nl> + Handle < InternalizedString > object ) <nl> + : StringData ( broker , storage , object ) { <nl> + DCHECK ( ! FLAG_turbo_direct_heap_access ) ; <nl> + } <nl> } ; <nl> <nl> ObjectData * StringData : : GetCharAsString ( JSHeapBroker * broker , uint32_t index , <nl> ObjectData * StringData : : GetCharAsString ( JSHeapBroker * broker , uint32_t index , <nl> return result ; <nl> } <nl> <nl> - InternalizedStringData : : InternalizedStringData ( <nl> - JSHeapBroker * broker , ObjectData * * storage , <nl> - Handle < InternalizedString > object ) <nl> - : StringData ( broker , storage , object ) { } <nl> - <nl> namespace { <nl> <nl> bool IsFastLiteralHelper ( Handle < JSObject > boilerplate , int max_depth , <nl>
|
Reland " [ compiler ] Move InternalizedString to kNeverSerialized "
|
v8/v8
|
fa0c2fa286906665438f24342b734f41691671b8
|
2020-11-13T15:17:14Z
|
mmm a / tools / ctor_evaller . py <nl> ppp b / tools / ctor_evaller . py <nl> def add_func ( asm , func ) : <nl> shared . logging . debug ( ' ctor_evaller : push , but no ctors ' ) <nl> sys . exit ( 0 ) <nl> <nl> - num_ctors = ctors_text . count ( ' , ' ) + 1 <nl> - shared . logging . debug ( ' ctor_evaller : % d ctors ' % num_ctors ) <nl> + num_ctors = ctors_text . count ( ' function ( ) ' ) <nl> + shared . logging . debug ( ' ctor_evaller : % d ctors , from | % s | ' % ( num_ctors , ctors_text ) ) <nl> <nl> if os . path . exists ( mem_init_file ) : <nl> mem_init = json . dumps ( map ( ord , open ( mem_init_file , ' rb ' ) . read ( ) ) ) <nl>
|
Improve ctor-counting logic
|
emscripten-core/emscripten
|
ec8a49ccb03be3bbe009f3c8fc71d86f45c0353e
|
2016-03-28T02:42:38Z
|
mmm a / tools / cocos2d - console <nl> ppp b / tools / cocos2d - console <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 26b4f825e2d56f8de83e784216f3da8e7cd96c63 <nl> + Subproject commit 0da825a0256ff8f7edc33b4930ecdc703eaebe41 <nl>
|
Merge pull request from natural-law/v3
|
cocos2d/cocos2d-x
|
956b5567a2051e75c1b9e95c38b79f5ffe535583
|
2014-12-31T07:05:55Z
|
mmm a / doc / classes / Input . xml <nl> ppp b / doc / classes / Input . xml <nl> <nl> Makes the mouse cursor hidden if it is visible . <nl> < / constant > <nl> < constant name = " MOUSE_MODE_CAPTURED " value = " 2 " enum = " MouseMode " > <nl> - Captures the mouse . The mouse will be hidden and unable to leave the game window , but it will still register movement and mouse button presses . <nl> + Captures the mouse . The mouse will be hidden and unable to leave the game window , but it will still register movement and mouse button presses . On Windows and Linux , the mouse will use raw input mode , which means the reported movement will be unaffected by the OS ' mouse acceleration settings . <nl> < / constant > <nl> < constant name = " MOUSE_MODE_CONFINED " value = " 3 " enum = " MouseMode " > <nl> Makes the mouse cursor visible but confines it to the game window . <nl>
|
Merge pull request from Calinou/input-mouse-captured-raw
|
godotengine/godot
|
49ce6bacc338d4279c121e13d4c7434d4318e4e4
|
2019-07-11T06:51:44Z
|
mmm a / src / gui / popup_frame . cpp <nl> ppp b / src / gui / popup_frame . cpp <nl> bool PopupFrame : : onProcessMessage ( JMessage msg ) <nl> break ; <nl> <nl> case JM_KEYPRESSED : <nl> - if ( m_filtering & & msg - > key . scancode < KEY_MODIFIERS ) <nl> + if ( m_filtering & & <nl> + ( msg - > key . scancode = = KEY_ESC | | <nl> + msg - > key . scancode = = KEY_ENTER | | <nl> + msg - > key . scancode = = KEY_ENTER_PAD ) ) { <nl> closeWindow ( NULL ) ; <nl> - break ; <nl> + } <nl> + return false ; <nl> <nl> case JM_BUTTONPRESSED : <nl> / * if the user click outside the window , we have to close the <nl>
|
Avoid sending keys to the GUI manager when a popup frame is visible.
|
aseprite/aseprite
|
2ac1d38d623c82e7e79d5c038f7ca85854ae5b99
|
2011-03-02T01:37:00Z
|
mmm a / cocos2dx / sprite_nodes / CCSpriteFrame . cpp <nl> ppp b / cocos2dx / sprite_nodes / CCSpriteFrame . cpp <nl> SpriteFrame * SpriteFrame : : clone ( ) const <nl> SpriteFrame * copy = new SpriteFrame ( ) ; <nl> copy - > initWithTextureFilename ( _textureFilename . c_str ( ) , _rectInPixels , _rotated , _offsetInPixels , _originalSizeInPixels ) ; <nl> copy - > setTexture ( _texture ) ; <nl> + copy - > autorelease ( ) ; <nl> return copy ; <nl> } <nl> <nl>
|
Merge pull request from minggo/develop
|
cocos2d/cocos2d-x
|
334c9c9e02f114ca0bc58015f22ca93a59f106a8
|
2013-09-16T15:30:48Z
|
mmm a / js / server / modules / org / arangodb / foxx / console . js <nl> ppp b / js / server / modules / org / arangodb / foxx / console . js <nl> extend ( Console . prototype , { <nl> Error . captureStackTrace ( e , callee | | this . _log ) ; <nl> doc . stack = e . stack . replace ( / \ n $ / , ' ' ) . split ( ' \ n ' ) . slice ( 2 ) <nl> . map ( function ( line ) { <nl> - var tokens = line . replace ( / ^ \ s * at \ s + / , ' ' ) . split ( ' : ' ) ; <nl> - return { <nl> - fileName : tokens . slice ( 0 , tokens . length - 2 ) . join ( ' : ' ) , <nl> - lineNumber : Number ( tokens [ tokens . length - 2 ] ) , <nl> - columnNumber : Number ( tokens [ tokens . length - 1 ] ) <nl> - } ; <nl> - } ) ; <nl> + var tokens = line . match ( / \ s + at \ s + ( . + ) \ s + \ ( ( . + ) : ( \ d + ) : ( \ d + ) \ ) / ) ; <nl> + if ( tokens ) { <nl> + return { <nl> + functionName : tokens [ 1 ] , <nl> + fileName : tokens [ 2 ] , <nl> + lineNumber : Number ( tokens [ 3 ] ) , <nl> + columnNumber : Number ( tokens [ 4 ] ) <nl> + } ; <nl> + } <nl> + tokens = line . match ( / \ s + at \ s + ( . + ) : ( \ d + ) : ( \ d + ) / ) ; <nl> + if ( tokens ) { <nl> + return { <nl> + functionName : null , <nl> + fileName : tokens [ 1 ] , <nl> + lineNumber : Number ( tokens [ 2 ] ) , <nl> + columnNumber : Number ( tokens [ 3 ] ) <nl> + } ; <nl> + } <nl> + return false ; <nl> + } ) . filter ( Boolean ) ; <nl> } <nl> <nl> if ( ! db . _foxxlog ) { <nl> mmm a / js / server / tests / shell - foxx - console . js <nl> ppp b / js / server / tests / shell - foxx - console . js <nl> function ConsoleTestSuite ( ) { <nl> var logs = ls ( ) ; <nl> expect ( logs ) . to . be . empty ( ) ; <nl> } , <nl> - / * Disabled ; fails in cluster . <nl> testConsoleTracingAddsInfo : function ( ) { <nl> rmrf ( ) ; <nl> console . setTracing ( false ) ; <nl> function ConsoleTestSuite ( ) { <nl> expect ( logs [ 1 ] . stack [ 0 ] ) . to . have . property ( ' lineNumber ' ) ; <nl> expect ( logs [ 1 ] . stack [ 0 ] ) . to . have . property ( ' columnNumber ' ) ; <nl> } , <nl> - * / <nl> testCustomLogLevels : function ( ) { <nl> rmrf ( ) ; <nl> var log = console . custom ( ' BATMAN ' , 9000 ) ; <nl>
|
Fixed stack parsing.
|
arangodb/arangodb
|
a36c96060ac881b5b7e893c3f98d1e72cb51969b
|
2015-02-03T10:34:26Z
|
mmm a / stdlib / public / core / HashedCollections . swift . gyb <nl> ppp b / stdlib / public / core / HashedCollections . swift . gyb <nl> internal protocol _HashStorage { <nl> var endIndex : Index { get } <nl> <nl> @ warn_unused_result <nl> - func indexForKey ( key : Key ) - > Index ? <nl> + func index ( forKey key : Key ) - > Index ? <nl> <nl> @ warn_unused_result <nl> func assertingGet ( i : Index ) - > SequenceElement <nl> internal protocol _HashStorage { <nl> @ warn_unused_result <nl> func maybeGet ( key : Key ) - > Value ? <nl> <nl> - mutating func updateValue ( value : Value , forKey : Key ) - > Value ? <nl> + mutating func updateValue ( value : Value , forKey key : Key ) - > Value ? <nl> mutating func remove ( at index : Index ) - > SequenceElement <nl> - mutating func removeValueForKey ( key : Key ) - > Value ? <nl> + mutating func removeValue ( forKey key : Key ) - > Value ? <nl> mutating func removeAll ( keepingCapacity keepCapacity : Bool ) <nl> var count : Int { get } <nl> <nl> public struct Set < Element : Hashable > : <nl> / / / present in the set . <nl> @ warn_unused_result <nl> public func indexOf ( member : Element ) - > Index ? { <nl> - return _variantStorage . indexForKey ( member ) <nl> + return _variantStorage . index ( forKey : member ) <nl> } <nl> <nl> / / APINAMING : say what happens when the element is already there . <nl> public struct Set < Element : Hashable > : <nl> <nl> / / / Remove the member from the set and return it if it was present . <nl> public mutating func remove ( member : Element ) - > Element ? { <nl> - return _variantStorage . removeValueForKey ( member ) <nl> + return _variantStorage . removeValue ( forKey : member ) <nl> } <nl> <nl> / / / Remove the element at the given ` position ` . <nl> public func = = < Element : Hashable > ( lhs : Set < Element > , rhs : Set < Element > ) - > Boo <nl> } <nl> <nl> for member in lhs { <nl> - let ( _ , found ) = rhsNative . _find ( member , rhsNative . _bucket ( member ) ) <nl> + let ( _ , found ) = <nl> + rhsNative . _find ( member , startBucket : rhsNative . _bucket ( member ) ) <nl> if ! found { <nl> return false <nl> } <nl> public struct Dictionary < Key : Hashable , Value > : <nl> / / / Returns the ` Index ` for the given key , or ` nil ` if the key is not <nl> / / / present in the dictionary . <nl> @ warn_unused_result <nl> - public func indexForKey ( key : Key ) - > Index ? { <nl> + public func index ( forKey key : Key ) - > Index ? { <nl> / / Complexity : amortized O ( 1 ) for native storage , O ( N ) when wrapping an <nl> / / NSDictionary . <nl> - return _variantStorage . indexForKey ( key ) <nl> + return _variantStorage . index ( forKey : key ) <nl> } <nl> <nl> / / / Returns the key - value pair at ` position ` . <nl> public struct Dictionary < Key : Hashable , Value > : <nl> } <nl> else { <nl> / / FIXME ( performance ) : this loads and discards the old value . <nl> - removeValueForKey ( key ) <nl> + removeValue ( forKey : key ) <nl> } <nl> } <nl> } <nl> public struct Dictionary < Key : Hashable , Value > : <nl> / / / Remove a given key and the associated value from the dictionary . <nl> / / / Returns the value that was removed , or ` nil ` if the key was not present <nl> / / / in the dictionary . <nl> - public mutating func removeValueForKey ( key : Key ) - > Value ? { <nl> - return _variantStorage . removeValueForKey ( key ) <nl> + public mutating func removeValue ( forKey key : Key ) - > Value ? { <nl> + return _variantStorage . 
removeValue ( forKey : key ) <nl> } <nl> <nl> / / / Remove all the elements . If ` keepingCapacity ` is ` true ` , existing <nl> public func = = < Key : Equatable , Value : Equatable > ( <nl> } <nl> <nl> for ( k , v ) in lhs { <nl> - let ( pos , found ) = rhsNative . _find ( k , rhsNative . _bucket ( k ) ) <nl> + let ( pos , found ) = rhsNative . _find ( k , startBucket : rhsNative . _bucket ( k ) ) <nl> / / FIXME : Can ' t write the simple code pending <nl> / / < rdar : / / problem / 15484639 > Refcounting bug <nl> / * <nl> public func = = < Key : Equatable , Value : Equatable > ( <nl> if ! found { <nl> return false <nl> } <nl> - if rhsNative . valueAt ( pos . offset ) ! = v { <nl> + if rhsNative . value ( at : pos . offset ) ! = v { <nl> return false <nl> } <nl> } <nl> final internal class _Native $ { Self } StorageImpl < $ { TypeParameters } > : <nl> / / / Returns the bytes necessary to store a bit map of ' capacity ' bytes and <nl> / / / padding to align the start to word alignment . <nl> @ warn_unused_result <nl> - internal static func bytesForBitMap ( capacity : Int ) - > Int { <nl> + internal static func bytesForBitMap ( capacity capacity : Int ) - > Int { <nl> let numWords = _BitMap . wordsFor ( capacity ) <nl> return numWords * sizeof ( UInt ) + alignof ( UInt ) <nl> } <nl> final internal class _Native $ { Self } StorageImpl < $ { TypeParameters } > : <nl> / / / the start to the alignment of the ' Key ' type assuming a word aligned base <nl> / / / address . <nl> @ warn_unused_result <nl> - internal static func bytesForKeys ( capacity : Int ) - > Int { <nl> + internal static func bytesForKeys ( capacity capacity : Int ) - > Int { <nl> let padding = max ( 0 , alignof ( Key . self ) - alignof ( UInt ) ) <nl> return strideof ( Key . self ) * capacity + padding <nl> } <nl> final internal class _Native $ { Self } StorageImpl < $ { TypeParameters } > : <nl> <nl> % if Self = = ' Dictionary ' : <nl> @ warn_unused_result <nl> - internal static func bytesForValues ( capacity : Int ) - > Int { <nl> + internal static func bytesForValues ( capacity capacity : Int ) - > Int { <nl> let maxPrevAlignment = max ( alignof ( Key . self ) , alignof ( UInt ) ) <nl> let padding = max ( 0 , alignof ( Value . self ) - maxPrevAlignment ) <nl> return strideof ( Value . self ) * capacity + padding <nl> final internal class _Native $ { Self } StorageImpl < $ { TypeParameters } > : <nl> / / / Create a storage instance with room for ' capacity ' entries and all entries <nl> / / / marked invalid . <nl> internal class func create ( capacity : Int ) - > StorageImpl { <nl> - let requiredCapacity = bytesForBitMap ( capacity ) + bytesForKeys ( capacity ) <nl> + let requiredCapacity = <nl> + bytesForBitMap ( capacity : capacity ) + bytesForKeys ( capacity : capacity ) <nl> % if Self = = ' Dictionary ' : <nl> - + bytesForValues ( capacity ) <nl> + + bytesForValues ( capacity : capacity ) <nl> % end <nl> <nl> let r = super . create ( requiredCapacity ) { _ in <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> } <nl> <nl> @ warn_unused_result <nl> - internal func keyAt ( i : Int ) - > Key { <nl> + internal func key ( at i : Int ) - > Key { <nl> _require ( i > = 0 & & i < capacity ) <nl> - _sanityCheck ( isInitializedEntry ( i ) ) <nl> + _sanityCheck ( isInitializedEntry ( at : i ) ) <nl> <nl> let res = ( keys + i ) . 
pointee <nl> _fixLifetime ( self ) <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> } <nl> <nl> @ warn_unused_result <nl> - internal func isInitializedEntry ( i : Int ) - > Bool { <nl> + internal func isInitializedEntry ( at i : Int ) - > Bool { <nl> _require ( i > = 0 & & i < capacity ) <nl> return initializedEntries [ i ] <nl> } <nl> <nl> @ _transparent <nl> - internal func destroyEntryAt ( i : Int ) { <nl> - _sanityCheck ( isInitializedEntry ( i ) ) <nl> + internal func destroyEntry ( at i : Int ) { <nl> + _sanityCheck ( isInitializedEntry ( at : i ) ) <nl> ( keys + i ) . deinitializePointee ( ) <nl> % if Self = = ' Dictionary ' : <nl> ( values + i ) . deinitializePointee ( ) <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> % if Self = = ' Set ' : <nl> @ _transparent <nl> internal func initializeKey ( k : Key , at i : Int ) { <nl> - _sanityCheck ( ! isInitializedEntry ( i ) ) <nl> + _sanityCheck ( ! isInitializedEntry ( at : i ) ) <nl> <nl> ( keys + i ) . initializePointee ( k ) <nl> initializedEntries [ i ] = true <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> } <nl> <nl> @ _transparent <nl> - internal func moveInitializeFrom ( from : Storage , at : Int , toEntryAt : Int ) { <nl> - _sanityCheck ( ! isInitializedEntry ( toEntryAt ) ) <nl> + internal func moveInitializeEntry ( <nl> + from from : Storage , at : Int , toEntryAt : Int <nl> + ) { <nl> + _sanityCheck ( ! isInitializedEntry ( at : toEntryAt ) ) <nl> ( keys + toEntryAt ) . initializePointee ( ( from . keys + at ) . take ( ) ) <nl> from . initializedEntries [ at ] = false <nl> initializedEntries [ toEntryAt ] = true <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> <nl> internal func setKey ( key : Key , at i : Int ) { <nl> _require ( i > = 0 & & i < capacity ) <nl> - _sanityCheck ( isInitializedEntry ( i ) ) <nl> + _sanityCheck ( isInitializedEntry ( at : i ) ) <nl> <nl> ( keys + i ) . pointee = key <nl> _fixLifetime ( self ) <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> % elif Self = = ' Dictionary ' : <nl> @ _transparent <nl> internal func initializeKey ( k : Key , value v : Value , at i : Int ) { <nl> - _sanityCheck ( ! isInitializedEntry ( i ) ) <nl> + _sanityCheck ( ! isInitializedEntry ( at : i ) ) <nl> <nl> ( keys + i ) . initializePointee ( k ) <nl> ( values + i ) . initializePointee ( v ) <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> } <nl> <nl> @ _transparent <nl> - internal func moveInitializeFrom ( from : Storage , at : Int , toEntryAt : Int ) { <nl> - _sanityCheck ( ! isInitializedEntry ( toEntryAt ) ) <nl> + internal func moveInitializeEntry ( <nl> + from from : Storage , at : Int , toEntryAt : Int <nl> + ) { <nl> + _sanityCheck ( ! isInitializedEntry ( at : toEntryAt ) ) <nl> ( keys + toEntryAt ) . initializePointee ( ( from . keys + at ) . take ( ) ) <nl> ( values + toEntryAt ) . initializePointee ( ( from . values + at ) . take ( ) ) <nl> from . initializedEntries [ at ] = false <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> <nl> @ _transparent <nl> @ warn_unused_result <nl> - internal func valueAt ( i : Int ) - > Value { <nl> - _sanityCheck ( isInitializedEntry ( i ) ) <nl> + internal func value ( at i : Int ) - > Value { <nl> + _sanityCheck ( isInitializedEntry ( at : i ) ) <nl> <nl> let res = ( values + i ) . 
pointee <nl> _fixLifetime ( self ) <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> <nl> @ _transparent <nl> internal func setKey ( key : Key , value : Value , at i : Int ) { <nl> - _sanityCheck ( isInitializedEntry ( i ) ) <nl> + _sanityCheck ( isInitializedEntry ( at : i ) ) <nl> ( keys + i ) . pointee = key <nl> ( values + i ) . pointee = value <nl> _fixLifetime ( self ) <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> / / / If the key is not present , returns the position where it could be <nl> / / / inserted . <nl> @ warn_unused_result <nl> - internal <nl> - func _find ( key : Key , _ startBucket : Int ) - > ( pos : Index , found : Bool ) { <nl> + internal func _find ( key : Key , startBucket : Int ) <nl> + - > ( pos : Index , found : Bool ) { <nl> + <nl> var bucket = startBucket <nl> <nl> / / The invariant guarantees there ' s always a hole , so we just loop <nl> / / until we find one <nl> while true { <nl> - let isHole = ! isInitializedEntry ( bucket ) <nl> + let isHole = ! isInitializedEntry ( at : bucket ) <nl> if isHole { <nl> return ( Index ( nativeStorage : self , offset : bucket ) , false ) <nl> } <nl> - if keyAt ( bucket ) = = key { <nl> + if self . key ( at : bucket ) = = key { <nl> return ( Index ( nativeStorage : self , offset : bucket ) , true ) <nl> } <nl> bucket = _next ( bucket ) <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> <nl> @ _transparent <nl> @ warn_unused_result <nl> - internal static func getMinCapacity ( <nl> - requestedCount : Int , _ maxLoadFactorInverse : Double ) - > Int { <nl> - / / ` requestedCount + 1 ` below ensures that we don ' t fill in the last hole <nl> - return max ( Int ( Double ( requestedCount ) * maxLoadFactorInverse ) , <nl> - requestedCount + 1 ) <nl> + internal static func minimumCapacity ( <nl> + minimumCount minimumCount : Int , <nl> + maxLoadFactorInverse : Double <nl> + ) - > Int { <nl> + / / ` minimumCount + 1 ` below ensures that we don ' t fill in the last hole <nl> + return max ( Int ( Double ( minimumCount ) * maxLoadFactorInverse ) , <nl> + minimumCount + 1 ) <nl> } <nl> <nl> / / / Storage should be uniquely referenced . <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> % if Self = = ' Set ' : <nl> <nl> internal mutating func unsafeAddNew ( key newKey : Element ) { <nl> - let ( i , found ) = _find ( newKey , _bucket ( newKey ) ) <nl> + let ( i , found ) = _find ( newKey , startBucket : _bucket ( newKey ) ) <nl> _sanityCheck ( <nl> ! found , " unsafeAddNew was called , but the key is already present " ) <nl> initializeKey ( newKey , at : i . offset ) <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> % elif Self = = ' Dictionary ' : <nl> <nl> internal mutating func unsafeAddNew ( key newKey : Key , value : Value ) { <nl> - let ( i , found ) = _find ( newKey , _bucket ( newKey ) ) <nl> + let ( i , found ) = _find ( newKey , startBucket : _bucket ( newKey ) ) <nl> _sanityCheck ( <nl> ! found , " unsafeAddNew was called , but the key is already present " ) <nl> initializeKey ( newKey , value : value , at : i . offset ) <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> var result = " " <nl> # if INTERNAL_CHECKS_ENABLED <nl> for i in 0 . . < capacity { <nl> - if isInitializedEntry ( i ) { <nl> - let key = keyAt ( i ) <nl> + if isInitializedEntry ( at : i ) { <nl> + let key = self . 
key ( at : i ) <nl> result + = " bucket \ ( i ) , ideal bucket = \ ( _bucket ( key ) ) , key = \ ( key ) \ n " <nl> } else { <nl> result + = " bucket \ ( i ) , empty \ n " <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> } <nl> <nl> @ warn_unused_result <nl> - internal func indexForKey ( key : Key ) - > Index ? { <nl> + internal func index ( forKey key : Key ) - > Index ? { <nl> if count = = 0 { <nl> / / Fast path that avoids computing the hash of the key . <nl> return nil <nl> } <nl> - let ( i , found ) = _find ( key , _bucket ( key ) ) <nl> + let ( i , found ) = _find ( key , startBucket : _bucket ( key ) ) <nl> return found ? i : nil <nl> } <nl> <nl> @ warn_unused_result <nl> internal func assertingGet ( i : Index ) - > SequenceElement { <nl> _require ( <nl> - isInitializedEntry ( i . offset ) , <nl> + isInitializedEntry ( at : i . offset ) , <nl> " attempting to access $ { Self } elements using an invalid Index " ) <nl> - let key = keyAt ( i . offset ) <nl> + let key = self . key ( at : i . offset ) <nl> % if Self = = ' Set ' : <nl> return key <nl> % elif Self = = ' Dictionary ' : <nl> - return ( key , valueAt ( i . offset ) ) <nl> + return ( key , self . value ( at : i . offset ) ) <nl> % end <nl> <nl> } <nl> <nl> @ warn_unused_result <nl> internal func assertingGet ( key : Key ) - > Value { <nl> - let ( i , found ) = _find ( key , _bucket ( key ) ) <nl> + let ( i , found ) = _find ( key , startBucket : _bucket ( key ) ) <nl> _require ( found , " key not found " ) <nl> % if Self = = ' Set ' : <nl> - return keyAt ( i . offset ) <nl> + return self . key ( at : i . offset ) <nl> % elif Self = = ' Dictionary ' : <nl> - return valueAt ( i . offset ) <nl> + return self . value ( at : i . offset ) <nl> % end <nl> } <nl> <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> return nil <nl> } <nl> <nl> - let ( i , found ) = _find ( key , _bucket ( key ) ) <nl> + let ( i , found ) = _find ( key , startBucket : _bucket ( key ) ) <nl> if found { <nl> % if Self = = ' Set ' : <nl> - return keyAt ( i . offset ) <nl> + return self . key ( at : i . offset ) <nl> % elif Self = = ' Dictionary ' : <nl> - return valueAt ( i . offset ) <nl> + return self . value ( at : i . offset ) <nl> % end <nl> } <nl> return nil <nl> } <nl> <nl> - internal mutating func updateValue ( value : Value , forKey : Key ) - > Value ? { <nl> + internal mutating func updateValue ( value : Value , forKey key : Key ) - > Value ? { <nl> _sanityCheckFailure ( <nl> " don ' t call mutating methods on _Native $ { Self } Storage " ) <nl> } <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> " don ' t call mutating methods on _Native $ { Self } Storage " ) <nl> } <nl> <nl> - internal mutating func removeValueForKey ( key : Key ) - > Value ? { <nl> + internal mutating func removeValue ( forKey key : Key ) - > Value ? { <nl> _sanityCheckFailure ( <nl> " don ' t call mutating methods on _Native $ { Self } Storage " ) <nl> } <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> - > _Native $ { Self } Storage < $ { TypeParameters } > { <nl> <nl> let requiredCapacity = <nl> - _Native $ { Self } Storage < $ { TypeParameters } > . getMinCapacity ( <nl> - elements . count , _hashContainerDefaultMaxLoadFactorInverse ) <nl> + _Native $ { Self } Storage < $ { TypeParameters } > . minimumCapacity ( <nl> + minimumCount : elements . 
count , <nl> + maxLoadFactorInverse : _hashContainerDefaultMaxLoadFactorInverse ) <nl> let nativeStorage = _Native $ { Self } Storage < $ { TypeParameters } > ( <nl> minimumCapacity : requiredCapacity ) <nl> <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> <nl> var count = 0 <nl> for key in elements { <nl> - let ( i , found ) = nativeStorage . _find ( key , nativeStorage . _bucket ( key ) ) <nl> + let ( i , found ) = <nl> + nativeStorage . _find ( key , startBucket : nativeStorage . _bucket ( key ) ) <nl> if found { <nl> continue <nl> } <nl> struct _Native $ { Self } Storage < $ { TypeParametersDecl } > : <nl> % elif Self = = ' Dictionary ' : <nl> <nl> for ( key , value ) in elements { <nl> - let ( i , found ) = nativeStorage . _find ( key , nativeStorage . _bucket ( key ) ) <nl> + let ( i , found ) = <nl> + nativeStorage . _find ( key , startBucket : nativeStorage . _bucket ( key ) ) <nl> _require ( ! found , " $ { Self } literal contains duplicate keys " ) <nl> nativeStorage . initializeKey ( key , value : value , at : i . offset ) <nl> } <nl> internal struct _BridgedNative $ { Self } Storage { <nl> } <nl> } <nl> <nl> - internal func isInitializedEntry ( i : Int ) - > Bool { <nl> + internal func isInitializedEntry ( at i : Int ) - > Bool { <nl> return initializedEntries [ i ] <nl> } <nl> <nl> - internal func keyAt ( i : Int ) - > AnyObject { <nl> + internal func key ( at i : Int ) - > AnyObject { <nl> _require ( i > = 0 & & i < capacity ) <nl> - _sanityCheck ( isInitializedEntry ( i ) ) <nl> + _sanityCheck ( isInitializedEntry ( at : i ) ) <nl> <nl> let res = ( keys + i ) . pointee <nl> _fixLifetime ( self ) <nl> internal struct _BridgedNative $ { Self } Storage { <nl> <nl> internal func setKey ( key : AnyObject , at i : Int ) { <nl> _require ( i > = 0 & & i < capacity ) <nl> - _sanityCheck ( isInitializedEntry ( i ) ) <nl> + _sanityCheck ( isInitializedEntry ( at : i ) ) <nl> <nl> ( keys + i ) . pointee = key <nl> _fixLifetime ( self ) <nl> internal struct _BridgedNative $ { Self } Storage { <nl> % if Self = = ' Set ' : <nl> @ _transparent <nl> internal func initializeKey ( k : AnyObject , at i : Int ) { <nl> - _sanityCheck ( ! isInitializedEntry ( i ) ) <nl> + _sanityCheck ( ! isInitializedEntry ( at : i ) ) <nl> <nl> ( keys + i ) . initializePointee ( k ) <nl> initializedEntries [ i ] = true <nl> internal struct _BridgedNative $ { Self } Storage { <nl> @ _transparent <nl> internal func initializeKey ( k : AnyObject , value v : AnyObject , at i : Int <nl> ) { <nl> - _sanityCheck ( ! isInitializedEntry ( i ) ) <nl> + _sanityCheck ( ! isInitializedEntry ( at : i ) ) <nl> <nl> ( keys + i ) . initializePointee ( k ) <nl> ( values + i ) . initializePointee ( v ) <nl> internal struct _BridgedNative $ { Self } Storage { <nl> <nl> @ _transparent <nl> @ warn_unused_result <nl> - internal func valueAt ( i : Int ) - > AnyObject { <nl> - _sanityCheck ( isInitializedEntry ( i ) ) <nl> + internal func value ( at i : Int ) - > AnyObject { <nl> + _sanityCheck ( isInitializedEntry ( at : i ) ) <nl> let res = ( values + i ) . pointee <nl> _fixLifetime ( self ) <nl> return res <nl> internal struct _BridgedNative $ { Self } Storage { <nl> @ warn_unused_result <nl> internal func assertingGet ( i : Int ) - > SequenceElement { <nl> _require ( <nl> - isInitializedEntry ( i ) , <nl> + isInitializedEntry ( at : i ) , <nl> " attempting to access $ { Self } elements using an invalid Index " ) <nl> - let key = keyAt ( i ) <nl> + let key = self . 
key ( at : i ) <nl> % if Self = = ' Set ' : <nl> return key <nl> % elif Self = = ' Dictionary ' : <nl> - return ( key , valueAt ( i ) ) <nl> + return ( key , self . value ( at : i ) ) <nl> % end <nl> } <nl> } <nl> final internal class _Native $ { Self } StorageOwner < $ { TypeParametersDecl } > <nl> <nl> / / Bridge everything . <nl> for i in 0 . . < nativeStorage . capacity { <nl> - if nativeStorage . isInitializedEntry ( i ) { <nl> - let key = _bridgeToObjectiveCUnconditional ( nativeStorage . keyAt ( i ) ) <nl> + if nativeStorage . isInitializedEntry ( at : i ) { <nl> + let key = _bridgeToObjectiveCUnconditional ( nativeStorage . key ( at : i ) ) <nl> % if Self = = ' Set ' : <nl> bridged . initializeKey ( key , at : i ) <nl> % elif Self = = ' Dictionary ' : <nl> - let val = _bridgeToObjectiveCUnconditional ( nativeStorage . valueAt ( i ) ) <nl> + let val = _bridgeToObjectiveCUnconditional ( nativeStorage . value ( at : i ) ) <nl> bridged . initializeKey ( key , value : val , at : i ) <nl> % end <nl> } <nl> final internal class _Native $ { Self } StorageOwner < $ { TypeParametersDecl } > <nl> } else { <nl> / / keys null , objects nonnull <nl> while position < capacity { <nl> - if bridgedNativeStorage . isInitializedEntry ( position ) { <nl> - unmanagedObjects [ i ] = bridgedNativeStorage . valueAt ( position ) <nl> + if bridgedNativeStorage . isInitializedEntry ( at : position ) { <nl> + unmanagedObjects [ i ] = bridgedNativeStorage . value ( at : position ) <nl> i + = 1 <nl> } <nl> position + = 1 <nl> final internal class _Native $ { Self } StorageOwner < $ { TypeParametersDecl } > <nl> if objects = = nil { <nl> / / keys nonnull , objects null <nl> while position < capacity { <nl> - if bridgedNativeStorage . isInitializedEntry ( position ) { <nl> - unmanagedKeys [ i ] = bridgedNativeStorage . keyAt ( position ) <nl> + if bridgedNativeStorage . isInitializedEntry ( at : position ) { <nl> + unmanagedKeys [ i ] = bridgedNativeStorage . key ( at : position ) <nl> i + = 1 <nl> } <nl> position + = 1 <nl> final internal class _Native $ { Self } StorageOwner < $ { TypeParametersDecl } > <nl> } else { <nl> / / keys nonnull , objects nonnull <nl> while position < capacity { <nl> - if bridgedNativeStorage . isInitializedEntry ( position ) { <nl> - unmanagedObjects [ i ] = bridgedNativeStorage . valueAt ( position ) <nl> - unmanagedKeys [ i ] = bridgedNativeStorage . keyAt ( position ) <nl> + if bridgedNativeStorage . isInitializedEntry ( at : position ) { <nl> + unmanagedObjects [ i ] = bridgedNativeStorage . value ( at : position ) <nl> + unmanagedKeys [ i ] = bridgedNativeStorage . key ( at : position ) <nl> i + = 1 <nl> } <nl> position + = 1 <nl> final internal class _Native $ { Self } StorageOwner < $ { TypeParametersDecl } > <nl> - > AnyObject ? { <nl> let nativeKey = _forceBridgeFromObjectiveC ( aKey , Key . self ) <nl> let ( i , found ) = nativeStorage . _find ( <nl> - nativeKey , nativeStorage . _bucket ( nativeKey ) ) <nl> + nativeKey , startBucket : nativeStorage . _bucket ( nativeKey ) ) <nl> if found { <nl> return _getBridgedValue ( i ) <nl> } <nl> internal struct _Cocoa $ { Self } Storage : _HashStorage { <nl> } <nl> <nl> @ warn_unused_result <nl> - internal func indexForKey ( key : Key ) - > Index ? { <nl> + internal func index ( forKey key : Key ) - > Index ? { <nl> / / Fast path that does not involve creating an array of all keys . 
In case <nl> / / the key is present , this lookup is a penalty for the slow path , but the <nl> / / potential savings are significant : we could skip a memory allocation and <nl> internal struct _Cocoa $ { Self } Storage : _HashStorage { <nl> <nl> } <nl> <nl> - internal mutating func updateValue ( value : Value , forKey : Key ) - > Value ? { <nl> + internal mutating func updateValue ( value : Value , forKey key : Key ) - > Value ? { <nl> _sanityCheckFailure ( " cannot mutate NS $ { Self } " ) <nl> } <nl> <nl> internal struct _Cocoa $ { Self } Storage : _HashStorage { <nl> _sanityCheckFailure ( " cannot mutate NS $ { Self } " ) <nl> } <nl> <nl> - internal mutating func removeValueForKey ( key : Key ) - > Value ? { <nl> + internal mutating func removeValue ( forKey key : Key ) - > Value ? { <nl> _sanityCheckFailure ( " cannot mutate NS $ { Self } " ) <nl> } <nl> <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> var newNativeStorage = newNativeOwner . nativeStorage <nl> let newCapacity = newNativeStorage . capacity <nl> for i in 0 . . < oldCapacity { <nl> - if oldNativeStorage . isInitializedEntry ( i ) { <nl> + if oldNativeStorage . isInitializedEntry ( at : i ) { <nl> if oldCapacity = = newCapacity { <nl> - let key = oldNativeStorage . keyAt ( i ) <nl> + let key = oldNativeStorage . key ( at : i ) <nl> % if Self = = ' Set ' : <nl> newNativeStorage . initializeKey ( key , at : i ) <nl> % elif Self = = ' Dictionary ' : <nl> - let value = oldNativeStorage . valueAt ( i ) <nl> + let value = oldNativeStorage . value ( at : i ) <nl> newNativeStorage . initializeKey ( key , value : value , at : i ) <nl> % end <nl> } else { <nl> - let key = oldNativeStorage . keyAt ( i ) <nl> + let key = oldNativeStorage . key ( at : i ) <nl> % if Self = = ' Set ' : <nl> newNativeStorage . unsafeAddNew ( key : key ) <nl> % elif Self = = ' Dictionary ' : <nl> newNativeStorage . unsafeAddNew ( <nl> key : key , <nl> - value : oldNativeStorage . valueAt ( i ) ) <nl> + value : oldNativeStorage . value ( at : i ) ) <nl> % end <nl> } <nl> } <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> internal mutating func migrateDataToNativeStorage ( <nl> cocoaStorage : _Cocoa $ { Self } Storage <nl> ) { <nl> - let minCapacity = NativeStorage . getMinCapacity ( <nl> - cocoaStorage . count , _hashContainerDefaultMaxLoadFactorInverse ) <nl> + let minCapacity = NativeStorage . minimumCapacity ( <nl> + minimumCount : cocoaStorage . count , <nl> + maxLoadFactorInverse : _hashContainerDefaultMaxLoadFactorInverse ) <nl> let allocated = ensureUniqueNativeStorage ( minCapacity ) . reallocated <nl> _sanityCheck ( allocated , " failed to allocate native $ { Self } storage " ) <nl> } <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> } <nl> <nl> @ warn_unused_result <nl> - internal func indexForKey ( key : Key ) - > Index ? { <nl> + internal func index ( forKey key : Key ) - > Index ? { <nl> if _fastPath ( guaranteedNative ) { <nl> - if let nativeIndex = native . indexForKey ( key ) { <nl> + if let nativeIndex = native . index ( forKey : key ) { <nl> return . _Native ( nativeIndex ) <nl> } <nl> return nil <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> <nl> switch self { <nl> case . Native : <nl> - if let nativeIndex = native . indexForKey ( key ) { <nl> + if let nativeIndex = native . index ( forKey : key ) { <nl> return . 
_Native ( nativeIndex ) <nl> } <nl> return nil <nl> case . Cocoa ( let cocoaStorage ) : <nl> # if _runtime ( _ObjC ) <nl> let anyObjectKey : AnyObject = _bridgeToObjectiveCUnconditional ( key ) <nl> - if let cocoaIndex = cocoaStorage . indexForKey ( anyObjectKey ) { <nl> + if let cocoaIndex = cocoaStorage . index ( forKey : anyObjectKey ) { <nl> return . _Cocoa ( cocoaIndex ) <nl> } <nl> return nil <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> internal mutating func nativeUpdateValue ( <nl> value : Value , forKey key : Key <nl> ) - > Value ? { <nl> - var ( i , found ) = native . _find ( key , native . _bucket ( key ) ) <nl> + var ( i , found ) = native . _find ( key , startBucket : native . _bucket ( key ) ) <nl> <nl> let minCapacity = found <nl> ? native . capacity <nl> - : NativeStorage . getMinCapacity ( <nl> - native . count + 1 , <nl> - native . maxLoadFactorInverse ) <nl> + : NativeStorage . minimumCapacity ( <nl> + minimumCount : native . count + 1 , <nl> + maxLoadFactorInverse : native . maxLoadFactorInverse ) <nl> <nl> let ( _ , capacityChanged ) = ensureUniqueNativeStorage ( minCapacity ) <nl> if capacityChanged { <nl> - i = native . _find ( key , native . _bucket ( key ) ) . pos <nl> + i = native . _find ( key , startBucket : native . _bucket ( key ) ) . pos <nl> } <nl> <nl> % if Self = = ' Set ' : <nl> - let oldValue : Value ? = found ? native . keyAt ( i . offset ) : nil <nl> + let oldValue : Value ? = found ? native . key ( at : i . offset ) : nil <nl> if found { <nl> native . setKey ( key , at : i . offset ) <nl> } else { <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> native . count + = 1 <nl> } <nl> % elif Self = = ' Dictionary ' : <nl> - let oldValue : Value ? = found ? native . valueAt ( i . offset ) : nil <nl> + let oldValue : Value ? = found ? native . value ( at : i . offset ) : nil <nl> if found { <nl> native . setKey ( key , value : value , at : i . offset ) <nl> } else { <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> nativeStorage : NativeStorage , idealBucket : Int , offset : Int <nl> ) { <nl> _sanityCheck ( <nl> - nativeStorage . isInitializedEntry ( offset ) , " expected initialized entry " ) <nl> + nativeStorage . isInitializedEntry ( at : offset ) , " expected initialized entry " ) <nl> <nl> / / remove the element <nl> - nativeStorage . destroyEntryAt ( offset ) <nl> + nativeStorage . destroyEntry ( at : offset ) <nl> nativeStorage . count - = 1 <nl> <nl> / / If we ' ve put a hole in a chain of contiguous elements , some <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> <nl> / / Find the first bucket in the contiguous chain <nl> var start = idealBucket <nl> - while nativeStorage . isInitializedEntry ( nativeStorage . _prev ( start ) ) { <nl> + while nativeStorage . isInitializedEntry ( at : nativeStorage . _prev ( start ) ) { <nl> start = nativeStorage . _prev ( start ) <nl> } <nl> <nl> / / Find the last bucket in the contiguous chain <nl> var lastInChain = hole <nl> var b = nativeStorage . _next ( lastInChain ) <nl> - while nativeStorage . isInitializedEntry ( b ) { <nl> + while nativeStorage . isInitializedEntry ( at : b ) { <nl> lastInChain = b <nl> b = nativeStorage . _next ( b ) <nl> } <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> / / something out - of - place . <nl> var b = lastInChain <nl> while b ! 
= hole { <nl> - let idealBucket = nativeStorage . _bucket ( nativeStorage . keyAt ( b ) ) <nl> + let idealBucket = nativeStorage . _bucket ( nativeStorage . key ( at : b ) ) <nl> <nl> / / Does this element belong between start and hole ? We need <nl> / / two separate tests depending on whether [ start , hole ] wraps <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> } <nl> <nl> / / Move the found element into the hole <nl> - nativeStorage . moveInitializeFrom ( nativeStorage , at : b , toEntryAt : hole ) <nl> + nativeStorage . moveInitializeEntry ( <nl> + from : nativeStorage , <nl> + at : b , <nl> + toEntryAt : hole ) <nl> hole = b <nl> } <nl> } <nl> <nl> - internal mutating func nativeRemoveObjectForKey ( key : Key ) - > Value ? { <nl> + internal mutating func nativeRemoveObject ( forKey key : Key ) - > Value ? { <nl> var nativeStorage = native <nl> var idealBucket = nativeStorage . _bucket ( key ) <nl> - var ( index , found ) = nativeStorage . _find ( key , idealBucket ) <nl> + var ( index , found ) = nativeStorage . _find ( key , startBucket : idealBucket ) <nl> <nl> / / Fast path : if the key is not present , we will not mutate the set , <nl> / / so don ' t force unique storage . <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> } <nl> if capacityChanged { <nl> idealBucket = nativeStorage . _bucket ( key ) <nl> - ( index , found ) = nativeStorage . _find ( key , idealBucket ) <nl> + ( index , found ) = nativeStorage . _find ( key , startBucket : idealBucket ) <nl> _sanityCheck ( found , " key was lost during storage migration " ) <nl> } <nl> % if Self = = ' Set ' : <nl> - let oldValue = nativeStorage . keyAt ( index . offset ) <nl> + let oldValue = nativeStorage . key ( at : index . offset ) <nl> % elif Self = = ' Dictionary ' : <nl> - let oldValue = nativeStorage . valueAt ( index . offset ) <nl> + let oldValue = nativeStorage . value ( at : index . offset ) <nl> % end <nl> nativeDeleteImpl ( nativeStorage , idealBucket : idealBucket , <nl> offset : index . offset ) <nl> return oldValue <nl> } <nl> <nl> - internal mutating func nativeRemoveAt ( <nl> - nativeIndex : NativeIndex <nl> + internal mutating func nativeRemove ( <nl> + at nativeIndex : NativeIndex <nl> ) - > SequenceElement { <nl> var nativeStorage = native <nl> <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> <nl> internal mutating func remove ( at index : Index ) - > SequenceElement { <nl> if _fastPath ( guaranteedNative ) { <nl> - return nativeRemoveAt ( index . _nativeIndex ) <nl> + return nativeRemove ( at : index . _nativeIndex ) <nl> } <nl> <nl> switch self { <nl> case . Native : <nl> - return nativeRemoveAt ( index . _nativeIndex ) <nl> + return nativeRemove ( at : index . _nativeIndex ) <nl> case . Cocoa ( let cocoaStorage ) : <nl> # if _runtime ( _ObjC ) <nl> / / We have to migrate the data first . But after we do so , the Cocoa <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> cocoaIndex . allKeys [ cocoaIndex . currentKeyIndex ] <nl> migrateDataToNativeStorage ( cocoaStorage ) <nl> let key = _forceBridgeFromObjectiveC ( anyObjectKey , Key . 
self ) <nl> - let value = nativeRemoveObjectForKey ( key ) <nl> + let value = nativeRemoveObject ( forKey : key ) <nl> <nl> % if Self = = ' Set ' : <nl> _sanityCheck ( key = = value , " bridging did not preserve equality " ) <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> } <nl> } <nl> <nl> - internal mutating func removeValueForKey ( key : Key ) - > Value ? { <nl> + internal mutating func removeValue ( forKey key : Key ) - > Value ? { <nl> if _fastPath ( guaranteedNative ) { <nl> - return nativeRemoveObjectForKey ( key ) <nl> + return nativeRemoveObject ( forKey : key ) <nl> } <nl> <nl> switch self { <nl> case . Native : <nl> - return nativeRemoveObjectForKey ( key ) <nl> + return nativeRemoveObject ( forKey : key ) <nl> case . Cocoa ( let cocoaStorage ) : <nl> # if _runtime ( _ObjC ) <nl> let anyObjectKey : AnyObject = _bridgeToObjectiveCUnconditional ( key ) <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> return nil <nl> } <nl> migrateDataToNativeStorage ( cocoaStorage ) <nl> - return nativeRemoveObjectForKey ( key ) <nl> + return nativeRemoveObject ( forKey : key ) <nl> # else <nl> _sanityCheckFailure ( " internal error : unexpected cocoa $ { Self } " ) <nl> # endif <nl> internal enum _Variant $ { Self } Storage < $ { TypeParametersDecl } > : _HashStorage { <nl> } <nl> <nl> for b in 0 . . < nativeStorage . capacity { <nl> - if nativeStorage . isInitializedEntry ( b ) { <nl> - nativeStorage . destroyEntryAt ( b ) <nl> + if nativeStorage . isInitializedEntry ( at : b ) { <nl> + nativeStorage . destroyEntry ( at : b ) <nl> } <nl> } <nl> nativeStorage . count = 0 <nl> internal struct _Native $ { Self } Index < $ { TypeParametersDecl } > : <nl> / / < rdar : / / problem / 15484639 > Refcounting bug <nl> while i < nativeStorage . capacity / * & & ! nativeStorage [ i ] * / { <nl> / / FIXME : workaround for < rdar : / / problem / 15484639 > <nl> - if nativeStorage . isInitializedEntry ( i ) { <nl> + if nativeStorage . isInitializedEntry ( at : i ) { <nl> break <nl> } <nl> / / end workaround <nl> public struct _ $ { Self } Builder < $ { TypeParametersDecl } > { <nl> <nl> public init ( count : Int ) { <nl> let requiredCapacity = <nl> - _Native $ { Self } Storage < $ { TypeParameters } > . getMinCapacity ( <nl> - count , _hashContainerDefaultMaxLoadFactorInverse ) <nl> + _Native $ { Self } Storage < $ { TypeParameters } > . minimumCapacity ( <nl> + minimumCount : count , <nl> + maxLoadFactorInverse : _hashContainerDefaultMaxLoadFactorInverse ) <nl> _result = $ { Self } < $ { TypeParameters } > ( minimumCapacity : requiredCapacity ) <nl> _nativeStorage = _result . _variantStorage . 
native <nl> _requestedCount = count <nl> extension Set { <nl> } <nl> <nl> extension Set { <nl> - @ available ( * , unavailable , renamed = " removeAt " ) <nl> + @ available ( * , unavailable , renamed = " remove ( at : ) " ) <nl> public mutating func removeAtIndex ( index : Index ) - > Element { <nl> fatalError ( " unavailable function can ' t be called " ) <nl> } <nl> extension Set { <nl> } <nl> } <nl> <nl> - <nl> extension Dictionary { <nl> - @ available ( * , unavailable , renamed = " removeAt " ) <nl> + @ available ( * , unavailable , renamed = " remove ( at : ) " ) <nl> public mutating func removeAtIndex ( index : Index ) - > Element { <nl> fatalError ( " unavailable function can ' t be called " ) <nl> } <nl> <nl> + @ available ( * , unavailable , renamed = " index ( forKey : ) " ) <nl> + public func indexForKey ( key : Key ) - > Index ? { <nl> + fatalError ( " unavailable function can ' t be called " ) <nl> + } <nl> + <nl> + @ available ( * , unavailable , renamed = " removeValue ( forKey : ) " ) <nl> + public mutating func removeValueForKey ( key : Key ) - > Value ? { <nl> + fatalError ( " unavailable function can ' t be called " ) <nl> + } <nl> + <nl> @ available ( * , unavailable , renamed = " iterator " ) <nl> public func generate ( ) - > DictionaryIterator < Key , Value > { <nl> fatalError ( " unavailable function can ' t be called " ) <nl> mmm a / test / 1_stdlib / DictionaryTraps . swift <nl> ppp b / test / 1_stdlib / DictionaryTraps . swift <nl> DictionaryTraps . test ( " RemoveInvalidIndex4 " ) <nl> reason : " this trap is not guaranteed to happen in - Ounchecked " ) ) <nl> . code { <nl> var d = [ 10 : 1010 ] <nl> - let index = d . indexForKey ( 10 ) ! <nl> + let index = d . index ( forKey : 10 ) ! <nl> d . remove ( at : index ) <nl> expectEmpty ( d [ 10 ] ) <nl> expectCrashLater ( ) <nl> mmm a / validation - test / stdlib / Dictionary . swift <nl> ppp b / validation - test / stdlib / Dictionary . swift <nl> DictionaryTestSuite . test ( " COW . Fast . IndexForKeyDoesNotReallocate " ) { <nl> <nl> / / Find an existing key . <nl> do { <nl> - var foundIndex1 = d . indexForKey ( 10 ) ! <nl> + var foundIndex1 = d . index ( forKey : 10 ) ! <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> <nl> - var foundIndex2 = d . indexForKey ( 10 ) ! <nl> + var foundIndex2 = d . index ( forKey : 10 ) ! <nl> assert ( foundIndex1 = = foundIndex2 ) <nl> <nl> assert ( d [ foundIndex1 ] . 0 = = 10 ) <nl> DictionaryTestSuite . test ( " COW . Fast . IndexForKeyDoesNotReallocate " ) { <nl> <nl> / / Try to find a key that is not present . <nl> do { <nl> - var foundIndex1 = d . indexForKey ( 1111 ) <nl> + var foundIndex1 = d . index ( forKey : 1111 ) <nl> assert ( foundIndex1 = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> } <nl> DictionaryTestSuite . test ( " COW . Fast . IndexForKeyDoesNotReallocate " ) { <nl> var d2 : [ MinimalHashableValue : OpaqueValue < Int > ] = [ : ] <nl> MinimalHashableValue . timesEqualEqualWasCalled = 0 <nl> MinimalHashableValue . timesHashValueWasCalled = 0 <nl> - expectEmpty ( d2 . indexForKey ( MinimalHashableValue ( 42 ) ) ) <nl> + expectEmpty ( d2 . index ( forKey : MinimalHashableValue ( 42 ) ) ) <nl> <nl> / / If the dictionary is empty , we shouldn ' t be computing the hash value of <nl> / / the provided key . <nl> DictionaryTestSuite . test ( " COW . Slow . IndexForKeyDoesNotReallocate " ) { <nl> <nl> / / Find an existing key . <nl> do { <nl> - var foundIndex1 = d . indexForKey ( TestKeyTy ( 10 ) ) ! 
<nl> + var foundIndex1 = d . index ( forKey : TestKeyTy ( 10 ) ) ! <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> <nl> - var foundIndex2 = d . indexForKey ( TestKeyTy ( 10 ) ) ! <nl> + var foundIndex2 = d . index ( forKey : TestKeyTy ( 10 ) ) ! <nl> assert ( foundIndex1 = = foundIndex2 ) <nl> <nl> assert ( d [ foundIndex1 ] . 0 = = TestKeyTy ( 10 ) ) <nl> DictionaryTestSuite . test ( " COW . Slow . IndexForKeyDoesNotReallocate " ) { <nl> <nl> / / Try to find a key that is not present . <nl> do { <nl> - var foundIndex1 = d . indexForKey ( TestKeyTy ( 1111 ) ) <nl> + var foundIndex1 = d . index ( forKey : TestKeyTy ( 1111 ) ) <nl> assert ( foundIndex1 = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> } <nl> DictionaryTestSuite . test ( " COW . Slow . IndexForKeyDoesNotReallocate " ) { <nl> var d2 : [ MinimalHashableClass : OpaqueValue < Int > ] = [ : ] <nl> MinimalHashableClass . timesEqualEqualWasCalled = 0 <nl> MinimalHashableClass . timesHashValueWasCalled = 0 <nl> - expectEmpty ( d2 . indexForKey ( MinimalHashableClass ( 42 ) ) ) <nl> + expectEmpty ( d2 . index ( forKey : MinimalHashableClass ( 42 ) ) ) <nl> <nl> / / If the dictionary is empty , we shouldn ' t be computing the hash value of <nl> / / the provided key . <nl> DictionaryTestSuite . test ( " COW . Fast . RemoveAtDoesNotReallocate " ) { <nl> var d = getCOWFastDictionary ( ) <nl> var identity1 = unsafeBitCast ( d , to : Int . self ) <nl> <nl> - let foundIndex1 = d . indexForKey ( 10 ) ! <nl> + let foundIndex1 = d . index ( forKey : 10 ) ! <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> <nl> assert ( d [ foundIndex1 ] . 0 = = 10 ) <nl> DictionaryTestSuite . test ( " COW . Fast . RemoveAtDoesNotReallocate " ) { <nl> assert ( removed . 1 = = 1010 ) <nl> <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> - assert ( d . indexForKey ( 10 ) = = nil ) <nl> + assert ( d . index ( forKey : 10 ) = = nil ) <nl> } <nl> <nl> do { <nl> DictionaryTestSuite . test ( " COW . Fast . RemoveAtDoesNotReallocate " ) { <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> assert ( identity1 = = unsafeBitCast ( d2 , to : Int . self ) ) <nl> <nl> - var foundIndex1 = d2 . indexForKey ( 10 ) ! <nl> + var foundIndex1 = d2 . index ( forKey : 10 ) ! <nl> assert ( d2 [ foundIndex1 ] . 0 = = 10 ) <nl> assert ( d2 [ foundIndex1 ] . 1 = = 1010 ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> DictionaryTestSuite . test ( " COW . Fast . RemoveAtDoesNotReallocate " ) { <nl> <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> assert ( identity1 ! = unsafeBitCast ( d2 , to : Int . self ) ) <nl> - assert ( d2 . indexForKey ( 10 ) = = nil ) <nl> + assert ( d2 . index ( forKey : 10 ) = = nil ) <nl> } <nl> } <nl> <nl> DictionaryTestSuite . test ( " COW . Slow . RemoveAtDoesNotReallocate " ) { <nl> var d = getCOWSlowDictionary ( ) <nl> var identity1 = unsafeBitCast ( d , to : Int . self ) <nl> <nl> - var foundIndex1 = d . indexForKey ( TestKeyTy ( 10 ) ) ! <nl> + var foundIndex1 = d . index ( forKey : TestKeyTy ( 10 ) ) ! <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> <nl> assert ( d [ foundIndex1 ] . 0 = = TestKeyTy ( 10 ) ) <nl> DictionaryTestSuite . test ( " COW . Slow . RemoveAtDoesNotReallocate " ) { <nl> assert ( removed . 1 . value = = 1010 ) <nl> <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> - assert ( d . 
indexForKey ( TestKeyTy ( 10 ) ) = = nil ) <nl> + assert ( d . index ( forKey : TestKeyTy ( 10 ) ) = = nil ) <nl> } <nl> <nl> do { <nl> DictionaryTestSuite . test ( " COW . Slow . RemoveAtDoesNotReallocate " ) { <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> assert ( identity1 = = unsafeBitCast ( d2 , to : Int . self ) ) <nl> <nl> - var foundIndex1 = d2 . indexForKey ( TestKeyTy ( 10 ) ) ! <nl> + var foundIndex1 = d2 . index ( forKey : TestKeyTy ( 10 ) ) ! <nl> assert ( d2 [ foundIndex1 ] . 0 = = TestKeyTy ( 10 ) ) <nl> assert ( d2 [ foundIndex1 ] . 1 . value = = 1010 ) <nl> <nl> DictionaryTestSuite . test ( " COW . Slow . RemoveAtDoesNotReallocate " ) { <nl> <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> assert ( identity1 ! = unsafeBitCast ( d2 , to : Int . self ) ) <nl> - assert ( d2 . indexForKey ( TestKeyTy ( 10 ) ) = = nil ) <nl> + assert ( d2 . index ( forKey : TestKeyTy ( 10 ) ) = = nil ) <nl> } <nl> } <nl> <nl> DictionaryTestSuite . test ( " COW . Fast . RemoveValueForKeyDoesNotReallocate " ) { <nl> var d1 = getCOWFastDictionary ( ) <nl> var identity1 = unsafeBitCast ( d1 , to : Int . self ) <nl> <nl> - var deleted = d1 . removeValueForKey ( 0 ) <nl> + var deleted = d1 . removeValue ( forKey : 0 ) <nl> assert ( deleted = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> <nl> - deleted = d1 . removeValueForKey ( 10 ) <nl> + deleted = d1 . removeValue ( forKey : 10 ) <nl> assert ( deleted ! = = 1010 ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> <nl> DictionaryTestSuite . test ( " COW . Fast . RemoveValueForKeyDoesNotReallocate " ) { <nl> var identity1 = unsafeBitCast ( d1 , to : Int . self ) <nl> <nl> var d2 = d1 <nl> - var deleted = d2 . removeValueForKey ( 0 ) <nl> + var deleted = d2 . removeValue ( forKey : 0 ) <nl> assert ( deleted = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> assert ( identity1 = = unsafeBitCast ( d2 , to : Int . self ) ) <nl> <nl> - deleted = d2 . removeValueForKey ( 10 ) <nl> + deleted = d2 . removeValue ( forKey : 10 ) <nl> assert ( deleted ! = = 1010 ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> assert ( identity1 ! = unsafeBitCast ( d2 , to : Int . self ) ) <nl> DictionaryTestSuite . test ( " COW . Slow . RemoveValueForKeyDoesNotReallocate " ) { <nl> var d1 = getCOWSlowDictionary ( ) <nl> var identity1 = unsafeBitCast ( d1 , to : Int . self ) <nl> <nl> - var deleted = d1 . removeValueForKey ( TestKeyTy ( 0 ) ) <nl> + var deleted = d1 . removeValue ( forKey : TestKeyTy ( 0 ) ) <nl> assert ( deleted = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> <nl> - deleted = d1 . removeValueForKey ( TestKeyTy ( 10 ) ) <nl> + deleted = d1 . removeValue ( forKey : TestKeyTy ( 10 ) ) <nl> assert ( deleted ! . value = = 1010 ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> <nl> DictionaryTestSuite . test ( " COW . Slow . RemoveValueForKeyDoesNotReallocate " ) { <nl> var identity1 = unsafeBitCast ( d1 , to : Int . self ) <nl> <nl> var d2 = d1 <nl> - var deleted = d2 . removeValueForKey ( TestKeyTy ( 0 ) ) <nl> + var deleted = d2 . removeValue ( forKey : TestKeyTy ( 0 ) ) <nl> assert ( deleted = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> assert ( identity1 = = unsafeBitCast ( d2 , to : Int . self ) ) <nl> <nl> - deleted = d2 . removeValueForKey ( TestKeyTy ( 10 ) ) <nl> + deleted = d2 . 
removeValue ( forKey : TestKeyTy ( 10 ) ) <nl> assert ( deleted ! . value = = 1010 ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> assert ( identity1 ! = unsafeBitCast ( d2 , to : Int . self ) ) <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Verbatim . DictionaryIsCopied " ) { <nl> <nl> / / Find an existing key . <nl> do { <nl> - var kv = d [ d . indexForKey ( TestObjCKeyTy ( 10 ) ) ! ] <nl> + var kv = d [ d . index ( forKey : TestObjCKeyTy ( 10 ) ) ! ] <nl> assert ( kv . 0 = = TestObjCKeyTy ( 10 ) ) <nl> assert ( kv . 1 . value = = 1010 ) <nl> } <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Verbatim . DictionaryIsCopied " ) { <nl> <nl> / / Find an existing key , again . <nl> do { <nl> - var kv = d [ d . indexForKey ( TestObjCKeyTy ( 10 ) ) ! ] <nl> + var kv = d [ d . index ( forKey : TestObjCKeyTy ( 10 ) ) ! ] <nl> assert ( kv . 0 = = TestObjCKeyTy ( 10 ) ) <nl> assert ( kv . 1 . value = = 1010 ) <nl> } <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Nonverbatim . DictionaryIsCopied " ) { <nl> <nl> / / Find an existing key . <nl> do { <nl> - var kv = d [ d . indexForKey ( TestBridgedKeyTy ( 10 ) ) ! ] <nl> + var kv = d [ d . index ( forKey : TestBridgedKeyTy ( 10 ) ) ! ] <nl> assert ( kv . 0 = = TestBridgedKeyTy ( 10 ) ) <nl> assert ( kv . 1 . value = = 1010 ) <nl> } <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Nonverbatim . DictionaryIsCopied " ) { <nl> <nl> / / Find an existing key , again . <nl> do { <nl> - var kv = d [ d . indexForKey ( TestBridgedKeyTy ( 10 ) ) ! ] <nl> + var kv = d [ d . index ( forKey : TestBridgedKeyTy ( 10 ) ) ! ] <nl> assert ( kv . 0 = = TestBridgedKeyTy ( 10 ) ) <nl> assert ( kv . 1 . value = = 1010 ) <nl> } <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Verbatim . IndexForKey " ) { <nl> <nl> / / Find an existing key . <nl> do { <nl> - var kv = d [ d . indexForKey ( TestObjCKeyTy ( 10 ) ) ! ] <nl> + var kv = d [ d . index ( forKey : TestObjCKeyTy ( 10 ) ) ! ] <nl> assert ( kv . 0 = = TestObjCKeyTy ( 10 ) ) <nl> assert ( kv . 1 . value = = 1010 ) <nl> <nl> - kv = d [ d . indexForKey ( TestObjCKeyTy ( 20 ) ) ! ] <nl> + kv = d [ d . index ( forKey : TestObjCKeyTy ( 20 ) ) ! ] <nl> assert ( kv . 0 = = TestObjCKeyTy ( 20 ) ) <nl> assert ( kv . 1 . value = = 1020 ) <nl> <nl> - kv = d [ d . indexForKey ( TestObjCKeyTy ( 30 ) ) ! ] <nl> + kv = d [ d . index ( forKey : TestObjCKeyTy ( 30 ) ) ! ] <nl> assert ( kv . 0 = = TestObjCKeyTy ( 30 ) ) <nl> assert ( kv . 1 . value = = 1030 ) <nl> } <nl> <nl> / / Try to find a key that does not exist . <nl> - assert ( d . indexForKey ( TestObjCKeyTy ( 40 ) ) = = nil ) <nl> + assert ( d . index ( forKey : TestObjCKeyTy ( 40 ) ) = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> } <nl> <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Nonverbatim . IndexForKey " ) { <nl> <nl> / / Find an existing key . <nl> do { <nl> - var kv = d [ d . indexForKey ( TestBridgedKeyTy ( 10 ) ) ! ] <nl> + var kv = d [ d . index ( forKey : TestBridgedKeyTy ( 10 ) ) ! ] <nl> assert ( kv . 0 = = TestBridgedKeyTy ( 10 ) ) <nl> assert ( kv . 1 . value = = 1010 ) <nl> <nl> - kv = d [ d . indexForKey ( TestBridgedKeyTy ( 20 ) ) ! ] <nl> + kv = d [ d . index ( forKey : TestBridgedKeyTy ( 20 ) ) ! ] <nl> assert ( kv . 0 = = TestBridgedKeyTy ( 20 ) ) <nl> assert ( kv . 1 . value = = 1020 ) <nl> <nl> - kv = d [ d . indexForKey ( TestBridgedKeyTy ( 30 ) ) ! ] <nl> + kv = d [ d . index ( forKey : TestBridgedKeyTy ( 30 ) ) ! ] <nl> assert ( kv . 
0 = = TestBridgedKeyTy ( 30 ) ) <nl> assert ( kv . 1 . value = = 1030 ) <nl> } <nl> <nl> / / Try to find a key that does not exist . <nl> - assert ( d . indexForKey ( TestBridgedKeyTy ( 40 ) ) = = nil ) <nl> + assert ( d . index ( forKey : TestBridgedKeyTy ( 40 ) ) = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> } <nl> <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Verbatim . RemoveAt " ) { <nl> var identity1 = unsafeBitCast ( d , to : Int . self ) <nl> assert ( isCocoaDictionary ( d ) ) <nl> <nl> - let foundIndex1 = d . indexForKey ( TestObjCKeyTy ( 10 ) ) ! <nl> + let foundIndex1 = d . index ( forKey : TestObjCKeyTy ( 10 ) ) ! <nl> assert ( d [ foundIndex1 ] . 0 = = TestObjCKeyTy ( 10 ) ) <nl> assert ( d [ foundIndex1 ] . 1 . value = = 1010 ) <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Verbatim . RemoveAt " ) { <nl> assert ( removedElement . 0 = = TestObjCKeyTy ( 10 ) ) <nl> assert ( removedElement . 1 . value = = 1010 ) <nl> assert ( d . count = = 2 ) <nl> - assert ( d . indexForKey ( TestObjCKeyTy ( 10 ) ) = = nil ) <nl> + assert ( d . index ( forKey : TestObjCKeyTy ( 10 ) ) = = nil ) <nl> } <nl> <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Nonverbatim . RemoveAt " ) { <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Nonverbatim . RemoveAt " ) { <nl> var identity1 = unsafeBitCast ( d , to : Int . self ) <nl> assert ( isNativeDictionary ( d ) ) <nl> <nl> - let foundIndex1 = d . indexForKey ( TestBridgedKeyTy ( 10 ) ) ! <nl> + let foundIndex1 = d . index ( forKey : TestBridgedKeyTy ( 10 ) ) ! <nl> assert ( d [ foundIndex1 ] . 0 = = TestBridgedKeyTy ( 10 ) ) <nl> assert ( d [ foundIndex1 ] . 1 . value = = 1010 ) <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Nonverbatim . RemoveAt " ) { <nl> assert ( removedElement . 0 = = TestObjCKeyTy ( 10 ) ) <nl> assert ( removedElement . 1 . value = = 1010 ) <nl> assert ( d . count = = 2 ) <nl> - assert ( d . indexForKey ( TestBridgedKeyTy ( 10 ) ) = = nil ) <nl> + assert ( d . index ( forKey : TestBridgedKeyTy ( 10 ) ) = = nil ) <nl> } <nl> <nl> <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Verbatim . RemoveValueForKey " ) { <nl> var identity1 = unsafeBitCast ( d , to : Int . self ) <nl> assert ( isCocoaDictionary ( d ) ) <nl> <nl> - var deleted : AnyObject ? = d . removeValueForKey ( TestObjCKeyTy ( 0 ) ) <nl> + var deleted : AnyObject ? = d . removeValue ( forKey : TestObjCKeyTy ( 0 ) ) <nl> assert ( deleted = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> assert ( isCocoaDictionary ( d ) ) <nl> <nl> - deleted = d . removeValueForKey ( TestObjCKeyTy ( 10 ) ) <nl> + deleted = d . removeValue ( forKey : TestObjCKeyTy ( 10 ) ) <nl> assert ( deleted ! . value = = 1010 ) <nl> var identity2 = unsafeBitCast ( d , to : Int . self ) <nl> assert ( identity1 ! = identity2 ) <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Verbatim . RemoveValueForKey " ) { <nl> assert ( isCocoaDictionary ( d1 ) ) <nl> assert ( isCocoaDictionary ( d2 ) ) <nl> <nl> - var deleted : AnyObject ? = d2 . removeValueForKey ( TestObjCKeyTy ( 0 ) ) <nl> + var deleted : AnyObject ? = d2 . removeValue ( forKey : TestObjCKeyTy ( 0 ) ) <nl> assert ( deleted = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> assert ( identity1 = = unsafeBitCast ( d2 , to : Int . 
self ) ) <nl> assert ( isCocoaDictionary ( d1 ) ) <nl> assert ( isCocoaDictionary ( d2 ) ) <nl> <nl> - deleted = d2 . removeValueForKey ( TestObjCKeyTy ( 10 ) ) <nl> + deleted = d2 . removeValue ( forKey : TestObjCKeyTy ( 10 ) ) <nl> assert ( deleted ! . value = = 1010 ) <nl> var identity2 = unsafeBitCast ( d2 , to : Int . self ) <nl> assert ( identity1 ! = identity2 ) <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Nonverbatim . RemoveValueForKey " ) { <nl> var identity1 = unsafeBitCast ( d , to : Int . self ) <nl> assert ( isNativeDictionary ( d ) ) <nl> <nl> - var deleted = d . removeValueForKey ( TestBridgedKeyTy ( 0 ) ) <nl> + var deleted = d . removeValue ( forKey : TestBridgedKeyTy ( 0 ) ) <nl> assert ( deleted = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d , to : Int . self ) ) <nl> assert ( isNativeDictionary ( d ) ) <nl> <nl> - deleted = d . removeValueForKey ( TestBridgedKeyTy ( 10 ) ) <nl> + deleted = d . removeValue ( forKey : TestBridgedKeyTy ( 10 ) ) <nl> assert ( deleted ! . value = = 1010 ) <nl> var identity2 = unsafeBitCast ( d , to : Int . self ) <nl> assert ( identity1 = = identity2 ) <nl> DictionaryTestSuite . test ( " BridgedFromObjC . Nonverbatim . RemoveValueForKey " ) { <nl> assert ( isNativeDictionary ( d1 ) ) <nl> assert ( isNativeDictionary ( d2 ) ) <nl> <nl> - var deleted = d2 . removeValueForKey ( TestBridgedKeyTy ( 0 ) ) <nl> + var deleted = d2 . removeValue ( forKey : TestBridgedKeyTy ( 0 ) ) <nl> assert ( deleted = = nil ) <nl> assert ( identity1 = = unsafeBitCast ( d1 , to : Int . self ) ) <nl> assert ( identity1 = = unsafeBitCast ( d2 , to : Int . self ) ) <nl> assert ( isNativeDictionary ( d1 ) ) <nl> assert ( isNativeDictionary ( d2 ) ) <nl> <nl> - deleted = d2 . removeValueForKey ( TestBridgedKeyTy ( 10 ) ) <nl> + deleted = d2 . removeValue ( forKey : TestBridgedKeyTy ( 10 ) ) <nl> assert ( deleted ! . value = = 1010 ) <nl> var identity2 = unsafeBitCast ( d2 , to : Int . self ) <nl> assert ( identity1 ! = identity2 ) <nl> DictionaryTestSuite . test ( " mutationDoesNotAffectIterator / subscript / store " ) { <nl> DictionaryTestSuite . test ( " mutationDoesNotAffectIterator / removeValueForKey , 1 " ) { <nl> var dict = getDerivedAPIsDictionary ( ) <nl> var iter = dict . iterator ( ) <nl> - expectOptionalEqual ( 1010 , dict . removeValueForKey ( 10 ) ) <nl> + expectOptionalEqual ( 1010 , dict . removeValue ( forKey : 10 ) ) <nl> <nl> expectEqualsUnordered ( <nl> [ ( 10 , 1010 ) , ( 20 , 1020 ) , ( 30 , 1030 ) ] , <nl> DictionaryTestSuite . test ( " mutationDoesNotAffectIterator / removeValueForKey , 1 " ) { <nl> DictionaryTestSuite . test ( " mutationDoesNotAffectIterator / removeValueForKey , all " ) { <nl> var dict = getDerivedAPIsDictionary ( ) <nl> var iter = dict . iterator ( ) <nl> - expectOptionalEqual ( 1010 , dict . removeValueForKey ( 10 ) ) <nl> - expectOptionalEqual ( 1020 , dict . removeValueForKey ( 20 ) ) <nl> - expectOptionalEqual ( 1030 , dict . removeValueForKey ( 30 ) ) <nl> + expectOptionalEqual ( 1010 , dict . removeValue ( forKey : 10 ) ) <nl> + expectOptionalEqual ( 1020 , dict . removeValue ( forKey : 20 ) ) <nl> + expectOptionalEqual ( 1030 , dict . removeValue ( forKey : 30 ) ) <nl> <nl> expectEqualsUnordered ( <nl> [ ( 10 , 1010 ) , ( 20 , 1020 ) , ( 30 , 1030 ) ] , <nl> DictionaryTestSuite . test ( " removeAt " ) { <nl> 20 : 2020 , <nl> 30 : 3030 , <nl> ] <nl> - let removed = d . remove ( at : d . indexForKey ( i * 10 ) ! ) <nl> + let removed = d . remove ( at : d . index ( forKey : i * 10 ) ! 
) <nl> expectEqual ( i * 10 , removed . 0 ) <nl> expectEqual ( i * 1010 , removed . 1 ) <nl> expectEqual ( 2 , d . count ) <nl> - expectEmpty ( d . indexForKey ( i ) ) <nl> + expectEmpty ( d . index ( forKey : i ) ) <nl> let origKeys : [ Int ] = [ 10 , 20 , 30 ] <nl> expectEqual ( origKeys . filter { $ 0 ! = ( i * 10 ) } , d . keys . sorted ( ) ) <nl> } <nl>
|
stdlib : add first argument labels to Set and Dictionary
|
apple/swift
|
25cc2d56959e0f491f92701de788bdda3d018486
|
2016-02-16T07:48:02Z
|
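The apple/swift record above migrates Swift 2 spellings (`indexForKey`, `removeValueForKey`, `removeAtIndex`) to Swift 3 first-argument-label names (`index(forKey:)`, `removeValue(forKey:)`, `remove(at:)`), keeping each old name as an `@available(*, unavailable, renamed:)` stub so existing call sites get a rename diagnostic instead of a silent break. A rough C++ analogue of the same migrate-with-shim technique, using `[[deprecated]]` (all names here are hypothetical, not from the commit):

```cpp
#include <cstdio>

// Sketch of the rename-with-shim migration pattern, transposed to C++.
// Class and method names are hypothetical.
class Registry {
 public:
  // New spelling: the name states the argument's role.
  int index_for_key(int key) const { return key % 16; }

  // Old spelling kept as a shim: callers still compile, but the build
  // steers them toward the new name.
  [[deprecated("renamed to index_for_key(key)")]]
  int indexForKey(int key) const { return index_for_key(key); }
};

int main() {
  Registry r;
  std::printf("%d\n", r.index_for_key(42));  // new API: prints 10
  return 0;
}
```

Swift's mechanism is stronger than this sketch: `unavailable` turns old calls into a hard error with an automatic fix-it, while `[[deprecated]]` only warns.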
similarity index 100 % <nl> rename from jstests / sharding / gridfs . js <nl> rename to jstests / tool / gridfs . js <nl>
|
SERVER - 15547 move gridfs sharding test to the " tool " tests
|
mongodb/mongo
|
ff94c06cbdbfb6e0c1cd290a0ffa3989a6f68306
|
2014-10-09T19:12:23Z
|
mmm a / lib / AST / Attr . cpp <nl> ppp b / lib / AST / Attr . cpp <nl> void DeclAttributes : : print ( ASTPrinter & Printer , <nl> DA - > print ( Printer ) ; <nl> } <nl> <nl> + if ( ! Options . ExclusiveAttrList . empty ( ) ) <nl> + return ; <nl> + <nl> if ( isConversion ( ) ) <nl> Printer < < " @ conversion " ; <nl> if ( Options . PrintAttrTransparent & & isTransparent ( ) ) <nl> mmm a / lib / AST / LookupVisibleDecls . cpp <nl> ppp b / lib / AST / LookupVisibleDecls . cpp <nl> static void lookupDeclsFromProtocolsBeingConformedTo ( <nl> ProtocolsWithConformances . insert ( Protocols . begin ( ) , Protocols . end ( ) ) ; <nl> } <nl> <nl> - CurrNominal = BaseTy - > getAnyNominal ( ) ; <nl> - <nl> auto TopProtocols = CurrNominal - > getProtocols ( ) ; <nl> SmallVector < ProtocolDecl * , 8 > Worklist ( TopProtocols . begin ( ) , <nl> TopProtocols . end ( ) ) ; <nl> static void lookupDeclsFromProtocolsBeingConformedTo ( <nl> if ( ! Visited . insert ( Proto ) ) <nl> return ; <nl> <nl> - bool ShouldFindValueRequirements = ! ProtocolsWithConformances . count ( Proto ) ; <nl> + bool ShouldFindNonOptionalValueRequirements = <nl> + ! ProtocolsWithConformances . count ( Proto ) ; <nl> DeclVisibilityKind ReasonForThisProtocol ; <nl> if ( Reason = = DeclVisibilityKind : : MemberOfCurrentNominal ) <nl> ReasonForThisProtocol = <nl> static void lookupDeclsFromProtocolsBeingConformedTo ( <nl> } <nl> continue ; <nl> } <nl> - if ( ShouldFindValueRequirements ) { <nl> - if ( auto * VD = dyn_cast < ValueDecl > ( Member ) ) <nl> + if ( auto * VD = dyn_cast < ValueDecl > ( Member ) ) { <nl> + / / Skip non - optional value requirements from protocols that the type <nl> + / / correctly conforms to . This is done so that we don ' t return <nl> + / / duplicate members . <nl> + if ( ShouldFindNonOptionalValueRequirements | | <nl> + VD - > getAttrs ( ) . isOptional ( ) ) { <nl> Consumer . foundDecl ( VD , ReasonForThisProtocol ) ; <nl> + } <nl> } <nl> } <nl> auto Protocols = Proto - > getProtocols ( ) ; <nl> mmm a / test / IDE / complete_override . swift <nl> ppp b / test / IDE / complete_override . swift <nl> <nl> / / RUN : % swift - ide - test - code - completion - source - filename % s - code - completion - token = NESTED_NOMINAL > % t . txt <nl> / / RUN : FileCheck % s - check - prefix = NESTED_NOMINAL < % t . 
txt <nl> <nl> - struct TagPA { } <nl> + @ objc <nl> + class TagPA { } <nl> + @ objc <nl> protocol ProtocolA { <nl> init ( fromProtocolA : Int ) <nl> <nl> func protoAFunc ( ) <nl> + @ optional func protoAFuncOptional ( ) <nl> <nl> @ noreturn <nl> func protoAFuncWithAttr ( ) <nl> protocol ProtocolA { <nl> / / WITH_PA : Begin completions <nl> / / WITH_PA - DAG : Decl [ Constructor ] / Super : init ( fromProtocolA : Int ) { | } { { $ } } <nl> / / WITH_PA - DAG : Decl [ InstanceMethod ] / Super : func protoAFunc ( ) { | } { { $ } } <nl> + / / WITH_PA - DAG : Decl [ InstanceMethod ] / Super : func protoAFuncOptional ( ) { | } { { $ } } <nl> / / WITH_PA - DAG : Decl [ InstanceMethod ] / Super : @ noreturn func protoAFuncWithAttr ( ) { | } { { $ } } <nl> / / WITH_PA : End completions <nl> <nl> protocol ProtocolE { <nl> <nl> func protoEFunc ( ) <nl> <nl> - subscript ( a : TagPB ) - > Int { get } <nl> + subscript ( a : TagPE ) - > Int { get } <nl> <nl> var protoEVarRW : Int { get set } <nl> var protoEVarRO : Int { get } <nl> } <nl> / / WITH_PE : Begin completions <nl> + / / WITH_PE - DAG : Decl [ Constructor ] / Super : init ( fromProtocolE : Int ) { | } { { $ } } <nl> / / WITH_PE - DAG : Decl [ InstanceMethod ] / Super : func protoEFunc ( ) { | } { { $ } } <nl> / / WITH_PE : End completions <nl> <nl> class BaseE : ProtocolE { <nl> <nl> func protoEFunc ( ) { } <nl> <nl> - subscript ( a : TagPB ) - > Int { return 0 } <nl> + subscript ( a : TagPE ) - > Int { return 0 } <nl> <nl> var protoEVarRW : Int { get { return 0 } set { } } <nl> var protoEVarRO : Int { return 0 } <nl> class ProtocolEImpl / * : ProtocolE but does not implement the protocol * / { <nl> <nl> func protoEFunc ( ) { } <nl> <nl> - subscript ( a : TagPB ) - > Int { return 0 } <nl> + subscript ( a : TagPE ) - > Int { return 0 } <nl> <nl> var protoEVarRW : Int { get { return 0 } set { } } <nl> var protoEVarRO : Int { return 0 } <nl> class TestClass_PA : ProtocolA { <nl> <nl> # ^ CLASS_PA ^ # <nl> } <nl> - / / CLASS_PA : Begin completions , 3 items <nl> + / / CLASS_PA : Begin completions , 4 items <nl> <nl> class TestClass_PB : ProtocolB { <nl> # ^ CLASS_PB ^ # <nl> } <nl> - / / CLASS_PB : Begin completions , 5 items <nl> + / / CLASS_PB : Begin completions , 6 items <nl> <nl> class TestClass_PA_PB : ProtocolA , ProtocolB { <nl> # ^ CLASS_PA_PB ^ # <nl> } <nl> - / / CLASS_PA_PB : Begin completions , 5 items <nl> + / / CLASS_PA_PB : Begin completions , 6 items <nl> <nl> class TestClass_BA : BaseA { <nl> # ^ CLASS_BA ^ # <nl> class TestClass_BA : BaseA { <nl> class TestClass_BA_PA : BaseA , ProtocolA { <nl> # ^ CLASS_BA_PA ^ # <nl> } <nl> - / / CLASS_BA_PA : Begin completions , 7 items <nl> + / / CLASS_BA_PA : Begin completions , 8 items <nl> <nl> class TestClass_BA_PB : BaseA , ProtocolB { <nl> # ^ CLASS_BA_PB ^ # <nl> } <nl> - / / CLASS_BA_PB : Begin completions , 9 items <nl> + / / CLASS_BA_PB : Begin completions , 10 items <nl> <nl> class TestClass_BB : BaseB { <nl> # ^ CLASS_BB ^ # <nl> class TestClass_BE : BaseE { <nl> class TestClass_BE_PA : BaseE , ProtocolA { <nl> # ^ CLASS_BE_PA ^ # <nl> } <nl> - / / CLASS_BE_PA : Begin completions , 7 items <nl> + / / CLASS_BE_PA : Begin completions , 8 items <nl> <nl> class TestClass_BE_PA_PE : BaseE , ProtocolA , ProtocolE { <nl> # ^ CLASS_BE_PA_PE ^ # <nl> } <nl> - / / CLASS_BE_PA_PE : Begin completions , 7 items <nl> + / / CLASS_BE_PA_PE : Begin completions , 8 items <nl> <nl> class TestClass_PEI_PE : ProtocolEImpl , ProtocolE { <nl> # ^ CLASS_PEI_PE ^ # <nl>
|
Code completion for overrides : complete @ optional members
|
apple/swift
|
11b23bbf1af578004ecc35e205a171c1af6e96b1
|
2014-06-09T11:40:53Z
|
mmm a / dbms / include / DB / DataStreams / NativeBlockOutputStream . h <nl> ppp b / dbms / include / DB / DataStreams / NativeBlockOutputStream . h <nl> <nl> # pragma once <nl> <nl> # include < DB / DataStreams / IBlockOutputStream . h > <nl> - <nl> + # include < DB / DataTypes / IDataType . h > <nl> <nl> namespace DB <nl> { <nl>
|
add missed header
|
ClickHouse/ClickHouse
|
1ec7e349ac0b0be9d0d8b9034c774590533868b4
|
2017-02-07T10:41:44Z
|
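The ClickHouse record above is a one-line include-what-you-use fix: `NativeBlockOutputStream.h` names types from `IDataType.h`, so it must include that header directly rather than compile only when some earlier include happens to pull it in transitively. A minimal single-file sketch of the failure mode, with file boundaries marked in comments (names are hypothetical):

```cpp
// Single-file sketch; "files" are marked by comments, names hypothetical.

// ---- widget.h ----
struct Widget { int id; };

// ---- stream.h ----
// #include "widget.h"  // the "missed header": stream.h names Widget below,
//                      // so it must include widget.h itself. Relying on a
//                      // transitive include breaks any .cpp that includes
//                      // stream.h first ("unknown type name 'Widget'").
struct Stream {
  int write(const Widget& w) { return w.id; }
};

// ---- main.cpp ----
#include <cstdio>
int main() {
  Stream s;
  std::printf("%d\n", s.write(Widget{7}));  // prints 7
  return 0;
}
```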
mmm a / arangosh / CMakeLists . txt <nl> ppp b / arangosh / CMakeLists . txt <nl> target_link_libraries ( $ { BIN_ARANGOEXPORT } <nl> $ { SYSTEM_LIBRARIES } <nl> boost_system <nl> boost_boost <nl> - ldap <nl> - lber <nl> ) <nl> <nl> install ( <nl>
|
wintendo happiness
|
arangodb/arangodb
|
567fe2a83c2ec0b07cb29bc3206ae92ac68b99ee
|
2017-03-14T00:52:02Z
|
mmm a / xbmc / pvr / channels / PVRChannel . cpp <nl> ppp b / xbmc / pvr / channels / PVRChannel . cpp <nl> bool CPVRChannel : : UpdateFromClient ( const CPVRChannel & channel ) <nl> CSingleLock lock ( m_critSection ) ; <nl> if ( m_strChannelName . empty ( ) ) <nl> SetChannelName ( channel . ClientChannelName ( ) ) ; <nl> - if ( m_strIconPath . empty ( ) | | ( ! m_strIconPath . Equals ( channel . IconPath ( ) ) & & ! IsUserSetIcon ( ) ) ) <nl> + if ( m_strIconPath . empty ( ) | | ! IsUserSetIcon ( ) ) <nl> SetIconPath ( channel . IconPath ( ) ) ; <nl> <nl> return m_bChanged ; <nl>
|
[ pvr ] remove unnecessary condition ( it is checked inside SetIconPath ( ) )
|
xbmc/xbmc
|
86097a7e984e27a18b1e42e6db62e27f04266ad6
|
2014-05-15T10:17:14Z
|
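The xbmc record above deletes a caller-side `!m_strIconPath.Equals(channel.IconPath())` test because, per the commit message, the same equality check already lives inside `SetIconPath()`; duplicating it at the call site adds noise without changing behaviour. A minimal sketch of keeping change-detection in the setter and only policy in the caller (hypothetical names, not the Kodi API):

```cpp
#include <cstdio>
#include <string>

// Sketch: the setter owns change detection; callers only express policy.
// Names are hypothetical.
class Channel {
 public:
  // "Did it actually change?" is checked exactly once, here.
  bool SetIconPath(const std::string& path) {
    if (m_iconPath == path) return false;  // no-op
    m_iconPath = path;
    m_changed = true;
    return true;
  }

  // The caller no longer re-checks equality -- only the policy that a
  // user-chosen icon must not be clobbered.
  void UpdateFromClient(const std::string& clientIcon, bool userSetIcon) {
    if (m_iconPath.empty() || !userSetIcon)
      SetIconPath(clientIcon);
  }

 private:
  std::string m_iconPath;
  bool m_changed = false;
};

int main() {
  Channel c;
  c.UpdateFromClient("icons/ch10.png", /*userSetIcon=*/false);
  std::puts("updated");
  return 0;
}
```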
mmm a / modules / planning / conf / planning . conf <nl> ppp b / modules / planning / conf / planning . conf <nl> <nl> <nl> - - enable_nonscenario_side_pass <nl> - - noenable_smoother_failsafe <nl> + - - noenable_parallel_trajectory_smoothing <nl> \ No newline at end of file <nl> mmm a / modules / planning / open_space / coarse_trajectory_generator / hybrid_a_star . cc <nl> ppp b / modules / planning / open_space / coarse_trajectory_generator / hybrid_a_star . cc <nl> bool HybridAStar : : GetResult ( HybridAStartResult * result ) { <nl> std : : vector < double > hybrid_a_x ; <nl> std : : vector < double > hybrid_a_y ; <nl> std : : vector < double > hybrid_a_phi ; <nl> - std : : vector < bool > hybrid_a_gear ; <nl> while ( current_node - > GetPreNode ( ) ! = nullptr ) { <nl> std : : vector < double > x = current_node - > GetXs ( ) ; <nl> std : : vector < double > y = current_node - > GetYs ( ) ; <nl> bool HybridAStar : : GetResult ( HybridAStartResult * result ) { <nl> hybrid_a_x . insert ( hybrid_a_x . end ( ) , x . begin ( ) , x . end ( ) ) ; <nl> hybrid_a_y . insert ( hybrid_a_y . end ( ) , y . begin ( ) , y . end ( ) ) ; <nl> hybrid_a_phi . insert ( hybrid_a_phi . end ( ) , phi . begin ( ) , phi . end ( ) ) ; <nl> - <nl> - size_t node_step_size = x . size ( ) ; <nl> - for ( size_t i = 0 ; i < node_step_size ; + + i ) { <nl> - hybrid_a_gear . push_back ( current_node - > GetDirec ( ) ) ; <nl> - } <nl> current_node = current_node - > GetPreNode ( ) ; <nl> } <nl> hybrid_a_x . push_back ( current_node - > GetX ( ) ) ; <nl> hybrid_a_y . push_back ( current_node - > GetY ( ) ) ; <nl> hybrid_a_phi . push_back ( current_node - > GetPhi ( ) ) ; <nl> - hybrid_a_gear . push_back ( current_node - > GetDirec ( ) ) ; <nl> std : : reverse ( hybrid_a_x . begin ( ) , hybrid_a_x . end ( ) ) ; <nl> std : : reverse ( hybrid_a_y . begin ( ) , hybrid_a_y . end ( ) ) ; <nl> std : : reverse ( hybrid_a_phi . begin ( ) , hybrid_a_phi . end ( ) ) ; <nl> - std : : reverse ( hybrid_a_gear . begin ( ) , hybrid_a_gear . end ( ) ) ; <nl> ( * result ) . x = hybrid_a_x ; <nl> ( * result ) . y = hybrid_a_y ; <nl> ( * result ) . phi = hybrid_a_phi ; <nl> - ( * result ) . gear = hybrid_a_gear ; <nl> <nl> if ( FLAGS_use_s_curve_speed_smooth ) { <nl> if ( ! GenerateSCurveSpeedAcceleration ( result ) ) { <nl> bool HybridAStar : : TrajectoryPartition ( <nl> const auto & x = result . x ; <nl> const auto & y = result . y ; <nl> const auto & phi = result . phi ; <nl> - const auto & gear = result . gear ; <nl> if ( x . size ( ) ! = y . size ( ) | | x . size ( ) ! = phi . size ( ) ) { <nl> AERROR < < " states sizes are not equal when do trajectory partitioning of " <nl> " Hybrid A Star result " ; <nl> return false ; <nl> } <nl> - size_t horizon = x . size ( ) ; <nl> <nl> + size_t horizon = x . size ( ) ; <nl> partitioned_result - > clear ( ) ; <nl> partitioned_result - > emplace_back ( ) ; <nl> auto * current_traj = & ( partitioned_result - > back ( ) ) ; <nl> - bool current_gear = gear . front ( ) ; <nl> - for ( size_t i = 0 ; i < horizon ; + + i ) { <nl> - if ( gear [ i ] ! = current_gear ) { <nl> + double heading_angle = phi . front ( ) ; <nl> + const Vec2d init_tracking_vector ( x [ 1 ] - x [ 0 ] , y [ 1 ] - y [ 0 ] ) ; <nl> + double tracking_angle = init_tracking_vector . Angle ( ) ; <nl> + bool current_gear = <nl> + std : : abs ( common : : math : : NormalizeAngle ( tracking_angle - heading_angle ) ) < <nl> + ( M_PI / 2 . 
0 ) ; <nl> + for ( size_t i = 0 ; i < horizon - 1 ; + + i ) { <nl> + heading_angle = phi [ i ] ; <nl> + const Vec2d tracking_vector ( x [ i + 1 ] - x [ i ] , y [ i + 1 ] - y [ i ] ) ; <nl> + tracking_angle = tracking_vector . Angle ( ) ; <nl> + bool gear = <nl> + std : : abs ( common : : math : : NormalizeAngle ( tracking_angle - heading_angle ) ) < <nl> + ( M_PI / 2 . 0 ) ; <nl> + if ( gear ! = current_gear ) { <nl> + current_traj - > x . push_back ( x [ i ] ) ; <nl> + current_traj - > y . push_back ( y [ i ] ) ; <nl> + current_traj - > phi . push_back ( phi [ i ] ) ; <nl> partitioned_result - > emplace_back ( ) ; <nl> current_traj = & ( partitioned_result - > back ( ) ) ; <nl> - current_gear = gear [ i ] ; <nl> - / / Use last trajectory point as the start of next trajectory <nl> - current_traj - > x . push_back ( x [ i - 1 ] ) ; <nl> - current_traj - > y . push_back ( y [ i - 1 ] ) ; <nl> - current_traj - > phi . push_back ( phi [ i - 1 ] ) ; <nl> - current_traj - > gear . push_back ( ! gear [ i - 1 ] ) ; <nl> + current_gear = gear ; <nl> } <nl> current_traj - > x . push_back ( x [ i ] ) ; <nl> current_traj - > y . push_back ( y [ i ] ) ; <nl> current_traj - > phi . push_back ( phi [ i ] ) ; <nl> - current_traj - > gear . push_back ( gear [ i ] ) ; <nl> } <nl> + current_traj - > x . push_back ( x . back ( ) ) ; <nl> + current_traj - > y . push_back ( y . back ( ) ) ; <nl> + current_traj - > phi . push_back ( phi . back ( ) ) ; <nl> <nl> / / Retrieve v , a and steer from path <nl> size_t traj_size = partitioned_result - > size ( ) ; <nl> mmm a / modules / planning / open_space / coarse_trajectory_generator / hybrid_a_star . h <nl> ppp b / modules / planning / open_space / coarse_trajectory_generator / hybrid_a_star . h <nl> struct HybridAStartResult { <nl> std : : vector < double > a ; <nl> std : : vector < double > steer ; <nl> std : : vector < double > accumulated_s ; <nl> - std : : vector < bool > gear ; <nl> } ; <nl> <nl> class HybridAStar { <nl> mmm a / modules / tools / open_space_visualization / distance_approach_problem_wrapper . cc <nl> ppp b / modules / tools / open_space_visualization / distance_approach_problem_wrapper . cc <nl> bool DistancePlan ( HybridAStar * hybridA_ptr , ObstacleContainer * obstacles_ptr , <nl> < < " Failed to load open space config file " <nl> < < FLAGS_planner_open_space_config_filename ; <nl> <nl> + std : : string flag_file_path = " / apollo / modules / planning / conf / planning . conf " ; <nl> + google : : SetCommandLineOption ( " flagfile " , flag_file_path . c_str ( ) ) ; <nl> + <nl> HybridAStartResult hybrid_astar_result ; <nl> std : : vector < double > XYbounds_ ( XYbounds , XYbounds + 4 ) ; <nl> if ( ! hybridA_ptr - > Plan ( sx , sy , sphi , ex , ey , ephi , XYbounds_ , <nl> bool DistancePlan ( HybridAStar * hybridA_ptr , ObstacleContainer * obstacles_ptr , <nl> time_result_ds_vec . resize ( size ) ; <nl> dual_l_result_ds_vec . resize ( size ) ; <nl> dual_n_result_ds_vec . resize ( size ) ; <nl> + std : : vector < std : : future < bool > > results ; <nl> + <nl> + / / In parallel <nl> + / / TODO ( Jinyun ) : fix memory issue <nl> + / / for ( size_t i = 0 ; i < size ; + + i ) { <nl> + / / double piece_wise_sx = partition_trajectories [ i ] . x . front ( ) ; <nl> + / / double piece_wise_sy = partition_trajectories [ i ] . y . front ( ) ; <nl> + / / double piece_wise_sphi = partition_trajectories [ i ] . phi . front ( ) ; <nl> + / / double piece_wise_ex = partition_trajectories [ i ] . x . 
back ( ) ; <nl> + / / double piece_wise_ey = partition_trajectories [ i ] . y . back ( ) ; <nl> + / / double piece_wise_ephi = partition_trajectories [ i ] . phi . back ( ) ; <nl> + / / auto * ith_trajectories = & partition_trajectories [ i ] ; <nl> + / / auto * ith_state_result = & state_result_ds_vec [ i ] ; <nl> + / / auto * ith_control_result = & control_result_ds_vec [ i ] ; <nl> + / / auto * ith_time_result = & time_result_ds_vec [ i ] ; <nl> + / / auto * ith_dual_l_result = & dual_l_result_ds_vec [ i ] ; <nl> + / / auto * ith_dual_n_result = & dual_n_result_ds_vec [ i ] ; <nl> + / / results . push_back ( <nl> + / / cyber : : Async ( & DistanceSmoothing , <nl> + / / std : : ref ( planner_open_space_config_ ) , <nl> + / / std : : ref ( * obstacles_ptr ) , piece_wise_sx , <nl> + / / piece_wise_sy , piece_wise_sphi , piece_wise_ex , <nl> + / / piece_wise_ey , piece_wise_ephi , std : : ref ( XYbounds_ ) , <nl> + / / ith_trajectories , ith_state_result , <nl> + / / ith_control_result , ith_time_result , <nl> + / / ith_dual_l_result , ith_dual_n_result ) ) ; <nl> + / / } <nl> + / / for ( auto & result : results ) { <nl> + / / if ( ! result . get ( ) ) { <nl> + / / AERROR < < " Failure in a piece of trajectory . " ; <nl> + / / return false ; <nl> + / / } <nl> + / / } <nl> + / / In for loop <nl> for ( size_t i = 0 ; i < size ; + + i ) { <nl> - if ( ! DistanceSmoothing ( planner_open_space_config_ , * obstacles_ptr , sx , sy , <nl> - sphi , ex , ey , ephi , XYbounds_ , <nl> - & partition_trajectories [ i ] , <nl> + double piece_wise_sx = partition_trajectories [ i ] . x . front ( ) ; <nl> + double piece_wise_sy = partition_trajectories [ i ] . y . front ( ) ; <nl> + double piece_wise_sphi = partition_trajectories [ i ] . phi . front ( ) ; <nl> + double piece_wise_ex = partition_trajectories [ i ] . x . back ( ) ; <nl> + double piece_wise_ey = partition_trajectories [ i ] . y . back ( ) ; <nl> + double piece_wise_ephi = partition_trajectories [ i ] . phi . back ( ) ; <nl> + if ( ! DistanceSmoothing ( planner_open_space_config_ , * obstacles_ptr , <nl> + piece_wise_sx , piece_wise_sy , piece_wise_sphi , <nl> + piece_wise_ex , piece_wise_ey , piece_wise_ephi , <nl> + XYbounds_ , & partition_trajectories [ i ] , <nl> & state_result_ds_vec [ i ] , & control_result_ds_vec [ i ] , <nl> & time_result_ds_vec [ i ] , & dual_l_result_ds_vec [ i ] , <nl> & dual_n_result_ds_vec [ i ] ) ) { <nl> return false ; <nl> } <nl> } <nl> + <nl> / / Retrieve result in one single trajectory <nl> - / / size_t trajectory_point_size = 0 ; <nl> - / / for ( size_t i = 0 ; i < size ; + + i ) { <nl> - / / trajectory_point_size + = <nl> - / / static_cast < size_t > ( state_result_ds_vec [ i ] . cols ( ) ) - 1 ; <nl> - / / } <nl> - / / + + trajectory_point_size ; <nl> - / / Eigen : : MatrixXd state_result_ds ; <nl> - / / state_result_ds . resize ( state_result_ds_vec . front ( ) . rows ( ) , <nl> - / / trajectory_point_size ) ; <nl> - / / Eigen : : MatrixXd control_result_ds ; <nl> - / / control_result_ds . resize ( control_result_ds_vec . front ( ) . rows ( ) , <nl> - / / trajectory_point_size - 1 ) ; <nl> - / / int k = 0 ; <nl> - / / for ( size_t i = 0 ; i < size ; + + i ) { <nl> - / / int state_col_num = state_result_ds_vec [ i ] . cols ( ) - 1 ; <nl> - / / for ( int j = 0 ; j < state_col_num ; + + j ) { <nl> - / / state_result_ds . block < state_result_ds . rows ( ) , 1 > ( 0 , k ) = <nl> - / / state_result_ds_vec [ i ] . block < state_result_ds_vec [ i ] . 
rows ( ) , 1 > ( 0 , <nl> - / / j ) ; <nl> - / / state_result_ds_vec [ i ] . col ( j ) ; <nl> - / / control_result_ds < < control_result_ds_vec [ i ] . col ( j ) ; <nl> - / / + + k ; <nl> - / / } <nl> - / / } <nl> - / / state_result_ds < < state_result_ds_vec . back ( ) . col ( <nl> - / / state_result_ds_vec . back ( ) . cols ( ) - 1 ) ; <nl> + size_t trajectory_point_size = 0 ; <nl> + for ( size_t i = 0 ; i < size ; + + i ) { <nl> + trajectory_point_size + = <nl> + static_cast < size_t > ( state_result_ds_vec [ i ] . cols ( ) ) - 1 ; <nl> + } <nl> + + + trajectory_point_size ; <nl> + <nl> + const long int state_dimension = state_result_ds_vec . front ( ) . rows ( ) ; <nl> + Eigen : : MatrixXd state_result_ds ; <nl> + state_result_ds . resize ( state_dimension , trajectory_point_size ) ; <nl> + long int k = 0 ; <nl> + for ( size_t i = 0 ; i < size ; + + i ) { <nl> + / / leave out the last repeated point so set column minus one <nl> + long int state_col_num = state_result_ds_vec [ i ] . cols ( ) - 1 ; <nl> + for ( long int j = 0 ; j < state_col_num ; + + j ) { <nl> + state_result_ds . col ( k ) = state_result_ds_vec [ i ] . col ( j ) ; <nl> + + + k ; <nl> + } <nl> + } <nl> + state_result_ds . col ( k ) = <nl> + state_result_ds_vec . back ( ) . col ( state_result_ds_vec . back ( ) . cols ( ) - 1 ) ; <nl> + <nl> + const long int control_dimension = control_result_ds_vec . front ( ) . rows ( ) ; <nl> + Eigen : : MatrixXd control_result_ds ; <nl> + control_result_ds . resize ( control_dimension , trajectory_point_size - 1 ) ; <nl> + k = 0 ; <nl> + <nl> + for ( size_t i = 0 ; i < size ; + + i ) { <nl> + long int control_col_num = control_result_ds_vec [ i ] . cols ( ) - 1 ; <nl> + for ( long int j = 0 ; j < control_col_num ; + + j ) { <nl> + control_result_ds . col ( k ) = control_result_ds_vec [ i ] . col ( j ) ; <nl> + + + k ; <nl> + } <nl> + } <nl> <nl> * ( result_ptr - > PrepareHybridAResult ( ) ) = hybrid_astar_result ; <nl> - * ( result_ptr - > PrepareStateResult ( ) ) = state_result_ds_vec [ 0 ] ; <nl> - * ( result_ptr - > PrepareControlResult ( ) ) = control_result_ds_vec [ 0 ] ; <nl> + * ( result_ptr - > PrepareStateResult ( ) ) = state_result_ds ; <nl> + * ( result_ptr - > PrepareControlResult ( ) ) = control_result_ds ; <nl> <nl> } else { <nl> Eigen : : MatrixXd state_result_ds ; <nl> void DistanceGetResult ( ResultContainer * result_ptr , <nl> size_t size = result_ptr - > GetX ( ) - > size ( ) ; <nl> size_t size_by_distance = result_ptr - > PrepareStateResult ( ) - > cols ( ) ; <nl> if ( size ! = size_by_distance ) { <nl> - AINFO < < " sizes by hybrid A and distance approach not consistent " ; <nl> + AERROR < < " sizes by hybrid A and distance approach not consistent " ; <nl> } <nl> - std : : cout < < " return size is " < < size < < std : : endl ; <nl> for ( size_t i = 0 ; i < size ; i + + ) { <nl> x [ i ] = result_ptr - > GetX ( ) - > at ( i ) ; <nl> y [ i ] = result_ptr - > GetY ( ) - > at ( i ) ; <nl>
|
Planning : open_space : trails on smoother parallel
|
ApolloAuto/apollo
|
222bc53c6298571b454dc9fbb82f9ee65b16ce45
|
2019-05-08T20:52:46Z
|
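The Apollo record above drops the per-node `gear` vector from the Hybrid A* result and instead derives driving direction at partition time: a step is forward gear when the angle between the vehicle heading and the point-to-point tracking vector is below 90 degrees. A minimal sketch of that geometric test, assuming only the usual normalize-into-(-pi, pi] behaviour in place of `common::math::NormalizeAngle`:

```cpp
#include <cmath>
#include <cstdio>

// Stand-in for common::math::NormalizeAngle: wrap into (-pi, pi].
double NormalizeAngle(double a) {
  a = std::fmod(a + M_PI, 2.0 * M_PI);
  if (a < 0.0) a += 2.0 * M_PI;
  return a - M_PI;
}

// Forward gear iff the path advances in the direction the vehicle faces:
// the heading/tracking mismatch is under 90 degrees.
bool IsForwardGear(double x0, double y0, double x1, double y1,
                   double heading) {
  const double tracking = std::atan2(y1 - y0, x1 - x0);
  return std::fabs(NormalizeAngle(tracking - heading)) < M_PI / 2.0;
}

int main() {
  // Heading east (0 rad): moving east is forward, moving west is reverse.
  std::printf("%d\n", IsForwardGear(0, 0, 1, 0, 0.0));   // 1
  std::printf("%d\n", IsForwardGear(0, 0, -1, 0, 0.0));  // 0
  return 0;
}
```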
mmm a / . travis . yml <nl> ppp b / . travis . yml <nl> install : <nl> # build script # <nl> # # # # # # # # # # # # # # # # <nl> <nl> - script : | <nl> - if [ [ " $ { SPECIAL } " = = " " ] ] ; then <nl> - # show OS / compiler version <nl> - uname - a <nl> - $ CXX - - version <nl> - <nl> - # compile <nl> - make <nl> - <nl> - # execute unit tests <nl> - test / json_unit " * " <nl> - <nl> - # check if homebrew works ( only checks develop branch ) <nl> - if [ ` which brew ` ] ; then <nl> - brew update ; <nl> - brew tap nlohmann / json ; <nl> - brew install nlohmann_json - - HEAD ; <nl> - brew test nlohmann_json ; <nl> + script : <nl> + # show OS / compiler version <nl> + - uname - a <nl> + - $ CXX - - version <nl> + <nl> + # compile <nl> + - make <nl> + <nl> + # execute unit tests <nl> + - test / json_unit " * " <nl> + <nl> + # check if homebrew works ( only checks develop branch ) <nl> + - if [ ` which brew ` ] ; then <nl> + brew update ; <nl> + brew tap nlohmann / json ; <nl> + brew install nlohmann_json - - HEAD ; <nl> + brew test nlohmann_json ; <nl> fi <nl> - fi <nl> <nl> # language : cpp <nl> # <nl>
|
reverted . travis file
|
nlohmann/json
|
b688119aa480d81153cee037e27ca2933cda15d1
|
2016-08-22T18:54:32Z
|
mmm a / regression / xgboost_reg . h <nl> ppp b / regression / xgboost_reg . h <nl> namespace xgboost { <nl> inline void UpdateInteract ( std : : string action ) { <nl> this - > InteractPredict ( preds_ , * train_ , 0 ) ; <nl> <nl> - if ( action = = " remove " ) { <nl> - base_gbm . DelteBooster ( ) ; return ; <nl> - } <nl> - <nl> int buffer_offset = static_cast < int > ( train_ - > Size ( ) ) ; <nl> for ( size_t i = 0 ; i < evals_ . size ( ) ; + + i ) { <nl> std : : vector < float > & preds = this - > eval_preds_ [ i ] ; <nl> this - > InteractPredict ( preds , * evals_ [ i ] , buffer_offset ) ; <nl> buffer_offset + = static_cast < int > ( evals_ [ i ] - > Size ( ) ) ; <nl> } <nl> + <nl> + if ( action = = " remove " ) { <nl> + base_gbm . DelteBooster ( ) ; return ; <nl> + } <nl> <nl> this - > GetGradient ( preds_ , train_ - > labels , grad_ , hess_ ) ; <nl> std : : vector < unsigned > root_index ; <nl>
|
fixed remove bug
|
dmlc/xgboost
|
d3fe4b26a919326ba346d1d7defb12de116e49fa
|
2014-03-13T20:42:40Z
|
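The xgboost record above is purely an ordering fix: in the interactive `remove` path, the booster was deleted and the function returned before the evaluation sets had been re-predicted, skipping work that still needed the booster. The fix moves the destructive branch after every dependent read. A small illustrative sketch of that read-before-destroy discipline (hypothetical names, not the xgboost API):

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Sketch of the ordering rule: finish every read that depends on a
// resource before the branch that destroys it. Names are hypothetical.
struct Booster {
  double Predict(double x) const { return 2.0 * x; }
};

void UpdateInteract(const std::string& action, Booster*& booster,
                    const std::vector<double>& evalSet) {
  // 1) All prediction passes run first -- they still need the booster.
  for (double x : evalSet) std::printf("pred=%g\n", booster->Predict(x));

  // 2) Only then may the destructive action run and early-return.
  //    (The buggy version returned here *before* the loop above.)
  if (action == "remove") {
    delete booster;
    booster = nullptr;
    return;
  }
  // ... gradient update would follow for other actions ...
}

int main() {
  Booster* b = new Booster();
  UpdateInteract("remove", b, {1.0, 2.0});
  return 0;
}
```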
mmm a / src / objects . cc <nl> ppp b / src / objects . cc <nl> Object * HashTable < Shape , Key > : : EnsureCapacity ( int n , Key key ) { <nl> / / 50 % is still free after adding n elements and <nl> / / at most 50 % of the free elements are deleted elements . <nl> if ( ( nof + ( nof > > 1 ) < = capacity ) & & <nl> - ( nod < = ( capacity - nof ) > > 1 ) ) return this ; <nl> + ( nod < = ( capacity - nof ) > > 1 ) ) return this ; <nl> <nl> Object * obj = Allocate ( nof * 2 ) ; <nl> if ( obj - > IsFailure ( ) ) return obj ; <nl> mmm a / src / runtime . cc <nl> ppp b / src / runtime . cc <nl> class ArrayConcatVisitor { <nl> } <nl> } <nl> <nl> - Handle < FixedArray > storage ( ) { return storage_ ; } <nl> - <nl> private : <nl> Handle < FixedArray > storage_ ; <nl> / / Limit on the accepted indices . Elements with indices larger than the <nl> static Object * Runtime_ArrayConcat ( Arguments args ) { <nl> IterateArguments ( arguments , & visitor ) ; <nl> <nl> result - > set_length ( * len ) ; <nl> - / / Please note the storage might have changed in the visitor . <nl> - result - > set_elements ( * visitor . storage ( ) ) ; <nl> + result - > set_elements ( * storage ) ; <nl> <nl> return * result ; <nl> } <nl>
|
Temporary backing out r3538 to see impact on DOM benchmarks .
|
v8/v8
|
6cabd63572c0bc21fd6fc326b63f89bc0181462f
|
2010-01-12T16:57:18Z
|
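The v8 record above temporarily backs out a fix whose point was that `ArrayConcatVisitor` may replace its backing storage while elements are appended, so the caller must re-read `visitor.storage()` after iteration instead of keeping a pre-iteration handle. A minimal sketch of the underlying stale-handle hazard, using `std::vector` reallocation as a stand-in (assumption: the same grow-invalidates-references rule):

```cpp
#include <cstdio>
#include <vector>

// Sketch of the stale-handle hazard: a pointer captured before growth can
// dangle once the container reallocates, so re-fetch storage *after* the
// visitor has run.
struct Visitor {
  std::vector<int> storage;
  void visit(int v) { storage.push_back(v); }  // may reallocate
};

int main() {
  Visitor visitor;
  visitor.storage.reserve(1);

  // WRONG: capturing the buffer before visiting; push_back may move it.
  // const int* stale = visitor.storage.data();

  const int values[] = {1, 2, 3, 4};
  for (int v : values) visitor.visit(v);

  // RIGHT: read the (possibly moved) storage only after visiting.
  const std::vector<int>& result = visitor.storage;
  std::printf("size=%zu last=%d\n", result.size(), result.back());
  return 0;
}
```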
new file mode 100644 <nl> index 000000000000 . . 6acd3d9479e7 <nl> mmm / dev / null <nl> ppp b / test / expect / TestJit . test_peephole . expect <nl> <nl> + graph ( % 0 : Double ( 1 ) <nl> + % 1 : Double ( 1 ) ) { <nl> + return ( % 0 ) ; <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . 6f399dac5244 <nl> mmm / dev / null <nl> ppp b / test / expect / TestJit . test_peephole_cuda - different_device . expect <nl> <nl> + graph ( % 0 : Double ( 1 ) <nl> + % 1 : Double ( 1 ) ) { <nl> + % 2 : Double ( 1 ) = aten : : type_as ( % 0 , % 1 ) <nl> + return ( % 2 ) ; <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . 6acd3d9479e7 <nl> mmm / dev / null <nl> ppp b / test / expect / TestJit . test_peephole_cuda - same_device . expect <nl> <nl> + graph ( % 0 : Double ( 1 ) <nl> + % 1 : Double ( 1 ) ) { <nl> + return ( % 0 ) ; <nl> + } <nl> mmm a / test / test_jit . py <nl> ppp b / test / test_jit . py <nl> def f ( x , y ) : <nl> self . assertExpectedGraph ( trace ) <nl> self . assertExportImport ( trace , ( x , y ) ) <nl> <nl> + def test_peephole ( self ) : <nl> + a = torch . tensor ( [ 0 . 4 ] , requires_grad = True ) <nl> + b = torch . tensor ( [ 0 . 7 ] , requires_grad = True ) <nl> + c = torch . tensor ( [ 0 ] , dtype = torch . int32 ) <nl> + <nl> + def f ( x , y ) : <nl> + return x . type_as ( y ) <nl> + <nl> + trace , z = torch . jit . get_trace_graph ( f , ( a , b ) ) <nl> + self . run_pass ( ' peephole ' , trace ) <nl> + self . assertExpectedGraph ( trace ) <nl> + trace , z = torch . jit . get_trace_graph ( f , ( a , c ) ) <nl> + s = str ( trace ) <nl> + self . run_pass ( ' peephole ' , trace ) <nl> + self . assertEqual ( s , str ( trace ) ) <nl> + <nl> + def test_peephole_dynamic ( self ) : <nl> + def f ( x , y ) : <nl> + return x . type_as ( y ) <nl> + <nl> + fn = torch . jit . script ( f ) <nl> + s = str ( fn . graph ) <nl> + torch . _C . _jit_pass_peephole ( fn . graph ) <nl> + self . assertEqual ( s , str ( fn . graph ) ) <nl> + <nl> + @ unittest . skipIf ( not RUN_CUDA , " cpp tests require CUDA " ) <nl> + def test_peephole_cuda ( self ) : <nl> + a = torch . tensor ( [ 0 . 4 ] , requires_grad = True , device = ' cpu ' ) <nl> + b = torch . tensor ( [ 0 . 7 ] , requires_grad = True , device = ' cuda ' ) <nl> + c = torch . tensor ( [ 0 . 7 ] , requires_grad = True , device = ' cuda ' ) <nl> + <nl> + def f ( x , y ) : <nl> + return x . type_as ( y ) <nl> + <nl> + trace , z = torch . jit . get_trace_graph ( f , ( a , c ) ) <nl> + s = str ( trace ) <nl> + self . run_pass ( ' peephole ' , trace ) <nl> + self . assertEqual ( s , str ( trace ) ) <nl> + trace , z = torch . jit . get_trace_graph ( f , ( b , c ) ) <nl> + self . run_pass ( ' peephole ' , trace ) <nl> + self . assertExpectedGraph ( trace , subname = " same_device " ) <nl> + <nl> def test_index ( self ) : <nl> x = torch . tensor ( [ 0 . 4 ] , requires_grad = True ) <nl> y = torch . tensor ( [ 0 ] , dtype = torch . int64 ) <nl> mmm a / torch / csrc / jit / passes / peephole . cpp <nl> ppp b / torch / csrc / jit / passes / peephole . cpp <nl> void PeepholeOptimize ( Block * block ) { <nl> / / Let DCE clean up any unused nodes at this point <nl> } <nl> } break ; <nl> + case aten : : type_as : { <nl> + JIT_ASSERT ( n - > inputs ( ) . size ( ) = = 2 ) ; <nl> + Value * lhs = n - > input ( 0 ) ; <nl> + Value * rhs = n - > input ( 1 ) ; <nl> + / / If LHS and RHS have the same static type , remove the type_as operator . 
<nl> + if ( lhs - > type ( ) - > kind ( ) = = TypeKind : : TensorType & & <nl> + rhs - > type ( ) - > kind ( ) = = TypeKind : : TensorType ) { <nl> + auto ltype = ( * lhs - > type ( ) ) . cast < TensorType > ( ) ; <nl> + auto rtype = ( * rhs - > type ( ) ) . cast < TensorType > ( ) ; <nl> + if ( ltype - > device ( ) = = rtype - > device ( ) & & <nl> + ltype - > scalarType ( ) = = rtype - > scalarType ( ) ) { <nl> + n - > output ( ) - > replaceAllUsesWith ( lhs ) ; <nl> + } <nl> + } <nl> + } break ; <nl> / / Fuse mm + add into addmm <nl> case aten : : add : { <nl> / / Must have two inputs <nl>
|
Add peephole optimization for type_as operators . ( )
|
pytorch/pytorch
|
66fe3b5c068e41eabebe991afe38db00f10ffb5a
|
2018-07-16T17:26:56Z
|
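The pytorch record above adds a peephole rule to the JIT: `x.type_as(y)` is the identity whenever the statically known dtype and device of `x` and `y` already match, so the node's output is rewired to `x` and dead-code elimination drops the op. A standalone sketch of that rule over a toy IR (node and type shapes here are hypothetical, far simpler than TorchScript's):

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Toy IR node for sketching the rule; real TorchScript nodes are richer.
struct Value { std::string dtype; std::string device; };

struct Node {
  std::string op;                // e.g. "type_as"
  std::vector<Value*> inputs;    // {lhs, rhs}
  Value* output;
  Value* replacement;            // set when the node becomes a no-op
};

// Peephole: type_as(lhs, rhs) is the identity when lhs already has rhs's
// static dtype and device -- rewire users of the output to lhs and let
// dead-code elimination drop the node.
void PeepholeTypeAs(Node& n) {
  if (n.op != "type_as" || n.inputs.size() != 2) return;
  Value* lhs = n.inputs[0];
  Value* rhs = n.inputs[1];
  if (lhs->dtype == rhs->dtype && lhs->device == rhs->device)
    n.replacement = lhs;
}

int main() {
  Value a{"double", "cpu"}, b{"double", "cpu"}, out{"double", "cpu"};
  Node n{"type_as", {&a, &b}, &out, nullptr};
  PeepholeTypeAs(n);
  std::printf("eliminated=%d\n", n.replacement != nullptr);  // prints 1
  return 0;
}
```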