Dataset columns: `diff` (string, 41 – 2.03M chars) · `msg` (string, 1 – 1.5k chars, ⌀ = may be null) · `repo` (string, 5 – 40 chars) · `sha` (string, 40 chars) · `time` (string, 20 chars). Each row below shows the `diff`, followed by a `| msg | repo | sha | time |` line.
--- a/xbmc/cores/dvdplayer/DVDDemuxers/DVDDemuxFFmpeg.cpp
+++ b/xbmc/cores/dvdplayer/DVDDemuxers/DVDDemuxFFmpeg.cpp
@@ ... @@ double CDVDDemuxFFmpeg::ConvertTimestamp(int64_t pts, int den, int num)

   if (timestamp > starttime)
     timestamp -= starttime;
-  else if (timestamp + 0.1f > starttime)
+  // allow for largest possible difference in pts and dts for a single packet
+  else if (timestamp + 0.5f > starttime)
     timestamp = 0;

   return timestamp * DVD_TIME_BASE;

| Merge pull request from popcornmix/pts_dts_adjust | xbmc/xbmc | 12090bf5a1e270534505c1f5dceed12b63db585d | 2013-06-14T10:47:50Z |
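The row above changes only a tolerance constant: timestamps slightly older than the stream start are clamped to 0, and the window grows from 0.1 s to 0.5 s to cover the largest pts/dts gap a single packet can have. Below is a minimal self-contained sketch of that guard; the function name, the `double` arguments, and the `DVD_TIME_BASE` value are illustrative stand-ins, not the real XBMC code.

```cpp
// Sketch of the clamping logic above, outside the XBMC codebase.
// DVD_TIME_BASE is assumed to be the microsecond time base used by the player.
static const double DVD_TIME_BASE = 1000000.0;

double ConvertTimestampSketch(double timestamp, double starttime)
{
  if (timestamp > starttime)
    timestamp -= starttime;
  // allow for the largest possible pts/dts difference in a single packet:
  // a 0.5 s window instead of the old 0.1 s
  else if (timestamp + 0.5 > starttime)
    timestamp = 0;

  return timestamp * DVD_TIME_BASE;
}
```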
--- a/cocos/network/HttpClient-android.cpp
+++ b/cocos/network/HttpClient-android.cpp
@@ ... @@ class HttpURLConnection

     ~HttpURLConnection()
     {
-
+        if (_httpURLConnection != nullptr)
+        {
+            JniHelper::getEnv()->DeleteGlobalRef(_httpURLConnection);
+        }
     }

     void setRequestMethod(const char* method)

| Merge pull request from pajeroquan/v3 | cocos2d/cocos2d-x | 5ee22c4142ef688b29c85c795c87a0f69c394260 | 2015-06-17T07:14:36Z |
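The fix above plugs a JNI leak: a `jobject` held as a global reference survives until `DeleteGlobalRef` is called, so the previously empty destructor leaked one reference per connection. A hedged sketch of the same pattern as a small RAII holder follows; this class is illustrative and not part of cocos2d-x, and it assumes construction and destruction happen on the same attached thread, which is why caching `env` is acceptable here.

```cpp
#include <jni.h>

// Illustrative RAII wrapper: owns a JNI global reference and releases it in
// the destructor, mirroring the null-checked DeleteGlobalRef in the patch.
class GlobalRefHolder
{
public:
    GlobalRefHolder(JNIEnv* env, jobject local)
        : _env(env), _ref(env->NewGlobalRef(local)) {}

    ~GlobalRefHolder()
    {
        if (_ref != nullptr)           // same null check as the patch above
            _env->DeleteGlobalRef(_ref);
    }

    jobject get() const { return _ref; }

private:
    JNIEnv* _env;   // assumed valid for the holder's whole lifetime
    jobject _ref;
};
```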
--- a/tensorflow/core/kernels/argmax_op.cc
+++ b/tensorflow/core/kernels/argmax_op.cc
@@ ... @@ class ArgOp : public OpKernel {
     Tensor* output = nullptr;
     OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));

+    if (output_shape.num_elements() == 0) {
+      return;
+    }
+
 #define HANDLE_DIM(NDIM)                                      \
   case NDIM:                                                  \
     ArgFunctor::Reduce##NDIM(context->eigen_device<Device>(), \
--- a/tensorflow/python/kernel_tests/argmax_op_test.py
+++ b/tensorflow/python/kernel_tests/argmax_op_test.py
@@ ... @@

 import numpy as np

 from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.platform import test

@@ ... @@ def testDefaultAxis(self):
     ans = op([1]).eval()
     self.assertAllEqual(ans, 0)

+  def testOutputEmpty(self):
+    with self.test_session():
+      for op in math_ops.argmin, math_ops.argmax:
+        ret = op(array_ops.zeros(shape=[1, 0, 2]), axis=-1).eval()
+        self.assertEqual(ret.shape, (1, 0))
+

 if __name__ == "__main__":
   test.main()

| Avoid argmax/argmin divide by 0 when the output tensor is an empty tensor. | tensorflow/tensorflow | 57e5dfa76a32ff0ee6ec4b72a2461487b7969a3e | 2018-07-21T00:18:04Z |
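The kernel change above returns early when the output tensor has zero elements, so the reduction never runs on an empty shape; the test pins the behavior for shape [1, 0, 2] reduced over the last axis, whose output shape is (1, 0). A plain-C++ sketch of the same guard over a flattened 2-D view of the data (illustrative only, not the TensorFlow kernel):

```cpp
#include <cstddef>
#include <vector>

// Argmax over the last axis of a rows x cols view. When the output has zero
// elements (rows == 0, e.g. input {1, 0, 2} flattened to 0 x 2), there is
// nothing to compute, and running the reduction anyway was what triggered
// the divide-by-zero the patch avoids.
std::vector<long> ArgMaxLastAxis(const std::vector<float>& data,
                                 std::size_t rows, std::size_t cols)
{
  std::vector<long> out;
  if (rows == 0)        // mirrors output_shape.num_elements() == 0
    return out;         // early return, exactly like the patch

  out.reserve(rows);
  for (std::size_t r = 0; r < rows; ++r) {
    long best = 0;
    for (std::size_t c = 1; c < cols; ++c)
      if (data[r * cols + c] > data[r * cols + best])
        best = static_cast<long>(c);
    out.push_back(best);
  }
  return out;
}
```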
--- a/README.md
+++ b/README.md
@@ ... @@ Remark:

 ## Releases

+* 2017-04-18, [Release v2.0-r1][r2.0r1], 2.0 release1, 2.0.239, 86515 lines.
 * 2017-03-03, [Release v2.0-r0][r2.0r0], 2.0 release0, 2.0.234, 86373 lines.
 * 2017-01-18, [Release v2.0-b4][r2.0b4], 2.0 beta4, 2.0.230, 86334 lines.
 * 2016-11-13, [Release v2.0-b3][r2.0b3], 2.0 beta3, 2.0.223, 86685 lines.
@@ ... @@ Remark:

 ## History

+* <strong>v2.0, 2017-04-18, [2.0 release1 (2.0.239)][r2.0r1] released. 86515 lines.</strong>
 * v2.0, 2017-04-18, Fix [#848][bug #848], crash at HTTP fast buffer grow. 2.0.239
 * v2.0, 2017-04-15, Fix [#844][bug #844], support Haivision encoder. 2.0.238
 * v2.0, 2017-04-15, Merge [#846][bug #846], fix fd leak for FLV stream caster. 2.0.237
@@ ... @@ Winlin

 [exo #828]: https://github.com/google/ExoPlayer/pull/828

+[r2.0r1]: https://github.com/ossrs/srs/releases/tag/v2.0-r1
 [r2.0r0]: https://github.com/ossrs/srs/releases/tag/v2.0-r0
 [r2.0b4]: https://github.com/ossrs/srs/releases/tag/v2.0-b4
 [r2.0b3]: https://github.com/ossrs/srs/releases/tag/v2.0-b3
@@ ... @@ Winlin
 [branch2]: https://github.com/ossrs/srs/tree/2.0release
 [release2]: https://github.com/ossrs/srs/wiki/v1_CN_Product#release20
 [release3]: https://github.com/ossrs/srs/wiki/v1_CN_Product#release30
-[centos0]: http://winlinvip.github.io/srs.release/releases/files/SRS-CentOS6-x86_64-2.0.234.zip
-[centos1]: http://www.ossrs.net/srs.release/releases/files/SRS-CentOS6-x86_64-2.0.234.zip
+[centos0]: http://winlinvip.github.io/srs.release/releases/files/SRS-CentOS6-x86_64-2.0.239.zip
+[centos1]: http://www.ossrs.net/srs.release/releases/files/SRS-CentOS6-x86_64-2.0.239.zip

| Release 2.0r1, 2.0.239 | ossrs/srs | 4167715153fc02dfeccccb9f4f010f7747814346 | 2017-04-18T13:08:31Z |
--- a/.gitignore
+++ b/.gitignore
@@ ... @@ lib/V8/v8-json.cpp
 Installation/arangod.conf
 Installation/epm/arangodb.sublist
 nbproject/
-
--- a/CHANGELOG
+++ b/CHANGELOG
@@ ... @@ v1.2.alpha (XXXX-XX-XX)
   figure out these edges' roles. Additionally, bidirectional edges return the
   `_bidirectional` attribute with a value of `true`.

-v1.1.1 (xxxx-xx-xx)
+
+v1.1.1 (2012-12-18)
 -------------------

+* fixed issue #321: Problem upgrading arangodb 1.0.4 to 1.1.0 with Homebrew (OSX 10.8.2)
+
 * fixed issue #230: add navigation and search for online documentation

 * fixed issue #315: Strange result in PATH
@@ ... @@ v1.1.1 (xxxx-xx-xx)
 v1.1.0 (2012-12-05)
 -------------------

+* WARNING:
+  arangod now performs a database version check at startup. It will look for a file
+  named "VERSION" in its database directory. If the file is not present, arangod will
+  perform an automatic upgrade of the database directory. This should be the normal
+  case when upgrading from ArangoDB 1.0 to ArangoDB 1.1.
+
+  If the VERSION file is present but is from an older version of ArangoDB, arangod
+  will refuse to start and ask the user to run a manual upgrade first. A manual upgrade
+  can be performed by starting arangod with the option `--upgrade`.
+
+  This upgrade procedure shall ensure that users have full control over when they
+  perform any updates/upgrades of their data, and can plan backups accordingly. The
+  procedure also guarantees that the server is not run without any required system
+  collections or with in incompatible data state.
+
 * added AQL function DOCUMENT() to retrieve a document by its _id value

 * fixed issue #311: fixed segfault on unload

 * fixed issue #309: renamed stub "import" button from web interface

-* fixed issue #307: show waitForSync in collections overview in web interface,
-  make waitForSync and journalSize editable in web interface
+* fixed issue #307: added WaitForSync column in collections list in in web interface

 * fixed issue #306: naming in web interface
@@ ... @@ v1.1.0 (2012-12-05)

 * fixed issue #296: system collections not usable from AQL

+* fixed issue #295: deadlock on shutdown
+
 * added collection type label to web interface

 * fixed issue #290: the web interface now disallows creating non-edges in edge collections
@@ ... @@ v1.1.0 (2012-12-05)

 * added UPGRADING help text

-* WARNING:
-  arangod now performs a database version check at startup. It will look for a file
-  named "VERSION" in its database directory. If the file is not present, arangod will
-  perform an automatic upgrade of the database directory. This should be the normal
-  case when upgrading from ArangoDB 1.0 to ArangoDB 1.1.
-
-  If the VERSION file is present but is from an older version of ArangoDB, arangod
-  will refuse to start and ask the user to run a manual upgrade first. A manual upgrade
-  can be performed by starting arangod with the option `--upgrade`.
-
-  This upgrade procedure shall ensure that users have full control over when they
-  perform any updates/upgrades of their data, and can plan backups accordingly. The
-  procedure also guarantees that the server is not run without any required system
-  collections or with in incompatible data state.
-
 * fixed issue #284: fixed Javascript errors when adding edges/vertices without own
   attributes
@@ ... @@ v1.1.0 (2012-12-05)
   the REST edge create method. edges with invalid collection ids or names in the
   "from" or "to" values will be rejected and not saved

+
 v1.1.beta2 (2012-11-13)
 -----------------------
@@ ... @@ v1.1.beta1 (2012-10-24)
 as before. The Javascript API in the shell also offers a new update() method in extension to
 the previously existing replace() method.

+
 v1.0.4 (2012-11-12)
 -------------------

-* strange error message in arangosh 1.0.3 at startup
+* issue #275: strange error message in arangosh 1.0.3 at startup


 v1.0.3 (2012-11-08)
@@ ... @@ v1.0.2 (2012-10-22)

 * added AQL function TO_LIST

+
 v1.0.1 (2012-09-30)
 -------------------
new file mode 100644
index 00000000000..5d04f8ce629
--- /dev/null
+++ b/Documentation/DbaManual/IndexBitArray.md
+BitArray Indexes {#IndexBitArray}
+=================================
+
+@EMBEDTOC{IndexBitArrayTOC}
+
+Introduction to Bit-Array Indexes {#IndexBitArrayIntro}
+=======================================================
+
+It is possible to define a bit-array index on one or more attributes (or paths)
+of a documents.
+
+Accessing BitArray Indexes from the Shell {#IndexBitArrayShell}
+---------------------------------------------------------------
+
+@anchor IndexBitArrayShellEnsureBitarray
+@copydetails JS_EnsureBitarrayVocbaseCol
new file mode 100644
index 00000000000..93236057bdb
--- /dev/null
+++ b/Documentation/DbaManual/IndexBitArrayTOC.md
+TOC {#IndexBitArrayTOC}
+=======================
+
+- @ref IndexBitArray
+ - @ref IndexBitArrayIntro
+ - @ref IndexBitArrayShell
+  - @ref IndexBitArrayShellEnsureBitarray "collection.ensureBitarray"
new file mode 100644
index 00000000000..be3de15ece6
--- /dev/null
+++ b/Documentation/InstallationManual/InstallationManual.md
+ArangoDB's Installation Manual (@VERSION) {#InstallManual}
+==========================================================
+
+@NAVIGATE_InstallManual
+
+@if LATEX
+- @ref Installing
+- @ref Compiling
+@else
+@CHAPTER_REF{Installing}
+@CHAPTER_REF{Compiling}
+@endif
new file mode 100644
index 00000000000..89390d0e0e0
--- /dev/null
+++ b/Documentation/InstallationManual/Installing.md
+Installing ArangoDB {#Installing}
+=================================
+
+@NAVIGATE_Installing
+@EMBEDTOC{InstallingTOC}
+
+Linux {#InstallingLinux}
+========================
+
+You can find binary packages for various Linux distributions here:
+
+@EXTREF{http://www.arangodb.org/download/,http://www.arangodb.org/download/}
+
+We provide packages for
+
+- Centos
+- Debian
+- Fedora
+- Mandriva
+- OpenSUSE
+- RedHat RHEL
+- SUSE SLE
+- Ubuntu
+
+Using a Package Manager to install ArangoDB {#InstallingLinuxPackageManager}
+-----------------------------------------------------------------------------
+
+Follow the instructions on the download page to use your favorite package manager
+for the major distributions. After setting up the ArangoDB repository you can then
+easily install ArangoDB using yum, aptitude, urpmi, or zypper.
+
+### Gentoo
+
+Please use the
+@EXTREF_S{https://github.com/mgiken/portage-overlay/tree/master/dev-db/ArangoDB,portage}
+provided by @@mgiken.
+
+### Linux-Mint {#InstallingDebian}
+
+Download and import GPG-PublicKey
+
+    wget -O RPM-GPG-KEY-www.arangodb.org http://www.arangodb.org/repositories/PublicKey
+    apt-key add RPM-GPG-KEY-www.arangodb.org
+
+Add the corresponding repository in file `/etc/apt/sources.list`:
+
+    deb http://www.arangodb.org/repositories LinuxMint-13 main
+
+Update the repository data:
+
+    aptitude update
+
+Now you should be able to search for arangodb:
+
+    aptitude search arangodb
+
+In order to install arangodb:
+
+    aptitude install arangodb
+
+Using Vagrant and Chef
+----------------------
+
+A Chef recipe is available from jbianquetti at
+
+    https://github.com/jbianquetti/chef-arangodb
+
+Mac OS X {#InstallingMacOSX}
+============================
+
+You can find the Mac OS X packages here:
+
+    http://www.arangodb.org/repositories/MacOSX
+
+Homebrew {#InstallingMacOSXHomebrew}
+------------------------------------
+
+If you are using @EXTREF{http://mxcl.github.com/homebrew/,homebrew},
+then you can install the ArangoDB using `brew` as follows:
+
+    brew install arangodb
+
+This will install the current stable version of ArangoDB and all
+dependencies within your Homebrew tree. Note that the server will be
+installed as
+
+    /usr/local/sbin/arangod
+
+The ArangoDB shell will be install as
+
+    /usr/local/bin/arangosh
+
+If you want to install the latest version use:
+
+    brew install --HEAD arangodb
+
+Apples App Store {#InstallingMacOSXAppStore}
+--------------------------------------------
+
+ArangoDB is available in Apple's App-Store. Please note, that it
+sometimes take a few days or weeks until the latest versions will be
+available.
new file mode 100644
index 00000000000..8787e15c077
--- /dev/null
+++ b/Documentation/InstallationManual/InstallingTOC.md
+TOC {#InstallingTOC}
+====================
+
+- @ref Installing
+ - @ref InstallingLinux
+  - @ref InstallingLinuxPackageManager
+  - @ref InstallingDebian
+ - @ref InstallingMacOSX
+  - @ref InstallingMacOSXHomebrew
+  - @ref InstallingMacOSXAppStore
--- a/Documentation/Makefile.files
+++ b/Documentation/Makefile.files
@@ ... @@
 ## --SECTION--                                                   DOCUMENTATION
 ## -----------------------------------------------------------------------------

+################################################################################
+### @brief TOC files
+################################################################################
+
+DOXYGEN_TOC = \
+    Documentation/InstallationManual/InstallationManual.md \
+    Documentation/Manual/Home.md \
+    Documentation/UserManual/UserManual.md
+
 ################################################################################
 ### @brief JavaScript files
 ################################################################################
@@ ... @@ WIKI = \
     ImplementorManual \
     ImpManual \
     ImpManualBasics \
+    IndexBitArray \
     IndexCap \
     IndexCapHttp \
     IndexGeo \
@@ ... @@ Doxygen/js/server/modules/%.c: @srcdir@/js/server/modules/%.js Doxygen/.setup-di
 Doxygen/xml/%.md: Doxygen/xml/%.xml
     @python @top_srcdir@/Doxygen/Scripts/xml2md.py $< > $@

+################################################################################
+### @brief doxygen toc
+################################################################################
+
+.PHONY: doxygen-toc
+
+doxygen-toc:
+    python @top_srcdir@/Documentation/Scripts/generateTOC.py $(DOXYGEN_TOC) >> Doxygen/toc.doxy.tmp
+    cmp -s Doxygen/toc.doxy Doxygen/toc.doxy.tmp || mv Doxygen/toc.doxy.tmp Doxygen/toc.doxy
+
+Doxygen/toc.doxy: doxygen-toc
+
 ################################################################################
 ### @brief doxygen
 ################################################################################

 .PHONY: doxygen

-Doxygen/arango-html.doxy: Documentation/arango.template
+Doxygen/arango-html.doxy: Documentation/arango.template Doxygen/toc.doxy
     sed -e 's:GENERATE_HTML *= *NO:GENERATE_HTML = YES:' -e 's:ENABLED_SECTIONS *=:ENABLED_SECTIONS = HTML:' < $< > $@
-    $(MAKE) lib/BasicsC/voc-errors.h
+    cat Doxygen/toc.doxy >> $@

 doxygen: Doxygen/.setup-directories Doxygen/arango-html.doxy $(DOXYGEN)
+    $(MAKE) lib/BasicsC/voc-errors.h
     doxygen Doxygen/arango-html.doxy > /dev/null

     @for w in $(WIKI); do @top_srcdir@/Documentation/Scripts/html2html.sh --keep-title Doxygen/html/$$w.html Doxygen/website/$$w.html; done
@@ ... @@ doxygen: Doxygen/.setup-directories Doxygen/arango-html.doxy $(DOXYGEN)
 ### @brief wiki
 ################################################################################

-.PHONY: wiki wiki2
+.PHONY: wiki wiki-raw

-Doxygen/arango-xml.doxy: Documentation/arango.template
+Doxygen/arango-xml.doxy: Documentation/arango.template Doxygen/toc.doxy
     sed -e 's:GENERATE_XML *= *NO:GENERATE_XML = YES:' -e 's:ENABLED_SECTIONS *=:ENABLED_SECTIONS = XML:' < $< > $@
-    $(MAKE) lib/BasicsC/voc-errors.h
+    cat Doxygen/toc.doxy >> $@

-wiki: wiki2
+wiki: wiki-raw
     @test -d Doxygen/wiki || mkdir Doxygen/wiki
     @for w in $(WIKI); do python @top_srcdir@/Documentation/Scripts/xml2md.py Doxygen/xml/$$w.xml > Doxygen/xml/$$w.md; done
     @for w in $(WIKI); do @top_srcdir@/Documentation/Scripts/fixmd.sh Doxygen/xml/$$w.md; done

-wiki2: Doxygen/arango-xml.doxy $(DOXYGEN)
+wiki-raw: Doxygen/arango-xml.doxy $(DOXYGEN)
+    $(MAKE) lib/BasicsC/voc-errors.h
     doxygen Doxygen/arango-xml.doxy > /dev/null
@@ ... @@ wiki2: Doxygen/arango-xml.doxy $(DOXYGEN)

 .PHONY: latex

-Doxygen/arango-latex.doxy: Documentation/arango.template
+Doxygen/arango-latex.doxy: Documentation/arango.template Doxygen/toc.doxy
     sed -e 's:GENERATE_LATEX *= *NO:GENERATE_LATEX = YES:' -e 's:ENABLED_SECTIONS *=:ENABLED_SECTIONS = LATEX:' < $< > $@
-    $(MAKE) lib/BasicsC/voc-errors.h
+    cat Doxygen/toc.doxy >> $@

 latex: Doxygen/.setup-directories Doxygen/arango-latex.doxy $(DOXYGEN)
+    $(MAKE) lib/BasicsC/voc-errors.h
     doxygen Doxygen/arango-latex.doxy > /dev/null

     echo "\def\arangodbversion{@PACKAGE_VERSION@}" > Doxygen/latex/version.tex
--- a/Documentation/Manual/Home.md
+++ b/Documentation/Manual/Home.md
@@ ... @@ The HTML and PDF versions of the manual can be found
 Please contact @EXTREF_S{http://www.arangodb.org/connect,us} if you
 have any questions.

-New Features in ArangoDB 1.1
-============================
-
-- @ref NewFeatures11
-
 Upgrading to ArangoDB 1.1 {#ArangoDBUpgrading}
 ==============================================

-- @ref Upgrading
+- @BOOK_REF{NewFeatures11}
+- @BOOK_REF{Upgrading}

 ArangoDB's User Manuals {#ArangoDBUserManual}
 =============================================

-- @ref UserManual @ifnot XML @EXTREF{user-manual.pdf,pdf} @endif
+- @BOOK_REF{UserManual} @ifnot XML @EXTREF{user-manual.pdf,pdf} @endif

-- @ref DbaManual @ifnot XML @EXTREF{dba-manual.pdf,pdf} @endif
+- @BOOK_REF{DbaManual} @ifnot XML @EXTREF{dba-manual.pdf,pdf} @endif

-- @ref ImpManual @ifnot XML @EXTREF{imp-manual.pdf,pdf} @endif
+- @BOOK_REF{ImpManual} @ifnot XML @EXTREF{imp-manual.pdf,pdf} @endif

-- @ref Glossary
+- @BOOK_REF{Glossary}

 @ifnot XML
 - @EXTREF{arangodb_1.0_shell_reference_card.pdf,Cheat Sheet}
@@ ... @@ ArangoDB's User Manuals {#ArangoDBUserManual}
 ArangoDB's Administrator Manuals {#ArangoDBAdminManual}
 =======================================================

-- @ref InstallManual @ifnot XML @EXTREF{install-manual.pdf,pdf} @endif
+- @BOOK_REF{InstallManual} @ifnot XML @EXTREF{install-manual.pdf,pdf} @endif

 ArangoDB's Developer Manuals {#ArangoDBAPIManual}
 =================================================

-- @ref ImplementorManual @ifnot XML @EXTREF{implementor-manual.pdf,pdf} @endif
+- @BOOK_REF{ImplementorManual} @ifnot XML @EXTREF{implementor-manual.pdf,pdf} @endif

-- @ref RefManual @ifnot XML @EXTREF{ref-manual.pdf,pdf} @endif
+- @BOOK_REF{RefManual} @ifnot XML @EXTREF{ref-manual.pdf,pdf} @endif

 API aka Drivers {#HomeApi}
 ==========================
--- a/Documentation/Manual/NewFeatures11.md
+++ b/Documentation/Manual/NewFeatures11.md
@@ ... @@
 New Features in ArangoDB 1.1 {#NewFeatures11}
 =============================================

-@NAVIGATE_FIRST{Home,Upgrading}
+@NAVIGATE_NewFeatures11
 @EMBEDTOC{NewFeatures11TOC}

 Features and Improvements {#NewFeatures11Introduction}
@@ ... @@ _arangoimp_ also supports importing input files in TSV format. TSV is
 a simple separated format such as CSV, but with the tab character as
 the separator, no quoting for values and thus no support for line
 breaks inside the values.
+
+### libicu
+
+ArangoDb uses ICU - International Components for Unicode (icu-project.org)
+for string sorting and string normalization.
+
+ArangoDB 1.1 adds the option `--default-language` to select a locale for
+sorting and comparing strings. The default locale is set to be the system
+locale on that platform.
+
--- a/Documentation/Manual/Upgrading.md
+++ b/Documentation/Manual/Upgrading.md
@@ ... @@
 Upgrading to ArangoDB 1.1 {#Upgrading}
 ======================================

-@NAVIGATE{NewFeatures11,Home,UserManual}
+@NAVIGATE_Upgrading
+@EMBEDTOC{UpgradingTOC}
+
+Upgrading {#UpgradingIntroduction}
+==================================

 ArangoDB 1.1 introduces new features but may in some respect have
 slightly different behavior than 1.0.
@@ ... @@ downwards-compatible to ArangoDB 1.0.

 Existing users of ArangoDB 1.0 should read the list carefully and make
 sure they have undertaken all necessary steps and precautions before
-upgrading from ArangoDB 1.0 to ArangoDB 1.1.
+upgrading from ArangoDB 1.0 to ArangoDB 1.1. Also check
+@ref UpgradingTroubleshooting.

-New dependencies
-----------------
+New Dependencies {#UpgradingNewDependencies}
+--------------------------------------------

 As ArangoDB 1.1 supports SSL connections, ArangoDB can only be built
 on servers with the OpenSSL library installed. The OpenSSL is not
 bundled with ArangoDB and must be installed separately.

-Database directory version check and upgrade
----------------------------------------------
+Database Directory Version Check and Upgrade {#UpgradingVersionCheck}
+---------------------------------------------------------------------

 Starting with ArangoDB 1.1, _arangod_ will perform a database version
 check at startup.
@@ ... @@ that it cannot fix, it will halt on the first error and warn the user.
 Re-starting arangod with the `--upgrade` option will execute only the
 previously failed and not yet executed tasks.

-Server startup options changes
-------------------------------
+Server Startup Options Changes {#UpgradingServerOptions}
+--------------------------------------------------------

 ### Port options and endpoints

@@ ... @@ _arangod_ will refuse to start.
 The server can be bound to one or multiple endpoints at once. The
 following endpoint specification sytnax is currently supported:

-- `tcp://host:port (HTTP over IPv4)`
-- `tcp://[host]:port (HTTP over IPv6)`
-- `ssl://host:port (HTTP over SSL-encrypted IPv4)`
-- `ssl://[host]:port (HTTP over SSL-encrypted IPv6)`
-- `unix://path/to/socket (HTTP over UNIX socket)`
+- `tcp://host:port` (HTTP over IPv4)
+- `tcp://[host]:port` (HTTP over IPv6)
+- `ssl://host:port` (HTTP over SSL-encrypted IPv4)
+- `ssl://[host]:port` (HTTP over SSL-encrypted IPv6)
+- `unix://path/to/socket` (HTTP over UNIX socket)

 An example value for the option is `--server.endpoint
 tcp://127.0.0.1:8529`. This will make the server listen to requests
@@ ... @@ requests from a browser, you should either set
 `--server.keep-alive-timeout` to a value of `0`, or make your browser
 send `Connection: close` HTTP headers with its requests.

-Start / stop scripts
---------------------
+Start / Stop Scripts {#UpgradingStartScripts}
+---------------------------------------------

 The user used in start and stop scripts has changed from _arango_ to
 _arangodb_. Furthermore, the start script name itself has changed from
 _arangod_ to _arangodb_. Additionally, the default database directory
 name changed from _/var/arangodb_ to _/var/lib/arangodb_. This was
-done to be more compliant with various Linux policies.
+necessary to be more compliant with various Linux policies.

-Collection types
-----------------
+Collection Types {#UpgradingCollectionTypes}
+--------------------------------------------

 In ArangoDB 1.1, collection types have been introduced:
@@ ... @@ the server, directly in ArangoDB. The `ArangoEdges` or
 `ArangoEdgesCollection` objects were not exposed to _arangosh_ or any
 other clients.

-arangoimp / arangosh
---------------------
+arangoimp / arangosh {#UpgradingShellImport}
+--------------------------------------------

 The parameters `--connect-timeout` and `--request-timeout` for
 _arangosh_ and _arangoimp_ have been renamed to
@@ ... @@ interactively prompt for a password. If no username is specified on
 the command line, the default user _root_ will be used but there will
 still be a password prompt.

-Change of syslog usage
-----------------------
+Change of Syslog Usage {#UpgradingSyslog}
+-----------------------------------------

 In 1.0, arangod always logged its output to the syslog, regardless of
-any other logging that was configured. In 1.1, this has changed. Log
+any other logging that was configured. In 1.1, this has changed. Log
 messages will be sent to the syslog only if the server is started with
-the `--log.syslog` option and a non-empty string (the log facility)
-is given to it.
+the `--log.syslog` option and a non-empty string (the log facility) is
+given to it. This is in accordance with the 1.0 documentation.
+
+Troubleshooting {#UpgradingTroubleshooting}
+===========================================
+
+If you cannot find a solution here, please ask the Google-Group at
+http://groups.google.com/group/arangodb
+
+Problem: ArangoDB does not start after upgrade
+----------------------------------------------
+
+- Check the logfile `/var/log/arangodb/arangod.log`
+
+- Check the permissions of these directories:
+
+  - `/var/lib/arangodb/`
+  - `/var/run/arangodb/`
+  - `/var/log/arangodb/`
+
+  These directories and all files have to be readable and writable for the user
+  "arangodb" and group "arangodb" (not for MacOSX). Double check that the user
+  is "arangodb" not "arango".
+
+  Change the permissions using:
+
+    unix> chown -R arangodb:arangodb /var/lib/arangodb/ /var/run/arangodb/ /var/log/arangodb/
+
+- Check the configuration file in:
+
+    /etc/arangodb/arangod.conf
+
+Problem: Packet manager finds no upgrade
+----------------------------------------

-Removed functionality
----------------------
+- Check the name of the repository here:
+
+    http://www.arangodb.org/download
+
+Problem: Database is empty
+--------------------------
+
+Check that the database file
+
+    /var/lib/arangodb
+
+contains your collections. If it is empty, check the old location
+of the database at
+
+    /var/arangodb
+
+If necessary, stop the server, copy the files using
+
+    cp /var/arangodb/* /var/lib/arangodb
+
+and start the server agaib.
+
+Removed Features {#UpgradingRemovedFeatures}
+============================================
+
+Removed Dependencies {#UpgradingRemovedDependencies}
+----------------------------------------------------
+
+ArangoDB no longer requires BOOST, ZeroMQ, or ProtocolBuffers.
+
+Removed Functionality {#UpgradingRemovedFunctionality}
+------------------------------------------------------
+
+### Configuration

 In 1.0, there were unfinished REST APIs available at the
 `/_admin/config` URL suffix. These APIs were stubs only and have been
 removed in ArangoDB 1.1.
+
+### Front-End User and Session Management
+
+In 1.0, there was an API to manage user and session for the GUI
+administraion interface. In 1.1 the user management is part of the
+database (not just the front-end). There the calls to
+`_admin/user-manager` where removed.
new file mode 100644
index 00000000000..1088d043604
--- /dev/null
+++ b/Documentation/Manual/UpgradingTOC.md
+TOC {#UpgradingTOC}
+===================
+
+- @ref UpgradingIntroduction
+ - @ref UpgradingNewDependencies
+ - @ref UpgradingVersionCheck
+ - @ref UpgradingServerOptions
+ - @ref UpgradingStartScripts
+ - @ref UpgradingCollectionTypes
+ - @ref UpgradingShellImport
+ - @ref UpgradingSyslog
+- @ref UpgradingTroubleshooting
+- @ref UpgradingRemovedFeatures
+ - @ref UpgradingRemovedDependencies
new file mode 100755
index 00000000000..31830217159
--- /dev/null
+++ b/Documentation/Scripts/generateTOC.py
+################################################################################
+### @brief create a C stub from a Python file
+###
+### @file
+###
+### DISCLAIMER
+###
+### Copyright by triAGENS GmbH - All rights reserved.
+###
+### The Programs (which include both the software and documentation)
+### contain proprietary information of triAGENS GmbH; they are
+### provided under a license agreement containing restrictions on use and
+### disclosure and are also protected by copyright, patent and other
+### intellectual and industrial property laws. Reverse engineering,
+### disassembly or decompilation of the Programs, except to the extent
+### required to obtain interoperability with other independently created
+### software or as specified by law, is prohibited.
+###
+### The Programs are not intended for use in any nuclear, aviation, mass
+### transit, medical, or other inherently dangerous applications. It shall
+### be the licensee's responsibility to take all appropriate fail-safe,
+### backup, redundancy, and other measures to ensure the safe use of such
+### applications if the Programs are used for such purposes, and triAGENS
+### GmbH disclaims liability for any damages caused by such use of
+### the Programs.
+###
+### This software is the confidential and proprietary information of
+### triAGENS GmbH. You shall not disclose such confidential and
+### proprietary information and shall use it only in accordance with the
+### terms of the license agreement you entered into with triAGENS GmbH.
+###
+### Copyright holder is triAGENS GmbH, Cologne, Germany
+###
+### @author Dr. Frank Celler
+### @author Copyright 2011, triagens GmbH, Cologne, Germany
+################################################################################
+
+import re, sys, string
+
+argv = sys.argv
+argv.pop(0)
+
+DEBUG = False
+
+################################################################################
+### @brief parse file
+################################################################################
+
+r1 = re.compile(r'^- *@BOOK_REF{([^}]*)}')
+r2 = re.compile(r'^@CHAPTER_REF{([^}]*)}')
+r3 = re.compile(r'^.*{#([^}]*)}')
+
+books = []
+chapters = []
+homes = {}
+
+for filename in argv:
+    f = open(filename, "r")
+    num = 0
+    superentry = ""
+
+    for line in f:
+        line = line.rstrip('\n')
+        num = num + 1
+
+        # first entry is home
+        if num == 1:
+            m = r3.match(line)
+
+            if m:
+                superentry = m.group(1)
+
+        # books
+        m = r1.match(line)
+
+        if m:
+            entry = m.group(1)
+            books.append(entry)
+            homes[entry] = 'Home'
+            continue
+
+        # chapters
+        m = r2.match(line)
+
+        if m:
+            entry = m.group(1)
+            chapters.append(entry)
+            homes[entry] = superentry
+            continue
+
+    f.close()
+
+def generate(l):
+    for i in range(0, len(l)):
+        entry = l[i]
+        prev = ""
+        next = ""
+        home = ""
+
+        if 0 < i:
+            prev = l[i - 1]
+
+        if i + 1 < len(l):
+            next = l[i + 1]
+
+        if entry in homes:
+            home = homes[entry]
+
+        if prev == "":
+            print 'ALIASES += "NAVIGATE_%s=@NAVIGATE_FIRST{%s,%s}"' % (entry, home, next)
+        elif next == "":
+            print 'ALIASES += "NAVIGATE_%s=@NAVIGATE_LAST{%s,%s}"' % (entry, prev, home)
+        else:
+            print 'ALIASES += "NAVIGATE_%s=@NAVIGATE{%s,%s,%s}"' % (entry, prev, home, next)
+
+generate(books)
+generate(chapters)
--- a/Documentation/Scripts/xml2md.py
+++ b/Documentation/Scripts/xml2md.py
@@ ... @@
 replDict["s_sp"] = ""
 replDict["e_sp"] = ""

+replDict["s_ndash"] = "--"
+replDict["e_ndash"] = ""
+
 ################################################################################
 #### @brief generate code for text value
 ################################################################################
@@ ... @@
 gencDict["highlight"] = True

 gencDict["sp"] = False
+gencDict["ndash"] = False

 ################################################################################
 #### @brief table entry
@@ ... @@ def start_element(name, attrs):
     elif name == "itemizedlist": listlevel = listlevel + 1
     elif name == "orderedlist": listlevel = listlevel + 1
     #endif
-
+
+    if name == "heading":
+        titlevel = int(attrs["level"])
+        name = "title"
+    #endif
+
     text = ""

     if name == "title":
@@ ... @@ def end_element(name):
     elif name == "orderedlist": listlevel = listlevel - 1
     #endif

+    if name == "heading":
+        titlevel = titlevel - 1
+        name = "title"
+    #endif
+
     if name == "title":
         titafter = True
         text = replDict["e_%s_%d" % (name, titlevel)]
--- a/Documentation/UserManual/FirstStepsArangoDB.md
+++ b/Documentation/UserManual/FirstStepsArangoDB.md
@@ ... @@
 First Steps with ArangoDB {#FirstStepsArangoDB}
 ===============================================

-@NAVIGATE_FIRST{UserManual,UserManualArangosh}
+@NAVIGATE_FirstStepsArangoDB
 @EMBEDTOC{FirstStepsArangoDBTOC}

 What is ArangoDB? {#FirstStepsArangoDBIntro}
@@ ... @@ Command-Line Options {#FirstStepsShellStartStopOptions}

 Use `--help` to get a list of command-line options:

-    > ./arangosh --help
+    unix> ./arangosh --help
     STANDARD options:
       --help                        help message
       --javascript.modules-path <string>  one or more directories separated by cola (default: "...")
new file mode 100644
index 00000000000..6b9ea3d39e5
--- /dev/null
+++ b/Documentation/UserManual/UserManual.md
+ArangoDB's User Manual (@VERSION) {#UserManual}
+===============================================
+
+@NAVIGATE_UserManual
+
+@if LATEX
+- @ref FirstStepsArangoDB
+- @ref UserManualArangosh
+- @ref UserManualWebInterface
+- @ref ShellCollection
+- @ref ShellDocument
+- @ref ShellEdge
+- @ref SimpleQueries
+- @ref Aql
+- @ref UserManualActions
+@latexonly\appendix@endlatexonly
+- @ref CommandLine
+- @ref Glossary
+@else
+@CHAPTER_REF{FirstStepsArangoDB}
+@CHAPTER_REF{UserManualArangosh}
+@CHAPTER_REF{UserManualWebInterface}
+@CHAPTER_REF{ShellCollection}
+@CHAPTER_REF{ShellDocument}
+@CHAPTER_REF{ShellEdge}
+@CHAPTER_REF{SimpleQueries}
+@CHAPTER_REF{Aql}
+@CHAPTER_REF{UserManualActions}
+@CHAPTER_REF{CommandLine}
+@endif
--- a/Documentation/UserManual/WebInterface.md
+++ b/Documentation/UserManual/WebInterface.md
@@ ... @@
 ArangoDB's Web-Interface {#UserManualWebInterface}
 ==================================================

-@NAVIGATE{PREV,UserManual,NEXT}
+@NAVIGATE_UserManualWebInterface
 @EMBEDTOC{UserManualWebInterfaceTOC}

 Accessing the Web-Interface {#UserManualWebInterfaceAccess}
--- a/Documentation/arango.template.in
+++ b/Documentation/arango.template.in
@@ ... @@ TAB_SIZE               = 8
 # You can put \n's in the value part of an alias to insert newlines.

 # function definition
-ALIASES = \
+ALIASES                = \
 "FUN{1}=@latexonly\functionsignature{@endlatexonly@htmlonly<div class=\"functionsignature\">@endhtmlonly@xmlonly XMLMISSING @endxmlonly\1@latexonly}@endlatexonly@htmlonly</div>@endhtmlonly@xmlonly XMLMISSING @endxmlonly" \
 "FUN{2}=@latexonly\functionsignature{@endlatexonly@htmlonly<div class=\"functionsignature\">@endhtmlonly@xmlonly XMLMISSING @endxmlonly\1,\2@latexonly}@endlatexonly@htmlonly</div>@endhtmlonly@xmlonly XMLMISSING @endxmlonly" \
 "FUN{3}=@latexonly\functionsignature{@endlatexonly@htmlonly<div class=\"functionsignature\">@endhtmlonly@xmlonly XMLMISSING @endxmlonly\1,\2,\3@latexonly}@endlatexonly@htmlonly</div>@endhtmlonly@xmlonly XMLMISSING @endxmlonly" \
@@ ... @@ ALIASES = \
 "FN{1}=@latexonly\functionname{@endlatexonly@htmlonly<span class=\"functionname\">@endhtmlonly@xmlonly XMLMISSING @endxmlonly\1@latexonly}@endlatexonly@htmlonly</span>@endhtmlonly@xmlonly XMLMISSING @endxmlonly"

 # command line option
-ALIASES += \
+ALIASES               += \
 "CMDOPT{1}=@latexonly\commandlineoption{@endlatexonly@htmlonly<div class=\"commandlineoption\">@endhtmlonly@xmlonly XMLMISSING @endxmlonly\1@latexonly}@endlatexonly@htmlonly</div>@endhtmlonly@xmlonly XMLMISSING @endxmlonly" \
 "CA{1}=@latexonly\commandlineargument{@endlatexonly@htmlonly<span class=\"commandlineargument\">@endhtmlonly@xmlonly XMLMISSING @endxmlonly\1@latexonly}@endlatexonly@htmlonly</span>@endhtmlonly@xmlonly XMLMISSING @endxmlonly" \
 "CO{1}=@latexonly\commandoption{@endlatexon\ @htmlonly<span class=\"commandoption\">@endhtmlonly@xmlonly XMLMISSING @endxmlonly\1@latexonly}@endlatexonly@htmlonly</span>@endhtmlonly@xmlonly XMLMISSING @endxmlonly"

 # rest calls
-ALIASES += \
+ALIASES               += \
 "RESTHEADER{2}=@latexonly\restheader{@endlatexonly@htmlonly<div class=\"restheader\">@endhtmlonly@xmlonly XMLMISSING @endxmlonly\1@latexonly,@endlatexonly@htmlonly<div class=\"restheaderremark\">(@endhtmlonly@xmlonly XMLMISSING @endxmlonly\2@latexonly}@endlatexonly@htmlonly)</div></div>@endhtmlonly@xmlonly XMLMISSING @endxmlonly" \
 "REST{1}=@latexonly\restcall{@endlatexonly@htmlonly<div class=\"restcall\">@endhtmlonly@xmlonly XMLMISSING @endxmlonly\1@latexonly}@endlatexonly@htmlonly</div>@endhtmlonly@xmlonly XMLMISSING @endxmlonly"

 # navigation
-ALIASES += \
+ALIASES               += \
 "NAVIGATE{3}=@htmlonly<div class=\"navigate\"><a href=\"\1.html\">prev</a> | <a href=\"\2.html\">home</a> | <a href=\"\3.html\">next</a></div>@endhtmlonly" \
 "NAVIGATE_FIRST{2}=@htmlonly<div class=\"navigate\">prev | <a href=\"\1.html\">home</a> | <a href=\"\2.html\">next</a></div>@endhtmlonly" \
 "NAVIGATE_LAST{2}=@htmlonly<div class=\"navigate\"><a href=\"\1.html\">prev</a> | <a href=\"\2.html\">home</a> | next</div>@endhtmlonly" \
 "EMBEDTOC{1}=@if LATEX @else @htmlonly<div class=\"toc\">@endhtmlonly @copydoc \1 \n @htmlonly</div>@endhtmlonly @endif"

 # glossary
-ALIASES += \
+ALIASES               += \
 "GE{1}=@latexonly\glossaryentry{@endlatexonly@htmlonly<span class=\"glossaryentry\">@endhtmlonly@xmlonly XMLMISSING @endxmlonly\1@latexonly}@endlatexonly@htmlonly</span>@endhtmlonly@xmlonly XMLMISSING @endxmlonly"

 # examples
-ALIASES += \
+ALIASES               += \
 "EXAMPLES=@htmlonly<div class=\"example\">Examples</div>@endhtmlonly" \
 "EXAMPLE{2}=@latexonly\renewcommand{\examplecap}{\2}\setboolean{hascap}{true}@endlatexonly @verbinclude \1 @latexonly\setboolean{hascap}{false}@endlatexonly" \
 "TINYEXAMPLE{2}=@latexonly\renewcommand{\examplecap}{\2}\setboolean{hascap}{true}\renewcommand{\examplesize}{\tiny}@endlatexonly @verbinclude \1 @latexonly\setboolean{hascap}{false}\renewcommand{\examplesize}{\ttfamily}@endlatexonly"

 # references
-ALIASES += \
+ALIASES               += \
 "EXTREF{2}=@if LATEX \2 (see @latexonly\url{\1}@endlatexonly) @else <a href=\1>\2</a> @endif" \
 "EXTREF_S{2}=@if LATEX \2 (see @latexonly\url{\1}@endlatexonly) @else <a href=\1>\2</a> @endif" \
 "S_EXTREF{2}=@if LATEX \2 (see @latexonly\url{\1}@endlatexonly) @else <a href=\1>\2</a> @endif" \
 "INTREF{2}=@if LATEX \2 @else @ref \1 \"\2\" @endif"

+# table of content
+ALIASES               += \
+"BOOK_REF{1}=@ref \1" \
+"CHAPTER_REF{1}=@copydetails \1TOC"
+
 # other aliases
-ALIASES += \
+ALIASES               += \
 "VERSION=@PACKAGE_VERSION@" \
 "LIT{1}=<tt>\1</tt>" \
 "LIT{2}=<tt>\1,\2</tt>" \
@@ ... @@ WARN_LOGFILE           =
 # directories like "/usr/src/myproject". Separate the files or directories
 # with spaces.

-INPUT = @srcdir@/Documentation/UserManual \
+INPUT = @srcdir@/Documentation/DbaManual \
+        @srcdir@/Documentation/InstallationManual \
         @srcdir@/Documentation/Manual \
-        @srcdir@/Doxygen/js \
+        @srcdir@/Documentation/UserManual \
+        @srcdir@/Doxygen/js \
         @srcdir@/arangod \
         @srcdir@/lib

--- a/Documentation/arangodb.css
+++ b/Documentation/arangodb.css
@@ ... @@
   margin-bottom: 0px;
 }

-/******************************************************************************/
-/* pre                                                                        */
-/******************************************************************************/
-
-#content div.arangodb pre {
-  background-color: #FBFCFD !important;
-  border: 1px solid #C4CFE5 !important;
-  font-family: monospace, fixed;
-  font-size: 105%;
-  font-size: 15px;
-  line-height: 21px;
-  margin: 4px 8px 24px 2px;
-  overflow: auto;
-  padding: 4px 6px;
-  word-wrap: break-word;
-}
-
 /******************************************************************************/
 /* function signature                                                         */
 /******************************************************************************/
@@ ... @@
 }

 /******************************************************************************/
-/* code                                                                       */
+/* pre & code                                                                 */
 /******************************************************************************/

+#content div.arangodb pre {
+  background-color: #FBFCFD !important;
+  border: 1px solid #C4CFE5 !important;
+  font-family: monospace, fixed;
+  font-size: 105%;
+  font-size: 15px;
+  line-height: 21px;
+  margin: 4px 8px 24px 2px;
+  overflow: auto;
+  padding: 4px 6px;
+  word-wrap: break-word;
+}
+
+#content div.arangodb div.fragment {
+  background-color: #FBFCFD !important;
+  border: 1px solid #C4CFE5 !important;
+  font-family: monospace, fixed;
+  font-size: 105%;
+  font-size: 15px;
+  line-height: 21px;
+  margin: 4px 8px 24px 2px;
+  overflow: auto;
+  padding: 4px 6px;
+  word-wrap: break-word;
+}
+
 #content div.arangodb code {
   background-color: #FFFFFF !important;
   border: 0px !important;
@@ ... @@
   font-family: monospace, fixed;
 }

-#content div.arangodb div.fragment {
-  background-color: #FFFFFF !important;
-  border: 1px solid #C4CFE5 !important;
-  margin: 0px 0px 0px 0px;
-  padding: 8px 8px;
-  overflow: auto;
-  line-height: 11px;
-  font-size: 15px;
-  font-family: monospace, fixed;
-  white-space: pre;
-}
-
 /******************************************************************************/
 /* other                                                                      */
 /******************************************************************************/
new file mode 100755
index 00000000000..1b9f50d1ece
--- /dev/null
+++ b/Installation/OBS/clean-repo.sh
+#!/bin/bash
+
+rm -f 'download/robots.txt'
+
+test -d stable || mkdir stable || exit 1
+
+mv download/repositories/home:/fceller/* stable || exit 1
+
+rm -rf 'download'
+rm -rf 'stable/xUbuntu_10.04'
+rm -rf 'stable/CentOS_CentOS-5'
+rm -rf 'stable/RedHat_RHEL-5'
+rm -rf 'stable/SLE_10_SDK'
+
+find stable -name "index.html*" -exec 'rm' '{}' ';'
+find stable -name "home:fceller.repo*" -exec 'rm' '{}' ';'
+find stable -name "*.meta4" -exec 'rm' '{}' ';'
+find stable -name "*.metalink" -exec 'rm' '{}' ';'
+find stable -name "*.mirrorlist" -exec 'rm' '{}' ';'
+find stable -name "repocache" -prune -exec 'rmdir' '{}' ';'
new file mode 100755
index 00000000000..651e9f6cc4e
--- /dev/null
+++ b/Installation/OBS/create-repo.sh
+#!/bin/bash
+
+DISTROS="CentOS_CentOS-6 Fedora_16 Fedora_17 openSUSE_11.4 openSUSE_12.1 openSUSE_12.2 openSUSE_Factory openSUSE_Tumbleweed RedHat_RHEL-6 SLE_11 SLE_11_SP1 SLE_11_SP2"
+
+test -d stable || mkdir stable || exit 1
+
+for distro in $DISTROS; do
+  tag=`echo $distro | sed -e 's/openSUSE_/openSUSE:/' | sed -e 's/SLE_11/SUSE:SLE_11/' | sed -e 's/SLE_11_/SLE_11:/'`
+  test -d stable/$distro || mkdir stable/$distro || exit 1
+
+  echo "[arangodb]
+name=ArangoDB Project ($tag)
+type=rpm-md
+baseurl=http://www.arangodb.org/repositories/stable/$distro/
+gpgcheck=1
+gpgkey=http://www.arangodb.org/repositories/stable/$distro/repodata/repomd.xml.key
+enabled=1" > stable/$distro/arangodb.repo
+
+done
new file mode 100755
index 00000000000..14665308a6a
--- /dev/null
+++ b/Installation/OBS/create-ymp.sh
+#!/bin/bash
+
+DISTROS="openSUSE_Tumbleweed openSUSE_Factory openSUSE_12.2 openSUSE_12.1 openSUSE_11.4 SLE_11_SP2 SLE_11_SP1 SLE_11"
+
+test -d stable || mkdir stable || exit 1
+test -d stable/ymp || mkdir stable/ymp || exit 1
+
+for distro in $DISTROS; do
+  tag=`echo $distro | sed -e 's/openSUSE_/openSUSE:/' | sed -e 's/SLE_11/SUSE:SLE_11/' | sed -e 's/SLE_11_/SLE_11:/'`
+  test -d stable/ymp/$distro || mkdir stable/ymp/$distro || exit 1
+
+  echo "<metapackage xmlns:os=\"http://opensuse.org/Standards/One_Click_Install\" xmlns=\"http://opensuse.org/Standards/One_Click_Install\">
+  <group>
+    <repositories>
+      <repository recommended=\"true\">
+        <name>ArangoDB</name>
+        <summary>ArangoDB Project</summary>
+        <description>ArangoDB Repository for $tag</description>
+        <url>http://www.arangodb.org/repositories/stable/repositories/$distro/</url>
+      </repository>
+    </repositories>
+    <software>
+      <item>
+        <name>arangodb</name>
+        <summary>An open-source, multi-model NoSQL database</summary>
+        <description>ArangoDB is a durable, reliable, transactional multi-model database. It's key-features are: Schema-free schemata, an integrated application server, flexible data modelling, free index choice, and configurable durability.
+
+The ArangoDB consists of a server, a separate shell, which allows you to administrate the server, and a set of client API for various languages.
+
+It is written in C/C++.</description>
+      </item>
+    </software>
+  </group>
+</metapackage>" > stable/ymp/$distro/arangodb.ymp
+
+done
new file mode 100644
index 00000000000..2d92e685c14
--- /dev/null
+++ b/Installation/OBS/index.html
+<!DOCTYPE html>
+<html>
+<head>
+<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.4.4/jquery.min.js" type="text/javascript"></script>
+<link rel="stylesheet" type="text/css" href="style.css" media="all">
+<script src="effects.js" type="text/javascript"></script>
+</head>
+<body>
+<div class="soo_box">
+<p class="soo_line soo_line_visible">Select Your Operating System</p>
+
+<div class="soo_flavour_line">
+
+<div class="soo_button" id="soo_button_Source"><img src="/repositories/images/distributions/source.png" alt="Source"/>
+<p>Source</p></div>
+<div class="soo_button" id="soo_button_MacOSX"><img src="/repositories/images/distributions/macosx.png" alt="MacOSX"/>
+<p>Mac OS X</p></div>
+<div class="soo_button" id="soo_button_Windows"><img src="/repositories/images/distributions/win8.png" alt="Windows"/>
+<p>Windows</p></div>
+<div class="soo_button" id="soo_button_CentOS"><img src="/repositories/images/distributions/centos.png" alt="CentOS"/>
+<p>CentOS</p></div>
+<div class="soo_button" id="soo_button_Debian"><img src="/repositories/images/distributions/debian.png" alt="Debian"/>
+<p>Debian</p></div>
+<div class="soo_button" id="soo_button_Fedora"><img src="/repositories/images/distributions/fedora.png" alt="Fedora"/>
+<p>Fedora</p></div>
+<div class="soo_button" id="soo_button_Mandriva"><img src="/repositories/images/distributions/mandriva.png" alt="Mandriva"/>
+<p>Mandriva</p></div>
+</div>
+<div class="soo_flavour_line">
+
+<div class="soo_button" id="soo_button_openSUSE"><img src="/repositories/images/distributions/opensuse.png" alt="openSUSE"/>
+<p>openSUSE</p></div>
+<div class="soo_button" id="soo_button_RHEL"><img src="/repositories/images/distributions/redhat.png" alt="RHEL"/>
+<p>RHEL</p></div>
+<div class="soo_button" id="soo_button_SLE"><img src="/repositories/images/distributions/suse.png" alt="SLE"/>
+<p>SLE</p></div>
+<div class="soo_button" id="soo_button_Ubuntu"><img src="/repositories/images/distributions/ubuntu.png" alt="Ubuntu"/>
+<p>Ubuntu</p></div>
+<div class="soo_button" id="soo_button_Livecd"><img src="/repositories/images/distributions/cd.png" alt="Livecd"/>
+<p>Live CD</p></div>
+</div>
+
+</div>
+
+<div style="clear:both"/>
+<div id="soo_ymp" class="soo_box">
+<p class="soo_line" id="soo_section_toggle_ymp">Install using One Click Install<span></span></p>
+
+<div id="soo_section_ymp">
+<a class="soo_ymplink soo_distro soo_distro_openSUSE soo_distro_openSUSE_Tumbleweed" href="http://www.arangodb.org/repositories/stable/ymp/openSUSE_Tumbleweed/arangodb.ymp">openSUSE&nbsp;Tumbleweed</a>
+<a class="soo_ymplink soo_distro soo_distro_openSUSE soo_distro_openSUSE_Factory" href="http://www.arangodb.org/repositories/stable/ymp/openSUSE_Factory/arangodb.ymp">openSUSE&nbsp;Factory</a>
+<a class="soo_ymplink soo_distro soo_distro_openSUSE soo_distro_openSUSE_12.2" href="http://www.arangodb.org/repositories/stable/ymp/openSUSE_12.2/arangodb.ymp">openSUSE&nbsp;12.2</a>
+<a class="soo_ymplink soo_distro soo_distro_openSUSE soo_distro_openSUSE_12.1" href="http://www.arangodb.org/repositories/stable/ymp/openSUSE_12.1/arangodb.ymp">openSUSE&nbsp;12.1</a>
+<a class="soo_ymplink soo_distro soo_distro_openSUSE soo_distro_openSUSE_11.4" href="http://www.arangodb.org/repositories/stable/ymp/openSUSE_11.4/arangodb.ymp">openSUSE&nbsp;11.4</a>
+<a class="soo_ymplink soo_distro soo_distro_SLE soo_distro_SLE_11_SP2" href="http://www.arangodb.org/repositories/stable/ymp/SLE_11_SP2/arangodb.ymp">SLE&nbsp;11&nbsp;SP2</a>
+<a class="soo_ymplink soo_distro soo_distro_SLE soo_distro_SLE_11_SP1" href="http://www.arangodb.org/repositories/stable/ymp/SLE_11_SP1/arangodb.ymp">SLE&nbsp;11&nbsp;SP1</a>
+<a class="soo_ymplink soo_distro soo_distro_SLE soo_distro_SLE_11" href="http://www.arangodb.org/repositories/stable/ymp/SLE_11/arangodb.ymp">SLE&nbsp;11</a>
+</div>
+</div>
+
+<div id="soo_repo" class="soo_box">
+<p class="soo_line" id="soo_section_toggle_repo">Add repository and install manually<span></span></p>
+
+<div id="soo_section_repo">
+<div class="soo_repoinfo soo_distro soo_distro_Ubuntu soo_distro_xUbuntu_12.10">
+<p>For <strong>Ubuntu&nbsp;12.10</strong> run the following as <strong>root</strong>:</p>
+<pre>echo &#x27;deb http://www.arangodb.org/repositories/stable/xUbuntu_12.10/ /&#x27; &gt;&gt; /etc/apt/sources.list.d/arangodb.list
+apt-get update
+apt-get install arangodb</pre>
+<p>You can add the repository key to apt like this:</p>
+<pre>wget http://www.arangodb.org/repositories/stable/xUbuntu_12.10/Release.key
+apt-key add - < Release.key</pre>
+</div>
+<div class="soo_repoinfo soo_distro soo_distro_Ubuntu soo_distro_xUbuntu_12.04">
+<p>For <strong>Ubuntu&nbsp;12.04</strong> run the following as <strong>root</strong>:</p>
+<pre>echo &#x27;deb http://www.arangodb.org/repositories/stable/xUbuntu_12.04/ /&#x27; &gt;&gt; /etc/apt/sources.list.d/arangodb.list
+apt-get update
+apt-get install arangodb</pre>
+<p>You can add the repository key to apt like this:</p>
+<pre>wget http://www.arangodb.org/repositories/stable/xUbuntu_12.04/Release.key
+apt-key add - < Release.key</pre>
+</div>
+<div class="soo_repoinfo soo_distro soo_distro_Ubuntu soo_distro_xUbuntu_11.10">
+<p>For <strong>Ubuntu&nbsp;11.10</strong> run the following as <strong>root</strong>:</p>
+<pre>echo &#x27;deb http://www.arangodb.org/repositories/stable/xUbuntu_11.10/ /&#x27; &gt;&gt; /etc/apt/sources.list.d/arangodb.list
+apt-get update
+apt-get install arangodb</pre>
+<p>You can add the repository key to apt like this:</p>
+<pre>wget http://www.arangodb.org/repositories/stable/xUbuntu_11.10/Release.key
+apt-key add - < Release.key</pre>
+</div>
+<div class="soo_repoinfo soo_distro soo_distro_Ubuntu soo_distro_xUbuntu_11.04">
+<p>For <strong>Ubuntu&nbsp;11.04</strong> run the following as <strong>root</strong>:</p>
+<pre>echo &#x27;deb http://www.arangodb.org/repositories/stable/xUbuntu_11.04/ /&#x27; &gt;&gt; /etc/apt/sources.list.d/arangodb.list
+apt-get update
+apt-get install arangodb</pre>
+<p>You can add the repository key to apt like this:</p>
+<pre>wget http://www.arangodb.org/repositories/stable/xUbuntu_11.04/Release.key
+apt-key add - < Release.key</pre>
+</div>
+<div class="soo_repoinfo soo_distro soo_distro_openSUSE soo_distro_openSUSE_Tumbleweed">
+<p>For <strong>openSUSE&nbsp;Tumbleweed</strong> run the following as <strong>root</strong>:</p>
+<pre>zypper addrepo http://www.arangodb.org/repositories/stable/openSUSE_Tumbleweed/arangodb.
repo <nl> + zypper refresh <nl> + zypper install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_openSUSE soo_distro_openSUSE_Factory " > <nl> + < p > For < strong > openSUSE & nbsp ; Factory < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > zypper addrepo http : / / www . arangodb . org / repositories / stable / openSUSE_Factory / arangodb . repo <nl> + zypper refresh <nl> + zypper install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_openSUSE soo_distro_openSUSE_12 . 2 " > <nl> + < p > For < strong > openSUSE & nbsp ; 12 . 2 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > zypper addrepo http : / / www . arangodb . org / repositories / stable / openSUSE_12 . 2 / arangodb . repo <nl> + zypper refresh <nl> + zypper install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_openSUSE soo_distro_openSUSE_12 . 1 " > <nl> + < p > For < strong > openSUSE & nbsp ; 12 . 1 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > zypper addrepo http : / / www . arangodb . org / repositories / stable / openSUSE_12 . 1 / arangodb . repo <nl> + zypper refresh <nl> + zypper install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_openSUSE soo_distro_openSUSE_11 . 4 " > <nl> + < p > For < strong > openSUSE & nbsp ; 11 . 4 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > zypper addrepo http : / / www . arangodb . org / repositories / stable / openSUSE_11 . 4 / arangodb . repo <nl> + zypper refresh <nl> + zypper install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_SLE soo_distro_SLE_11_SP2 " > <nl> + < p > For < strong > SLE & nbsp ; 11 & nbsp ; SP2 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > zypper addrepo http : / / www . arangodb . org / repositories / stable / SLE_11_SP2 / arangodb . repo <nl> + zypper refresh <nl> + zypper install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_SLE soo_distro_SLE_11_SP1 " > <nl> + < p > For < strong > SLE & nbsp ; 11 & nbsp ; SP1 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > zypper addrepo http : / / www . arangodb . org / repositories / stable / SLE_11_SP1 / arangodb . repo <nl> + zypper refresh <nl> + zypper install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_SLE soo_distro_SLE_11 " > <nl> + < p > For < strong > SLE & nbsp ; 11 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > zypper addrepo http : / / www . arangodb . org / repositories / stable / SLE_11 / arangodb . repo <nl> + zypper refresh <nl> + zypper install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_RHEL soo_distro_RedHat_RHEL - 6 " > <nl> + < p > For < strong > RedHat & nbsp ; RHEL - 6 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > cd / etc / yum . repos . d / <nl> + wget http : / / www . arangodb . org / repositories / stable / RedHat_RHEL - 6 / arangodb . 
repo <nl> + yum install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_Mandriva soo_distro_Mandriva_2011 " > <nl> + < p > For < strong > Mandriva & nbsp ; 2011 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > urpmi . addmedia arangodb http : / / www . arangodb . org / repositories / stable / Mandriva_2011 / <nl> + urpmi . update - a <nl> + urpmi arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_Fedora soo_distro_Fedora_17 " > <nl> + < p > For < strong > Fedora & nbsp ; 17 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > cd / etc / yum . repos . d / <nl> + wget http : / / www . arangodb . org / repositories / stable / Fedora_17 / arangodb . repo <nl> + yum install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_Fedora soo_distro_Fedora_16 " > <nl> + < p > For < strong > Fedora & nbsp ; 16 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > cd / etc / yum . repos . d / <nl> + wget http : / / www . arangodb . org / repositories / stable / Fedora_16 / arangodb . repo <nl> + yum install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_Debian soo_distro_Debian_6 . 0 " > <nl> + < p > For < strong > Debian & nbsp ; 6 . 0 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > echo & # x27 ; deb http : / / www . arangodb . org / repositories / stable / Debian_6 . 0 / / & # x27 ; & gt ; & gt ; / etc / apt / sources . list . d / arangodb . list <nl> + apt - get update <nl> + apt - get install arangodb < / pre > <nl> + <nl> + < p > You can add the repository key to apt like this : < / p > <nl> + < pre > wget http : / / www . arangodb . org / repositories / stable / Debian_6 . 0 / Release . key <nl> + apt - key add - < Release . key < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_CentOS soo_distro_CentOS_CentOS - 6 " > <nl> + < p > For < strong > CentOS - 6 < / strong > run the following as < strong > root < / strong > : < / p > <nl> + < pre > cd / etc / yum . repos . d / <nl> + wget http : / / www . arangodb . org / repositories / stable / CentOS_CentOS - 6 / arangodb . repo <nl> + yum install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_MacOSX soo_distro_MacOSX_MacOSX - 10 " > <nl> + < p > For < strong > Mac OS X < / strong > you can use homebrew to install ArangoDB . Run the following : < / p > <nl> + < pre > brew install arangodb < / pre > <nl> + < / div > <nl> + < div class = " soo_repoinfo soo_distro soo_distro_Windows soo_distro_Windows - Win8 " > <nl> + < p > A first version of ArangoDB for Windows is available . Please contact us if you are interested in trying <nl> + it . < / p > <nl> + < / div > <nl> + < / div > <nl> + < / div > <nl> + <nl> + < div id = " soo_pkg " class = " soo_box " > <nl> + < p class = " soo_line " id = " soo_section_toggle_pkg " > Grab binary packages directly < span > < / span > < / p > <nl> + <nl> + < div id = " soo_section_pkg " > <nl> + < table > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Ubuntu soo_distro_xUbuntu_12 . 10 " > <nl> + < td > Packages for < strong > Ubuntu & nbsp ; 12 . 10 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / xUbuntu_12 . 
10 / amd64 / arangodb_1 . 1 . 0 - 0_amd64 . deb " > arangodb_1 . 1 . 0 - 0_amd64 . deb < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / xUbuntu_12 . 10 / i386 / arangodb_1 . 1 . 0 - 0_i386 . deb " > arangodb_1 . 1 . 0 - 0_i386 . deb < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Ubuntu soo_distro_xUbuntu_12 . 04 " > <nl> + < td > Packages for < strong > Ubuntu & nbsp ; 12 . 04 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / xUbuntu_12 . 04 / amd64 / arangodb_1 . 1 . 0 - 0_amd64 . deb " > arangodb_1 . 1 . 0 - 0_amd64 . deb < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / xUbuntu_12 . 04 / i386 / arangodb_1 . 1 . 0 - 0_i386 . deb " > arangodb_1 . 1 . 0 - 0_i386 . deb < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Ubuntu soo_distro_xUbuntu_11 . 10 " > <nl> + < td > Packages for < strong > Ubuntu & nbsp ; 11 . 10 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / xUbuntu_11 . 10 / amd64 / arangodb_1 . 1 . 0 - 0_amd64 . deb " > arangodb_1 . 1 . 0 - 0_amd64 . deb < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / xUbuntu_11 . 10 / i386 / arangodb_1 . 1 . 0 - 0_i386 . deb " > arangodb_1 . 1 . 0 - 0_i386 . deb < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Ubuntu soo_distro_xUbuntu_11 . 04 " > <nl> + < td > Packages for < strong > Ubuntu & nbsp ; 11 . 04 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / xUbuntu_11 . 04 / amd64 / arangodb_1 . 1 . 0 - 0_amd64 . deb " > arangodb_1 . 1 . 0 - 0_amd64 . deb < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / xUbuntu_11 . 04 / i386 / arangodb_1 . 1 . 0 - 0_i386 . deb " > arangodb_1 . 1 . 0 - 0_i386 . deb < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_openSUSE soo_distro_openSUSE_Tumbleweed " > <nl> + < td > Packages for < strong > openSUSE & nbsp ; Tumbleweed < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_Tumbleweed / i586 / arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_Tumbleweed / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_Tumbleweed / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . 
rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_openSUSE soo_distro_openSUSE_Factory " > <nl> + < td > Packages for < strong > openSUSE & nbsp ; Factory < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_Factory / i586 / arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_Factory / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_Factory / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_openSUSE soo_distro_openSUSE_12 . 2 " > <nl> + < td > Packages for < strong > openSUSE & nbsp ; 12 . 2 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_12 . 2 / i586 / arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_12 . 2 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_12 . 2 / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_openSUSE soo_distro_openSUSE_12 . 1 " > <nl> + < td > Packages for < strong > openSUSE & nbsp ; 12 . 1 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_12 . 1 / i586 / arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_12 . 1 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_12 . 1 / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_openSUSE soo_distro_openSUSE_11 . 4 " > <nl> + < td > Packages for < strong > openSUSE & nbsp ; 11 . 4 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_11 . 4 / i586 / arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_11 . 4 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . 
rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / openSUSE_11 . 4 / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_SLE soo_distro_SLE_11_SP2 " > <nl> + < td > Packages for < strong > SLE & nbsp ; 11 & nbsp ; SP2 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / SLE_11_SP2 / i586 / arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / SLE_11_SP2 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / SLE_11_SP2 / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_SLE soo_distro_SLE_11_SP1 " > <nl> + < td > Packages for < strong > SLE & nbsp ; 11 & nbsp ; SP1 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / SLE_11_SP1 / i586 / arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / SLE_11_SP1 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / SLE_11_SP1 / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_SLE soo_distro_SLE_11 " > <nl> + < td > Packages for < strong > SLE & nbsp ; 11 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / SLE_11 / i586 / arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i586 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / SLE_11 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / SLE_11 / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_RHEL soo_distro_RedHat_RHEL - 6 " > <nl> + < td > Packages for < strong > RedHat & nbsp ; RHEL - 6 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / RedHat_RHEL - 6 / i686 / arangodb - 1 . 1 . 0 - 35 . 1 . i686 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i686 . 
rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / RedHat_RHEL - 6 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / RedHat_RHEL - 6 / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Mandriva soo_distro_Mandriva_2011 " > <nl> + < td > Packages for < strong > Mandriva & nbsp ; 2011 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / Mandriva_2011 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Fedora soo_distro_Fedora_17 " > <nl> + < td > Packages for < strong > Fedora & nbsp ; 17 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / Fedora_17 / i686 / arangodb - 1 . 1 . 0 - 35 . 1 . i686 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i686 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / Fedora_17 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / Fedora_17 / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Fedora soo_distro_Fedora_16 " > <nl> + < td > Packages for < strong > Fedora & nbsp ; 16 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / Fedora_16 / i686 / arangodb - 1 . 1 . 0 - 35 . 1 . i686 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i686 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / Fedora_16 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / Fedora_16 / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Debian soo_distro_Debian_6 . 0 " > <nl> + < td > Packages for < strong > Debian & nbsp ; 6 . 0 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / Debian_6 . 0 / amd64 / arangodb_1 . 1 . 0 - 0_amd64 . deb " > arangodb_1 . 1 . 0 - 0_amd64 . deb < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / Debian_6 . 0 / i386 / arangodb_1 . 1 . 0 - 0_i386 . deb " > arangodb_1 . 1 . 0 - 0_i386 . 
deb < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_CentOS soo_distro_CentOS_CentOS - 6 " > <nl> + < td > Packages for < strong > CentOS - 6 < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / CentOS_CentOS - 6 / i686 / arangodb - 1 . 1 . 0 - 35 . 1 . i686 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . i686 . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / CentOS_CentOS - 6 / src / arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . src . rpm < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / CentOS_CentOS - 6 / x86_64 / arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm " > arangodb - 1 . 1 . 0 - 35 . 1 . x86_64 . rpm < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_MacOSX soo_distro_MacOSX_MacOSX - 10 " > <nl> + < td > Packages for < strong > Mac OS X < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / MacOSX / arangodb - 1 . 1 - 0 - MacOSX - 10 . 6 - x86_64 . dmg " > arangodb - 1 . 1 - 0 - MacOSX - 10 . 6 - x86_64 . dmg < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / MacOSX / arangodb - 1 . 1 - 0 - MacOSX - 10 . 7 - x86_64 . dmg " > arangodb - 1 . 1 - 0 - MacOSX - 10 . 7 - x86_64 . dmg < / a > <nl> + < / li > <nl> + < li > <nl> + < a href = " http : / / www . arangodb . org / repositories / stable / MacOSX / arangodb - 1 . 1 - 0 - MacOSX - 10 . 8 - x86_64 . dmg " > arangodb - 1 . 1 - 0 - MacOSX - 10 . 8 - x86_64 . dmg < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Windows soo_distro_Windows - Win8 " > <nl> + < td > Packages for < strong > Windows < / strong > : < / td > <nl> + < td > <nl> + < ul > <nl> + < li > Coming Soon < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Source " > <nl> + < td > <nl> + < ul > <nl> + < li > < a href = " http : / / www . arangodb . org / repositories / stable / Source / ArangoDB - 1 . 1 . 0 . tar . bz2 " > ArangoDB - 1 . 1 . 0 . tar . bz2 < / a > <nl> + < / li > <nl> + < li > < a href = " http : / / www . arangodb . org / repositories / stable / Source / ArangoDB - 1 . 1 . 0 . tar . gz " > ArangoDB - 1 . 1 . 0 . tar . gz < / a > <nl> + < / li > <nl> + < li > < a href = " http : / / www . arangodb . org / repositories / stable / Source / ArangoDB - 1 . 1 . 0 . zip " > ArangoDB - 1 . 1 . 0 . zip < / a > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + < tr class = " soo_pkginfo soo_distro soo_distro_Livecd " > <nl> + < td > <nl> + < ul > <nl> + < li > < a href = " http : / / www . arangodb . org / repositories / stable / iso / ArangoDB_1 . 1 . 0_live . i686 - 1 . 0 . 1 . iso " > ArangoDB 1 . 1 . 0 Live CD based on openSUSE 12 . 2 32 bit < / a > < p > 32Bit 429 MB MD5 : 221a0fbf322bdb8bbc26a10f815428e9 < / p > <nl> + < / li > <nl> + < li > < a href = " http : / / www . arangodb . org / repositories / stable / iso / ArangoDB_1 . 1 . 0_live . x86_64 - 1 . 0 . 1 . iso " > ArangoDB 1 . 1 . 0 Live CD based on openSUSE 12 . 
2 64 bit < / a > < p > 64Bit 432 MB MD5 : 595d2bd4d296b4d88db9381c484261c0 < / p > <nl> + < / li > <nl> + < / ul > <nl> + < / td > <nl> + < / tr > <nl> + <nl> + < / table > <nl> + < / div > <nl> + < / div > <nl> + < / body > <nl> + < / html > <nl> new file mode 100755 <nl> index 00000000000 . . 43c3c4b1ba6 <nl> mmm / dev / null <nl> ppp b / Installation / OBS / load - repo . sh <nl> <nl> + # ! / bin / bash <nl> + <nl> + if test " $ 1 " = = " " ; then <nl> + echo " usage : $ 0 < obs - server > " <nl> + exit 1 <nl> + fi <nl> + <nl> + wget - r - np " http : / / $ 1 / repositories / home : / fceller / " <nl> + mv " $ 1 " download <nl> mmm a / Installation / build . sh <nl> ppp b / Installation / build . sh <nl> case $ TRI_OS_LONG in <nl> echo " Using configuration for DARWIN " <nl> CPPFLAGS = ' - isystem / usr / include - isystem / opt / local / include - Wno - deprecated - declarations ' <nl> LDFLAGS = ' - L / usr / lib - L / opt / local / lib ' # need to use OpenSSL from system <nl> - OPTIONS = " $ OPTIONS - - enable - all - in - one - libev - - enable - all - in - one - v8 - - enable - all - in - one - icu - - enable - mruby " <nl> + OPTIONS = " $ OPTIONS - - enable - all - in - one - libev - - enable - all - in - one - v8 - - enable - all - in - one - icu - - disable - mruby " <nl> RESULTS = " $ RESULTS arangoirb " <nl> if [ " $ { TRI_MACH } " = = " x86_64 " ] ; then <nl> X = $ ( uname - r ) <nl> mkdir Doxygen / man > / dev / null 2 > & 1 <nl> mkdir Doxygen / man / man1 > / dev / null 2 > & 1 <nl> mkdir Doxygen / man / man8 > / dev / null 2 > & 1 <nl> <nl> - make man <nl> \ No newline at end of file <nl> + make man <nl> mmm a / Makefile . am <nl> ppp b / Makefile . am <nl> if ENABLE_HTML2TEXT <nl> BUILT_SOURCES + = README <nl> <nl> README : README . md <nl> - @ MARKDOWN_EXEC @ - o html $ < | @ HTML2TEXT_EXEC @ - style pretty | fgrep - v _Build_Status_ > $ @ . tmp <nl> + fgrep - v " [ Build Status ] " $ < \ <nl> + | @ MARKDOWN_EXEC @ \ <nl> + | @ HTML2TEXT_EXEC @ - style pretty - nobs \ <nl> + | sed - e ' s : & gt ; : > : g ' \ <nl> + > $ @ . tmp <nl> mv $ @ . tmp $ @ <nl> <nl> endif <nl> mmm a / README <nl> ppp b / README <nl> For more in - depth information <nl> * or give it a try . <nl> <nl> <nl> - Compilation <nl> <nl> - Please check the Installation_Manual for installation and compilation <nl> - instructions . <nl> - <nl> - Mac OS X Hints <nl> - <nl> - On Mac OS X you can install ArangoDB using the packagemanager Homebrew : <nl> + For Mac OS X execute <nl> <nl> - * brew install arangodb ( use - - HEAD in order to build ArangoDB from current <nl> - master ) <nl> + brew install arangodb <nl> <nl> - This will install ArangoDB and all dependencies . Note that the server will be <nl> - installed as <nl> + For Linux use the distribution - dependent package manager , see the Installation <nl> + Manual for details . <nl> + For Windows users : we are working hard on a Windows version . A first version <nl> + will become available soon . <nl> + If the package manager has not already started the ArangoDB server , use <nl> <nl> - / usr / local / sbin / arangod <nl> - <nl> - The ArangoDB shell will be install as <nl> + unix > / path / to / sbin / arangod <nl> + 2012 - 03 - 30T12 : 54 : 19Z [ 11794 ] INFO ArangoDB ( version 1 . x . y ) is ready for <nl> + business <nl> + 2012 - 03 - 30T12 : 54 : 19Z [ 11794 ] INFO Have Fun ! <nl> <nl> - / usr / local / bin / arangosh <nl> + / path / to / sbin is OS dependent . 
It will normally be either / usr / sbin or / usr / <nl> + local / sbin . Point your browser to <nl> <nl> + http : / / localhost : 8529 / <nl> <nl> - First Steps <nl> + and select the tab Shell . You can now use the Arango shell from within your <nl> + browser . Alternatively , it is available as the command - line tool arangosh . <nl> <nl> - Start the server : <nl> + arangosh > db . _create ( " hallo " ) ; <nl> + arangosh > db . hallo . save ( { world : " earth " } ) ; <nl> <nl> - > / usr / sbin / arangod <nl> - 2012 - 03 - 30T12 : 54 : 19Z [ 11794 ] INFO ArangoDB ( version 1 . x . y ) is ready for <nl> - business <nl> - 2012 - 03 - 30T12 : 54 : 19Z [ 11794 ] INFO Have Fun ! <nl> + Congratulations ! You have created your first collection called hallo and your <nl> + first document . To verify your achievements : <nl> <nl> - Start the shell in another windows : <nl> + arangosh > db . hallo . toArray ( ) ; <nl> <nl> - > / usr / bin / arangosh <nl> - _ <nl> - __ _ _ __ __ _ _ __ __ _ ___ ___ | | __ <nl> - / _ ` | ' __ / _ ` | ' _ \ / _ ` | / _ \ / __ | ' _ \ <nl> - | ( _ | | | | ( _ | | | | | ( _ | | ( _ ) \ __ \ | | | <nl> - \ __ , _ | _ | \ __ , _ | _ | | _ | \ __ , | \ ___ / | ___ / _ | | _ | <nl> - | ___ / <nl> <nl> - Welcome to arangosh 1 . x . y . Copyright ( c ) 2012 triAGENS GmbH . <nl> - Using Google V8 3 . 9 . 4 . 0 JavaScript engine . <nl> - Using READLINE 6 . 1 . <nl> + More Information <nl> <nl> - Connected to Arango DB 127 . 0 . 0 . 1 : 8529 Version 1 . x . y <nl> + Please check the Installation_Manual for installation and compilation <nl> + instructions . <nl> + The User_Manual has an introductory chapter showing the basic operations of <nl> + ArangoDB . <nl> + Or you can use the online_tutorial to play with ArangoDB without installing it <nl> + locally . <nl> <nl> - arangosh > db . _create ( " examples " ) <nl> - [ ArangoCollection 106097 , " examples ] <nl> + Stay in Contact <nl> <nl> - arangosh > db . examples . save ( { Hallo : " World " } ) ; <nl> - { " error " : false , " _id " : " 106097 / 2333739 " , " _rev " : 2333739 } <nl> + Please note that there will be bugs and we ' d really appreciate it if you report <nl> + them : <nl> <nl> - arangosh > db . examples . all ( ) ; <nl> - [ { _id : " 82883 / 1524675 " , _rev : 1524675 , Hallo : " World " } ] <nl> + https : / / github . com / triAGENS / ArangoDB / issues <nl> <nl> + You can use the Google group for improvements , feature requests , and comments : <nl> <nl> - Caveat <nl> + http : / / www . arangodb . org / connect <nl> <nl> - Please note that this is an early version of ArangoDB . There will be bugs and <nl> - we ' d really appreciate it if you report them : <nl> - https : / / github . com / triAGENS / ArangoDB / issues <nl> mmm a / README . md <nl> ppp b / README . md <nl> <nl> - # ArangoDB <nl> + ArangoDB <nl> + = = = = = = = = <nl> + <nl> [ ! [ Build Status ] ( https : / / secure . travis - ci . org / triAGENS / ArangoDB . png ) ] ( http : / / travis - ci . org / triAGENS / ArangoDB ) <nl> <nl> ArangoDB is a universal open - source database with flexible data model for documents , graphs , and key - values . Build high performance application using a convenient sql - like query language or JavaScript / Ruby extensions . <nl> For more in - depth information <nl> <nl> * read more on the [ design goals of ArangoDB ] ( http : / / www . arangodb . org / 2012 / 03 / 07 / avocadodbs - design - objectives ) <nl> * [ watch the video ] ( http : / / vimeo . 
com / 36411892 ) - Martin Schoenert , architect of ArangoDB , gives an introduction of what the ArangoDB project is about . <nl> - * or give it a try . <nl> + * or give it a [ try ] ( http : / / www . arangodb . org / try ) . <nl> <nl> - # # Compilation <nl> <nl> - Please check the <nl> - [ Installation Manual ] ( http : / / www . arangodb . org / manuals / current / InstallManual . html ) <nl> - for installation and compilation instructions . <nl> + For the Impatient <nl> + mmmmmmmmmmmmmmm - - <nl> <nl> - # # # Mac OS X Hints <nl> + For Mac OS X execute <nl> <nl> - On Mac OS X you can install ArangoDB using the packagemanager [ Homebrew ] ( http : / / mxcl . github . com / homebrew / ) : <nl> + brew install arangodb <nl> <nl> - * ` brew install arangodb ` ( use ` - - HEAD ` in order to build ArangoDB from current master ) <nl> + For Linux use the distribution - dependent package manager , see the <nl> + [ Installation Manual ] ( http : / / www . arangodb . org / manuals / current / Installing . html ) <nl> + for details . <nl> <nl> - This will install ArangoDB and all dependencies . Note that the server will be installed as <nl> + For Windows users : we are working hard on a Windows version . A first version will become available soon . <nl> <nl> - / usr / local / sbin / arangod <nl> + If the package manager has not already started the ArangoDB server , use <nl> <nl> - The ArangoDB shell will be install as <nl> + unix > / path / to / sbin / arangod <nl> + 2012 - 03 - 30T12 : 54 : 19Z [ 11794 ] INFO ArangoDB ( version 1 . x . y ) is ready for business <nl> + 2012 - 03 - 30T12 : 54 : 19Z [ 11794 ] INFO Have Fun ! <nl> <nl> - / usr / local / bin / arangosh <nl> + ` / path / to / sbin ` is OS dependent . It will normally be either ` / usr / sbin ` or ` / usr / local / sbin ` . Point your browser to <nl> <nl> - # # First Steps <nl> + http : / / localhost : 8529 / <nl> <nl> - Start the server : <nl> + and select the tab ` Shell ` . You can now use the Arango shell from within your browser . Alternatively , it is available as the command - line tool _arangosh_ . <nl> + <nl> + arangosh > db . _create ( " hallo " ) ; <nl> + arangosh > db . hallo . save ( { world : " earth " } ) ; <nl> + <nl> + Congratulations ! You have created your first collection called ` hallo ` and your first document . To verify your achievements : <nl> + <nl> + arangosh > db . hallo . toArray ( ) ; <nl> - > / usr / sbin / arangod <nl> - 2012 - 03 - 30T12 : 54 : 19Z [ 11794 ] INFO ArangoDB ( version 1 . x . y ) is ready for business <nl> - 2012 - 03 - 30T12 : 54 : 19Z [ 11794 ] INFO Have Fun ! <nl> <nl> - Start the shell in another windows : <nl> + More Information <nl> + mmmmmmmmmmmmmmm - <nl> <nl> - > / usr / bin / arangosh <nl> - _ <nl> - __ _ _ __ __ _ _ __ __ _ ___ ___ | | __ <nl> - / _ ` | ' __ / _ ` | ' _ \ / _ ` | / _ \ / __ | ' _ \ <nl> - | ( _ | | | | ( _ | | | | | ( _ | | ( _ ) \ __ \ | | | <nl> - \ __ , _ | _ | \ __ , _ | _ | | _ | \ __ , | \ ___ / | ___ / _ | | _ | <nl> - | ___ / <nl> + Please check the <nl> + [ Installation Manual ] ( http : / / www . arangodb . org / manuals / current / InstallManual . html ) <nl> + for installation and compilation instructions . <nl> <nl> - Welcome to arangosh 1 . x . y . Copyright ( c ) 2012 triAGENS GmbH . <nl> - Using Google V8 3 . 9 . 4 . 0 JavaScript engine . <nl> - Using READLINE 6 . 1 . <nl> + The <nl> + [ User Manual ] ( http : / / www . arangodb . org / manuals / current / UserManual . html ) <nl> + has an introductory chapter showing the basic operations of ArangoDB . 
<nl> <nl> - Connected to Arango DB 127 . 0 . 0 . 1 : 8529 Version 1 . x . y <nl> + Or you can use the <nl> + [ online tutorial ] ( http : / / www . arangodb . org / try ) <nl> + to play with ArangoDB without installing it locally . <nl> <nl> - arangosh > db . _create ( " examples " ) <nl> - [ ArangoCollection 106097 , " examples ] <nl> <nl> - arangosh > db . examples . save ( { Hallo : " World " } ) ; <nl> - { " error " : false , " _id " : " 106097 / 2333739 " , " _rev " : 2333739 } <nl> + Stay in Contact <nl> + mmmmmmmmmmmmmmm <nl> <nl> - arangosh > db . examples . all ( ) . toArray ( ) ; <nl> - [ { _id : " 82883 / 1524675 " , _rev : 1524675 , Hallo : " World " } ] <nl> + Please note that there will be bugs and we ' d really appreciate it if <nl> + you report them : <nl> <nl> - # # Caveat <nl> + https : / / github . com / triAGENS / ArangoDB / issues <nl> <nl> - Please note that this is an early version of ArangoDB . There will be <nl> - bugs and we ' d really appreciate it if you [ report ] ( https : / / github . com / triAGENS / ArangoDB / issues " ) <nl> - them : <nl> + You can use the Google group for improvements , feature requests , comments <nl> <nl> - https : / / github . com / triAGENS / ArangoDB / issues <nl> + http : / / www . arangodb . org / connect <nl> mmm a / UnitTests / HttpInterface / api - collection - spec . rb <nl> ppp b / UnitTests / HttpInterface / api - collection - spec . rb <nl> <nl> collections = doc . parsed_response [ ' collections ' ] <nl> names = doc . parsed_response [ ' names ' ] <nl> <nl> - found = 0 <nl> - for n in names do <nl> - if n [ 0 ] = = " units " or n [ 0 ] = = " employees " or n [ 0 ] = = " locations " <nl> - found = found + 1 <nl> + # filter out system collections <nl> + realCollections = [ ] <nl> + <nl> + collections . each { | collection | <nl> + if collection [ ' name ' ] . slice ( 0 , 1 ) ! = " _ " <nl> + realCollections . push ( collection ) <nl> + end <nl> + } <nl> + <nl> + realNames = { } <nl> + <nl> + names . each do | name , collection | <nl> + if name . slice ( 0 , 1 ) ! = ' _ ' <nl> + realNames [ name ] = collection <nl> end <nl> end <nl> <nl> - found . should eq ( 3 ) <nl> + realCollections . length . should eq ( 3 ) <nl> + realNames . length . should eq ( 3 ) <nl> <nl> - for collection in collections do <nl> - names [ collection [ ' name ' ] ] . should eq ( collection ) <nl> + for collection in realCollections do <nl> + realNames [ collection [ ' name ' ] ] . should eq ( collection ) <nl> end <nl> end <nl> end <nl> mmm a / arangod / Ahuacatl / ahuacatl - grammar . c <nl> ppp b / arangod / Ahuacatl / ahuacatl - grammar . c <nl> <nl> typedef union YYSTYPE <nl> { <nl> <nl> - / * Line 301 of yacc . c * / <nl> + / * Line 293 of yacc . c * / <nl> # line 26 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> <nl> TRI_aql_node_t * node ; <nl> typedef union YYSTYPE <nl> <nl> <nl> <nl> - / * Line 301 of yacc . c * / <nl> + / * Line 293 of yacc . c * / <nl> # line 192 " arangod / Ahuacatl / ahuacatl - grammar . c " <nl> } YYSTYPE ; <nl> # define YYSTYPE_IS_TRIVIAL 1 <nl> YYLTYPE yylloc ; <nl> { <nl> case 2 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 176 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 3 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 181 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 4 : <nl> <nl> - / * Line 1821 of yacc . 
c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 183 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 5 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 188 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 6 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 190 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 7 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 192 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 8 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 194 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 9 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 196 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 10 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 198 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 11 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 203 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node ; <nl> YYLTYPE yylloc ; <nl> <nl> case 12 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 222 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeFilterAql ( context , ( yyvsp [ ( 2 ) - ( 2 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 13 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 235 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeLetAql ( context , ( yyvsp [ ( 2 ) - ( 4 ) ] . strval ) , ( yyvsp [ ( 4 ) - ( 4 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 14 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 248 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeListAql ( context ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 15 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 256 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeCollectAql ( context , TRI_PopStackParseAql ( context ) , ( yyvsp [ ( 4 ) - ( 4 ) ] . strval ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 16 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 269 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 17 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 271 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 18 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 276 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeAssignAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . strval ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . 
node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 19 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 289 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . strval ) = NULL ; <nl> YYLTYPE yylloc ; <nl> <nl> case 20 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 292 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . strval ) = ( yyvsp [ ( 2 ) - ( 2 ) ] . strval ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 21 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 298 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeListAql ( context ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 22 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 306 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * list = TRI_PopStackParseAql ( context ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 23 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 320 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> if ( ! TRI_PushListAql ( context , ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ) ) { <nl> YYLTYPE yylloc ; <nl> <nl> case 24 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 325 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> if ( ! TRI_PushListAql ( context , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ) { <nl> YYLTYPE yylloc ; <nl> <nl> case 25 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 333 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeSortElementAql ( context , ( yyvsp [ ( 1 ) - ( 2 ) ] . node ) , ( yyvsp [ ( 2 ) - ( 2 ) ] . boolval ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 26 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 344 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . boolval ) = true ; <nl> YYLTYPE yylloc ; <nl> <nl> case 27 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 347 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . boolval ) = true ; <nl> YYLTYPE yylloc ; <nl> <nl> case 28 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 350 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . boolval ) = false ; <nl> YYLTYPE yylloc ; <nl> <nl> case 29 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 356 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeLimitAql ( context , TRI_CreateNodeValueIntAql ( context , 0 ) , TRI_CreateNodeValueIntAql ( context , ( yyvsp [ ( 2 ) - ( 2 ) ] . intval ) ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 30 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 370 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeLimitAql ( context , TRI_CreateNodeValueIntAql ( context , ( yyvsp [ ( 2 ) - ( 4 ) ] . intval ) ) , TRI_CreateNodeValueIntAql ( context , ( yyvsp [ ( 4 ) - ( 4 ) ] . intval ) ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 31 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 383 " arangod / Ahuacatl / ahuacatl - grammar . 
y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeReturnAql ( context , ( yyvsp [ ( 2 ) - ( 2 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 32 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 403 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 2 ) - ( 3 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 33 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 406 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> if ( ! TRI_StartScopeAql ( context , TRI_AQL_SCOPE_SUBQUERY ) ) { <nl> YYLTYPE yylloc ; <nl> <nl> case 34 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 411 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * result ; <nl> YYLTYPE yylloc ; <nl> <nl> case 35 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 442 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 36 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 445 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 37 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 448 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 38 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 451 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node ; <nl> YYLTYPE yylloc ; <nl> <nl> case 39 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 464 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * list = TRI_PopStackParseAql ( context ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 40 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 473 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 41 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 476 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 42 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 479 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 43 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 485 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorUnaryPlusAql ( context , ( yyvsp [ ( 2 ) - ( 2 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 44 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 493 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorUnaryMinusAql ( context , ( yyvsp [ ( 2 ) - ( 2 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 45 : <nl> <nl> - / * Line 1821 of yacc . 
c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 501 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorUnaryNotAql ( context , ( yyvsp [ ( 2 ) - ( 2 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 46 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 512 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryOrAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 47 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 520 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryAndAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 48 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 528 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryPlusAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 49 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 536 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryMinusAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 50 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 544 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryTimesAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 51 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 552 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryDivAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 52 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 560 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryModAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 53 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 568 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryEqAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 54 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 576 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryNeAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 55 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 584 " arangod / Ahuacatl / ahuacatl - grammar . 
y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryLtAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 56 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 592 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryGtAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 57 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 600 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryLeAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 58 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 608 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryGeAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 59 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 616 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorBinaryInAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 60 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 627 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeOperatorTernaryAql ( context , ( yyvsp [ ( 1 ) - ( 5 ) ] . node ) , ( yyvsp [ ( 3 ) - ( 5 ) ] . node ) , ( yyvsp [ ( 5 ) - ( 5 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 61 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 638 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 62 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 640 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 63 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 645 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_PushListAql ( context , ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 64 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 648 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_PushListAql ( context , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 65 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 654 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 66 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 657 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 67 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 663 " arangod / Ahuacatl / ahuacatl - grammar . 
y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeListAql ( context ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 68 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 670 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = TRI_PopStackParseAql ( context ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 69 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 676 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 70 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 678 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 71 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 683 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> if ( ! TRI_PushListAql ( context , ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ) ) { <nl> YYLTYPE yylloc ; <nl> <nl> case 72 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 688 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> if ( ! TRI_PushListAql ( context , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ) { <nl> YYLTYPE yylloc ; <nl> <nl> case 73 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 696 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeArrayAql ( context ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 74 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 703 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = TRI_PopStackParseAql ( context ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 75 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 709 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 76 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 711 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 77 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 716 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 78 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 718 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> } <nl> YYLTYPE yylloc ; <nl> <nl> case 79 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 723 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> if ( ! TRI_PushArrayAql ( context , ( yyvsp [ ( 1 ) - ( 3 ) ] . strval ) , ( yyvsp [ ( 3 ) - ( 3 ) ] . node ) ) ) { <nl> YYLTYPE yylloc ; <nl> <nl> case 80 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 732 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> / / start of reference ( collection or variable name ) <nl> YYLTYPE yylloc ; <nl> <nl> case 81 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 736 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> / / expanded variable access , e . g . variable [ * ] <nl> YYLTYPE yylloc ; <nl> <nl> case 82 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . 
c * / <nl> # line 760 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> / / return from the " expansion " subrule <nl> YYLTYPE yylloc ; <nl> <nl> case 83 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 789 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> / / variable or collection <nl> YYLTYPE yylloc ; <nl> <nl> case 84 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 806 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> / / named variable access , e . g . variable . reference <nl> YYLTYPE yylloc ; <nl> <nl> case 85 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 814 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> / / indexed variable access , e . g . variable [ index ] <nl> YYLTYPE yylloc ; <nl> <nl> case 86 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 825 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> / / named variable access , continuation from * expansion , e . g . [ * ] . variable . reference <nl> YYLTYPE yylloc ; <nl> <nl> case 87 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 835 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> / / indexed variable access , continuation from * expansion , e . g . [ * ] . variable [ index ] <nl> YYLTYPE yylloc ; <nl> <nl> case 88 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 845 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> / / named variable access , continuation from * expansion , e . g . [ * ] . variable . xx . reference <nl> YYLTYPE yylloc ; <nl> <nl> case 89 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 852 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> / / indexed variable access , continuation from * expansion , e . g . [ * ] . variable . xx . [ index ] <nl> YYLTYPE yylloc ; <nl> <nl> case 90 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 862 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 91 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 865 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . node ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . node ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 92 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 871 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeValueStringAql ( context , ( yyvsp [ ( 1 ) - ( 1 ) ] . strval ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 93 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 879 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node ; <nl> YYLTYPE yylloc ; <nl> <nl> case 94 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 893 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeValueNullAql ( context ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 95 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 901 " arangod / Ahuacatl / ahuacatl - grammar . 
y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeValueBoolAql ( context , true ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 96 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 909 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeValueBoolAql ( context , false ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 97 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 920 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> TRI_aql_node_t * node = TRI_CreateNodeParameterAql ( context , ( yyvsp [ ( 1 ) - ( 1 ) ] . strval ) ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 98 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 931 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> if ( ! ( yyvsp [ ( 1 ) - ( 1 ) ] . strval ) ) { <nl> YYLTYPE yylloc ; <nl> <nl> case 99 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 938 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> if ( ! ( yyvsp [ ( 1 ) - ( 1 ) ] . strval ) ) { <nl> YYLTYPE yylloc ; <nl> <nl> case 100 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 947 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> ( yyval . strval ) = ( yyvsp [ ( 1 ) - ( 1 ) ] . strval ) ; <nl> YYLTYPE yylloc ; <nl> <nl> case 101 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 953 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> if ( ! ( yyvsp [ ( 1 ) - ( 1 ) ] . strval ) ) { <nl> YYLTYPE yylloc ; <nl> <nl> case 102 : <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 960 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> { <nl> if ( ! ( yyvsp [ ( 2 ) - ( 2 ) ] . strval ) ) { <nl> YYLTYPE yylloc ; <nl> <nl> <nl> <nl> - / * Line 1821 of yacc . c * / <nl> + / * Line 1806 of yacc . c * / <nl> # line 3016 " arangod / Ahuacatl / ahuacatl - grammar . c " <nl> default : break ; <nl> } <nl> mmm a / arangod / Ahuacatl / ahuacatl - grammar . h <nl> ppp b / arangod / Ahuacatl / ahuacatl - grammar . h <nl> <nl> typedef union YYSTYPE <nl> { <nl> <nl> - / * Line 2132 of yacc . c * / <nl> + / * Line 2068 of yacc . c * / <nl> # line 26 " arangod / Ahuacatl / ahuacatl - grammar . y " <nl> <nl> TRI_aql_node_t * node ; <nl> typedef union YYSTYPE <nl> <nl> <nl> <nl> - / * Line 2132 of yacc . c * / <nl> + / * Line 2068 of yacc . c * / <nl> # line 110 " arangod / Ahuacatl / ahuacatl - grammar . h " <nl> } YYSTYPE ; <nl> # define YYSTYPE_IS_TRIVIAL 1 <nl> mmm a / arangod / Documentation / aql . dox <nl> ppp b / arangod / Documentation / aql . dox <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page Aql ArangoDB Query Language ( AQL ) <nl> / / / <nl> + / / / @ NAVIGATE_Aql <nl> / / / @ EMBEDTOC { AqlTOC } <nl> / / / <nl> / / / @ section AqlPurpose Introduction <nl> mmm a / arangod / Documentation / command - line - options . dox <nl> ppp b / arangod / Documentation / command - line - options . 
dox <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page CommandLine Command - Line Options <nl> / / / <nl> + / / / @ NAVIGATE_CommandLine <nl> / / / @ EMBEDTOC { CommandLineTOC } <nl> / / / <nl> / / / @ section CommandLineGeneralOptions General Options <nl> mmm a / arangod / Documentation / dba - manual . dox <nl> ppp b / arangod / Documentation / dba - manual . dox <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page DbaManual ArangoDB ' s DBA Manual ( @ VERSION ) <nl> / / / <nl> - / / / @ NAVIGATE { UserManual , Home , ImpManual } <nl> + / / / @ NAVIGATE_DbaManual <nl> / / / <nl> / / / @ if LATEX <nl> / / / < ul > <nl> <nl> / / / < li > @ ref IndexGeo < / li > <nl> / / / < li > @ ref IndexHash < / li > <nl> / / / < li > @ ref IndexSkiplist < / li > <nl> + / / / < li > @ ref IndexBitArray < / li > <nl> / / / <nl> / / / @ latexonly \ appendix @ endlatexonly <nl> / / / < li > @ ref Glossary < / li > <nl> mmm a / arangod / Documentation / imp - manual . dox <nl> ppp b / arangod / Documentation / imp - manual . dox <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page ImpManual ArangoDB ' s Importer Manual ( @ VERSION ) <nl> / / / <nl> - / / / @ NAVIGATE { DbaManual , Home , Glossary } <nl> + / / / @ NAVIGATE_ImpManual <nl> / / / <nl> / / / @ if LATEX <nl> / / / < ul > <nl> mmm a / arangod / Documentation / implementor - manual . dox <nl> ppp b / arangod / Documentation / implementor - manual . dox <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page ImplementorManual ArangoDB for API Implementors ( @ VERSION ) <nl> / / / <nl> - / / / @ NAVIGATE { InstallManual , Home , RefManual } <nl> + / / / @ NAVIGATE_ImplementorManual <nl> / / / <nl> / / / @ if LATEX <nl> / / / < ul > <nl> mmm a / arangod / Documentation / indexes . dox <nl> ppp b / arangod / Documentation / indexes . dox <nl> <nl> / / / @ copydetails IndexGeoTOC <nl> / / / @ copydetails IndexHashTOC <nl> / / / @ copydetails IndexSkiplistTOC <nl> + / / / @ copydetails IndexBitArrayTOC <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> / / Local Variables : <nl> mmm a / arangod / Documentation / install - manual . dox <nl> ppp b / arangod / Documentation / install - manual . 
dox <nl> <nl> / / / @ author Copyright 2012 , triAGENS GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - INSTALLATION MANUAL <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ page InstallManual ArangoDB ' s Installation Manual ( @ VERSION ) <nl> - / / / <nl> - / / / @ NAVIGATE { Glossary , Home , ImplementorManual } <nl> - / / / <nl> - / / / @ if LATEX <nl> - / / / < ul > <nl> - / / / < li > @ ref Installing < / li > <nl> - / / / < li > @ ref Compiling < / li > <nl> - / / / < / ul > <nl> - / / / @ else <nl> - / / / @ copydetails InstallingTOC <nl> - / / / @ copydetails CompilingTOC <nl> - / / / @ endif <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - INSTALLING <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ page InstallingTOC <nl> - / / / <nl> - / / / < ul > <nl> - / / / < li > @ ref Installing <nl> - / / / < ul > <nl> - / / / < li > @ ref InstallingLinux <nl> - / / / < / li > <nl> - / / / < li > @ ref InstallingLinuxPackageManager <nl> - / / / < ul > <nl> - / / / < li > @ ref InstallingCentOS <nl> - / / / < li > @ ref InstallingDebian <nl> - / / / < li > @ ref InstallingOpenSUSE <nl> - / / / < / ul > <nl> - / / / < / li > <nl> - / / / < li > @ ref InstallingMacOSX <nl> - / / / < ul > <nl> - / / / < li > @ ref InstallingMacOSXHomebrew <nl> - / / / < / ul > <nl> - / / / < / li > <nl> - / / / < / ul > <nl> - / / / < / li > <nl> - / / / < / ul > <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ page Installing Installing ArangoDB <nl> - / / / <nl> - / / / @ EMBEDTOC { InstallingTOC } <nl> - / / / <nl> - / / / @ section InstallingLinux Linux <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / <nl> - / / / You can find binary packages for various Linux distributions here : <nl> - / / / <nl> - / / / @ LIT { http : / / www . arangodb . 
org / repositories / } <nl> - / / / <nl> - / / / @ subsection InstallingLinuxPackageManager Using a Package Manager to install ArangoDB <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / <nl> - / / / @ subsubsection InstallingCentOS CentoOS <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / <nl> - / / / Download and import GPG - PublicKey <nl> - / / / <nl> - / / / @ code <nl> - / / / wget - O RPM - GPG - KEY - www . arangodb . org http : / / www . arangodb . org / repositories / PublicKey <nl> - / / / rpm - - import RPM - GPG - KEY - www . arangodb . org <nl> - / / / @ endcode <nl> - / / / <nl> - / / / Add a repository file @ LIT { / etc / yum . repos . d / arangodb . repo } <nl> - / / / <nl> - / / / @ code <nl> - / / / [ ArangoDB ] <nl> - / / / name = ArangoDB Repository for CentOS <nl> - / / / failovermethod = priority <nl> - / / / baseurl = http : / / www . arangodb . org / repositories / CentOS - 6 <nl> - / / / enabled = 1 <nl> - / / / gpgcheck = 1 <nl> - / / / gpgkey = file : / / / etc / pki / rpm - gpg / RPM - GPG - KEY - www . arangodb . org <nl> - / / / @ endcode <nl> - / / / <nl> - / / / Update the repository data : <nl> - / / / <nl> - / / / @ code <nl> - / / / yum clean metadata <nl> - / / / yum update <nl> - / / / @ endcode <nl> - / / / <nl> - / / / Search for arangodb : <nl> - / / / <nl> - / / / @ code <nl> - / / / yum search arangodb <nl> - / / / @ endcode <nl> - / / / <nl> - / / / Install arangodb : <nl> - / / / <nl> - / / / @ code <nl> - / / / yum install arangodb <nl> - / / / @ endcode <nl> - / / / <nl> - / / / @ subsubsection InstallingDebian Debian , Linux - Mint , and Ubuntu <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / <nl> - / / / Download and import GPG - PublicKey <nl> - / / / <nl> - / / / @ code <nl> - / / / wget - O RPM - GPG - KEY - www . arangodb . org http : / / www . arangodb . org / repositories / PublicKey <nl> - / / / apt - key add RPM - GPG - KEY - www . arangodb . org <nl> - / / / @ endcode <nl> - / / / <nl> - / / / Add the corresponding repository in file @ LIT { / etc / apt / sources . list } : <nl> - / / / @ code <nl> - / / / deb http : / / www . arangodb . org / repositories Debian - 6 main <nl> - / / / @ endcode <nl> - / / / or <nl> - / / / @ code <nl> - / / / deb http : / / www . arangodb . org / repositories LinuxMint - 13 main <nl> - / / / @ endcode <nl> - / / / or <nl> - / / / @ code <nl> - / / / deb http : / / www . arangodb . org / repositories Ubuntu - 11 . 10 main <nl> - / / / @ endcode <nl> - / / / <nl> - / / / Update the repository data : <nl> - / / / <nl> - / / / @ code <nl> - / / / aptitude update <nl> - / / / @ endcode <nl> - / / / <nl> - / / / Now you should be able to search for arangodb : <nl> - / / / <nl> - / / / @ code <nl> - / / / aptitude search arangodb <nl> - / / / @ endcode <nl> - / / / <nl> - / / / In order to install arangodb : <nl> - / / / <nl> - / / / @ code <nl> - / / / aptitude install arangodb <nl> - / / / @ endcode <nl> - / / / <nl> - / / / @ subsubsection Gentoo <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / <nl> - / / / Please use the <nl> - / / / @ EXTREF_S { https : / / github . 
com / mgiken / portage - overlay / tree / master / dev - db / ArangoDB , portage } <nl> - / / / provided by @ @ mgiken . <nl> - / / / <nl> - / / / @ subsubsection InstallingOpenSUSE OpenSuSE <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / <nl> - / / / Add the repository as follows : <nl> - / / / <nl> - / / / @ code <nl> - / / / zypper addrepo - f - t YUM http : / / www . arangodb . org / repositories / openSUSE - 12 . 1 ArangoDB <nl> - / / / @ endcode <nl> - / / / <nl> - / / / Update the repository data : <nl> - / / / <nl> - / / / @ code <nl> - / / / zypper refresh <nl> - / / / @ endcode <nl> - / / / <nl> - / / / Search for arangodb : <nl> - / / / <nl> - / / / @ code <nl> - / / / zypper search arangodb <nl> - / / / @ endcode <nl> - / / / <nl> - / / / Install arangodb : <nl> - / / / <nl> - / / / @ code <nl> - / / / zypper install arangodb <nl> - / / / @ endcode <nl> - / / / <nl> - / / / @ section InstallingMacOSX Mac OS X <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / <nl> - / / / You can find the MacOSX packages here : <nl> - / / / <nl> - / / / @ LIT { http : / / www . arangodb . org / repositories / MacOSX } <nl> - / / / <nl> - / / / @ subsection InstallingMacOSXHomebrew Homebrew <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / <nl> - / / / If you are using < a href = " http : / / mxcl . github . com / homebrew / " > homebrew < / a > , <nl> - / / / then you can install the ArangoDB using @ LIT { brew } as follows : <nl> - / / / <nl> - / / / @ code <nl> - / / / brew install arangodb <nl> - / / / @ endcode <nl> - / / / <nl> - / / / This will install the current stable version of ArangoDB within <nl> - / / / your Homebrew tree . <nl> - / / / <nl> - / / / If you want to install the latest version use : <nl> - / / / <nl> - / / / @ code <nl> - / / / brew install - - HEAD arangodb <nl> - / / / @ endcode <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - COMPILING <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> mmm a / arangod / Documentation / ref - manual . dox <nl> ppp b / arangod / Documentation / ref - manual . dox <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page RefManual ArangoDB ' s Reference Manual ( @ VERSION ) <nl> / / / <nl> - / / / @ NAVIGATE_LAST { ImplementorManual , Home } <nl> + / / / @ NAVIGATE_RefManual <nl> / / / <nl> / / / @ if LATEX <nl> / / / < ul > <nl> mmm a / arangod / Documentation / shell - collection . dox <nl> ppp b / arangod / Documentation / shell - collection . dox <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page ShellCollection Handling Collections <nl> / / / <nl> + / / / @ NAVIGATE_ShellCollection <nl> + / / / <nl> / / / This is an introduction to ArangoDB ' s interface for collections and how <nl> / / / handle collections from the JavaScript shell @ LIT { arangosh } . 
For other <nl> / / / languages see the corresponding language API . <nl> mmm a / arangod / Documentation / shell - document . dox <nl> ppp b / arangod / Documentation / shell - document . dox <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page ShellDocument Handling Documents <nl> / / / <nl> + / / / @ NAVIGATE_ShellDocument <nl> + / / / <nl> / / / This is an introduction to ArangoDB ' s interface for documents and how handle <nl> / / / documents from the JavaScript shell @ LIT { arangosh } . For other languages see <nl> / / / the corresponding language API . <nl> mmm a / arangod / Documentation / shell - edge . dox <nl> ppp b / arangod / Documentation / shell - edge . dox <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page ShellEdge Handling Edges <nl> / / / <nl> + / / / @ NAVIGATE_ShellEdge <nl> + / / / <nl> / / / This is an introduction to ArangoDB ' s interface for edges and how handle <nl> / / / edges from the JavaScript shell @ LIT { arangosh } . For other languages see the <nl> / / / corresponding language API . <nl> mmm a / arangod / Documentation / simple - queries . dox <nl> ppp b / arangod / Documentation / simple - queries . dox <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page SimpleQueries Simple Queries <nl> / / / <nl> + / / / @ NAVIGATE_SimpleQueries <nl> + / / / <nl> / / / Simple queries can be used if the query condition is straight forward , <nl> / / / i . e . , a document reference , all documents , a query - by - example , or a simple <nl> / / / geo query . In a simple query you can specify exactly one collection and one <nl> mmm a / arangod / Documentation / user - manual . dox <nl> ppp b / arangod / Documentation / user - manual . 
dox <nl> <nl> / / / @ author Copyright 2012 , triAGENS GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - USER MANUAL <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ page UserManual ArangoDB ' s User Manual ( @ VERSION ) <nl> - / / / <nl> - / / / @ NAVIGATE { Upgrading , Home , DbaManual } <nl> - / / / <nl> - / / / @ if LATEX <nl> - / / / < ul > <nl> - / / / < li > @ ref FirstStepsArangoDB < / li > <nl> - / / / < li > @ ref UserManualArangosh < / li > <nl> - / / / < li > @ ref UserManualWebInterface < / li > <nl> - / / / < li > @ ref ShellCollection < / li > <nl> - / / / < li > @ ref ShellDocument < / li > <nl> - / / / < li > @ ref ShellEdge < / li > <nl> - / / / < li > @ ref SimpleQueries < / li > <nl> - / / / < li > @ ref Aql < / li > <nl> - / / / < li > @ ref UserManualActions < / li > <nl> - / / / <nl> - / / / @ latexonly \ appendix @ endlatexonly <nl> - / / / < li > @ ref CommandLine < / li > <nl> - / / / < li > @ ref Glossary < / li > <nl> - / / / < / ul > <nl> - / / / @ else <nl> - / / / @ copydetails FirstStepsArangoDBTOC <nl> - / / / @ copydetails UserManualArangoshTOC <nl> - / / / @ copydetails UserManualWebInterfaceTOC <nl> - / / / @ copydetails ShellCollectionTOC <nl> - / / / @ copydetails ShellDocumentTOC <nl> - / / / @ copydetails ShellEdgeTOC <nl> - / / / @ copydetails SimpleQueriesTOC <nl> - / / / @ copydetails AqlTOC <nl> - / / / @ copydetails UserManualActionsTOC <nl> - / / / @ copydetails CommandLineTOC <nl> - / / / @ endif <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / - - SECTION - - USER MANUAL ARANGOSH <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page UserManualArangosh The Arango Shell <nl> / / / <nl> + / / / @ NAVIGATE_UserManualArangosh <nl> / / / @ EMBEDTOC { UserManualArangoshTOC } <nl> / / / <nl> / / / @ section UserManualArangoshOutput Arango Shell Output <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ page UserManualActions Arango Actions <nl> / / / <nl> + / / / @ NAVIGATE_UserManualActions <nl> + / / / <nl> / / / Please note , that user Actions in ArangoDB are still preliminary and details <nl> / / / are subject to change . <nl> / / / <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / <nl> / / / The client API or browser sends a HTTP request to the ArangoDB server and <nl> - / / / the server returns a HTTP response to the client . 
A HTTP requests consists <nl> + / / / the server returns a HTTP response to the client . A HTTP request consists <nl> / / / of a method , normally @ LIT { GET } or @ LIT { POST } when using a browser , and a <nl> / / / request path like @ LIT { / hello / world } . For a real Web server there are a zillion <nl> / / / of other thing to consider , we will ignore this for the moment . The HTTP <nl> <nl> / / / . . . <nl> / / / } , <nl> / / / " options " : { <nl> - / / / " Hallo " : " world " <nl> + / / / " Hallo " : " World " <nl> / / / } <nl> / / / } <nl> / / / @ endcode <nl> <nl> / / / arangosh > db . _routing . save ( { <nl> / / / . . . . . . . . > url : " / " , <nl> / / / . . . . . . . . > action : { <nl> - / / / . . . . . . . . > controller : " org / arangodb / actions " , <nl> + / / / . . . . . . . . > controller : " org / arangodb / actions " , <nl> / / / . . . . . . . . > do : " redirectRequest " , <nl> / / / . . . . . . . . > options : { <nl> / / / . . . . . . . . > permanently : true , <nl> mmm a / arangod / HashIndex / hasharray . c <nl> ppp b / arangod / HashIndex / hasharray . c <nl> static void AddNewElement ( TRI_hasharray_t * array , void * element ) { <nl> static bool AllocateTable ( TRI_hasharray_t * array , size_t numElements ) { <nl> char * table ; <nl> <nl> - table = TRI_Allocate ( TRI_UNKNOWN_MEM_ZONE , CACHE_LINE_SIZE + ( array - > _elementSize * numElements ) , true ) ; <nl> + table = ( char * ) TRI_Allocate ( TRI_UNKNOWN_MEM_ZONE , CACHE_LINE_SIZE + ( array - > _elementSize * numElements ) , true ) ; <nl> + <nl> if ( table = = NULL ) { <nl> return false ; <nl> } <nl> mmm a / arangod / RestServer / ArangoServer . cpp <nl> ppp b / arangod / RestServer / ArangoServer . cpp <nl> void ArangoServer : : buildApplicationServer ( ) { <nl> int err = 0 ; <nl> string currentDir = FileUtils : : currentDirectory ( & err ) ; <nl> char * absoluteFile = TRI_GetAbsolutePath ( _pidFile . c_str ( ) , currentDir . c_str ( ) ) ; <nl> + <nl> if ( absoluteFile ! = 0 ) { <nl> _pidFile = string ( absoluteFile ) ; <nl> TRI_Free ( TRI_UNKNOWN_MEM_ZONE , absoluteFile ) ; <nl> - <nl> + <nl> LOGGER_DEBUG < < " using absolute pid file ' " < < _pidFile < < " ' " ; <nl> } <nl> else { <nl> old mode 100755 <nl> new mode 100644 <nl> old mode 100755 <nl> new mode 100644 <nl> index b67ad54df75 . . 4e46df7a753 <nl> mmm a / arangod / V8Server / v8 - vocbase . cpp <nl> ppp b / arangod / V8Server / v8 - vocbase . cpp <nl> static v8 : : Handle < v8 : : Value > JS_EnsureCapConstraintVocbaseCol ( v8 : : Arguments con <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief ensures that a bitarray index exists <nl> - / / / <nl> - / / / @ FUN { @ FA { collection } . ensureBitarray ( @ FA { field1 } , @ FA { value1 } , @ FA { field2 } , @ FA { value2 } , . . . , @ FA { fieldn } , @ FA { valuen } ) } <nl> - / / / <nl> - / / / Creates a bitarray index on all documents using attributes as paths to <nl> - / / / the fields . At least one attribute and one set of possible values must be given . <nl> - / / / All documents , which do not have the attribute path or <nl> - / / / with one or more values that are not suitable , are ignored . <nl> - / / / <nl> - / / / In case that the index was successfully created , the index identifier <nl> - / / / is returned . 
<nl> - / / / <nl> - / / / @ verbinclude fluent14 <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> static v8 : : Handle < v8 : : Value > EnsureBitarray ( v8 : : Arguments const & argv , bool supportUndef ) { <nl> static v8 : : Handle < v8 : : Value > EnsureBitarray ( v8 : : Arguments const & argv , bool sup <nl> return scope . Close ( theIndex ) ; <nl> } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief ensures that a bitarray index exists <nl> + / / / <nl> + / / / @ FUN { @ FA { collection } . ensureBitarray ( @ FA { field1 } , @ FA { value1 } , @ FA { field2 } , @ FA { value2 } , . . . , @ FA { fieldn } , @ FA { valuen } ) } <nl> + / / / <nl> + / / / Creates a bitarray index on documents using attributes as paths to the <nl> + / / / fields ( @ FA { field1 } , . . . , @ FA { fieldn } ) . A value ( @ FA { value1 } , . . . , @ FA { valuen } ) <nl> + / / / consists of an array of possible values that the field can take . At least <nl> + / / / one field and one set of possible values must be given . <nl> + / / / <nl> + / / / All documents , which do not have * all * of the attribute paths are ignored <nl> + / / / ( that is , are not part of the bitarray index , they are however stored within <nl> + / / / the collection ) . A document which contains all of the attribute paths yet <nl> + / / / has one or more values which are * not * part of the defined range of values <nl> + / / / will be rejected and the document will not inserted within the <nl> + / / / collection . Note that , if a bitarray index is created subsequent to <nl> + / / / any documents inserted in the given collection , then the creation of the <nl> + / / / index will fail if one or more documents are rejected ( due to <nl> + / / / attribute values being outside the designated range ) . <nl> + / / / <nl> + / / / In case that the index was successfully created , the index identifier is <nl> + / / / returned . <nl> + / / / <nl> + / / / In the example below we create a bitarray index with one field and that <nl> + / / / field can have the values of either ` 0 ` or ` 1 ` . Any document which has the <nl> + / / / attribute ` x ` defined and does not have a value of ` 0 ` or ` 1 ` will be <nl> + / / / rejected and therefore not inserted within the collection . Documents without <nl> + / / / the attribute ` x ` defined will not take part in the index . <nl> + / / / <nl> + / / / @ code <nl> + / / / arango > arangod > db . example . ensureBitarray ( " x " , [ 0 , 1 ] ) ; <nl> + / / / { <nl> + / / / " id " : " 2755894 / 3607862 " , <nl> + / / / " unique " : false , <nl> + / / / " type " : " bitarray " , <nl> + / / / " fields " : [ [ " x " , [ 0 , 1 ] ] ] , <nl> + / / / " undefined " : false , <nl> + / / / " isNewlyCreated " : true <nl> + / / / } <nl> + / / / @ endcode <nl> + / / / <nl> + / / / In the example below we create a bitarray index with one field and that <nl> + / / / field can have the values of either ` 0 ` , ` 1 ` or * other * ( indicated by <nl> + / / / ` [ ] ` ) . Any document which has the attribute ` x ` defined will take part in <nl> + / / / the index . Documents without the attribute ` x ` defined will not take part in <nl> + / / / the index . <nl> + / / / <nl> + / / / @ code <nl> + / / / arangod > db . example . 
ensureBitarray ( " x " , [ 0 , 1 , [ ] ] ) ; <nl> + / / / { <nl> + / / / " id " : " 2755894 / 4263222 " , <nl> + / / / " unique " : false , <nl> + / / / " type " : " bitarray " , <nl> + / / / " fields " : [ [ " x " , [ 0 , 1 , [ ] ] ] ] , <nl> + / / / " undefined " : false , <nl> + / / / " isNewlyCreated " : true <nl> + / / / } <nl> + / / / @ endcode <nl> + / / / <nl> + / / / In the example below we create a bitarray index with two fields . Field ` x ` <nl> + / / / can have the values of either ` 0 ` or ` 1 ` ; while field ` y ` can have the values <nl> + / / / of ` 2 ` or ` " a " ` . A document which does not have * both * attributes ` x ` and ` y ` <nl> + / / / will not take part within the index . A document which does have both attributes <nl> + / / / ` x ` and ` y ` defined must have the values ` 0 ` or ` 1 ` for attribute ` x ` and <nl> + / / / ` 2 ` or ` a ` for attribute ` y ` , otherwise the document will not be inserted <nl> + / / / within the collection . <nl> + / / / <nl> + / / / @ code <nl> + / / / arangod > db . example . ensureBitarray ( " x " , [ 0 , 1 ] , " y " , [ 2 , " a " ] ) ; <nl> + / / / { <nl> + / / / " id " : " 2755894 / 5246262 " , <nl> + / / / " unique " : false , <nl> + / / / " type " : " bitarray " , <nl> + / / / " fields " : [ [ " x " , [ 0 , 1 ] ] , [ " y " , [ 0 , 1 ] ] ] , <nl> + / / / " undefined " : false , <nl> + / / / " isNewlyCreated " : false <nl> + / / / } <nl> + / / / @ endcode <nl> + / / / <nl> + / / / In the example below we create a bitarray index with two fields . Field ` x ` <nl> + / / / can have the values of either ` 0 ` or ` 1 ` ; while field ` y ` can have the <nl> + / / / values of ` 2 ` , ` " a " ` or * other * . A document which does not have * both * <nl> + / / / attributes ` x ` and ` y ` will not take part within the index . A document <nl> + / / / which does have both attributes ` x ` and ` y ` defined must have the values ` 0 ` <nl> + / / / or ` 1 ` for attribute ` x ` and any value for attribute ` y ` will be acceptable , <nl> + / / / otherwise the document will not be inserted within the collection . <nl> + / / / <nl> + / / / @ code <nl> + / / / arangod > db . example . ensureBitarray ( " x " , [ 0 , 1 ] , " y " , [ 2 , " a " , [ ] ] ) ; <nl> + / / / { <nl> + / / / " id " : " 2755894 / 5770550 " , <nl> + / / / " unique " : false , <nl> + / / / " type " : " bitarray " , <nl> + / / / " fields " : [ [ " x " , [ 0 , 1 ] ] , [ " y " , [ 2 , " a " , [ ] ] ] ] , <nl> + / / / " undefined " : false , <nl> + / / / " isNewlyCreated " : true <nl> + / / / } <nl> + / / / @ endcode <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> static v8 : : Handle < v8 : : Value > JS_EnsureBitarrayVocbaseCol ( v8 : : Arguments const & argv ) { <nl> return EnsureBitarray ( argv , false ) ; <nl> } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief ensures that a bitarray index exists <nl> + / / / <nl> + / / / @ FUN { @ FA { collection } . ensureUndefBitarray ( @ FA { field1 } , @ FA { value1 } , @ FA { field2 } , @ FA { value2 } , . . . , @ FA { fieldn } , @ FA { valuen } ) } <nl> + / / / <nl> + / / / Creates a bitarray index on all documents using attributes as paths to <nl> + / / / the fields . At least one attribute and one set of possible values must be given . 
<nl> + / / / All documents , which do not have the attribute path or <nl> + / / / with one or more values that are not suitable , are ignored . <nl> + / / / <nl> + / / / In case that the index was successfully created , the index identifier <nl> + / / / is returned . <nl> + / / / <nl> + / / / @ verbinclude fluent14 <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> static v8 : : Handle < v8 : : Value > JS_EnsureUndefBitarrayVocbaseCol ( v8 : : Arguments const & argv ) { <nl> return EnsureBitarray ( argv , true ) ; <nl> } <nl> mmm a / configure . ac <nl> ppp b / configure . ac <nl> dnl mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> AC_CONFIG_AUX_DIR ( [ config ] ) <nl> AC_CONFIG_MACRO_DIR ( [ m4 ] ) <nl> <nl> + CURRENT_DIR = ` pwd ` <nl> + AC_MSG_NOTICE ( [ configure started in ' $ CURRENT_DIR ] ) <nl> + AC_MSG_NOTICE ( [ with CPPFLAGS = ' $ CPPFLAGS ' ] ) <nl> + AC_MSG_NOTICE ( [ with CFLAGS = ' $ CFLAGS ' ] ) <nl> + AC_MSG_NOTICE ( [ with CXXFLAGS = ' $ CXXFLAGS ' ] ) <nl> + AC_MSG_NOTICE ( [ with LDFLAGS = ' $ LDFLAGS ' ] ) <nl> + <nl> dnl = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> dnl - - SECTION - - 3RD - PARTY LIBRARIES <nl> dnl = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> mmm a / lib / BasicsC / files . c <nl> ppp b / lib / BasicsC / files . c <nl> char * TRI_GetAbsolutePath ( char const * file , char const * cwd ) { <nl> bool isAbsolute ; <nl> <nl> # ifdef _WIN32 <nl> - # error please validate this function for Windows <nl> + # error please validate if this works on Windows <nl> # endif <nl> <nl> if ( file = = NULL | | * file = = ' \ 0 ' ) { <nl> mmm a / m4 / configure . basics <nl> ppp b / m4 / configure . basics <nl> AC_PROG_INSTALL <nl> AC_PROG_LN_S <nl> AC_PROG_MAKE_SET <nl> <nl> - LDFLAGS = - g <nl> - <nl> AC_ARG_ENABLE ( error - on - warning , <nl> AS_HELP_STRING ( [ - - enable - error - on - warning ] , [ treat warnings as errors ( default : no ) ] ) , <nl> [ tr_WERROR = " $ { enableval : - yes } " ] , <nl> mmm a / m4 / configure . static <nl> ppp b / m4 / configure . static <nl> dnl enable static programs if possible <nl> dnl mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> if test " x $ tr_STATIC_PROGRAMS " = xyes ; then <nl> - LDFLAGS = " - static " <nl> + LDFLAGS = " $ LDFLAGS - static " <nl> fi <nl> <nl> dnl mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> | Merge remote - tracking branch ' origin / 1 . 1 ' into devel | arangodb/arangodb | 621e4409bc5022ddef52bfff5563f63b7584880b | 2012-12-18T12:37:34Z |
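The records above and below all follow one fixed layout, worth making explicit before the next row: each record is a single line of five ` | `-separated fields in the order diff | msg | repo | sha | time; inside the diff field the token `<nl>` stands for a literal newline, and the unified-diff headers `--- a/...` / `+++ b/...` appear masked as `mmm a / ...` / `ppp b / ...`. The short sketch below is not part of the corpus — the helper name `decode_row`, the toy row, and the masking rules are assumptions inferred from the rows themselves — but it shows one plausible way to recover a readable diff from a record under those assumptions.

```python
# Minimal decoding sketch (illustrative, not shipped with the dataset).
# Assumed, based on the rows in this dump: five " | "-separated fields per
# record (diff, msg, repo, sha, time); "<nl>" marks a newline inside the
# diff; "mmm " / "ppp " are masked "--- " / "+++ " diff headers. The
# spaced-out punctuation inside paths is tokenization and is left untouched.
import re

def decode_row(row: str) -> dict:
    # rsplit from the right tolerates " | " occurring inside the diff text,
    # but assumes msg, repo, sha and time contain no " | " themselves.
    diff, msg, repo, sha, time = row.rstrip(" |\n").rsplit(" | ", 4)
    lines = []
    for ln in re.split(r"\s*<nl>\s*", diff):
        if ln.startswith("mmm "):        # masked "--- a/..." header
            ln = "--- " + ln[4:]
        elif ln.startswith("ppp "):      # masked "+++ b/..." header
            ln = "+++ " + ln[4:]
        lines.append(ln)
    return {"diff": "\n".join(lines), "msg": msg,
            "repo": repo, "sha": sha, "time": time}

# Toy row in the same shape as the real ones:
toy = ("mmm a / f . c <nl> ppp b / f . c <nl> - old line <nl> + new line "
       "| fix f | me/repo | 0123abcd | 2012-12-18T12:37:34Z |")
rec = decode_row(toy)
print(rec["msg"])    # -> fix f
print(rec["diff"])   # -> four-line diff with unmasked --- / +++ headers
```

Run against the ArangoDB record that closes above, the same helper would recover the multi-file diff along with the message `Merge remote - tracking branch ' origin / 1 . 1 ' into devel`, the repo `arangodb/arangodb`, and the commit time `2012-12-18T12:37:34Z`.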
new file mode 100644 <nl> index 000000000000 . . 77d9811be2a5 <nl> mmm / dev / null <nl> ppp b / SpatialFullDilatedConvolution . cu <nl> <nl> + # include " THCUNN . h " <nl> + # include " im2col . h " <nl> + <nl> + # include " THCHalf . h " <nl> + # include " THCHalfAutoNumerics . cuh " <nl> + <nl> + # include " generic / SpatialFullDilatedConvolution . cu " <nl> + # include " THCGenerateFloatTypes . h " <nl> mmm a / VolumetricFullConvolution . cu <nl> ppp b / VolumetricFullConvolution . cu <nl> <nl> # include " THCUNN . h " <nl> # include " common . h " <nl> - # include " vol2col . h " <nl> # include " THCHalf . h " <nl> # include " THCHalfAutoNumerics . cuh " <nl> <nl> new file mode 100644 <nl> index 000000000000 . . 47173f24633b <nl> mmm / dev / null <nl> ppp b / VolumetricFullDilatedConvolution . cu <nl> <nl> + # include " THCUNN . h " <nl> + # include " common . h " <nl> + # include " vol2col . h " <nl> + # include " THCHalf . h " <nl> + # include " THCHalfAutoNumerics . cuh " <nl> + <nl> + # include " generic / VolumetricFullDilatedConvolution . cu " <nl> + # include " THCGenerateFloatTypes . h " <nl> mmm a / generic / SpatialFullConvolution . cu <nl> ppp b / generic / SpatialFullConvolution . cu <nl> <nl> # define THC_GENERIC_FILE " generic / SpatialFullConvolution . cu " <nl> # else <nl> <nl> - static inline void THNN_ ( SpatialFullConvolution_shapeCheck ) ( <nl> - THCState * state , <nl> - THCTensor * input , THCTensor * gradOutput , <nl> - THCTensor * weight , THCTensor * bias , <nl> - int kH , int kW , int dH , int dW , int padH , int padW , <nl> - int adjH , int adjW ) { <nl> - THArgCheck ( kW > 0 & & kH > 0 , 9 , <nl> - " kernel size should be greater than zero , but got kH : % d kW : % d " , kH , kW ) ; <nl> - THArgCheck ( dW > 0 & & dH > 0 , 11 , <nl> - " stride should be greater than zero , but got dH : % d dW : % d " , dH , dW ) ; <nl> - THArgCheck ( adjW < dW & & adjH < dH , 15 , <nl> - " output adjustment must be smaller than stride , but got adjH : % d adjW : % d dH : % d dW : % d " , <nl> - adjH , adjW , dH , dW ) ; <nl> - THArgCheck ( THCTensor_ ( isContiguous ) ( state , weight ) , 4 , <nl> - " weight tensor has to be contiguous " ) ; <nl> - THArgCheck ( ! bias | | THCTensor_ ( isContiguous ) ( state , bias ) , 5 , <nl> - " bias tensor has to be contiguous " ) ; <nl> - THCUNN_argCheck ( state , weight - > nDimension = = 2 | | weight - > nDimension = = 4 , 5 , weight , <nl> - " 2D or 4D weight tensor expected , but got : % s " ) ; <nl> - <nl> - if ( bias ! = NULL ) { <nl> - THCUNN_check_dim_size ( state , bias , 1 , 0 , weight - > size [ 1 ] ) ; <nl> - } <nl> - <nl> - int ndim = input - > nDimension ; <nl> - int dimf = 0 ; <nl> - int dimh = 1 ; <nl> - int dimw = 2 ; <nl> - <nl> - if ( ndim = = 4 ) { <nl> - dimf + + ; <nl> - dimh + + ; <nl> - dimw + + ; <nl> - } <nl> - <nl> - THCUNN_argCheck ( state , ndim = = 3 | | ndim = = 4 , 2 , input , <nl> - " 3D or 4D input tensor expected but got : % s " ) ; <nl> - <nl> - long nInputPlane = weight - > size [ 0 ] ; <nl> - long inputHeight = input - > size [ dimh ] ; <nl> - long inputWidth = input - > size [ dimw ] ; <nl> - long nOutputPlane = weight - > size [ 1 ] ; <nl> - long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + kH + adjH ; <nl> - long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + kW + adjW ; <nl> - <nl> - if ( outputWidth < 1 | | outputHeight < 1 ) <nl> - THError ( " Given input size : ( % d x % d x % d ) . " <nl> - " Calculated output size : ( % d x % d x % d ) . 
Output size is too small " , <nl> - nInputPlane , inputHeight , inputWidth , nOutputPlane , outputHeight , outputWidth ) ; <nl> - <nl> - THCUNN_check_dim_size ( state , input , ndim , dimf , nInputPlane ) ; <nl> - <nl> - if ( gradOutput ! = NULL ) { <nl> - THCUNN_check_dim_size ( state , gradOutput , ndim , dimf , nOutputPlane ) ; <nl> - THCUNN_check_dim_size ( state , gradOutput , ndim , dimh , outputHeight ) ; <nl> - THCUNN_check_dim_size ( state , gradOutput , ndim , dimw , outputWidth ) ; <nl> - } <nl> - } <nl> - <nl> void THNN_ ( SpatialFullConvolution_updateOutput ) ( <nl> THCState * state , <nl> THCTensor * input , <nl> void THNN_ ( SpatialFullConvolution_updateOutput ) ( <nl> int padW , int padH , <nl> int adjW , int adjH ) <nl> { <nl> - <nl> - int nInputPlane = THCTensor_ ( size ) ( state , weight , 0 ) ; <nl> - int nOutputPlane = THCTensor_ ( size ) ( state , weight , 1 ) ; <nl> - <nl> - THCUNN_assertSameGPU ( state , 6 , input , output , weight , <nl> - bias , columns , ones ) ; <nl> - THNN_ ( SpatialFullConvolution_shapeCheck ) <nl> - ( state , input , NULL , weight , bias , kH , kW , dH , dW , padH , padW , adjH , adjW ) ; <nl> - <nl> - input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> - weight = THCTensor_ ( newContiguous ) ( state , weight ) ; <nl> - bias = bias ? THCTensor_ ( newContiguous ) ( state , bias ) : bias ; <nl> - <nl> - int batch = 1 ; <nl> - if ( input - > nDimension = = 3 ) { <nl> - / / Force batch <nl> - batch = 0 ; <nl> - THCTensor_ ( resize4d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] ) ; <nl> - } <nl> - <nl> - long inputWidth = input - > size [ 3 ] ; <nl> - long inputHeight = input - > size [ 2 ] ; <nl> - long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + kW + adjW ; <nl> - long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + kH + adjH ; <nl> - <nl> - / / Batch size + input planes <nl> - long batchSize = input - > size [ 0 ] ; <nl> - <nl> - / / Resize output <nl> - THCTensor_ ( resize4d ) ( state , output , batchSize , nOutputPlane , outputHeight , outputWidth ) ; <nl> - <nl> - / / Resize temporary columns <nl> - THCTensor_ ( resize2d ) ( state , columns , nOutputPlane * kW * kH , inputHeight * inputWidth ) ; <nl> - <nl> - / / Define a buffer of ones , for bias accumulation <nl> - / / Note : this buffer can be shared with other modules , it only ever gets increased , <nl> - / / and always contains ones . <nl> - if ( ones - > nDimension ! = 2 | | ones - > size [ 0 ] * ones - > size [ 1 ] < outputHeight * outputWidth ) { <nl> - / / Resize plane and fill with ones . . . <nl> - THCTensor_ ( resize2d ) ( state , ones , outputHeight , outputWidth ) ; <nl> - THCTensor_ ( fill ) ( state , ones , ScalarConvert < int , real > : : to ( 1 ) ) ; <nl> - } <nl> - <nl> - / / Helpers <nl> - THCTensor * input_n = THCTensor_ ( new ) ( state ) ; <nl> - THCTensor * output_n = THCTensor_ ( new ) ( state ) ; <nl> - <nl> - / / For each elt in batch , do : <nl> - for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> - / / Matrix mulitply per output : <nl> - THCTensor_ ( select ) ( state , input_n , input , 0 , elt ) ; <nl> - THCTensor_ ( select ) ( state , output_n , output , 0 , elt ) ; <nl> - <nl> - / / M , N , K are dims of matrix A and B <nl> - / / ( see http : / / docs . nvidia . 
com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> - long m = weight - > size [ 1 ] * weight - > size [ 2 ] * weight - > size [ 3 ] ; <nl> - long n = columns - > size [ 1 ] ; <nl> - long k = weight - > size [ 0 ] ; <nl> - <nl> - / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> - # ifdef THC_REAL_IS_FLOAT <nl> - THCudaBlas_Sgemm ( <nl> - # elif defined ( THC_REAL_IS_HALF ) <nl> - THCudaBlas_Hgemm ( <nl> - # elif defined ( THC_REAL_IS_DOUBLE ) <nl> - THCudaBlas_Dgemm ( <nl> - # endif <nl> - state , <nl> - ' n ' , ' t ' , <nl> - n , m , k , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , input_n ) , n , <nl> - THCTensor_ ( data ) ( state , weight ) , m , <nl> - ScalarConvert < int , real > : : to ( 0 ) , <nl> - THCTensor_ ( data ) ( state , columns ) , n <nl> - ) ; <nl> - <nl> - / / Unpack columns back into input : <nl> - col2im < real , accreal > ( <nl> - THCState_getCurrentStream ( state ) , <nl> - THCTensor_ ( data ) ( state , columns ) , <nl> - nOutputPlane , outputHeight , outputWidth , kH , kW , padH , padW , dH , dW , <nl> - 1 , 1 , THCTensor_ ( data ) ( state , output_n ) <nl> - ) ; <nl> - <nl> - / / Do Bias after : <nl> - / / M , N , K are dims of matrix A and B <nl> - / / ( see http : / / docs . nvidia . com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> - long m_ = nOutputPlane ; <nl> - long n_ = outputHeight * outputWidth ; <nl> - long k_ = 1 ; <nl> - <nl> - / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> - if ( bias ) { <nl> - # ifdef THC_REAL_IS_FLOAT <nl> - THCudaBlas_Sgemm ( <nl> - # elif defined ( THC_REAL_IS_HALF ) <nl> - THCudaBlas_Hgemm ( <nl> - # elif defined ( THC_REAL_IS_DOUBLE ) <nl> - THCudaBlas_Dgemm ( <nl> - # endif <nl> - state , <nl> - ' t ' , ' n ' , <nl> - n_ , m_ , k_ , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , ones ) , k_ , <nl> - THCTensor_ ( data ) ( state , bias ) , k_ , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , output_n ) , n_ <nl> - ) ; <nl> - } <nl> - } <nl> - <nl> - / / Free <nl> - THCTensor_ ( free ) ( state , input_n ) ; <nl> - THCTensor_ ( free ) ( state , output_n ) ; <nl> - <nl> - / / Resize output <nl> - if ( batch = = 0 ) { <nl> - THCTensor_ ( resize3d ) ( state , output , nOutputPlane , outputHeight , outputWidth ) ; <nl> - THCTensor_ ( resize3d ) ( state , input , nInputPlane , inputHeight , inputWidth ) ; <nl> - } <nl> - <nl> - THCTensor_ ( free ) ( state , input ) ; <nl> - THCTensor_ ( free ) ( state , weight ) ; <nl> - if ( bias ) THCTensor_ ( free ) ( state , bias ) ; <nl> - <nl> + THNN_ ( SpatialFullDilatedConvolution_updateOutput ) ( <nl> + state , input , output , weight , bias , columns , ones , <nl> + kW , kH , dW , dH , padW , padH , 1 , 1 , adjW , adjH ) ; <nl> } <nl> <nl> void THNN_ ( SpatialFullConvolution_updateGradInput ) ( <nl> void THNN_ ( SpatialFullConvolution_updateGradInput ) ( <nl> int padW , int padH , <nl> int adjW , int adjH ) <nl> { <nl> - int nInputPlane = THCTensor_ ( size ) ( state , weight , 0 ) ; <nl> - int nOutputPlane = THCTensor_ ( size ) ( state , weight , 1 ) ; <nl> - <nl> - THCUNN_assertSameGPU ( state , 5 , input , gradOutput , weight , <nl> - gradColumns , gradInput ) ; <nl> - THNN_ ( SpatialFullConvolution_shapeCheck ) <nl> - ( state , input , gradOutput , weight , NULL , kH , kW , dH , dW , padH , padW , adjH , adjW ) ; <nl> - <nl> - input = THCTensor_ ( 
newContiguous ) ( state , input ) ; <nl> - gradOutput = THCTensor_ ( newContiguous ) ( state , gradOutput ) ; <nl> - weight = THCTensor_ ( newContiguous ) ( state , weight ) ; <nl> - int batch = 1 ; <nl> - if ( input - > nDimension = = 3 ) { <nl> - / / Force batch <nl> - batch = 0 ; <nl> - THCTensor_ ( resize4d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] ) ; <nl> - THCTensor_ ( resize4d ) ( state , gradOutput , 1 , gradOutput - > size [ 0 ] , gradOutput - > size [ 1 ] , gradOutput - > size [ 2 ] ) ; <nl> - } <nl> - <nl> - long inputWidth = input - > size [ 3 ] ; <nl> - long inputHeight = input - > size [ 2 ] ; <nl> - long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + kW + adjW ; <nl> - long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + kH + adjH ; <nl> - <nl> - / / Batch size + input planes <nl> - long batchSize = input - > size [ 0 ] ; <nl> - <nl> - / / Resize output <nl> - THCTensor_ ( resize4d ) ( state , gradInput , batchSize , nInputPlane , inputHeight , inputWidth ) ; <nl> - <nl> - / / Resize temporary columns <nl> - THCTensor_ ( resize2d ) ( state , gradColumns , nOutputPlane * kW * kH , inputHeight * inputWidth ) ; <nl> - <nl> - / / Helpers <nl> - THCTensor * gradInput_n = THCTensor_ ( new ) ( state ) ; <nl> - THCTensor * gradOutput_n = THCTensor_ ( new ) ( state ) ; <nl> - <nl> - / / For each elt in batch , do : <nl> - for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> - / / Matrix mulitply per sample : <nl> - THCTensor_ ( select ) ( state , gradInput_n , gradInput , 0 , elt ) ; <nl> - THCTensor_ ( select ) ( state , gradOutput_n , gradOutput , 0 , elt ) ; <nl> - <nl> - / / Extract columns : <nl> - im2col ( <nl> - THCState_getCurrentStream ( state ) , <nl> - THCTensor_ ( data ) ( state , gradOutput_n ) , <nl> - nOutputPlane , outputHeight , outputWidth , kH , kW , padH , padW , dH , dW , <nl> - 1 , 1 , THCTensor_ ( data ) ( state , gradColumns ) <nl> - ) ; <nl> - <nl> - <nl> - / / M , N , K are dims of matrix A and B <nl> - / / ( see http : / / docs . nvidia . 
com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> - long m = weight - > size [ 0 ] ; <nl> - long n = gradColumns - > size [ 1 ] ; <nl> - long k = weight - > size [ 1 ] * weight - > size [ 2 ] * weight - > size [ 3 ] ; <nl> - <nl> - / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> - # ifdef THC_REAL_IS_FLOAT <nl> - THCudaBlas_Sgemm ( <nl> - # elif defined ( THC_REAL_IS_HALF ) <nl> - THCudaBlas_Hgemm ( <nl> - # elif defined ( THC_REAL_IS_DOUBLE ) <nl> - THCudaBlas_Dgemm ( <nl> - # endif <nl> - state , <nl> - ' n ' , ' n ' , <nl> - n , m , k , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , gradColumns ) , n , <nl> - THCTensor_ ( data ) ( state , weight ) , k , <nl> - ScalarConvert < int , real > : : to ( 0 ) , <nl> - THCTensor_ ( data ) ( state , gradInput_n ) , n <nl> - ) ; <nl> - } <nl> - <nl> - <nl> - / / Free <nl> - THCTensor_ ( free ) ( state , gradInput_n ) ; <nl> - THCTensor_ ( free ) ( state , gradOutput_n ) ; <nl> - <nl> - / / Resize output <nl> - if ( batch = = 0 ) { <nl> - THCTensor_ ( resize3d ) ( state , gradOutput , nOutputPlane , outputHeight , outputWidth ) ; <nl> - THCTensor_ ( resize3d ) ( state , input , nInputPlane , inputHeight , inputWidth ) ; <nl> - THCTensor_ ( resize3d ) ( state , gradInput , nInputPlane , inputHeight , inputWidth ) ; <nl> - } <nl> - <nl> - THCTensor_ ( free ) ( state , input ) ; <nl> - THCTensor_ ( free ) ( state , gradOutput ) ; <nl> - THCTensor_ ( free ) ( state , weight ) ; <nl> + THNN_ ( SpatialFullDilatedConvolution_updateGradInput ) ( <nl> + state , input , gradOutput , gradInput , weight , gradColumns , <nl> + kW , kH , dW , dH , padW , padH , 1 , 1 , adjW , adjH ) ; <nl> } <nl> <nl> <nl> void THNN_ ( SpatialFullConvolution_accGradParameters ) ( <nl> int adjW , int adjH , <nl> accreal scale_ ) <nl> { <nl> - real scale = ScalarConvert < accreal , real > : : to ( scale_ ) ; <nl> - int nInputPlane = THCTensor_ ( size ) ( state , gradWeight , 0 ) ; <nl> - int nOutputPlane = THCTensor_ ( size ) ( state , gradWeight , 1 ) ; <nl> - <nl> - THCUNN_assertSameGPU ( state , 6 , input , gradOutput , gradWeight , <nl> - gradBias , columns , ones ) ; <nl> - THNN_ ( SpatialFullConvolution_shapeCheck ) <nl> - ( state , input , gradOutput , gradWeight , gradBias , kH , kW , dH , dW , padH , padW , adjH , adjW ) ; <nl> - <nl> - THArgCheck ( THCTensor_ ( isContiguous ) ( state , gradWeight ) , 4 , " gradWeight needs to be contiguous " ) ; <nl> - if ( gradBias ) <nl> - THArgCheck ( THCTensor_ ( isContiguous ) ( state , gradBias ) , 5 , " gradBias needs to be contiguous " ) ; <nl> - input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> - gradOutput = THCTensor_ ( newContiguous ) ( state , gradOutput ) ; <nl> - int batch = 1 ; <nl> - if ( input - > nDimension = = 3 ) { <nl> - / / Force batch <nl> - batch = 0 ; <nl> - THCTensor_ ( resize4d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] ) ; <nl> - THCTensor_ ( resize4d ) ( state , gradOutput , 1 , gradOutput - > size [ 0 ] , gradOutput - > size [ 1 ] , gradOutput - > size [ 2 ] ) ; <nl> - } <nl> - <nl> - long inputWidth = input - > size [ 3 ] ; <nl> - long inputHeight = input - > size [ 2 ] ; <nl> - long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + kW + adjW ; <nl> - long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + kH + adjH ; <nl> - <nl> - / / Batch size + input planes <nl> - long batchSize = input - > size [ 0 ] ; <nl> - <nl> - / / Define a buffer of 
ones , for bias accumulation <nl> - if ( ones - > nDimension ! = 2 | | ones - > size [ 0 ] * ones - > size [ 1 ] < outputHeight * outputWidth ) { <nl> - / / Resize plane and fill with ones . . . <nl> - THCTensor_ ( resize2d ) ( state , ones , outputHeight , outputWidth ) ; <nl> - THCTensor_ ( fill ) ( state , ones , ScalarConvert < int , real > : : to ( 1 ) ) ; <nl> - } <nl> - <nl> - / / Resize temporary columns <nl> - THCTensor_ ( resize2d ) ( state , columns , nOutputPlane * kW * kH , inputHeight * inputWidth ) ; <nl> - <nl> - / / Helpers <nl> - THCTensor * input_n = THCTensor_ ( new ) ( state ) ; <nl> - THCTensor * gradOutput_n = THCTensor_ ( new ) ( state ) ; <nl> - <nl> - / / For each elt in batch , do : <nl> - for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> - / / Matrix mulitply per output : <nl> - THCTensor_ ( select ) ( state , input_n , input , 0 , elt ) ; <nl> - THCTensor_ ( select ) ( state , gradOutput_n , gradOutput , 0 , elt ) ; <nl> - <nl> - / / Extract columns : <nl> - im2col ( <nl> - THCState_getCurrentStream ( state ) , <nl> - THCTensor_ ( data ) ( state , gradOutput_n ) , <nl> - nOutputPlane , outputHeight , outputWidth , kH , kW , padH , padW , dH , dW , <nl> - 1 , 1 , THCTensor_ ( data ) ( state , columns ) <nl> - ) ; <nl> - <nl> - / / M , N , K are dims of matrix A and B <nl> - / / ( see http : / / docs . nvidia . com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> - long n = columns - > size [ 0 ] ; / / nOutputPlane * kh * kw <nl> - long m = input_n - > size [ 0 ] ; / / nInputPlane <nl> - long k = columns - > size [ 1 ] ; / / inputHeight * inputWidth <nl> - <nl> - / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> - # ifdef THC_REAL_IS_FLOAT <nl> - THCudaBlas_Sgemm ( <nl> - # elif defined ( THC_REAL_IS_HALF ) <nl> - THCudaBlas_Hgemm ( <nl> - # elif defined ( THC_REAL_IS_DOUBLE ) <nl> - THCudaBlas_Dgemm ( <nl> - # endif <nl> - state , <nl> - ' t ' , ' n ' , <nl> - n , m , k , <nl> - scale , <nl> - THCTensor_ ( data ) ( state , columns ) , k , <nl> - THCTensor_ ( data ) ( state , input_n ) , k , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , gradWeight ) , n <nl> - ) ; <nl> - <nl> - / / Do Bias : <nl> - / / M , N , K are dims of matrix A and B <nl> - / / ( see http : / / docs . nvidia . 
com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> - long m_ = nOutputPlane ; <nl> - long k_ = outputHeight * outputWidth ; <nl> - <nl> - / / Do GEMV ( note : this is a bit confusing because gemv assumes column - major matrices ) <nl> - if ( gradBias ) { <nl> - # if defined ( THC_REAL_IS_FLOAT ) | | defined ( THC_REAL_IS_DOUBLE ) <nl> - # ifdef THC_REAL_IS_FLOAT <nl> - THCudaBlas_Sgemv ( <nl> - # elif defined ( THC_REAL_IS_DOUBLE ) <nl> - THCudaBlas_Dgemv ( <nl> - # endif <nl> - state , <nl> - ' t ' , <nl> - k_ , m_ , <nl> - scale , <nl> - THCTensor_ ( data ) ( state , gradOutput_n ) , k_ , <nl> - THCTensor_ ( data ) ( state , ones ) , 1 , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , gradBias ) , 1 <nl> - ) ; <nl> - # endif <nl> - # ifdef THC_REAL_IS_HALF <nl> - THCudaBlas_Hgemm ( <nl> - state , <nl> - ' t ' , ' n ' , <nl> - m_ , 1 , k_ , <nl> - scale , <nl> - THCTensor_ ( data ) ( state , gradOutput_n ) , k_ , <nl> - THCTensor_ ( data ) ( state , ones ) , k_ , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , gradBias ) , m_ <nl> - ) ; <nl> - # endif <nl> - } <nl> - } <nl> - <nl> - / / Free <nl> - THCTensor_ ( free ) ( state , input_n ) ; <nl> - THCTensor_ ( free ) ( state , gradOutput_n ) ; <nl> - <nl> - / / Resize <nl> - if ( batch = = 0 ) { <nl> - THCTensor_ ( resize3d ) ( state , gradOutput , nOutputPlane , outputHeight , outputWidth ) ; <nl> - THCTensor_ ( resize3d ) ( state , input , nInputPlane , inputHeight , inputWidth ) ; <nl> - } <nl> - <nl> - THCTensor_ ( free ) ( state , input ) ; <nl> - THCTensor_ ( free ) ( state , gradOutput ) ; <nl> + THNN_ ( SpatialFullDilatedConvolution_accGradParameters ) ( <nl> + state , input , gradOutput , gradWeight , gradBias , <nl> + columns , ones , <nl> + kW , kH , dW , dH , padW , padH , 1 , 1 , adjW , adjH , scale_ ) ; <nl> } <nl> <nl> # endif <nl> new file mode 100644 <nl> index 000000000000 . . 322a213efacd <nl> mmm / dev / null <nl> ppp b / generic / SpatialFullDilatedConvolution . cu <nl> <nl> + # ifndef THC_GENERIC_FILE <nl> + # define THC_GENERIC_FILE " generic / SpatialFullDilatedConvolution . cu " <nl> + # else <nl> + <nl> + static inline void THNN_ ( SpatialFullDilatedConvolution_shapeCheck ) ( <nl> + THCState * state , <nl> + THCTensor * input , THCTensor * gradOutput , <nl> + THCTensor * weight , THCTensor * bias , <nl> + int kH , int kW , int dH , int dW , int padH , int padW , <nl> + int dilationH , int dilationW , <nl> + int adjH , int adjW ) { <nl> + THArgCheck ( kW > 0 & & kH > 0 , 9 , <nl> + " kernel size should be greater than zero , but got kH : % d kW : % d " , kH , kW ) ; <nl> + THArgCheck ( dW > 0 & & dH > 0 , 11 , <nl> + " stride should be greater than zero , but got dH : % d dW : % d " , dH , dW ) ; <nl> + THArgCheck ( adjW < dW & & adjH < dH , 15 , <nl> + " output adjustment must be smaller than stride , but got adjH : % d adjW : % d dH : % d dW : % d " , <nl> + adjH , adjW , dH , dW ) ; <nl> + THArgCheck ( dilationW > 0 & & dilationH > 0 , 15 , <nl> + " dilation should be greater than zero , but got dilationH : % d , dilationW : % d " , <nl> + dilationH , dilationW ) ; <nl> + THArgCheck ( THCTensor_ ( isContiguous ) ( state , weight ) , 4 , <nl> + " weight tensor has to be contiguous " ) ; <nl> + THArgCheck ( ! 
bias | | THCTensor_ ( isContiguous ) ( state , bias ) , 5 , <nl> + " bias tensor has to be contiguous " ) ; <nl> + THCUNN_argCheck ( state , weight - > nDimension = = 2 | | weight - > nDimension = = 4 , 5 , weight , <nl> + " 2D or 4D weight tensor expected , but got : % s " ) ; <nl> + <nl> + if ( bias ! = NULL ) { <nl> + THCUNN_check_dim_size ( state , bias , 1 , 0 , weight - > size [ 1 ] ) ; <nl> + } <nl> + <nl> + int ndim = input - > nDimension ; <nl> + int dimf = 0 ; <nl> + int dimh = 1 ; <nl> + int dimw = 2 ; <nl> + <nl> + if ( ndim = = 4 ) { <nl> + dimf + + ; <nl> + dimh + + ; <nl> + dimw + + ; <nl> + } <nl> + <nl> + THCUNN_argCheck ( state , ndim = = 3 | | ndim = = 4 , 2 , input , <nl> + " 3D or 4D input tensor expected but got : % s " ) ; <nl> + <nl> + long nInputPlane = weight - > size [ 0 ] ; <nl> + long inputHeight = input - > size [ dimh ] ; <nl> + long inputWidth = input - > size [ dimw ] ; <nl> + long nOutputPlane = weight - > size [ 1 ] ; <nl> + long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + ( dilationH * ( kH - 1 ) + 1 ) + adjH ; <nl> + long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + ( dilationW * ( kW - 1 ) + 1 ) + adjW ; <nl> + <nl> + if ( outputWidth < 1 | | outputHeight < 1 ) <nl> + THError ( " Given input size : ( % d x % d x % d ) . " <nl> + " Calculated output size : ( % d x % d x % d ) . Output size is too small " , <nl> + nInputPlane , inputHeight , inputWidth , nOutputPlane , outputHeight , outputWidth ) ; <nl> + <nl> + THCUNN_check_dim_size ( state , input , ndim , dimf , nInputPlane ) ; <nl> + <nl> + if ( gradOutput ! = NULL ) { <nl> + THCUNN_check_dim_size ( state , gradOutput , ndim , dimf , nOutputPlane ) ; <nl> + THCUNN_check_dim_size ( state , gradOutput , ndim , dimh , outputHeight ) ; <nl> + THCUNN_check_dim_size ( state , gradOutput , ndim , dimw , outputWidth ) ; <nl> + } <nl> + } <nl> + <nl> + void THNN_ ( SpatialFullDilatedConvolution_updateOutput ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * output , <nl> + THCTensor * weight , <nl> + THCTensor * bias , <nl> + THCTensor * columns , <nl> + THCTensor * ones , <nl> + int kW , int kH , <nl> + int dW , int dH , <nl> + int padW , int padH , <nl> + int dilationW , int dilationH , <nl> + int adjW , int adjH ) <nl> + { <nl> + <nl> + int nInputPlane = THCTensor_ ( size ) ( state , weight , 0 ) ; <nl> + int nOutputPlane = THCTensor_ ( size ) ( state , weight , 1 ) ; <nl> + <nl> + THCUNN_assertSameGPU ( state , 6 , input , output , weight , <nl> + bias , columns , ones ) ; <nl> + THNN_ ( SpatialFullDilatedConvolution_shapeCheck ) <nl> + ( state , input , NULL , weight , bias , kH , kW , dH , dW , padH , padW , dilationH , dilationW , adjH , adjW ) ; <nl> + <nl> + input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> + weight = THCTensor_ ( newContiguous ) ( state , weight ) ; <nl> + bias = bias ? 
THCTensor_ ( newContiguous ) ( state , bias ) : bias ; <nl> + <nl> + int batch = 1 ; <nl> + if ( input - > nDimension = = 3 ) { <nl> + / / Force batch <nl> + batch = 0 ; <nl> + THCTensor_ ( resize4d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] ) ; <nl> + } <nl> + <nl> + long inputWidth = input - > size [ 3 ] ; <nl> + long inputHeight = input - > size [ 2 ] ; <nl> + long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + ( dilationH * ( kH - 1 ) + 1 ) + adjH ; <nl> + long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + ( dilationW * ( kW - 1 ) + 1 ) + adjW ; <nl> + <nl> + / / Batch size + input planes <nl> + long batchSize = input - > size [ 0 ] ; <nl> + <nl> + / / Resize output <nl> + THCTensor_ ( resize4d ) ( state , output , batchSize , nOutputPlane , outputHeight , outputWidth ) ; <nl> + <nl> + / / Resize temporary columns <nl> + THCTensor_ ( resize2d ) ( state , columns , nOutputPlane * kW * kH , inputHeight * inputWidth ) ; <nl> + <nl> + / / Define a buffer of ones , for bias accumulation <nl> + / / Note : this buffer can be shared with other modules , it only ever gets increased , <nl> + / / and always contains ones . <nl> + if ( ones - > nDimension ! = 2 | | ones - > size [ 0 ] * ones - > size [ 1 ] < outputHeight * outputWidth ) { <nl> + / / Resize plane and fill with ones . . . <nl> + THCTensor_ ( resize2d ) ( state , ones , outputHeight , outputWidth ) ; <nl> + THCTensor_ ( fill ) ( state , ones , ScalarConvert < int , real > : : to ( 1 ) ) ; <nl> + } <nl> + <nl> + / / Helpers <nl> + THCTensor * input_n = THCTensor_ ( new ) ( state ) ; <nl> + THCTensor * output_n = THCTensor_ ( new ) ( state ) ; <nl> + <nl> + / / For each elt in batch , do : <nl> + for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> + / / Matrix mulitply per output : <nl> + THCTensor_ ( select ) ( state , input_n , input , 0 , elt ) ; <nl> + THCTensor_ ( select ) ( state , output_n , output , 0 , elt ) ; <nl> + <nl> + / / M , N , K are dims of matrix A and B <nl> + / / ( see http : / / docs . nvidia . com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> + long m = weight - > size [ 1 ] * weight - > size [ 2 ] * weight - > size [ 3 ] ; <nl> + long n = columns - > size [ 1 ] ; <nl> + long k = weight - > size [ 0 ] ; <nl> + <nl> + / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> + # ifdef THC_REAL_IS_FLOAT <nl> + THCudaBlas_Sgemm ( <nl> + # elif defined ( THC_REAL_IS_HALF ) <nl> + THCudaBlas_Hgemm ( <nl> + # elif defined ( THC_REAL_IS_DOUBLE ) <nl> + THCudaBlas_Dgemm ( <nl> + # endif <nl> + state , <nl> + ' n ' , ' t ' , <nl> + n , m , k , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , input_n ) , n , <nl> + THCTensor_ ( data ) ( state , weight ) , m , <nl> + ScalarConvert < int , real > : : to ( 0 ) , <nl> + THCTensor_ ( data ) ( state , columns ) , n <nl> + ) ; <nl> + <nl> + / / Unpack columns back into input : <nl> + col2im < real , accreal > ( <nl> + THCState_getCurrentStream ( state ) , <nl> + THCTensor_ ( data ) ( state , columns ) , <nl> + nOutputPlane , outputHeight , outputWidth , kH , kW , padH , padW , dH , dW , <nl> + dilationH , dilationW , THCTensor_ ( data ) ( state , output_n ) <nl> + ) ; <nl> + <nl> + / / Do Bias after : <nl> + / / M , N , K are dims of matrix A and B <nl> + / / ( see http : / / docs . nvidia . 
com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> + long m_ = nOutputPlane ; <nl> + long n_ = outputHeight * outputWidth ; <nl> + long k_ = 1 ; <nl> + <nl> + / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> + if ( bias ) { <nl> + # ifdef THC_REAL_IS_FLOAT <nl> + THCudaBlas_Sgemm ( <nl> + # elif defined ( THC_REAL_IS_HALF ) <nl> + THCudaBlas_Hgemm ( <nl> + # elif defined ( THC_REAL_IS_DOUBLE ) <nl> + THCudaBlas_Dgemm ( <nl> + # endif <nl> + state , <nl> + ' t ' , ' n ' , <nl> + n_ , m_ , k_ , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , ones ) , k_ , <nl> + THCTensor_ ( data ) ( state , bias ) , k_ , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , output_n ) , n_ <nl> + ) ; <nl> + } <nl> + } <nl> + <nl> + / / Free <nl> + THCTensor_ ( free ) ( state , input_n ) ; <nl> + THCTensor_ ( free ) ( state , output_n ) ; <nl> + <nl> + / / Resize output <nl> + if ( batch = = 0 ) { <nl> + THCTensor_ ( resize3d ) ( state , output , nOutputPlane , outputHeight , outputWidth ) ; <nl> + THCTensor_ ( resize3d ) ( state , input , nInputPlane , inputHeight , inputWidth ) ; <nl> + } <nl> + <nl> + THCTensor_ ( free ) ( state , input ) ; <nl> + THCTensor_ ( free ) ( state , weight ) ; <nl> + if ( bias ) THCTensor_ ( free ) ( state , bias ) ; <nl> + <nl> + } <nl> + <nl> + void THNN_ ( SpatialFullDilatedConvolution_updateGradInput ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * gradOutput , <nl> + THCTensor * gradInput , <nl> + THCTensor * weight , <nl> + THCTensor * gradColumns , <nl> + int kW , int kH , <nl> + int dW , int dH , <nl> + int padW , int padH , <nl> + int dilationW , int dilationH , <nl> + int adjW , int adjH ) <nl> + { <nl> + int nInputPlane = THCTensor_ ( size ) ( state , weight , 0 ) ; <nl> + int nOutputPlane = THCTensor_ ( size ) ( state , weight , 1 ) ; <nl> + <nl> + THCUNN_assertSameGPU ( state , 5 , input , gradOutput , weight , <nl> + gradColumns , gradInput ) ; <nl> + THNN_ ( SpatialFullDilatedConvolution_shapeCheck ) <nl> + ( state , input , gradOutput , weight , NULL , kH , kW , dH , dW , padH , padW , dilationH , dilationW , adjH , adjW ) ; <nl> + <nl> + input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> + gradOutput = THCTensor_ ( newContiguous ) ( state , gradOutput ) ; <nl> + weight = THCTensor_ ( newContiguous ) ( state , weight ) ; <nl> + int batch = 1 ; <nl> + if ( input - > nDimension = = 3 ) { <nl> + / / Force batch <nl> + batch = 0 ; <nl> + THCTensor_ ( resize4d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] ) ; <nl> + THCTensor_ ( resize4d ) ( state , gradOutput , 1 , gradOutput - > size [ 0 ] , gradOutput - > size [ 1 ] , gradOutput - > size [ 2 ] ) ; <nl> + } <nl> + <nl> + long inputWidth = input - > size [ 3 ] ; <nl> + long inputHeight = input - > size [ 2 ] ; <nl> + long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + ( dilationH * ( kH - 1 ) + 1 ) + adjH ; <nl> + long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + ( dilationW * ( kW - 1 ) + 1 ) + adjW ; <nl> + <nl> + / / Batch size + input planes <nl> + long batchSize = input - > size [ 0 ] ; <nl> + <nl> + / / Resize output <nl> + THCTensor_ ( resize4d ) ( state , gradInput , batchSize , nInputPlane , inputHeight , inputWidth ) ; <nl> + <nl> + / / Resize temporary columns <nl> + THCTensor_ ( resize2d ) ( state , gradColumns , nOutputPlane * kW * kH , inputHeight * inputWidth ) ; <nl> + <nl> + / / 
Helpers <nl> + THCTensor * gradInput_n = THCTensor_ ( new ) ( state ) ; <nl> + THCTensor * gradOutput_n = THCTensor_ ( new ) ( state ) ; <nl> + <nl> + / / For each elt in batch , do : <nl> + for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> + / / Matrix mulitply per sample : <nl> + THCTensor_ ( select ) ( state , gradInput_n , gradInput , 0 , elt ) ; <nl> + THCTensor_ ( select ) ( state , gradOutput_n , gradOutput , 0 , elt ) ; <nl> + <nl> + / / Extract columns : <nl> + im2col ( <nl> + THCState_getCurrentStream ( state ) , <nl> + THCTensor_ ( data ) ( state , gradOutput_n ) , <nl> + nOutputPlane , outputHeight , outputWidth , kH , kW , padH , padW , dH , dW , <nl> + dilationH , dilationW , THCTensor_ ( data ) ( state , gradColumns ) <nl> + ) ; <nl> + <nl> + <nl> + / / M , N , K are dims of matrix A and B <nl> + / / ( see http : / / docs . nvidia . com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> + long m = weight - > size [ 0 ] ; <nl> + long n = gradColumns - > size [ 1 ] ; <nl> + long k = weight - > size [ 1 ] * weight - > size [ 2 ] * weight - > size [ 3 ] ; <nl> + <nl> + / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> + # ifdef THC_REAL_IS_FLOAT <nl> + THCudaBlas_Sgemm ( <nl> + # elif defined ( THC_REAL_IS_HALF ) <nl> + THCudaBlas_Hgemm ( <nl> + # elif defined ( THC_REAL_IS_DOUBLE ) <nl> + THCudaBlas_Dgemm ( <nl> + # endif <nl> + state , <nl> + ' n ' , ' n ' , <nl> + n , m , k , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , gradColumns ) , n , <nl> + THCTensor_ ( data ) ( state , weight ) , k , <nl> + ScalarConvert < int , real > : : to ( 0 ) , <nl> + THCTensor_ ( data ) ( state , gradInput_n ) , n <nl> + ) ; <nl> + } <nl> + <nl> + <nl> + / / Free <nl> + THCTensor_ ( free ) ( state , gradInput_n ) ; <nl> + THCTensor_ ( free ) ( state , gradOutput_n ) ; <nl> + <nl> + / / Resize output <nl> + if ( batch = = 0 ) { <nl> + THCTensor_ ( resize3d ) ( state , gradOutput , nOutputPlane , outputHeight , outputWidth ) ; <nl> + THCTensor_ ( resize3d ) ( state , input , nInputPlane , inputHeight , inputWidth ) ; <nl> + THCTensor_ ( resize3d ) ( state , gradInput , nInputPlane , inputHeight , inputWidth ) ; <nl> + } <nl> + <nl> + THCTensor_ ( free ) ( state , input ) ; <nl> + THCTensor_ ( free ) ( state , gradOutput ) ; <nl> + THCTensor_ ( free ) ( state , weight ) ; <nl> + } <nl> + <nl> + <nl> + void THNN_ ( SpatialFullDilatedConvolution_accGradParameters ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * gradOutput , <nl> + THCTensor * gradWeight , <nl> + THCTensor * gradBias , <nl> + THCTensor * columns , <nl> + THCTensor * ones , <nl> + int kW , int kH , <nl> + int dW , int dH , <nl> + int padW , int padH , <nl> + int dilationW , int dilationH , <nl> + int adjW , int adjH , <nl> + accreal scale_ ) <nl> + { <nl> + real scale = ScalarConvert < accreal , real > : : to ( scale_ ) ; <nl> + int nInputPlane = THCTensor_ ( size ) ( state , gradWeight , 0 ) ; <nl> + int nOutputPlane = THCTensor_ ( size ) ( state , gradWeight , 1 ) ; <nl> + <nl> + THCUNN_assertSameGPU ( state , 6 , input , gradOutput , gradWeight , <nl> + gradBias , columns , ones ) ; <nl> + THNN_ ( SpatialFullDilatedConvolution_shapeCheck ) <nl> + ( state , input , gradOutput , gradWeight , gradBias , kH , kW , dH , dW , padH , padW , dilationH , dilationW , adjH , adjW ) ; <nl> + <nl> + THArgCheck ( THCTensor_ ( isContiguous ) ( state , gradWeight ) , 4 , " gradWeight needs to be contiguous " ) ; <nl> + if ( 
gradBias ) <nl> + THArgCheck ( THCTensor_ ( isContiguous ) ( state , gradBias ) , 5 , " gradBias needs to be contiguous " ) ; <nl> + input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> + gradOutput = THCTensor_ ( newContiguous ) ( state , gradOutput ) ; <nl> + int batch = 1 ; <nl> + if ( input - > nDimension = = 3 ) { <nl> + / / Force batch <nl> + batch = 0 ; <nl> + THCTensor_ ( resize4d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] ) ; <nl> + THCTensor_ ( resize4d ) ( state , gradOutput , 1 , gradOutput - > size [ 0 ] , gradOutput - > size [ 1 ] , gradOutput - > size [ 2 ] ) ; <nl> + } <nl> + <nl> + long inputWidth = input - > size [ 3 ] ; <nl> + long inputHeight = input - > size [ 2 ] ; <nl> + long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + ( dilationH * ( kH - 1 ) + 1 ) + adjH ; <nl> + long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + ( dilationW * ( kW - 1 ) + 1 ) + adjW ; <nl> + <nl> + / / Batch size + input planes <nl> + long batchSize = input - > size [ 0 ] ; <nl> + <nl> + / / Define a buffer of ones , for bias accumulation <nl> + if ( ones - > nDimension ! = 2 | | ones - > size [ 0 ] * ones - > size [ 1 ] < outputHeight * outputWidth ) { <nl> + / / Resize plane and fill with ones . . . <nl> + THCTensor_ ( resize2d ) ( state , ones , outputHeight , outputWidth ) ; <nl> + THCTensor_ ( fill ) ( state , ones , ScalarConvert < int , real > : : to ( 1 ) ) ; <nl> + } <nl> + <nl> + / / Resize temporary columns <nl> + THCTensor_ ( resize2d ) ( state , columns , nOutputPlane * kW * kH , inputHeight * inputWidth ) ; <nl> + <nl> + / / Helpers <nl> + THCTensor * input_n = THCTensor_ ( new ) ( state ) ; <nl> + THCTensor * gradOutput_n = THCTensor_ ( new ) ( state ) ; <nl> + <nl> + / / For each elt in batch , do : <nl> + for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> + / / Matrix mulitply per output : <nl> + THCTensor_ ( select ) ( state , input_n , input , 0 , elt ) ; <nl> + THCTensor_ ( select ) ( state , gradOutput_n , gradOutput , 0 , elt ) ; <nl> + <nl> + / / Extract columns : <nl> + im2col ( <nl> + THCState_getCurrentStream ( state ) , <nl> + THCTensor_ ( data ) ( state , gradOutput_n ) , <nl> + nOutputPlane , outputHeight , outputWidth , kH , kW , padH , padW , dH , dW , <nl> + dilationH , dilationW , THCTensor_ ( data ) ( state , columns ) <nl> + ) ; <nl> + <nl> + / / M , N , K are dims of matrix A and B <nl> + / / ( see http : / / docs . nvidia . com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> + long n = columns - > size [ 0 ] ; / / nOutputPlane * kh * kw <nl> + long m = input_n - > size [ 0 ] ; / / nInputPlane <nl> + long k = columns - > size [ 1 ] ; / / inputHeight * inputWidth <nl> + <nl> + / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> + # ifdef THC_REAL_IS_FLOAT <nl> + THCudaBlas_Sgemm ( <nl> + # elif defined ( THC_REAL_IS_HALF ) <nl> + THCudaBlas_Hgemm ( <nl> + # elif defined ( THC_REAL_IS_DOUBLE ) <nl> + THCudaBlas_Dgemm ( <nl> + # endif <nl> + state , <nl> + ' t ' , ' n ' , <nl> + n , m , k , <nl> + scale , <nl> + THCTensor_ ( data ) ( state , columns ) , k , <nl> + THCTensor_ ( data ) ( state , input_n ) , k , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , gradWeight ) , n <nl> + ) ; <nl> + <nl> + / / Do Bias : <nl> + / / M , N , K are dims of matrix A and B <nl> + / / ( see http : / / docs . nvidia . 
com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> + long m_ = nOutputPlane ; <nl> + long k_ = outputHeight * outputWidth ; <nl> + <nl> + / / Do GEMV ( note : this is a bit confusing because gemv assumes column - major matrices ) <nl> + if ( gradBias ) { <nl> + # if defined ( THC_REAL_IS_FLOAT ) | | defined ( THC_REAL_IS_DOUBLE ) <nl> + # ifdef THC_REAL_IS_FLOAT <nl> + THCudaBlas_Sgemv ( <nl> + # elif defined ( THC_REAL_IS_DOUBLE ) <nl> + THCudaBlas_Dgemv ( <nl> + # endif <nl> + state , <nl> + ' t ' , <nl> + k_ , m_ , <nl> + scale , <nl> + THCTensor_ ( data ) ( state , gradOutput_n ) , k_ , <nl> + THCTensor_ ( data ) ( state , ones ) , 1 , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , gradBias ) , 1 <nl> + ) ; <nl> + # endif <nl> + # ifdef THC_REAL_IS_HALF <nl> + THCudaBlas_Hgemm ( <nl> + state , <nl> + ' t ' , ' n ' , <nl> + m_ , 1 , k_ , <nl> + scale , <nl> + THCTensor_ ( data ) ( state , gradOutput_n ) , k_ , <nl> + THCTensor_ ( data ) ( state , ones ) , k_ , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , gradBias ) , m_ <nl> + ) ; <nl> + # endif <nl> + } <nl> + } <nl> + <nl> + / / Free <nl> + THCTensor_ ( free ) ( state , input_n ) ; <nl> + THCTensor_ ( free ) ( state , gradOutput_n ) ; <nl> + <nl> + / / Resize <nl> + if ( batch = = 0 ) { <nl> + THCTensor_ ( resize3d ) ( state , gradOutput , nOutputPlane , outputHeight , outputWidth ) ; <nl> + THCTensor_ ( resize3d ) ( state , input , nInputPlane , inputHeight , inputWidth ) ; <nl> + } <nl> + <nl> + THCTensor_ ( free ) ( state , input ) ; <nl> + THCTensor_ ( free ) ( state , gradOutput ) ; <nl> + } <nl> + <nl> + # endif <nl> mmm a / generic / THCUNN . h <nl> ppp b / generic / THCUNN . h <nl> TH_API void THNN_ ( SpatialDilatedConvolution_accGradParameters ) ( <nl> int dilationW , int dilationH , <nl> accreal scale ) ; <nl> <nl> + TH_API void THNN_ ( SpatialFullDilatedConvolution_updateOutput ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * output , <nl> + THCTensor * weight , <nl> + THCTensor * bias , / / [ OPTIONAL ] <nl> + THCTensor * columns , <nl> + THCTensor * ones , <nl> + int kW , int kH , <nl> + int dW , int dH , <nl> + int padW , int padH , <nl> + int dilationW , int dilationH , <nl> + int adjW , int adjH ) ; <nl> + <nl> + TH_API void THNN_ ( SpatialFullDilatedConvolution_updateGradInput ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * gradOutput , <nl> + THCTensor * gradInput , <nl> + THCTensor * weight , <nl> + THCTensor * gradColumns , <nl> + int kW , int kH , <nl> + int dW , int dH , <nl> + int padW , int padH , <nl> + int dilationW , int dilationH , <nl> + int adjW , int adjH ) ; <nl> + <nl> + TH_API void THNN_ ( SpatialFullDilatedConvolution_accGradParameters ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * gradOutput , <nl> + THCTensor * gradWeight , <nl> + THCTensor * gradBias , / / [ OPTIONAL ] <nl> + THCTensor * columns , <nl> + THCTensor * ones , <nl> + int kW , int kH , <nl> + int dW , int dH , <nl> + int padW , int padH , <nl> + int dilationW , int dilationH , <nl> + int adjW , int adjH , <nl> + accreal scale ) ; <nl> + <nl> TH_API void THNN_ ( SpatialDilatedMaxPooling_updateOutput ) ( <nl> THCState * state , <nl> THCTensor * input , <nl> TH_API void THNN_ ( VolumetricDilatedConvolution_accGradParameters ) ( <nl> int dilationT , int dilationW , int dilationH , <nl> accreal scale ) ; <nl> <nl> + TH_API void THNN_ ( 
VolumetricFullDilatedConvolution_updateOutput ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * output , <nl> + THCTensor * weight , <nl> + THCTensor * bias , / / [ OPTIONAL ] <nl> + THCTensor * finput , <nl> + THCTensor * fgradInput , <nl> + int dT , int dW , int dH , <nl> + int padT , int padW , int padH , <nl> + int dilationT , int dilationW , int dilationH , <nl> + int adjT , int adjW , int adjH ) ; <nl> + <nl> + TH_API void THNN_ ( VolumetricFullDilatedConvolution_updateGradInput ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * gradOutput , <nl> + THCTensor * gradInput , <nl> + THCTensor * weight , <nl> + THCTensor * finput , <nl> + THCTensor * fgradInput , <nl> + int dT , int dW , int dH , <nl> + int padT , int padW , int padH , <nl> + int dilationT , int dilationW , int dilationH , <nl> + int adjT , int adjW , int adjH ) ; <nl> + <nl> + TH_API void THNN_ ( VolumetricFullDilatedConvolution_accGradParameters ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * gradOutput , <nl> + THCTensor * gradWeight , <nl> + THCTensor * gradBias , / / [ OPTIONAL ] <nl> + THCTensor * finput , <nl> + THCTensor * fgradInput , <nl> + int dT , int dW , int dH , <nl> + int padT , int padW , int padH , <nl> + int dilationT , int dilationW , int dilationH , <nl> + int adjT , int adjW , int adjH , <nl> + accreal scale ) ; <nl> + <nl> TH_API void THNN_ ( VolumetricDilatedMaxPooling_updateOutput ) ( <nl> THCState * state , <nl> THCTensor * input , <nl> mmm a / generic / VolumetricFullConvolution . cu <nl> ppp b / generic / VolumetricFullConvolution . cu <nl> <nl> # define THC_GENERIC_FILE " generic / VolumetricFullConvolution . cu " <nl> # else <nl> <nl> - static inline void THNN_ ( VolumetricFullConvolution_shapeCheck ) ( <nl> - THCState * state , <nl> - THCTensor * input , <nl> - THCTensor * gradOutput , <nl> - THCTensor * weight , <nl> - THCTensor * bias , <nl> - int dT , int dW , int dH , <nl> - int padT , int padW , int padH , <nl> - int adjT , int adjW , int adjH ) { <nl> - THCUNN_argCheck ( state , input - > nDimension = = 4 | | input - > nDimension = = 5 , 2 , input , <nl> - " 4D or 5D ( batch mode ) tensor expected for input , but got : % s " ) ; <nl> - / / number of input & output planes and kernel size is indirectly defined by the weight tensor <nl> - THCUNN_argCheck ( state , weight - > nDimension = = 5 , 4 , weight , <nl> - " 5D ( nOutputPlane x nInputPlane x kT x kH x kW ) tensor " <nl> - " expected for weight , but got : % s " ) ; <nl> - THArgCheck ( THCTensor_ ( isContiguous ) ( state , weight ) , 4 , <nl> - " weight tensor has to be contiguous " ) ; <nl> - THArgCheck ( ! 
bias | | THCTensor_ ( isContiguous ) ( state , bias ) , 5 , <nl> - " bias tensor has to be contiguous " ) ; <nl> - THArgCheck ( dT > 0 & & dW > 0 & & dH > 0 , 8 , <nl> - " stride should be greater than zero , but got dT : % d dH : % d dW : % d " , dT , dH , dW ) ; <nl> - THArgCheck ( adjT < dT & & adjW < dW & & adjH < dH , 14 , <nl> - " output adjustment must be smaller than stride , but got " <nl> - " adjT : % d adjH : % d adjW : % d dT : % d dH : % d dW : % d " , <nl> - adjT , adjH , adjW , dT , dH , dW ) ; <nl> - <nl> - int ndim = input - > nDimension ; <nl> - int nInputPlane = THCTensor_ ( size ) ( state , weight , 0 ) ; <nl> - int nOutputPlane = THCTensor_ ( size ) ( state , weight , 1 ) ; <nl> - const int kT = ( int ) weight - > size [ 2 ] ; <nl> - const int kH = ( int ) weight - > size [ 3 ] ; <nl> - const int kW = ( int ) weight - > size [ 4 ] ; <nl> - <nl> - if ( bias ! = NULL ) { <nl> - THCUNN_check_dim_size ( state , bias , 1 , 0 , weight - > size [ 1 ] ) ; <nl> - } <nl> - <nl> - int dimf = 0 ; <nl> - int dimd = 1 ; <nl> - int dimh = 2 ; <nl> - int dimw = 3 ; <nl> - <nl> - if ( ndim = = 5 ) { <nl> - dimf + + ; <nl> - dimd + + ; <nl> - dimh + + ; <nl> - dimw + + ; <nl> - } <nl> - <nl> - long inputWidth = input - > size [ dimw ] ; <nl> - long inputHeight = input - > size [ dimh ] ; <nl> - long inputDepth = input - > size [ dimd ] ; <nl> - long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + kW + adjW ; <nl> - long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + kH + adjH ; <nl> - long outputDepth = ( inputDepth - 1 ) * dT - 2 * padT + kT + adjT ; <nl> - <nl> - if ( outputDepth < 1 | | outputWidth < 1 | | outputHeight < 1 ) <nl> - THError ( " Given input size : ( % dx % dx % dx % d ) . Calculated output size : ( % dx % dx % dx % d ) . Output size is too small " , <nl> - nInputPlane , inputDepth , inputHeight , inputWidth , nOutputPlane , outputDepth , outputHeight , outputWidth ) ; <nl> - <nl> - THCUNN_check_dim_size ( state , input , ndim , dimf , nInputPlane ) ; <nl> - if ( gradOutput ! = NULL ) { <nl> - THCUNN_check_dim_size ( state , gradOutput , ndim , dimf , nOutputPlane ) ; <nl> - THCUNN_check_dim_size ( state , gradOutput , ndim , dimd , outputDepth ) ; <nl> - THCUNN_check_dim_size ( state , gradOutput , ndim , dimh , outputHeight ) ; <nl> - THCUNN_check_dim_size ( state , gradOutput , ndim , dimw , outputWidth ) ; <nl> - } <nl> - } <nl> - <nl> void THNN_ ( VolumetricFullConvolution_updateOutput ) ( <nl> THCState * state , <nl> THCTensor * input , <nl> void THNN_ ( VolumetricFullConvolution_updateOutput ) ( <nl> int padT , int padW , int padH , <nl> int adjT , int adjW , int adjH ) <nl> { <nl> - <nl> - THCTensor * columns = finput ; <nl> - THCTensor * ones = fgradInput ; <nl> - <nl> - int nInputPlane = THCTensor_ ( size ) ( state , weight , 0 ) ; <nl> - int nOutputPlane = THCTensor_ ( size ) ( state , weight , 1 ) ; <nl> - const int kT = ( int ) weight - > size [ 2 ] ; <nl> - const int kH = ( int ) weight - > size [ 3 ] ; <nl> - const int kW = ( int ) weight - > size [ 4 ] ; <nl> - <nl> - THCUNN_assertSameGPU ( state , 6 , input , output , weight , <nl> - bias , columns , ones ) ; <nl> - THNN_ ( VolumetricFullConvolution_shapeCheck ) ( <nl> - state , input , NULL , weight , bias , <nl> - dT , dW , dH , padT , padW , padH , <nl> - adjT , adjW , adjH ) ; <nl> - <nl> - input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> - weight = THCTensor_ ( newContiguous ) ( state , weight ) ; <nl> - bias = bias ? 
THCTensor_ ( newContiguous ) ( state , bias ) : bias ; <nl> - <nl> - int batch = 1 ; <nl> - if ( input - > nDimension = = 4 ) { <nl> - / / Force batch <nl> - batch = 0 ; <nl> - THCTensor_ ( resize5d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] , input - > size [ 3 ] ) ; <nl> - } <nl> - <nl> - long inputWidth = input - > size [ 4 ] ; <nl> - long inputHeight = input - > size [ 3 ] ; <nl> - long inputDepth = input - > size [ 2 ] ; <nl> - long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + kW + adjW ; <nl> - long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + kH + adjH ; <nl> - long outputDepth = ( inputDepth - 1 ) * dT - 2 * padT + kT + adjT ; <nl> - <nl> - / / Batch size + input planes <nl> - long batchSize = input - > size [ 0 ] ; <nl> - <nl> - / / Resize output <nl> - THCTensor_ ( resize5d ) ( state , output , batchSize , nOutputPlane , outputDepth , outputHeight , outputWidth ) ; <nl> - <nl> - / / Resize temporary columns <nl> - THCTensor_ ( resize2d ) ( state , columns , nOutputPlane * kW * kH * kT , inputDepth * inputHeight * inputWidth ) ; <nl> - <nl> - / / Define a buffer of ones , for bias accumulation <nl> - / / Note : this buffer can be shared with other modules , it only ever gets increased , <nl> - / / and always contains ones . <nl> - if ( ones - > nDimension ! = 3 | | ones - > size [ 0 ] * ones - > size [ 1 ] * ones - > size [ 2 ] < outputDepth * outputHeight * outputWidth ) { <nl> - / / Resize plane and fill with ones . . . <nl> - THCTensor_ ( resize3d ) ( state , ones , outputDepth , outputHeight , outputWidth ) ; <nl> - THCTensor_ ( fill ) ( state , ones , ScalarConvert < int , real > : : to ( 1 ) ) ; <nl> - } <nl> - <nl> - / / Helpers <nl> - THCTensor * input_n = THCTensor_ ( new ) ( state ) ; <nl> - THCTensor * output_n = THCTensor_ ( new ) ( state ) ; <nl> - <nl> - / / For each elt in batch , do : <nl> - for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> - / / Matrix mulitply per output : <nl> - THCTensor_ ( select ) ( state , input_n , input , 0 , elt ) ; <nl> - THCTensor_ ( select ) ( state , output_n , output , 0 , elt ) ; <nl> - <nl> - / / M , N , K are dims of matrix A and B <nl> - / / ( see http : / / docs . nvidia . 
com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> - long m = weight - > size [ 1 ] * weight - > size [ 2 ] * weight - > size [ 3 ] * weight - > size [ 4 ] ; <nl> - long n = columns - > size [ 1 ] ; <nl> - long k = weight - > size [ 0 ] ; <nl> - <nl> - / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> - # ifdef THC_REAL_IS_FLOAT <nl> - THCudaBlas_Sgemm ( <nl> - # elif defined ( THC_REAL_IS_HALF ) <nl> - THCudaBlas_Hgemm ( <nl> - # elif defined ( THC_REAL_IS_DOUBLE ) <nl> - THCudaBlas_Dgemm ( <nl> - # endif <nl> - state , <nl> - ' n ' , ' t ' , <nl> - n , m , k , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , input_n ) , n , <nl> - THCTensor_ ( data ) ( state , weight ) , m , <nl> - ScalarConvert < int , real > : : to ( 0 ) , <nl> - THCTensor_ ( data ) ( state , columns ) , n <nl> - ) ; <nl> - <nl> - / / Unpack columns back into input : <nl> - col2vol < real , accreal > ( <nl> - THCState_getCurrentStream ( state ) , <nl> - THCTensor_ ( data ) ( state , columns ) , <nl> - nOutputPlane , outputDepth , outputHeight , outputWidth , kT , kH , kW , padT , padH , padW , dT , dH , dW , <nl> - 1 , 1 , 1 , <nl> - THCTensor_ ( data ) ( state , output_n ) <nl> - ) ; <nl> - <nl> - / / Do Bias after : <nl> - / / M , N , K are dims of matrix A and B <nl> - / / ( see http : / / docs . nvidia . com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> - long m_ = nOutputPlane ; <nl> - long n_ = outputDepth * outputHeight * outputWidth ; <nl> - long k_ = 1 ; <nl> - <nl> - / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> - if ( bias ) { <nl> - # ifdef THC_REAL_IS_FLOAT <nl> - THCudaBlas_Sgemm ( <nl> - # elif defined ( THC_REAL_IS_HALF ) <nl> - THCudaBlas_Hgemm ( <nl> - # elif defined ( THC_REAL_IS_DOUBLE ) <nl> - THCudaBlas_Dgemm ( <nl> - # endif <nl> - state , <nl> - ' t ' , ' n ' , <nl> - n_ , m_ , k_ , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , ones ) , k_ , <nl> - THCTensor_ ( data ) ( state , bias ) , k_ , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , output_n ) , n_ <nl> - ) ; <nl> - } <nl> - } <nl> - <nl> - / / Free <nl> - THCTensor_ ( free ) ( state , input_n ) ; <nl> - THCTensor_ ( free ) ( state , output_n ) ; <nl> - <nl> - / / Resize output <nl> - if ( batch = = 0 ) { <nl> - THCTensor_ ( resize4d ) ( state , output , nOutputPlane , outputDepth , outputHeight , outputWidth ) ; <nl> - THCTensor_ ( resize4d ) ( state , input , nInputPlane , inputDepth , inputHeight , inputWidth ) ; <nl> - } <nl> - <nl> - THCTensor_ ( free ) ( state , input ) ; <nl> - THCTensor_ ( free ) ( state , weight ) ; <nl> - if ( bias ) THCTensor_ ( free ) ( state , bias ) ; <nl> - <nl> + THNN_ ( VolumetricFullDilatedConvolution_updateOutput ) ( <nl> + state , input , output , weight , bias , finput , fgradInput , <nl> + dT , dW , dH , padT , padW , padH , 1 , 1 , 1 , adjT , adjW , adjH ) ; <nl> } <nl> <nl> void THNN_ ( VolumetricFullConvolution_updateGradInput ) ( <nl> void THNN_ ( VolumetricFullConvolution_updateGradInput ) ( <nl> int padT , int padW , int padH , <nl> int adjT , int adjW , int adjH ) <nl> { <nl> - THCTensor * gradColumns = finput ; <nl> - <nl> - int nInputPlane = THCTensor_ ( size ) ( state , weight , 0 ) ; <nl> - int nOutputPlane = THCTensor_ ( size ) ( state , weight , 1 ) ; <nl> - const int kT = ( int ) weight - > size [ 2 ] ; <nl> - const int kH = ( int ) weight - > size [ 3 ] ; 
<nl> - const int kW = ( int ) weight - > size [ 4 ] ; <nl> - <nl> - THCUNN_assertSameGPU ( state , 5 , input , gradOutput , weight , <nl> - gradColumns , gradInput ) ; <nl> - THNN_ ( VolumetricFullConvolution_shapeCheck ) ( <nl> - state , input , gradOutput , weight , NULL , <nl> - dT , dW , dH , padT , padW , padH , <nl> - adjT , adjW , adjH ) ; <nl> - <nl> - input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> - gradOutput = THCTensor_ ( newContiguous ) ( state , gradOutput ) ; <nl> - weight = THCTensor_ ( newContiguous ) ( state , weight ) ; <nl> - <nl> - int batch = 1 ; <nl> - if ( input - > nDimension = = 4 ) { <nl> - / / Force batch <nl> - batch = 0 ; <nl> - THCTensor_ ( resize5d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] , input - > size [ 3 ] ) ; <nl> - THCTensor_ ( resize5d ) ( state , gradOutput , 1 , gradOutput - > size [ 0 ] , gradOutput - > size [ 1 ] , gradOutput - > size [ 2 ] , gradOutput - > size [ 3 ] ) ; <nl> - } <nl> - <nl> - long inputWidth = input - > size [ 4 ] ; <nl> - long inputHeight = input - > size [ 3 ] ; <nl> - long inputDepth = input - > size [ 2 ] ; <nl> - long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + kW + adjW ; <nl> - long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + kH + adjH ; <nl> - long outputDepth = ( inputDepth - 1 ) * dT - 2 * padT + kT + adjT ; <nl> - <nl> - / / Batch size + input planes <nl> - long batchSize = input - > size [ 0 ] ; <nl> - <nl> - / / Resize output <nl> - THCTensor_ ( resize5d ) ( state , gradInput , batchSize , nInputPlane , inputDepth , inputHeight , inputWidth ) ; <nl> - <nl> - / / Resize temporary columns <nl> - THCTensor_ ( resize2d ) ( state , gradColumns , nOutputPlane * kW * kH * kT , inputDepth * inputHeight * inputWidth ) ; <nl> - <nl> - / / Helpers <nl> - THCTensor * gradInput_n = THCTensor_ ( new ) ( state ) ; <nl> - THCTensor * gradOutput_n = THCTensor_ ( new ) ( state ) ; <nl> - <nl> - / / For each elt in batch , do : <nl> - for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> - / / Matrix mulitply per sample : <nl> - THCTensor_ ( select ) ( state , gradInput_n , gradInput , 0 , elt ) ; <nl> - THCTensor_ ( select ) ( state , gradOutput_n , gradOutput , 0 , elt ) ; <nl> - <nl> - / / Extract columns : <nl> - vol2col ( <nl> - THCState_getCurrentStream ( state ) , <nl> - THCTensor_ ( data ) ( state , gradOutput_n ) , <nl> - nOutputPlane , outputDepth , outputHeight , outputWidth , kT , kH , kW , padT , padH , padW , dT , dH , dW , <nl> - 1 , 1 , 1 , <nl> - THCTensor_ ( data ) ( state , gradColumns ) <nl> - ) ; <nl> - <nl> - <nl> - / / M , N , K are dims of matrix A and B <nl> - / / ( see http : / / docs . nvidia . 
com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> - long m = weight - > size [ 0 ] ; <nl> - long n = gradColumns - > size [ 1 ] ; <nl> - long k = weight - > size [ 1 ] * weight - > size [ 2 ] * weight - > size [ 3 ] * weight - > size [ 4 ] ; <nl> - <nl> - / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> - # ifdef THC_REAL_IS_FLOAT <nl> - THCudaBlas_Sgemm ( <nl> - # elif defined ( THC_REAL_IS_HALF ) <nl> - THCudaBlas_Hgemm ( <nl> - # elif defined ( THC_REAL_IS_DOUBLE ) <nl> - THCudaBlas_Dgemm ( <nl> - # endif <nl> - state , <nl> - ' n ' , ' n ' , <nl> - n , m , k , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , gradColumns ) , n , <nl> - THCTensor_ ( data ) ( state , weight ) , k , <nl> - ScalarConvert < int , real > : : to ( 0 ) , <nl> - THCTensor_ ( data ) ( state , gradInput_n ) , n <nl> - ) ; <nl> - } <nl> - <nl> - <nl> - / / Free <nl> - THCTensor_ ( free ) ( state , gradInput_n ) ; <nl> - THCTensor_ ( free ) ( state , gradOutput_n ) ; <nl> - <nl> - / / Resize output <nl> - if ( batch = = 0 ) { <nl> - THCTensor_ ( resize4d ) ( state , gradOutput , nOutputPlane , outputDepth , outputHeight , outputWidth ) ; <nl> - THCTensor_ ( resize4d ) ( state , input , nInputPlane , inputDepth , inputHeight , inputWidth ) ; <nl> - THCTensor_ ( resize4d ) ( state , gradInput , nInputPlane , inputDepth , inputHeight , inputWidth ) ; <nl> - } <nl> - <nl> - THCTensor_ ( free ) ( state , input ) ; <nl> - THCTensor_ ( free ) ( state , gradOutput ) ; <nl> - THCTensor_ ( free ) ( state , weight ) ; <nl> + THNN_ ( VolumetricFullDilatedConvolution_updateGradInput ) ( <nl> + state , input , gradOutput , gradInput , weight , finput , fgradInput , <nl> + dT , dW , dH , padT , padW , padH , 1 , 1 , 1 , adjT , adjW , adjH ) ; <nl> } <nl> <nl> <nl> void THNN_ ( VolumetricFullConvolution_accGradParameters ) ( <nl> int adjT , int adjW , int adjH , <nl> accreal scale_ ) <nl> { <nl> - real scale = ScalarConvert < accreal , real > : : to ( scale_ ) ; <nl> - THCTensor * columns = finput ; <nl> - THCTensor * ones = fgradInput ; <nl> - <nl> - int nInputPlane = THCTensor_ ( size ) ( state , gradWeight , 0 ) ; <nl> - int nOutputPlane = THCTensor_ ( size ) ( state , gradWeight , 1 ) ; <nl> - const int kT = ( int ) gradWeight - > size [ 2 ] ; <nl> - const int kH = ( int ) gradWeight - > size [ 3 ] ; <nl> - const int kW = ( int ) gradWeight - > size [ 4 ] ; <nl> - <nl> - THCUNN_assertSameGPU ( state , 6 , input , gradOutput , gradWeight , <nl> - gradBias , columns , ones ) ; <nl> - THNN_ ( VolumetricFullConvolution_shapeCheck ) ( <nl> - state , input , gradOutput , gradWeight , <nl> - gradBias , dT , dW , dH , padT , padW , padH , <nl> - adjT , adjW , adjH ) ; <nl> - <nl> - THArgCheck ( THCTensor_ ( isContiguous ) ( state , gradWeight ) , 4 , " gradWeight needs to be contiguous " ) ; <nl> - if ( gradBias ) <nl> - THArgCheck ( THCTensor_ ( isContiguous ) ( state , gradBias ) , 5 , " gradBias needs to be contiguous " ) ; <nl> - <nl> - input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> - gradOutput = THCTensor_ ( newContiguous ) ( state , gradOutput ) ; <nl> - <nl> - int batch = 1 ; <nl> - if ( input - > nDimension = = 4 ) { <nl> - / / Force batch <nl> - batch = 0 ; <nl> - THCTensor_ ( resize5d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] , input - > size [ 3 ] ) ; <nl> - THCTensor_ ( resize5d ) ( state , gradOutput , 1 , gradOutput - > size [ 0 ] , gradOutput - > size [ 1 ] , 
gradOutput - > size [ 2 ] , gradOutput - > size [ 3 ] ) ; <nl> - } <nl> - <nl> - long inputWidth = input - > size [ 4 ] ; <nl> - long inputHeight = input - > size [ 3 ] ; <nl> - long inputDepth = input - > size [ 2 ] ; <nl> - long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + kW + adjW ; <nl> - long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + kH + adjH ; <nl> - long outputDepth = ( inputDepth - 1 ) * dT - 2 * padT + kT + adjT ; <nl> - <nl> - / / Batch size + input planes <nl> - long batchSize = input - > size [ 0 ] ; <nl> - <nl> - / / Define a buffer of ones , for bias accumulation <nl> - if ( ones - > nDimension ! = 3 | | ones - > size [ 0 ] * ones - > size [ 1 ] * ones - > size [ 2 ] < outputDepth * outputHeight * outputWidth ) { <nl> - / / Resize plane and fill with ones . . . <nl> - THCTensor_ ( resize3d ) ( state , ones , outputDepth , outputHeight , outputWidth ) ; <nl> - THCTensor_ ( fill ) ( state , ones , ScalarConvert < int , real > : : to ( 1 ) ) ; <nl> - } <nl> - <nl> - / / Resize temporary columns <nl> - THCTensor_ ( resize2d ) ( state , columns , nOutputPlane * kW * kH * kT , inputDepth * inputHeight * inputWidth ) ; <nl> - <nl> - / / Helpers <nl> - THCTensor * input_n = THCTensor_ ( new ) ( state ) ; <nl> - THCTensor * gradOutput_n = THCTensor_ ( new ) ( state ) ; <nl> - <nl> - / / For each elt in batch , do : <nl> - for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> - / / Matrix mulitply per output : <nl> - THCTensor_ ( select ) ( state , input_n , input , 0 , elt ) ; <nl> - THCTensor_ ( select ) ( state , gradOutput_n , gradOutput , 0 , elt ) ; <nl> - <nl> - / / Extract columns : <nl> - vol2col ( <nl> - THCState_getCurrentStream ( state ) , <nl> - THCTensor_ ( data ) ( state , gradOutput_n ) , <nl> - nOutputPlane , outputDepth , outputHeight , outputWidth , kT , kH , kW , padT , padH , padW , dT , dH , dW , <nl> - 1 , 1 , 1 , <nl> - THCTensor_ ( data ) ( state , columns ) <nl> - ) ; <nl> - <nl> - / / M , N , K are dims of matrix A and B <nl> - / / ( see http : / / docs . nvidia . com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> - long n = columns - > size [ 0 ] ; / / nOutputPlane * kt * kh * kw <nl> - long m = input_n - > size [ 0 ] ; / / nInputPlane <nl> - long k = columns - > size [ 1 ] ; / / inputHeight * inputWidth <nl> - <nl> - / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> - # ifdef THC_REAL_IS_FLOAT <nl> - THCudaBlas_Sgemm ( <nl> - # elif defined ( THC_REAL_IS_HALF ) <nl> - THCudaBlas_Hgemm ( <nl> - # elif defined ( THC_REAL_IS_DOUBLE ) <nl> - THCudaBlas_Dgemm ( <nl> - # endif <nl> - state , <nl> - ' t ' , ' n ' , <nl> - n , m , k , <nl> - scale , <nl> - THCTensor_ ( data ) ( state , columns ) , k , <nl> - THCTensor_ ( data ) ( state , input_n ) , k , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , gradWeight ) , n <nl> - ) ; <nl> - <nl> - / / Do Bias : <nl> - / / M , N , K are dims of matrix A and B <nl> - / / ( see http : / / docs . nvidia . 
com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> - long m_ = nOutputPlane ; <nl> - long k_ = outputDepth * outputHeight * outputWidth ; <nl> - <nl> - / / Do GEMV ( note : this is a bit confusing because gemv assumes column - major matrices ) <nl> - if ( gradBias ) { <nl> - # if defined ( THC_REAL_IS_FLOAT ) | | defined ( THC_REAL_IS_DOUBLE ) <nl> - # ifdef THC_REAL_IS_FLOAT <nl> - THCudaBlas_Sgemv ( <nl> - # elif defined ( THC_REAL_IS_DOUBLE ) <nl> - THCudaBlas_Dgemv ( <nl> - # endif <nl> - state , <nl> - ' t ' , <nl> - k_ , m_ , <nl> - scale , <nl> - THCTensor_ ( data ) ( state , gradOutput_n ) , k_ , <nl> - THCTensor_ ( data ) ( state , ones ) , 1 , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , gradBias ) , 1 <nl> - ) ; <nl> - # endif <nl> - # ifdef THC_REAL_IS_HALF <nl> - THCudaBlas_Hgemm ( <nl> - state , <nl> - ' t ' , ' n ' , <nl> - m_ , 1 , k_ , <nl> - scale , <nl> - THCTensor_ ( data ) ( state , gradOutput_n ) , k_ , <nl> - THCTensor_ ( data ) ( state , ones ) , k_ , <nl> - ScalarConvert < int , real > : : to ( 1 ) , <nl> - THCTensor_ ( data ) ( state , gradBias ) , m_ <nl> - ) ; <nl> - # endif <nl> - } <nl> - } <nl> - <nl> - / / Free <nl> - THCTensor_ ( free ) ( state , input_n ) ; <nl> - THCTensor_ ( free ) ( state , gradOutput_n ) ; <nl> - <nl> - / / Resize <nl> - if ( batch = = 0 ) { <nl> - THCTensor_ ( resize4d ) ( state , gradOutput , nOutputPlane , outputDepth , outputHeight , outputWidth ) ; <nl> - THCTensor_ ( resize4d ) ( state , input , nInputPlane , inputDepth , inputHeight , inputWidth ) ; <nl> - } <nl> - <nl> - THCTensor_ ( free ) ( state , input ) ; <nl> - THCTensor_ ( free ) ( state , gradOutput ) ; <nl> + THNN_ ( VolumetricFullDilatedConvolution_accGradParameters ) ( <nl> + state , input , gradOutput , gradWeight , gradBias , finput , fgradInput , <nl> + dT , dW , dH , padT , padW , padH , 1 , 1 , 1 , adjT , adjW , adjH , scale_ ) ; <nl> } <nl> <nl> # endif <nl> new file mode 100644 <nl> index 000000000000 . . bda0b596a0b7 <nl> mmm / dev / null <nl> ppp b / generic / VolumetricFullDilatedConvolution . cu <nl> <nl> + # ifndef THC_GENERIC_FILE <nl> + # define THC_GENERIC_FILE " generic / VolumetricFullDilatedConvolution . cu " <nl> + # else <nl> + <nl> + static inline void THNN_ ( VolumetricFullDilatedConvolution_shapeCheck ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * gradOutput , <nl> + THCTensor * weight , <nl> + THCTensor * bias , <nl> + int dT , int dW , int dH , <nl> + int padT , int padW , int padH , <nl> + int dilationT , int dilationW , int dilationH , <nl> + int adjT , int adjW , int adjH ) { <nl> + THCUNN_argCheck ( state , input - > nDimension = = 4 | | input - > nDimension = = 5 , 2 , input , <nl> + " 4D or 5D ( batch mode ) tensor expected for input , but got : % s " ) ; <nl> + / / number of input & output planes and kernel size is indirectly defined by the weight tensor <nl> + THCUNN_argCheck ( state , weight - > nDimension = = 5 , 4 , weight , <nl> + " 5D ( nOutputPlane x nInputPlane x kT x kH x kW ) tensor " <nl> + " expected for weight , but got : % s " ) ; <nl> + THArgCheck ( THCTensor_ ( isContiguous ) ( state , weight ) , 4 , <nl> + " weight tensor has to be contiguous " ) ; <nl> + THArgCheck ( ! 
bias | | THCTensor_ ( isContiguous ) ( state , bias ) , 5 , <nl> + " bias tensor has to be contiguous " ) ; <nl> + THArgCheck ( dT > 0 & & dW > 0 & & dH > 0 , 8 , <nl> + " stride should be greater than zero , but got dT : % d dH : % d dW : % d " , dT , dH , dW ) ; <nl> + THArgCheck ( adjT < dT & & adjW < dW & & adjH < dH , 14 , <nl> + " output adjustment must be smaller than stride , but got " <nl> + " adjT : % d adjH : % d adjW : % d dT : % d dH : % d dW : % d " , <nl> + adjT , adjH , adjW , dT , dH , dW ) ; <nl> + THArgCheck ( dilationT > 0 & & dilationW > 0 & & dilationH > 0 , 15 , <nl> + " dilation should be greater than zero , but got dilationT : % d , dilationH : % d , dilationW : % d " , <nl> + dilationT , dilationH , dilationW ) ; <nl> + <nl> + int ndim = input - > nDimension ; <nl> + int nInputPlane = THCTensor_ ( size ) ( state , weight , 0 ) ; <nl> + int nOutputPlane = THCTensor_ ( size ) ( state , weight , 1 ) ; <nl> + const int kT = ( int ) weight - > size [ 2 ] ; <nl> + const int kH = ( int ) weight - > size [ 3 ] ; <nl> + const int kW = ( int ) weight - > size [ 4 ] ; <nl> + <nl> + if ( bias ! = NULL ) { <nl> + THCUNN_check_dim_size ( state , bias , 1 , 0 , weight - > size [ 1 ] ) ; <nl> + } <nl> + <nl> + int dimf = 0 ; <nl> + int dimd = 1 ; <nl> + int dimh = 2 ; <nl> + int dimw = 3 ; <nl> + <nl> + if ( ndim = = 5 ) { <nl> + dimf + + ; <nl> + dimd + + ; <nl> + dimh + + ; <nl> + dimw + + ; <nl> + } <nl> + <nl> + long inputWidth = input - > size [ dimw ] ; <nl> + long inputHeight = input - > size [ dimh ] ; <nl> + long inputDepth = input - > size [ dimd ] ; <nl> + long outputDepth = ( inputDepth - 1 ) * dT - 2 * padT + ( dilationT * ( kT - 1 ) + 1 ) + adjT ; <nl> + long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + ( dilationH * ( kH - 1 ) + 1 ) + adjH ; <nl> + long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + ( dilationW * ( kW - 1 ) + 1 ) + adjW ; <nl> + if ( outputDepth < 1 | | outputWidth < 1 | | outputHeight < 1 ) <nl> + THError ( " Given input size : ( % dx % dx % dx % d ) . Calculated output size : ( % dx % dx % dx % d ) . Output size is too small " , <nl> + nInputPlane , inputDepth , inputHeight , inputWidth , nOutputPlane , outputDepth , outputHeight , outputWidth ) ; <nl> + <nl> + THCUNN_check_dim_size ( state , input , ndim , dimf , nInputPlane ) ; <nl> + if ( gradOutput ! 
= NULL ) { <nl> + THCUNN_check_dim_size ( state , gradOutput , ndim , dimf , nOutputPlane ) ; <nl> + THCUNN_check_dim_size ( state , gradOutput , ndim , dimd , outputDepth ) ; <nl> + THCUNN_check_dim_size ( state , gradOutput , ndim , dimh , outputHeight ) ; <nl> + THCUNN_check_dim_size ( state , gradOutput , ndim , dimw , outputWidth ) ; <nl> + } <nl> + } <nl> + <nl> + void THNN_ ( VolumetricFullDilatedConvolution_updateOutput ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * output , <nl> + THCTensor * weight , <nl> + THCTensor * bias , <nl> + THCTensor * finput , <nl> + THCTensor * fgradInput , <nl> + int dT , int dW , int dH , <nl> + int padT , int padW , int padH , <nl> + int dilationT , int dilationW , int dilationH , <nl> + int adjT , int adjW , int adjH ) <nl> + { <nl> + <nl> + THCTensor * columns = finput ; <nl> + THCTensor * ones = fgradInput ; <nl> + <nl> + int nInputPlane = THCTensor_ ( size ) ( state , weight , 0 ) ; <nl> + int nOutputPlane = THCTensor_ ( size ) ( state , weight , 1 ) ; <nl> + const int kT = ( int ) weight - > size [ 2 ] ; <nl> + const int kH = ( int ) weight - > size [ 3 ] ; <nl> + const int kW = ( int ) weight - > size [ 4 ] ; <nl> + <nl> + THCUNN_assertSameGPU ( state , 6 , input , output , weight , <nl> + bias , columns , ones ) ; <nl> + THNN_ ( VolumetricFullDilatedConvolution_shapeCheck ) ( <nl> + state , input , NULL , weight , bias , <nl> + dT , dW , dH , padT , padW , padH , dilationT , dilationW , dilationH , <nl> + adjT , adjW , adjH ) ; <nl> + <nl> + input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> + weight = THCTensor_ ( newContiguous ) ( state , weight ) ; <nl> + bias = bias ? THCTensor_ ( newContiguous ) ( state , bias ) : bias ; <nl> + <nl> + int batch = 1 ; <nl> + if ( input - > nDimension = = 4 ) { <nl> + / / Force batch <nl> + batch = 0 ; <nl> + THCTensor_ ( resize5d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] , input - > size [ 3 ] ) ; <nl> + } <nl> + <nl> + long inputWidth = input - > size [ 4 ] ; <nl> + long inputHeight = input - > size [ 3 ] ; <nl> + long inputDepth = input - > size [ 2 ] ; <nl> + long outputDepth = ( inputDepth - 1 ) * dT - 2 * padT + ( dilationT * ( kT - 1 ) + 1 ) + adjT ; <nl> + long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + ( dilationH * ( kH - 1 ) + 1 ) + adjH ; <nl> + long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + ( dilationW * ( kW - 1 ) + 1 ) + adjW ; <nl> + <nl> + / / Batch size + input planes <nl> + long batchSize = input - > size [ 0 ] ; <nl> + <nl> + / / Resize output <nl> + THCTensor_ ( resize5d ) ( state , output , batchSize , nOutputPlane , outputDepth , outputHeight , outputWidth ) ; <nl> + <nl> + / / Resize temporary columns <nl> + THCTensor_ ( resize2d ) ( state , columns , nOutputPlane * kW * kH * kT , inputDepth * inputHeight * inputWidth ) ; <nl> + <nl> + / / Define a buffer of ones , for bias accumulation <nl> + / / Note : this buffer can be shared with other modules , it only ever gets increased , <nl> + / / and always contains ones . <nl> + if ( ones - > nDimension ! = 3 | | ones - > size [ 0 ] * ones - > size [ 1 ] * ones - > size [ 2 ] < outputDepth * outputHeight * outputWidth ) { <nl> + / / Resize plane and fill with ones . . . 
<nl> + THCTensor_ ( resize3d ) ( state , ones , outputDepth , outputHeight , outputWidth ) ; <nl> + THCTensor_ ( fill ) ( state , ones , ScalarConvert < int , real > : : to ( 1 ) ) ; <nl> + } <nl> + <nl> + / / Helpers <nl> + THCTensor * input_n = THCTensor_ ( new ) ( state ) ; <nl> + THCTensor * output_n = THCTensor_ ( new ) ( state ) ; <nl> + <nl> + / / For each elt in batch , do : <nl> + for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> + / / Matrix mulitply per output : <nl> + THCTensor_ ( select ) ( state , input_n , input , 0 , elt ) ; <nl> + THCTensor_ ( select ) ( state , output_n , output , 0 , elt ) ; <nl> + <nl> + / / M , N , K are dims of matrix A and B <nl> + / / ( see http : / / docs . nvidia . com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> + long m = weight - > size [ 1 ] * weight - > size [ 2 ] * weight - > size [ 3 ] * weight - > size [ 4 ] ; <nl> + long n = columns - > size [ 1 ] ; <nl> + long k = weight - > size [ 0 ] ; <nl> + <nl> + / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> + # ifdef THC_REAL_IS_FLOAT <nl> + THCudaBlas_Sgemm ( <nl> + # elif defined ( THC_REAL_IS_HALF ) <nl> + THCudaBlas_Hgemm ( <nl> + # elif defined ( THC_REAL_IS_DOUBLE ) <nl> + THCudaBlas_Dgemm ( <nl> + # endif <nl> + state , <nl> + ' n ' , ' t ' , <nl> + n , m , k , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , input_n ) , n , <nl> + THCTensor_ ( data ) ( state , weight ) , m , <nl> + ScalarConvert < int , real > : : to ( 0 ) , <nl> + THCTensor_ ( data ) ( state , columns ) , n <nl> + ) ; <nl> + <nl> + / / Unpack columns back into input : <nl> + col2vol < real , accreal > ( <nl> + THCState_getCurrentStream ( state ) , <nl> + THCTensor_ ( data ) ( state , columns ) , <nl> + nOutputPlane , outputDepth , outputHeight , outputWidth , kT , kH , kW , padT , padH , padW , dT , dH , dW , <nl> + dilationT , dilationH , dilationW , <nl> + THCTensor_ ( data ) ( state , output_n ) <nl> + ) ; <nl> + <nl> + / / Do Bias after : <nl> + / / M , N , K are dims of matrix A and B <nl> + / / ( see http : / / docs . nvidia . 
com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> + long m_ = nOutputPlane ; <nl> + long n_ = outputDepth * outputHeight * outputWidth ; <nl> + long k_ = 1 ; <nl> + <nl> + / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> + if ( bias ) { <nl> + # ifdef THC_REAL_IS_FLOAT <nl> + THCudaBlas_Sgemm ( <nl> + # elif defined ( THC_REAL_IS_HALF ) <nl> + THCudaBlas_Hgemm ( <nl> + # elif defined ( THC_REAL_IS_DOUBLE ) <nl> + THCudaBlas_Dgemm ( <nl> + # endif <nl> + state , <nl> + ' t ' , ' n ' , <nl> + n_ , m_ , k_ , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , ones ) , k_ , <nl> + THCTensor_ ( data ) ( state , bias ) , k_ , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , output_n ) , n_ <nl> + ) ; <nl> + } <nl> + } <nl> + <nl> + / / Free <nl> + THCTensor_ ( free ) ( state , input_n ) ; <nl> + THCTensor_ ( free ) ( state , output_n ) ; <nl> + <nl> + / / Resize output <nl> + if ( batch = = 0 ) { <nl> + THCTensor_ ( resize4d ) ( state , output , nOutputPlane , outputDepth , outputHeight , outputWidth ) ; <nl> + THCTensor_ ( resize4d ) ( state , input , nInputPlane , inputDepth , inputHeight , inputWidth ) ; <nl> + } <nl> + <nl> + THCTensor_ ( free ) ( state , input ) ; <nl> + THCTensor_ ( free ) ( state , weight ) ; <nl> + if ( bias ) THCTensor_ ( free ) ( state , bias ) ; <nl> + <nl> + } <nl> + <nl> + void THNN_ ( VolumetricFullDilatedConvolution_updateGradInput ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * gradOutput , <nl> + THCTensor * gradInput , <nl> + THCTensor * weight , <nl> + THCTensor * finput , <nl> + THCTensor * fgradInput , <nl> + int dT , int dW , int dH , <nl> + int padT , int padW , int padH , <nl> + int dilationT , int dilationW , int dilationH , <nl> + int adjT , int adjW , int adjH ) <nl> + { <nl> + THCTensor * gradColumns = finput ; <nl> + <nl> + int nInputPlane = THCTensor_ ( size ) ( state , weight , 0 ) ; <nl> + int nOutputPlane = THCTensor_ ( size ) ( state , weight , 1 ) ; <nl> + const int kT = ( int ) weight - > size [ 2 ] ; <nl> + const int kH = ( int ) weight - > size [ 3 ] ; <nl> + const int kW = ( int ) weight - > size [ 4 ] ; <nl> + <nl> + THCUNN_assertSameGPU ( state , 5 , input , gradOutput , weight , <nl> + gradColumns , gradInput ) ; <nl> + THNN_ ( VolumetricFullDilatedConvolution_shapeCheck ) ( <nl> + state , input , gradOutput , weight , NULL , <nl> + dT , dW , dH , padT , padW , padH , dilationT , dilationW , dilationH , <nl> + adjT , adjW , adjH ) ; <nl> + <nl> + input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> + gradOutput = THCTensor_ ( newContiguous ) ( state , gradOutput ) ; <nl> + weight = THCTensor_ ( newContiguous ) ( state , weight ) ; <nl> + <nl> + int batch = 1 ; <nl> + if ( input - > nDimension = = 4 ) { <nl> + / / Force batch <nl> + batch = 0 ; <nl> + THCTensor_ ( resize5d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] , input - > size [ 3 ] ) ; <nl> + THCTensor_ ( resize5d ) ( state , gradOutput , 1 , gradOutput - > size [ 0 ] , gradOutput - > size [ 1 ] , gradOutput - > size [ 2 ] , gradOutput - > size [ 3 ] ) ; <nl> + } <nl> + <nl> + long inputWidth = input - > size [ 4 ] ; <nl> + long inputHeight = input - > size [ 3 ] ; <nl> + long inputDepth = input - > size [ 2 ] ; <nl> + long outputDepth = ( inputDepth - 1 ) * dT - 2 * padT + ( dilationT * ( kT - 1 ) + 1 ) + adjT ; <nl> + long outputHeight = ( inputHeight - 1 ) * dH - 2 * 
padH + ( dilationH * ( kH - 1 ) + 1 ) + adjH ; <nl> + long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + ( dilationW * ( kW - 1 ) + 1 ) + adjW ; <nl> + <nl> + / / Batch size + input planes <nl> + long batchSize = input - > size [ 0 ] ; <nl> + <nl> + / / Resize output <nl> + THCTensor_ ( resize5d ) ( state , gradInput , batchSize , nInputPlane , inputDepth , inputHeight , inputWidth ) ; <nl> + <nl> + / / Resize temporary columns <nl> + THCTensor_ ( resize2d ) ( state , gradColumns , nOutputPlane * kW * kH * kT , inputDepth * inputHeight * inputWidth ) ; <nl> + <nl> + / / Helpers <nl> + THCTensor * gradInput_n = THCTensor_ ( new ) ( state ) ; <nl> + THCTensor * gradOutput_n = THCTensor_ ( new ) ( state ) ; <nl> + <nl> + / / For each elt in batch , do : <nl> + for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> + / / Matrix mulitply per sample : <nl> + THCTensor_ ( select ) ( state , gradInput_n , gradInput , 0 , elt ) ; <nl> + THCTensor_ ( select ) ( state , gradOutput_n , gradOutput , 0 , elt ) ; <nl> + <nl> + / / Extract columns : <nl> + vol2col ( <nl> + THCState_getCurrentStream ( state ) , <nl> + THCTensor_ ( data ) ( state , gradOutput_n ) , <nl> + nOutputPlane , outputDepth , outputHeight , outputWidth , kT , kH , kW , padT , padH , padW , dT , dH , dW , <nl> + dilationT , dilationH , dilationW , <nl> + THCTensor_ ( data ) ( state , gradColumns ) <nl> + ) ; <nl> + <nl> + <nl> + / / M , N , K are dims of matrix A and B <nl> + / / ( see http : / / docs . nvidia . com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> + long m = weight - > size [ 0 ] ; <nl> + long n = gradColumns - > size [ 1 ] ; <nl> + long k = weight - > size [ 1 ] * weight - > size [ 2 ] * weight - > size [ 3 ] * weight - > size [ 4 ] ; <nl> + <nl> + / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> + # ifdef THC_REAL_IS_FLOAT <nl> + THCudaBlas_Sgemm ( <nl> + # elif defined ( THC_REAL_IS_HALF ) <nl> + THCudaBlas_Hgemm ( <nl> + # elif defined ( THC_REAL_IS_DOUBLE ) <nl> + THCudaBlas_Dgemm ( <nl> + # endif <nl> + state , <nl> + ' n ' , ' n ' , <nl> + n , m , k , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , gradColumns ) , n , <nl> + THCTensor_ ( data ) ( state , weight ) , k , <nl> + ScalarConvert < int , real > : : to ( 0 ) , <nl> + THCTensor_ ( data ) ( state , gradInput_n ) , n <nl> + ) ; <nl> + } <nl> + <nl> + <nl> + / / Free <nl> + THCTensor_ ( free ) ( state , gradInput_n ) ; <nl> + THCTensor_ ( free ) ( state , gradOutput_n ) ; <nl> + <nl> + / / Resize output <nl> + if ( batch = = 0 ) { <nl> + THCTensor_ ( resize4d ) ( state , gradOutput , nOutputPlane , outputDepth , outputHeight , outputWidth ) ; <nl> + THCTensor_ ( resize4d ) ( state , input , nInputPlane , inputDepth , inputHeight , inputWidth ) ; <nl> + THCTensor_ ( resize4d ) ( state , gradInput , nInputPlane , inputDepth , inputHeight , inputWidth ) ; <nl> + } <nl> + <nl> + THCTensor_ ( free ) ( state , input ) ; <nl> + THCTensor_ ( free ) ( state , gradOutput ) ; <nl> + THCTensor_ ( free ) ( state , weight ) ; <nl> + } <nl> + <nl> + <nl> + void THNN_ ( VolumetricFullDilatedConvolution_accGradParameters ) ( <nl> + THCState * state , <nl> + THCTensor * input , <nl> + THCTensor * gradOutput , <nl> + THCTensor * gradWeight , <nl> + THCTensor * gradBias , <nl> + THCTensor * finput , <nl> + THCTensor * fgradInput , <nl> + int dT , int dW , int dH , <nl> + int padT , int padW , int padH , <nl> + int dilationT , int dilationW , int dilationH , <nl> + int adjT , 
int adjW , int adjH , <nl> + accreal scale_ ) <nl> + { <nl> + real scale = ScalarConvert < accreal , real > : : to ( scale_ ) ; <nl> + THCTensor * columns = finput ; <nl> + THCTensor * ones = fgradInput ; <nl> + <nl> + int nInputPlane = THCTensor_ ( size ) ( state , gradWeight , 0 ) ; <nl> + int nOutputPlane = THCTensor_ ( size ) ( state , gradWeight , 1 ) ; <nl> + const int kT = ( int ) gradWeight - > size [ 2 ] ; <nl> + const int kH = ( int ) gradWeight - > size [ 3 ] ; <nl> + const int kW = ( int ) gradWeight - > size [ 4 ] ; <nl> + <nl> + THCUNN_assertSameGPU ( state , 6 , input , gradOutput , gradWeight , <nl> + gradBias , columns , ones ) ; <nl> + THNN_ ( VolumetricFullDilatedConvolution_shapeCheck ) ( <nl> + state , input , gradOutput , gradWeight , <nl> + gradBias , dT , dW , dH , padT , padW , padH , dilationT , dilationW , dilationH , <nl> + adjT , adjW , adjH ) ; <nl> + <nl> + THArgCheck ( THCTensor_ ( isContiguous ) ( state , gradWeight ) , 4 , " gradWeight needs to be contiguous " ) ; <nl> + if ( gradBias ) <nl> + THArgCheck ( THCTensor_ ( isContiguous ) ( state , gradBias ) , 5 , " gradBias needs to be contiguous " ) ; <nl> + <nl> + input = THCTensor_ ( newContiguous ) ( state , input ) ; <nl> + gradOutput = THCTensor_ ( newContiguous ) ( state , gradOutput ) ; <nl> + <nl> + int batch = 1 ; <nl> + if ( input - > nDimension = = 4 ) { <nl> + / / Force batch <nl> + batch = 0 ; <nl> + THCTensor_ ( resize5d ) ( state , input , 1 , input - > size [ 0 ] , input - > size [ 1 ] , input - > size [ 2 ] , input - > size [ 3 ] ) ; <nl> + THCTensor_ ( resize5d ) ( state , gradOutput , 1 , gradOutput - > size [ 0 ] , gradOutput - > size [ 1 ] , gradOutput - > size [ 2 ] , gradOutput - > size [ 3 ] ) ; <nl> + } <nl> + <nl> + long inputWidth = input - > size [ 4 ] ; <nl> + long inputHeight = input - > size [ 3 ] ; <nl> + long inputDepth = input - > size [ 2 ] ; <nl> + long outputDepth = ( inputDepth - 1 ) * dT - 2 * padT + ( dilationT * ( kT - 1 ) + 1 ) + adjT ; <nl> + long outputHeight = ( inputHeight - 1 ) * dH - 2 * padH + ( dilationH * ( kH - 1 ) + 1 ) + adjH ; <nl> + long outputWidth = ( inputWidth - 1 ) * dW - 2 * padW + ( dilationW * ( kW - 1 ) + 1 ) + adjW ; <nl> + <nl> + / / Batch size + input planes <nl> + long batchSize = input - > size [ 0 ] ; <nl> + <nl> + / / Define a buffer of ones , for bias accumulation <nl> + if ( ones - > nDimension ! = 3 | | ones - > size [ 0 ] * ones - > size [ 1 ] * ones - > size [ 2 ] < outputDepth * outputHeight * outputWidth ) { <nl> + / / Resize plane and fill with ones . . . 
<nl> + THCTensor_ ( resize3d ) ( state , ones , outputDepth , outputHeight , outputWidth ) ; <nl> + THCTensor_ ( fill ) ( state , ones , ScalarConvert < int , real > : : to ( 1 ) ) ; <nl> + } <nl> + <nl> + / / Resize temporary columns <nl> + THCTensor_ ( resize2d ) ( state , columns , nOutputPlane * kW * kH * kT , inputDepth * inputHeight * inputWidth ) ; <nl> + <nl> + / / Helpers <nl> + THCTensor * input_n = THCTensor_ ( new ) ( state ) ; <nl> + THCTensor * gradOutput_n = THCTensor_ ( new ) ( state ) ; <nl> + <nl> + / / For each elt in batch , do : <nl> + for ( int elt = 0 ; elt < batchSize ; elt + + ) { <nl> + / / Matrix mulitply per output : <nl> + THCTensor_ ( select ) ( state , input_n , input , 0 , elt ) ; <nl> + THCTensor_ ( select ) ( state , gradOutput_n , gradOutput , 0 , elt ) ; <nl> + <nl> + / / Extract columns : <nl> + vol2col ( <nl> + THCState_getCurrentStream ( state ) , <nl> + THCTensor_ ( data ) ( state , gradOutput_n ) , <nl> + nOutputPlane , outputDepth , outputHeight , outputWidth , kT , kH , kW , padT , padH , padW , dT , dH , dW , <nl> + dilationT , dilationH , dilationW , <nl> + THCTensor_ ( data ) ( state , columns ) <nl> + ) ; <nl> + <nl> + / / M , N , K are dims of matrix A and B <nl> + / / ( see http : / / docs . nvidia . com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> + long n = columns - > size [ 0 ] ; / / nOutputPlane * kt * kh * kw <nl> + long m = input_n - > size [ 0 ] ; / / nInputPlane <nl> + long k = columns - > size [ 1 ] ; / / inputHeight * inputWidth <nl> + <nl> + / / Do GEMM ( note : this is a bit confusing because gemm assumes column - major matrices ) <nl> + # ifdef THC_REAL_IS_FLOAT <nl> + THCudaBlas_Sgemm ( <nl> + # elif defined ( THC_REAL_IS_HALF ) <nl> + THCudaBlas_Hgemm ( <nl> + # elif defined ( THC_REAL_IS_DOUBLE ) <nl> + THCudaBlas_Dgemm ( <nl> + # endif <nl> + state , <nl> + ' t ' , ' n ' , <nl> + n , m , k , <nl> + scale , <nl> + THCTensor_ ( data ) ( state , columns ) , k , <nl> + THCTensor_ ( data ) ( state , input_n ) , k , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , gradWeight ) , n <nl> + ) ; <nl> + <nl> + / / Do Bias : <nl> + / / M , N , K are dims of matrix A and B <nl> + / / ( see http : / / docs . nvidia . 
com / cuda / cublas / # cublas - lt - t - gt - gemm ) <nl> + long m_ = nOutputPlane ; <nl> + long k_ = outputDepth * outputHeight * outputWidth ; <nl> + <nl> + / / Do GEMV ( note : this is a bit confusing because gemv assumes column - major matrices ) <nl> + if ( gradBias ) { <nl> + # if defined ( THC_REAL_IS_FLOAT ) | | defined ( THC_REAL_IS_DOUBLE ) <nl> + # ifdef THC_REAL_IS_FLOAT <nl> + THCudaBlas_Sgemv ( <nl> + # elif defined ( THC_REAL_IS_DOUBLE ) <nl> + THCudaBlas_Dgemv ( <nl> + # endif <nl> + state , <nl> + ' t ' , <nl> + k_ , m_ , <nl> + scale , <nl> + THCTensor_ ( data ) ( state , gradOutput_n ) , k_ , <nl> + THCTensor_ ( data ) ( state , ones ) , 1 , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , gradBias ) , 1 <nl> + ) ; <nl> + # endif <nl> + # ifdef THC_REAL_IS_HALF <nl> + THCudaBlas_Hgemm ( <nl> + state , <nl> + ' t ' , ' n ' , <nl> + m_ , 1 , k_ , <nl> + scale , <nl> + THCTensor_ ( data ) ( state , gradOutput_n ) , k_ , <nl> + THCTensor_ ( data ) ( state , ones ) , k_ , <nl> + ScalarConvert < int , real > : : to ( 1 ) , <nl> + THCTensor_ ( data ) ( state , gradBias ) , m_ <nl> + ) ; <nl> + # endif <nl> + } <nl> + } <nl> + <nl> + / / Free <nl> + THCTensor_ ( free ) ( state , input_n ) ; <nl> + THCTensor_ ( free ) ( state , gradOutput_n ) ; <nl> + <nl> + / / Resize <nl> + if ( batch = = 0 ) { <nl> + THCTensor_ ( resize4d ) ( state , gradOutput , nOutputPlane , outputDepth , outputHeight , outputWidth ) ; <nl> + THCTensor_ ( resize4d ) ( state , input , nInputPlane , inputDepth , inputHeight , inputWidth ) ; <nl> + } <nl> + <nl> + THCTensor_ ( free ) ( state , input ) ; <nl> + THCTensor_ ( free ) ( state , gradOutput ) ; <nl> + } <nl> + <nl> + # endif <nl> | add 2d and 3d dilated full Convolution | pytorch/pytorch | a565b77791e1a7371d4a40fbdbf9569fca985209 | 2017-08-03T02:44:59Z |
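
The core of the change above is the transposed ("full") dilated convolution shape arithmetic: each output extent is computed as (in - 1) * stride - 2 * pad + (dilation * (kernel - 1) + 1) + adj before the per-batch GEMM and vol2col/col2vol calls, and the same three lines recur in the shape check, updateOutput, updateGradInput, and accGradParameters. A minimal standalone C++ sketch of that size formula, with hypothetical sizes chosen only for illustration:

#include <cstdio>

// One axis of the output shape of a dilated transposed convolution,
// mirroring the outputDepth/outputHeight/outputWidth lines in the diff.
static long transposedOutputSize(long in, int kernel, int stride,
                                 int pad, int dilation, int adj) {
  return (in - 1) * stride - 2 * pad + (dilation * (kernel - 1) + 1) + adj;
}

int main() {
  long d = transposedOutputSize(/*in=*/8, /*kernel=*/3, /*stride=*/2,
                                /*pad=*/1, /*dilation=*/2, /*adj=*/1);
  std::printf("output depth = %ld\n", d);  // (8-1)*2 - 2 + 5 + 1 = 18
  return 0;
}
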
mmm a / test / core / end2end / fixtures / proxy . c <nl> ppp b / test / core / end2end / fixtures / proxy . c <nl> static void on_c2p_recv_msg ( void * arg , int success ) { <nl> new_closure ( on_p2s_sent_close , pc ) , NULL ) ; <nl> GPR_ASSERT ( err = = GRPC_CALL_OK ) ; <nl> } <nl> + } else { <nl> + if ( pc - > c2p_msg ! = NULL ) { <nl> + grpc_byte_buffer_destroy ( pc - > c2p_msg ) ; <nl> + } <nl> } <nl> <nl> unrefpc ( pc , " on_c2p_recv_msg " ) ; <nl> | Merge pull request from muxi / fix - cancel - after - invoke | grpc/grpc | 76c9e0806373181b32f1fba63686a3502acfb9cf | 2017-09-25T14:23:08Z |
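
The one-line fix above plugs a leak: when the client-to-proxy recv op fails, the proxy still owns pc->c2p_msg and must destroy it, because the send path that would normally consume the buffer never runs. The fixture is C, but the ownership rule is easiest to see in a hedged C++ sketch (ByteBuffer, forward, and on_recv_msg are hypothetical stand-ins for the grpc types in the diff):

#include <memory>

struct ByteBuffer {};  // stand-in for grpc_byte_buffer

static void forward(std::unique_ptr<ByteBuffer> msg) {
  // the send path takes ownership and eventually frees the buffer
}

static void on_recv_msg(std::unique_ptr<ByteBuffer> msg, bool success) {
  if (success) forward(std::move(msg));
  // on failure, msg's destructor frees the buffer here -- the case the
  // patch covers explicitly with grpc_byte_buffer_destroy in C
}

int main() {
  on_recv_msg(std::make_unique<ByteBuffer>(), /*success=*/false);
  return 0;
}
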
mmm a / atom / browser / api / atom_api_menu . h <nl> ppp b / atom / browser / api / atom_api_menu . h <nl> class Menu : public mate : : Wrappable , <nl> virtual ~ Menu ( ) ; <nl> <nl> / / ui : : SimpleMenuModel : : Delegate implementations : <nl> - virtual bool IsCommandIdChecked ( int command_id ) const OVERRIDE ; <nl> - virtual bool IsCommandIdEnabled ( int command_id ) const OVERRIDE ; <nl> - virtual bool IsCommandIdVisible ( int command_id ) const OVERRIDE ; <nl> - virtual bool GetAcceleratorForCommandId ( <nl> + bool IsCommandIdChecked ( int command_id ) const override ; <nl> + bool IsCommandIdEnabled ( int command_id ) const override ; <nl> + bool IsCommandIdVisible ( int command_id ) const override ; <nl> + bool GetAcceleratorForCommandId ( <nl> int command_id , <nl> - ui : : Accelerator * accelerator ) OVERRIDE ; <nl> - virtual bool IsItemForCommandIdDynamic ( int command_id ) const OVERRIDE ; <nl> - virtual base : : string16 GetLabelForCommandId ( int command_id ) const OVERRIDE ; <nl> - virtual base : : string16 GetSublabelForCommandId ( int command_id ) const OVERRIDE ; <nl> - virtual void ExecuteCommand ( int command_id , int event_flags ) OVERRIDE ; <nl> - virtual void MenuWillShow ( ui : : SimpleMenuModel * source ) OVERRIDE ; <nl> + ui : : Accelerator * accelerator ) override ; <nl> + bool IsItemForCommandIdDynamic ( int command_id ) const override ; <nl> + base : : string16 GetLabelForCommandId ( int command_id ) const override ; <nl> + base : : string16 GetSublabelForCommandId ( int command_id ) const override ; <nl> + void ExecuteCommand ( int command_id , int event_flags ) override ; <nl> + void MenuWillShow ( ui : : SimpleMenuModel * source ) override ; <nl> <nl> virtual void AttachToWindow ( Window * window ) ; <nl> virtual void Popup ( Window * window ) = 0 ; <nl> | OVERRIDE = > override in atom_api_menu . h | electron/electron | ab6cb042f6c1dea084a7320276f435a888b2d088 | 2014-11-16T02:45:53Z |
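
This commit replaces Chromium's legacy OVERRIDE macro with the standard C++11 override keyword and drops the now-redundant leading virtual on the overriding declarations. The keyword buys compile-time checking; a minimal sketch using one of the signatures from the diff:

struct Delegate {
  virtual bool IsCommandIdEnabled(int command_id) const { return true; }
  virtual ~Delegate() = default;
};

struct Menu : Delegate {
  // 'override' makes the compiler confirm this matches a base-class
  // virtual; dropping 'const' or changing the parameter type would now
  // be a hard error instead of silently declaring a new function.
  bool IsCommandIdEnabled(int command_id) const override {
    return command_id != 0;
  }
};

int main() { Menu m; return m.IsCommandIdEnabled(1) ? 0 : 1; }
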
mmm a / fdbclient / FileBackupAgent . actor . cpp <nl> ppp b / fdbclient / FileBackupAgent . actor . cpp <nl> StringRef FileBackupAgent : : restoreStateText ( ERestoreState id ) { <nl> } <nl> } <nl> <nl> - template < > Tuple Codec < ERestoreState > : : pack ( ERestoreState const & val ) { return Tuple ( ) . append ( val ) ; } <nl> - template < > ERestoreState Codec < ERestoreState > : : unpack ( Tuple const & val ) { return ( ERestoreState ) val . getInt ( 0 ) ; } <nl> + template < > inline Tuple Codec < ERestoreState > : : pack ( ERestoreState const & val ) { return Tuple ( ) . append ( val ) ; } <nl> + template < > inline ERestoreState Codec < ERestoreState > : : unpack ( Tuple const & val ) { return ( ERestoreState ) val . getInt ( 0 ) ; } <nl> <nl> ACTOR Future < std : : vector < KeyBackedTag > > TagUidMap : : getAll_impl ( TagUidMap * tagsMap , Reference < ReadYourWritesTransaction > tr ) { <nl> state Key prefix = tagsMap - > prefix ; / / Copying it here as tagsMap lifetime is not tied to this actor <nl> mmm a / fdbserver / RestoreCommon . actor . cpp <nl> ppp b / fdbserver / RestoreCommon . actor . cpp <nl> <nl> # include " fdbclient / MutationList . h " <nl> # include " fdbclient / BackupContainer . h " <nl> <nl> - / / For convenience <nl> - typedef FileBackupAgent : : ERestoreState ERestoreState ; <nl> - template < > Tuple Codec < ERestoreState > : : pack ( ERestoreState const & val ) ; <nl> - template < > ERestoreState Codec < ERestoreState > : : unpack ( Tuple const & val ) ; <nl> - <nl> - / / Split RestoreConfig defined in FileBackupAgent . actor . cpp to declaration in Restore . actor . h and implementation in <nl> + / / Split RestoreConfigFR defined in FileBackupAgent . actor . cpp to declaration in Restore . actor . h and implementation in <nl> / / RestoreCommon . actor . cpp <nl> - KeyBackedProperty < ERestoreState > RestoreConfig : : stateEnum ( ) { <nl> + KeyBackedProperty < ERestoreState > RestoreConfigFR : : stateEnum ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> - Future < StringRef > RestoreConfig : : stateText ( Reference < ReadYourWritesTransaction > tr ) { <nl> + Future < StringRef > RestoreConfigFR : : stateText ( Reference < ReadYourWritesTransaction > tr ) { <nl> return map ( stateEnum ( ) . getD ( tr ) , [ ] ( ERestoreState s ) - > StringRef { return FileBackupAgent : : restoreStateText ( s ) ; } ) ; <nl> } <nl> - KeyBackedProperty < Key > RestoreConfig : : addPrefix ( ) { <nl> + KeyBackedProperty < Key > RestoreConfigFR : : addPrefix ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> - KeyBackedProperty < Key > RestoreConfig : : removePrefix ( ) { <nl> + KeyBackedProperty < Key > RestoreConfigFR : : removePrefix ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> / / XXX : Remove restoreRange ( ) once it is safe to remove . It has been changed to restoreRanges <nl> - KeyBackedProperty < KeyRange > RestoreConfig : : restoreRange ( ) { <nl> + KeyBackedProperty < KeyRange > RestoreConfigFR : : restoreRange ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> - KeyBackedProperty < std : : vector < KeyRange > > RestoreConfig : : restoreRanges ( ) { <nl> + KeyBackedProperty < std : : vector < KeyRange > > RestoreConfigFR : : restoreRanges ( ) { <nl> return configSpace . 
pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> - KeyBackedProperty < Key > RestoreConfig : : batchFuture ( ) { <nl> + KeyBackedProperty < Key > RestoreConfigFR : : batchFuture ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> - KeyBackedProperty < Version > RestoreConfig : : restoreVersion ( ) { <nl> + KeyBackedProperty < Version > RestoreConfigFR : : restoreVersion ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> <nl> - KeyBackedProperty < Reference < IBackupContainer > > RestoreConfig : : sourceContainer ( ) { <nl> + KeyBackedProperty < Reference < IBackupContainer > > RestoreConfigFR : : sourceContainer ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> / / Get the source container as a bare URL , without creating a container instance <nl> - KeyBackedProperty < Value > RestoreConfig : : sourceContainerURL ( ) { <nl> + KeyBackedProperty < Value > RestoreConfigFR : : sourceContainerURL ( ) { <nl> return configSpace . pack ( LiteralStringRef ( " sourceContainer " ) ) ; <nl> } <nl> <nl> / / Total bytes written by all log and range restore tasks . <nl> - KeyBackedBinaryValue < int64_t > RestoreConfig : : bytesWritten ( ) { <nl> + KeyBackedBinaryValue < int64_t > RestoreConfigFR : : bytesWritten ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> / / File blocks that have had tasks created for them by the Dispatch task <nl> - KeyBackedBinaryValue < int64_t > RestoreConfig : : filesBlocksDispatched ( ) { <nl> + KeyBackedBinaryValue < int64_t > RestoreConfigFR : : filesBlocksDispatched ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> / / File blocks whose tasks have finished <nl> - KeyBackedBinaryValue < int64_t > RestoreConfig : : fileBlocksFinished ( ) { <nl> + KeyBackedBinaryValue < int64_t > RestoreConfigFR : : fileBlocksFinished ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> / / Total number of files in the fileMap <nl> - KeyBackedBinaryValue < int64_t > RestoreConfig : : fileCount ( ) { <nl> + KeyBackedBinaryValue < int64_t > RestoreConfigFR : : fileCount ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> / / Total number of file blocks in the fileMap <nl> - KeyBackedBinaryValue < int64_t > RestoreConfig : : fileBlockCount ( ) { <nl> + KeyBackedBinaryValue < int64_t > RestoreConfigFR : : fileBlockCount ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> <nl> - Future < std : : vector < KeyRange > > RestoreConfig : : getRestoreRangesOrDefault ( Reference < ReadYourWritesTransaction > tr ) { <nl> + Future < std : : vector < KeyRange > > RestoreConfigFR : : getRestoreRangesOrDefault ( Reference < ReadYourWritesTransaction > tr ) { <nl> return getRestoreRangesOrDefault_impl ( this , tr ) ; <nl> } <nl> <nl> - ACTOR Future < std : : vector < KeyRange > > RestoreConfig : : getRestoreRangesOrDefault_impl ( <nl> - RestoreConfig * self , Reference < ReadYourWritesTransaction > tr ) { <nl> + ACTOR Future < std : : vector < KeyRange > > RestoreConfigFR : : getRestoreRangesOrDefault_impl ( <nl> + RestoreConfigFR * self , Reference < ReadYourWritesTransaction > tr ) { <nl> state std : : vector < KeyRange > ranges = wait ( self - > restoreRanges ( ) . getD ( tr ) ) ; <nl> if ( ranges . empty ( ) ) { <nl> state KeyRange range = wait ( self - > restoreRange ( ) . 
getD ( tr ) ) ; <nl> ACTOR Future < std : : vector < KeyRange > > RestoreConfig : : getRestoreRangesOrDefault_imp <nl> return ranges ; <nl> } <nl> <nl> - KeyBackedSet < RestoreFile > RestoreConfig : : fileSet ( ) { <nl> + KeyBackedSet < RestoreConfigFR : : RestoreFile > RestoreConfigFR : : fileSet ( ) { <nl> return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ; <nl> } <nl> <nl> - Future < bool > RestoreConfig : : isRunnable ( Reference < ReadYourWritesTransaction > tr ) { <nl> + Future < bool > RestoreConfigFR : : isRunnable ( Reference < ReadYourWritesTransaction > tr ) { <nl> return map ( stateEnum ( ) . getD ( tr ) , [ ] ( ERestoreState s ) - > bool { <nl> return s ! = ERestoreState : : ABORTED & & s ! = ERestoreState : : COMPLETED & & s ! = ERestoreState : : UNITIALIZED ; <nl> } ) ; <nl> } <nl> <nl> - Future < Void > RestoreConfig : : logError ( Database cx , Error e , std : : string const & details , void * taskInstance ) { <nl> + Future < Void > RestoreConfigFR : : logError ( Database cx , Error e , std : : string const & details , void * taskInstance ) { <nl> if ( ! uid . isValid ( ) ) { <nl> TraceEvent ( SevError , " FileRestoreErrorNoUID " ) . error ( e ) . detail ( " Description " , details ) ; <nl> return Void ( ) ; <nl> Future < Void > RestoreConfig : : logError ( Database cx , Error e , std : : string const & de <nl> return updateErrorInfo ( cx , e , details ) ; <nl> } <nl> <nl> - Key RestoreConfig : : mutationLogPrefix ( ) { <nl> + Key RestoreConfigFR : : mutationLogPrefix ( ) { <nl> return uidPrefixKey ( applyLogKeys . begin , uid ) ; <nl> } <nl> <nl> - Key RestoreConfig : : applyMutationsMapPrefix ( ) { <nl> + Key RestoreConfigFR : : applyMutationsMapPrefix ( ) { <nl> return uidPrefixKey ( applyMutationsKeyVersionMapRange . begin , uid ) ; <nl> } <nl> <nl> - ACTOR Future < int64_t > RestoreConfig : : getApplyVersionLag_impl ( Reference < ReadYourWritesTransaction > tr , UID uid ) { <nl> + ACTOR Future < int64_t > RestoreConfigFR : : getApplyVersionLag_impl ( Reference < ReadYourWritesTransaction > tr , UID uid ) { <nl> / / Both of these are snapshot reads <nl> state Future < Optional < Value > > beginVal = tr - > get ( uidPrefixKey ( applyMutationsBeginRange . begin , uid ) , true ) ; <nl> state Future < Optional < Value > > endVal = tr - > get ( uidPrefixKey ( applyMutationsEndRange . begin , uid ) , true ) ; <nl> ACTOR Future < int64_t > RestoreConfig : : getApplyVersionLag_impl ( Reference < ReadYourW <nl> return endVersion - beginVersion ; <nl> } <nl> <nl> - Future < int64_t > RestoreConfig : : getApplyVersionLag ( Reference < ReadYourWritesTransaction > tr ) { <nl> + Future < int64_t > RestoreConfigFR : : getApplyVersionLag ( Reference < ReadYourWritesTransaction > tr ) { <nl> return getApplyVersionLag_impl ( tr , uid ) ; <nl> } <nl> <nl> - void RestoreConfig : : initApplyMutations ( Reference < ReadYourWritesTransaction > tr , Key addPrefix , Key removePrefix ) { <nl> + void RestoreConfigFR : : initApplyMutations ( Reference < ReadYourWritesTransaction > tr , Key addPrefix , Key removePrefix ) { <nl> / / Set these because they have to match the applyMutations values . <nl> this - > addPrefix ( ) . set ( tr , addPrefix ) ; <nl> this - > removePrefix ( ) . 
set ( tr , removePrefix ) ; <nl> void RestoreConfig : : initApplyMutations ( Reference < ReadYourWritesTransaction > tr , <nl> tr - > set ( mapStart , BinaryWriter : : toValue < Version > ( invalidVersion , Unversioned ( ) ) ) ; <nl> } <nl> <nl> - void RestoreConfig : : clearApplyMutationsKeys ( Reference < ReadYourWritesTransaction > tr ) { <nl> + void RestoreConfigFR : : clearApplyMutationsKeys ( Reference < ReadYourWritesTransaction > tr ) { <nl> tr - > setOption ( FDBTransactionOptions : : COMMIT_ON_FIRST_PROXY ) ; <nl> <nl> / / Clear add / remove prefix keys <nl> void RestoreConfig : : clearApplyMutationsKeys ( Reference < ReadYourWritesTransaction > <nl> tr - > clear ( uidPrefixKey ( applyMutationsBeginRange . begin , uid ) ) ; <nl> } <nl> <nl> - void RestoreConfig : : setApplyBeginVersion ( Reference < ReadYourWritesTransaction > tr , Version ver ) { <nl> + void RestoreConfigFR : : setApplyBeginVersion ( Reference < ReadYourWritesTransaction > tr , Version ver ) { <nl> tr - > set ( uidPrefixKey ( applyMutationsBeginRange . begin , uid ) , BinaryWriter : : toValue ( ver , Unversioned ( ) ) ) ; <nl> } <nl> <nl> - void RestoreConfig : : setApplyEndVersion ( Reference < ReadYourWritesTransaction > tr , Version ver ) { <nl> + void RestoreConfigFR : : setApplyEndVersion ( Reference < ReadYourWritesTransaction > tr , Version ver ) { <nl> tr - > set ( uidPrefixKey ( applyMutationsEndRange . begin , uid ) , BinaryWriter : : toValue ( ver , Unversioned ( ) ) ) ; <nl> } <nl> <nl> - Future < Version > RestoreConfig : : getApplyEndVersion ( Reference < ReadYourWritesTransaction > tr ) { <nl> + Future < Version > RestoreConfigFR : : getApplyEndVersion ( Reference < ReadYourWritesTransaction > tr ) { <nl> return map ( tr - > get ( uidPrefixKey ( applyMutationsEndRange . begin , uid ) ) , [ = ] ( Optional < Value > const & value ) - > Version { <nl> return value . present ( ) ? BinaryReader : : fromStringRef < Version > ( value . get ( ) , Unversioned ( ) ) : 0 ; <nl> } ) ; <nl> } <nl> <nl> - / / Meng : Change RestoreConfig to Reference < RestoreConfig > because FastRestore pass the Reference < RestoreConfig > around <nl> - ACTOR Future < std : : string > RestoreConfig : : getProgress_impl ( Reference < RestoreConfig > restore , <nl> + / / Meng : Change RestoreConfigFR to Reference < RestoreConfigFR > because FastRestore pass the Reference < RestoreConfigFR > around <nl> + ACTOR Future < std : : string > RestoreConfigFR : : getProgress_impl ( Reference < RestoreConfigFR > restore , <nl> Reference < ReadYourWritesTransaction > tr ) { <nl> tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> ACTOR Future < std : : string > RestoreConfig : : getProgress_impl ( Reference < RestoreConfi <nl> fileBlockCount . get ( ) , fileBlocksDispatched . get ( ) - fileBlocksFinished . get ( ) , fileCount . get ( ) , <nl> bytesWritten . get ( ) , lag . get ( ) , errstr . 
c_str ( ) ) ; <nl> } <nl> - Future < std : : string > RestoreConfig : : getProgress ( Reference < ReadYourWritesTransaction > tr ) { <nl> - Reference < RestoreConfig > restore = Reference < RestoreConfig > ( this ) ; <nl> + Future < std : : string > RestoreConfigFR : : getProgress ( Reference < ReadYourWritesTransaction > tr ) { <nl> + Reference < RestoreConfigFR > restore = Reference < RestoreConfigFR > ( this ) ; <nl> return getProgress_impl ( restore , tr ) ; <nl> } <nl> <nl> - / / Meng : Change RestoreConfig to Reference < RestoreConfig > <nl> - ACTOR Future < std : : string > RestoreConfig : : getFullStatus_impl ( Reference < RestoreConfig > restore , <nl> + / / Meng : Change RestoreConfigFR to Reference < RestoreConfigFR > <nl> + ACTOR Future < std : : string > RestoreConfigFR : : getFullStatus_impl ( Reference < RestoreConfigFR > restore , <nl> Reference < ReadYourWritesTransaction > tr ) { <nl> tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> ACTOR Future < std : : string > RestoreConfig : : getFullStatus_impl ( Reference < RestoreCon <nl> printable ( removePrefix . get ( ) ) . c_str ( ) , restoreVersion . get ( ) ) ; <nl> return returnStr ; <nl> } <nl> - Future < std : : string > RestoreConfig : : getFullStatus ( Reference < ReadYourWritesTransaction > tr ) { <nl> - Reference < RestoreConfig > restore = Reference < RestoreConfig > ( this ) ; <nl> + Future < std : : string > RestoreConfigFR : : getFullStatus ( Reference < ReadYourWritesTransaction > tr ) { <nl> + Reference < RestoreConfigFR > restore = Reference < RestoreConfigFR > ( this ) ; <nl> return getFullStatus_impl ( restore , tr ) ; <nl> } <nl> <nl> - std : : string RestoreConfig : : toString ( ) { <nl> + std : : string RestoreConfigFR : : toString ( ) { <nl> std : : stringstream ss ; <nl> ss < < " uid : " < < uid . toString ( ) < < " prefix : " < < prefix . contents ( ) . toString ( ) ; <nl> return ss . str ( ) ; <nl> } <nl> <nl> - typedef RestoreConfig : : RestoreFile RestoreFile ; <nl> + / / typedef RestoreConfigFR : : RestoreFile RestoreFileFR ; <nl> <nl> - / / parallelFileRestore is copied from FileBackupAgent . actor . cpp for the same reason as RestoreConfig is copied <nl> + / / parallelFileRestore is copied from FileBackupAgent . actor . cpp for the same reason as RestoreConfigFR is copied <nl> / / The implementation of parallelFileRestore is copied from FileBackupAgent . actor . cpp <nl> - / / parallelFileRestore is copied from FileBackupAgent . actor . cpp for the same reason as RestoreConfig is copied <nl> + / / parallelFileRestore is copied from FileBackupAgent . actor . cpp for the same reason as RestoreConfigFR is copied <nl> namespace parallelFileRestore { <nl> / / Helper class for reading restore data from a buffer and throwing the right errors . <nl> struct StringRefReader { <nl> mmm a / fdbserver / RestoreCommon . actor . h <nl> ppp b / fdbserver / RestoreCommon . actor . h <nl> <nl> # include " flow / actorcompiler . h " / / has to be last include <nl> <nl> / / RestoreConfig copied from FileBackupAgent . actor . cpp <nl> - / / We copy RestoreConfig instead of using ( and potentially changing ) it in place to avoid conflict with the existing <nl> - / / code <nl> + / / We copy RestoreConfig instead of using ( and potentially changing ) it in place <nl> + / / to avoid conflict with the existing code . 
<nl> + / / We also made minor changes to allow RestoreConfig to be ReferenceCounted <nl> / / TODO : Merge this RestoreConfig with the original RestoreConfig in FileBackupAgent . actor . cpp <nl> + / / For convenience <nl> typedef FileBackupAgent : : ERestoreState ERestoreState ; <nl> + / / template < > Tuple Codec < ERestoreState > : : pack ( ERestoreState const & val ) ; <nl> + / / template < > ERestoreState Codec < ERestoreState > : : unpack ( Tuple const & val ) ; <nl> + template < > inline Tuple Codec < ERestoreState > : : pack ( ERestoreState const & val ) { return Tuple ( ) . append ( val ) ; } <nl> + template < > inline ERestoreState Codec < ERestoreState > : : unpack ( Tuple const & val ) { return ( ERestoreState ) val . getInt ( 0 ) ; } <nl> + <nl> struct RestoreFileFR ; <nl> <nl> / / We copy RestoreConfig copied from FileBackupAgent . actor . cpp instead of using ( and potentially changing ) it in place <nl> / / to avoid conflict with the existing code Split RestoreConfig defined in FileBackupAgent . actor . cpp to declaration in <nl> / / Restore . actor . h and implementation in RestoreCommon . actor . cpp , so that we can use in both the existing restore and <nl> - / / the new fast restore subsystems We use RestoreConfig as a Reference < RestoreConfig > , which leads to some <nl> + / / the new fast restore subsystems . We use RestoreConfig as a Reference < RestoreConfig > , which leads to some <nl> / / non - functional changes in RestoreConfig <nl> - class RestoreConfig : public KeyBackedConfig , public ReferenceCounted < RestoreConfig > { <nl> + class RestoreConfigFR : public KeyBackedConfig , public ReferenceCounted < RestoreConfigFR > { <nl> public : <nl> - RestoreConfig ( UID uid = UID ( ) ) : KeyBackedConfig ( fileRestorePrefixRange . begin , uid ) { } <nl> - RestoreConfig ( Reference < Task > task ) : KeyBackedConfig ( fileRestorePrefixRange . begin , task ) { } <nl> + RestoreConfigFR ( UID uid = UID ( ) ) : KeyBackedConfig ( fileRestorePrefixRange . begin , uid ) { } <nl> + RestoreConfigFR ( Reference < Task > task ) : KeyBackedConfig ( fileRestorePrefixRange . begin , task ) { } <nl> <nl> KeyBackedProperty < ERestoreState > stateEnum ( ) ; <nl> <nl> class RestoreConfig : public KeyBackedConfig , public ReferenceCounted < RestoreCon <nl> KeyBackedBinaryValue < int64_t > fileBlockCount ( ) ; <nl> <nl> Future < std : : vector < KeyRange > > getRestoreRangesOrDefault ( Reference < ReadYourWritesTransaction > tr ) ; <nl> - ACTOR static Future < std : : vector < KeyRange > > getRestoreRangesOrDefault_impl ( RestoreConfig * self , <nl> + ACTOR static Future < std : : vector < KeyRange > > getRestoreRangesOrDefault_impl ( RestoreConfigFR * self , <nl> Reference < ReadYourWritesTransaction > tr ) ; <nl> <nl> / / Describes a file to load blocks from during restore . 
Ordered by version and then fileName to enable <nl> class RestoreConfig : public KeyBackedConfig , public ReferenceCounted < RestoreCon <nl> <nl> Future < Version > getApplyEndVersion ( Reference < ReadYourWritesTransaction > tr ) ; <nl> <nl> - ACTOR static Future < std : : string > getProgress_impl ( Reference < RestoreConfig > restore , <nl> + ACTOR static Future < std : : string > getProgress_impl ( Reference < RestoreConfigFR > restore , <nl> Reference < ReadYourWritesTransaction > tr ) ; <nl> Future < std : : string > getProgress ( Reference < ReadYourWritesTransaction > tr ) ; <nl> <nl> - ACTOR static Future < std : : string > getFullStatus_impl ( Reference < RestoreConfig > restore , <nl> + ACTOR static Future < std : : string > getFullStatus_impl ( Reference < RestoreConfigFR > restore , <nl> Reference < ReadYourWritesTransaction > tr ) ; <nl> Future < std : : string > getFullStatus ( Reference < ReadYourWritesTransaction > tr ) ; <nl> <nl> std : : string toString ( ) ; / / Added by Meng <nl> } ; <nl> <nl> - typedef RestoreConfig : : RestoreFile RestoreFile ; <nl> + / / typedef RestoreConfigFR : : RestoreFile RestoreFile ; <nl> <nl> / / Describes a file to load blocks from during restore . Ordered by version and then fileName to enable <nl> / / incrementally advancing through the map , saving the version and path of the next starting point . <nl> mmm a / fdbserver / RestoreMaster . actor . cpp <nl> ppp b / fdbserver / RestoreMaster . actor . cpp <nl> ACTOR static Future < Version > processRestoreRequest ( RestoreRequest request , Refer <nl> <nl> self - > initBackupContainer ( request . url ) ; <nl> <nl> - wait ( <nl> - _collectBackupFiles ( self - > bc , & files , cx , request ) ) ; / / Get all backup files ' description and save them to files <nl> + / / Get all backup files ' description and save them to files <nl> + wait ( _collectBackupFiles ( self - > bc , & files , cx , request ) ) ; <nl> self - > buildVersionBatches ( files , self - > versionBatches ) ; / / Divide files into version batches <nl> <nl> state std : : map < Version , VersionBatch > : : iterator versionBatch ; <nl> ACTOR static Future < Void > loadFilesOnLoaders ( Reference < RestoreMasterData > self , <nl> files = & versionBatch . rangeFiles ; <nl> } else { <nl> files = & versionBatch . logFiles ; <nl> - Reference < RestoreConfig > restoreConfig ( new RestoreConfig ( request . randomUid ) ) ; <nl> + Reference < RestoreConfigFR > restoreConfig ( new RestoreConfigFR ( request . randomUid ) ) ; <nl> mutationLogPrefix = restoreConfig - > mutationLogPrefix ( ) ; <nl> } <nl> <nl> mmm a / fdbserver / RestoreWorker . actor . cpp <nl> ppp b / fdbserver / RestoreWorker . actor . cpp <nl> int NUM_APPLIERS = 40 ; <nl> <nl> int restoreStatusIndex = 0 ; <nl> <nl> - class RestoreConfig ; <nl> + class RestoreConfigFR ; <nl> struct RestoreWorkerData ; / / Only declare the struct exist but we cannot use its field <nl> <nl> void initRestoreWorkerConfig ( ) ; <nl> ACTOR Future < Void > monitorleader ( Reference < AsyncVar < RestoreWorkerInterface > > lea <nl> ACTOR Future < Void > startRestoreWorkerLeader ( Reference < RestoreWorkerData > self , RestoreWorkerInterface workerInterf , <nl> Database cx ) ; <nl> <nl> - template < > <nl> - Tuple Codec < ERestoreState > : : pack ( ERestoreState const & val ) ; <nl> - template < > <nl> - ERestoreState Codec < ERestoreState > : : unpack ( Tuple const & val ) ; <nl> - <nl> / / Remove the worker interface from restoreWorkerKey and remove its roles interfaces from their keys . 
<nl> ACTOR Future < Void > handlerTerminateWorkerRequest ( RestoreSimpleRequest req , Reference < RestoreWorkerData > self , <nl> RestoreWorkerInterface workerInterf , Database cx ) { <nl> mmm a / fdbserver / RestoreWorkerInterface . h <nl> ppp b / fdbserver / RestoreWorkerInterface . h <nl> <nl> # define DUMPTOKEN ( name ) \ <nl> TraceEvent ( " DumpToken " , recruited . id ( ) ) . detail ( " Name " , # name ) . detail ( " Token " , name . getEndpoint ( ) . token ) <nl> <nl> - class RestoreConfig ; <nl> + class RestoreConfigFR ; <nl> <nl> struct RestoreCommonReply ; <nl> struct RestoreRecruitRoleRequest ; <nl> mmm a / fdbserver / fdbserver . vcxproj . filters <nl> ppp b / fdbserver / fdbserver . vcxproj . filters <nl> <nl> < ClInclude Include = " Knobs . h " / > <nl> < ClInclude Include = " WorkerInterface . h " / > <nl> < ClInclude Include = " RestoreWorkerInterface . h " / > <nl> + < ClInclude Include = " RestoreCommon . actor . h " / > <nl> < ClInclude Include = " WaitFailure . h " / > <nl> < ClInclude Include = " TesterInterface . actor . h " / > <nl> < ClInclude Include = " workloads \ workloads . actor . h " > <nl> | FastRestore : Rename RestoreConfig to RestoreConfigFR to fix link problem in windows | apple/foundationdb | 2602cb35915993960943b60f53cc8be107e22479 | 2019-08-03T06:00:12Z |
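
Beyond the rename, the substantive change behind the Windows link fix is marking the Codec<ERestoreState> specializations inline, so their definitions can sit in RestoreCommon.actor.h and be included from multiple translation units without duplicate-symbol errors. A hedged sketch of why that matters (all names hypothetical):

// shared header
template <typename T> struct Codec {
  static int pack(const T& val);
};

enum StateLike { UNSET = 0, RUNNING = 1 };

// A full specialization is an ordinary function: without 'inline', every
// .cpp including this header would emit its own strong definition of
// Codec<StateLike>::pack, and the link would fail with duplicate symbols.
template <>
inline int Codec<StateLike>::pack(const StateLike& val) {
  return static_cast<int>(val);
}

int main() { return Codec<StateLike>::pack(UNSET); }
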
mmm a / googletest / include / gtest / internal / gtest - port . h <nl> ppp b / googletest / include / gtest / internal / gtest - port . h <nl> <nl> / / - std = { c , gnu } + + { 0x , 11 } is passed . The C + + 11 standard specifies a <nl> / / value for __cplusplus , and recent versions of clang , gcc , and <nl> / / probably other compilers set that too in C + + 11 mode . <nl> - # if __GXX_EXPERIMENTAL_CXX0X__ | | __cplusplus > = 201103L <nl> + # if __GXX_EXPERIMENTAL_CXX0X__ | | __cplusplus > = 201103L | | _MSC_VER > = 1900 <nl> / / Compiling in at least C + + 11 mode . <nl> # define GTEST_LANG_CXX11 1 <nl> # else <nl> <nl> # if GTEST_STDLIB_CXX11 <nl> # define GTEST_HAS_STD_BEGIN_AND_END_ 1 <nl> # define GTEST_HAS_STD_FORWARD_LIST_ 1 <nl> - # define GTEST_HAS_STD_FUNCTION_ 1 <nl> + # if ! defined ( _MSC_VER ) | | ( _MSC_FULL_VER > = 190023824 ) / / works only with VS2015U2 and better <nl> + # define GTEST_HAS_STD_FUNCTION_ 1 <nl> + # endif <nl> # define GTEST_HAS_STD_INITIALIZER_LIST_ 1 <nl> # define GTEST_HAS_STD_MOVE_ 1 <nl> # define GTEST_HAS_STD_SHARED_PTR_ 1 <nl> # define GTEST_HAS_STD_TYPE_TRAITS_ 1 <nl> # define GTEST_HAS_STD_UNIQUE_PTR_ 1 <nl> + # define GTEST_HAS_UNORDERED_MAP_ 1 <nl> + # define GTEST_HAS_UNORDERED_SET_ 1 <nl> # endif <nl> <nl> / / C + + 11 specifies that < tuple > provides std : : tuple . <nl> typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION ; <nl> / / support TR1 tuple . libc + + only provides std : : tuple , in C + + 11 mode , <nl> / / and it can be used with some compilers that define __GNUC__ . <nl> # if ( defined ( __GNUC__ ) & & ! defined ( __CUDACC__ ) & & ( GTEST_GCC_VER_ > = 40000 ) \ <nl> - & & ! GTEST_OS_QNX & & ! defined ( _LIBCPP_VERSION ) ) | | _MSC_VER > = 1600 <nl> + & & ! GTEST_OS_QNX & & ! defined ( _LIBCPP_VERSION ) ) \ <nl> + | | ( _MSC_VER > = 1600 & & _MSC_VER < 1900 ) <nl> # define GTEST_ENV_HAS_TR1_TUPLE_ 1 <nl> # endif <nl> <nl> | Enable C + + 11 features for VS2015 and VS2017 | google/googletest | 77380cddf77133b98a16b5427ac732648233de29 | 2017-10-25T00:24:49Z |
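
The patch extends googletest's C++11 detection to MSVC: _MSC_VER >= 1900 (Visual Studio 2015) now enables GTEST_LANG_CXX11 even though older MSVC reported a pre-C++11 __cplusplus value, std::function is gated on a newer _MSC_FULL_VER per the in-diff comment, and VS2015 and later are excluded from the TR1-tuple path. A small sketch of the same detection idiom (macro names hypothetical):

#if (defined(__cplusplus) && __cplusplus >= 201103L) || \
    (defined(_MSC_VER) && _MSC_VER >= 1900)
#  define DEMO_LANG_CXX11 1
#else
#  define DEMO_LANG_CXX11 0
#endif

#if DEMO_LANG_CXX11
#include <memory>
static std::unique_ptr<int> demo_ptr;  // C++11 library types are safe here
#endif

int main() { return DEMO_LANG_CXX11 ? 0 : 1; }
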
mmm a / include / fmt / format - inl . h <nl> ppp b / include / fmt / format - inl . h <nl> <nl> # include < cstdarg > <nl> # include < cstring > / / for std : : memmove <nl> # include < cwchar > <nl> + # include < exception > <nl> <nl> # include " format . h " <nl> # if ! defined ( FMT_STATIC_THOUSANDS_SEPARATOR ) <nl> namespace internal { <nl> <nl> FMT_FUNC void assert_fail ( const char * file , int line , const char * message ) { <nl> print ( stderr , " { } : { } : assertion failed : { } " , file , line , message ) ; <nl> - std : : abort ( ) ; <nl> + / / Chosen instead of std : : abort to satisfy Clang in CUDA mode during device <nl> + / / code pass . <nl> + std : : terminate ( ) ; <nl> } <nl> <nl> # ifndef _MSC_VER <nl> | Exclude std : : abort from compilation when compiling CUDA with Clang ( ) | fmtlib/fmt | 7e57cace5dacceaa31e983fdc2d1e279d36f1dca | 2020-05-04T13:21:21Z |
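
Per the comment added above, std::abort made Clang reject this code during its CUDA device-code pass, so the assertion handler now prints and calls std::terminate instead; the diff also adds <exception>, which declares it. A minimal sketch of the resulting pattern:

#include <cstdio>
#include <exception>

[[noreturn]] void assert_fail(const char* file, int line, const char* message) {
  std::fprintf(stderr, "%s:%d: assertion failed: %s\n", file, line, message);
  std::terminate();  // compiles where std::abort was rejected; still fatal
}

int main() {
  // assert_fail(__FILE__, __LINE__, "demo");  // uncomment to see it fire
  return 0;
}
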
mmm a / lib / SIL / SILGen / Cleanup . cpp <nl> ppp b / lib / SIL / SILGen / Cleanup . cpp <nl> void CleanupManager : : emitBranchAndCleanups ( JumpDest Dest ) { <nl> } <nl> <nl> void CleanupManager : : emitCleanupsForReturn ( SILLocation loc ) { <nl> - for ( auto & cleanup : Stack ) <nl> - if ( cleanup . isActive ( ) ) <nl> - cleanup . emit ( Gen ) ; <nl> + auto end = Stack . find ( ReturnScope ) ; <nl> + for ( auto cleanup = Stack . begin ( ) ; cleanup ! = end ; + + cleanup ) { <nl> + if ( cleanup - > isActive ( ) ) <nl> + cleanup - > emit ( Gen ) ; <nl> + } <nl> } <nl> <nl> - void CleanupManager : : emitReturnAndCleanups ( SILLocation loc , SILValue returnValue ) { <nl> + void CleanupManager : : emitReturnAndCleanups ( SILLocation loc , <nl> + SILValue returnValue ) { <nl> SILBuilder & B = Gen . getBuilder ( ) ; <nl> assert ( B . hasValidInsertionPoint ( ) & & " Inserting return in invalid spot " ) ; <nl> <nl> emitCleanupsForReturn ( loc ) ; <nl> <nl> - if ( Gen . epilogBB ) { <nl> + if ( ! Gen . inlineReturnBBStack . empty ( ) ) { <nl> + B . createBranch ( loc , Gen . inlineReturnBBStack . back ( ) , <nl> + returnValue ) ; <nl> + } else if ( Gen . epilogBB ) { <nl> assert ( Gen . hasVoidReturn & & " ctor or dtor with non - void return ? ! " ) ; <nl> B . createBranch ( loc , Gen . epilogBB ) ; <nl> } else { <nl> mmm a / lib / SIL / SILGen / Cleanup . h <nl> ppp b / lib / SIL / SILGen / Cleanup . h <nl> class LLVM_LIBRARY_VISIBILITY Cleanup { <nl> virtual void emit ( SILGenFunction & Gen ) = 0 ; <nl> } ; <nl> <nl> + class CleanupManager ; <nl> + template < CleanupsDepth CleanupManager : : * SCOPE > <nl> + class ScopeImpl ; <nl> + <nl> class LLVM_LIBRARY_VISIBILITY CleanupManager { <nl> - friend class Scope ; <nl> + template < CleanupsDepth CleanupManager : : * SCOPE > <nl> + friend class ScopeImpl ; <nl> <nl> SILGenFunction & Gen ; <nl> <nl> class LLVM_LIBRARY_VISIBILITY CleanupManager { <nl> DiverseStack < Cleanup , 128 > Stack ; <nl> <nl> CleanupsDepth InnermostScope ; <nl> + CleanupsDepth ReturnScope ; <nl> <nl> void popAndEmitTopCleanup ( ) ; <nl> void popAndEmitTopDeadCleanups ( CleanupsDepth end ) ; <nl> class LLVM_LIBRARY_VISIBILITY CleanupManager { <nl> <nl> public : <nl> CleanupManager ( SILGenFunction & Gen ) <nl> - : Gen ( Gen ) , InnermostScope ( Stack . stable_end ( ) ) { <nl> + : Gen ( Gen ) , InnermostScope ( Stack . stable_end ( ) ) , <nl> + ReturnScope ( Stack . stable_end ( ) ) { <nl> } <nl> <nl> / / / Return a stable reference to the current cleanup . <nl> class LLVM_LIBRARY_VISIBILITY CleanupManager { <nl> / / / Set the state of the cleanup at the given depth . <nl> / / / The transition must be non - trivial and legal . <nl> void setCleanupState ( CleanupsDepth depth , CleanupState state ) ; <nl> + <nl> + / / / Scope RAII manager for local scopes . <nl> + using Scope = ScopeImpl < & CleanupManager : : InnermostScope > ; <nl> + / / / Scope RAII manager for force - inlining . <nl> + using InliningScope = ScopeImpl < & CleanupManager : : ReturnScope > ; <nl> } ; <nl> <nl> } / / end namespace Lowering <nl> mmm a / lib / SIL / SILGen / SILGen . h <nl> ppp b / lib / SIL / SILGen / SILGen . h <nl> class LLVM_LIBRARY_VISIBILITY SILGenFunction <nl> / / / block that ' return ' jumps to in those contexts , or ' null ' if returning <nl> / / / can return normally from the function . <nl> SILBasicBlock * epilogBB ; <nl> + <nl> + / / / Return continuation block stack for force - inlined functions . 
<nl> + std : : vector < SILBasicBlock * > inlineReturnBBStack ; <nl> <nl> public : <nl> SILGenFunction ( SILGenModule & SGM , SILFunction & F , bool hasVoidReturn ) ; <nl> class LLVM_LIBRARY_VISIBILITY SILGenFunction <nl> / / / to a variable or passed as an argument or return value . <nl> SILValue emitGeneralizedValue ( SILLocation loc , SILValue thinFn ) ; <nl> <nl> + / / / Emit an inline function call . <nl> + ManagedValue emitInlineFunction ( FuncExpr * body , Expr * args , SGFContext C ) ; <nl> + ManagedValue emitInlineFunction ( FuncExpr * body , RValue & & args , SGFContext C ) ; <nl> + <nl> / / <nl> / / Helpers for emitting ApplyExpr chains . <nl> / / <nl> mmm a / lib / SIL / SILGen / SILGenApply . cpp <nl> ppp b / lib / SIL / SILGen / SILGenApply . cpp <nl> class Callee { <nl> return mv ; <nl> } <nl> <nl> + bool isForceInline ( ) const { <nl> + if ( kind = = Kind : : GenericValue ) <nl> + return false ; <nl> + if ( ! standaloneFunction . hasDecl ( ) ) <nl> + return false ; <nl> + return standaloneFunction . getDecl ( ) - > getAttrs ( ) . isForceInline ( ) ; <nl> + } <nl> + <nl> + FuncExpr * getForceInlineBody ( ) const { <nl> + assert ( isForceInline ( ) & & " not force_inlineable " ) ; <nl> + return cast < FuncDecl > ( standaloneFunction . getDecl ( ) ) - > getBody ( ) ; <nl> + } <nl> + <nl> OwnershipConventions const & getOwnershipConventions ( ) const { <nl> / / FIXME : May need to adjust ownership conventions with uncurry level ? <nl> return ownership ; <nl> namespace { <nl> return ; <nl> } <nl> } <nl> + <nl> + ManagedValue emitInline ( SILGenFunction & gen , FuncExpr * body , <nl> + SGFContext C ) & & { <nl> + switch ( kind ) { <nl> + case Kind : : Expr : <nl> + return gen . emitInlineFunction ( body , expr , C ) ; <nl> + case Kind : : Value : <nl> + return gen . emitInlineFunction ( body , std : : move ( value ) , C ) ; <nl> + } <nl> + } <nl> } ; <nl> <nl> class CallEmission { <nl> namespace { <nl> Callee : : SpecializedEmitter specializedEmitter <nl> = callee . getSpecializedEmitter ( uncurryLevel ) ; <nl> <nl> - ManagedValue calleeValue ; <nl> - if ( ! specializedEmitter ) <nl> - calleeValue = callee . getAtUncurryLevel ( gen , uncurryLevel ) ; <nl> - <nl> - / / Collect the arguments to the uncurried call . <nl> - SmallVector < ManagedValue , 4 > args ; <nl> - SILLocation uncurriedLoc ; <nl> - for ( auto & site : uncurriedSites ) { <nl> - uncurriedLoc = site . loc ; <nl> - std : : move ( site ) . emit ( gen , args ) ; <nl> - } <nl> - <nl> / / We use the context emit - into initialization only for the outermost <nl> / / call . <nl> SGFContext uncurriedContext = extraSites . empty ( ) ? C : SGFContext ( ) ; <nl> - <nl> - / / Emit the uncurried call . <nl> - ManagedValue result ; <nl> <nl> - if ( specializedEmitter ) <nl> - result = specializedEmitter ( gen , <nl> - uncurriedLoc , <nl> - callee . getSubstitutions ( ) , <nl> - args , <nl> - uncurriedContext ) ; <nl> - else <nl> - result = gen . emitApply ( uncurriedLoc , calleeValue , args , <nl> - callee . getOwnershipConventions ( ) , <nl> - uncurriedContext ) ; <nl> + SmallVector < ManagedValue , 4 > args ; <nl> + ManagedValue result ; <nl> + <nl> + / / If the call is force - inlined , emit it inline . <nl> + if ( callee . isForceInline ( ) <nl> + & & ! specializedEmitter <nl> + & & uncurriedSites . size ( ) = = 1 ) { <nl> + result = std : : move ( uncurriedSites [ 0 ] ) <nl> + . emitInline ( gen , callee . getForceInlineBody ( ) , uncurriedContext ) ; <nl> + } else { <nl> + / / Get the callee value . 
<nl> + ManagedValue calleeValue ; <nl> + if ( ! specializedEmitter ) <nl> + calleeValue = callee . getAtUncurryLevel ( gen , uncurryLevel ) ; <nl> + <nl> + / / Collect the arguments to the uncurried call . <nl> + SILLocation uncurriedLoc ; <nl> + for ( auto & site : uncurriedSites ) { <nl> + uncurriedLoc = site . loc ; <nl> + std : : move ( site ) . emit ( gen , args ) ; <nl> + } <nl> + <nl> + / / Emit the uncurried call . <nl> + if ( specializedEmitter ) <nl> + result = specializedEmitter ( gen , <nl> + uncurriedLoc , <nl> + callee . getSubstitutions ( ) , <nl> + args , <nl> + uncurriedContext ) ; <nl> + else <nl> + result = gen . emitApply ( uncurriedLoc , calleeValue , args , <nl> + callee . getOwnershipConventions ( ) , <nl> + uncurriedContext ) ; <nl> + } <nl> <nl> / / If there are remaining call sites , apply them to the result function . <nl> for ( unsigned i = 0 , size = extraSites . size ( ) ; i < size ; + + i ) { <nl> mmm a / lib / SIL / SILGen / SILGenDecl . cpp <nl> ppp b / lib / SIL / SILGen / SILGenDecl . cpp <nl> void SILGenFunction : : deallocateUninitializedLocalVariable ( VarDecl * vd ) { <nl> assert ( loc . box & & " captured var should have been given a box " ) ; <nl> B . createDeallocRef ( vd , loc . box ) ; <nl> } <nl> + } <nl> + <nl> + namespace { <nl> + / / / RAII class for scoping an inline function emission . <nl> + class InliningScope { <nl> + SILGenFunction & gen ; <nl> + bool popped , savedHasVoidReturn ; <nl> + SILValue savedIndirectReturnAddress ; <nl> + Optional < CleanupManager : : InliningScope > cleanupScope ; <nl> + SILType returnType ; <nl> + SILValue returnValue ; <nl> + <nl> + public : <nl> + InliningScope ( SILGenFunction & gen , <nl> + FuncExpr * body , <nl> + SGFContext C ) <nl> + : gen ( gen ) , <nl> + popped ( false ) , <nl> + savedHasVoidReturn ( gen . hasVoidReturn ) , <nl> + savedIndirectReturnAddress ( gen . IndirectReturnAddress ) <nl> + { <nl> + returnType = gen . getLoweredType ( <nl> + body - > getType ( ) - > castTo < FunctionType > ( ) - > getResult ( ) ) ; <nl> + <nl> + / / Create a continuation block for the function to return to . <nl> + / / FIXME : We could avoid imploding and exploding return tuples here . <nl> + SILBasicBlock * returnBB <nl> + = new ( gen . F . getModule ( ) ) SILBasicBlock ( & gen . F , " inline_return " ) ; <nl> + <nl> + / / Set things up for the inlined function to return . <nl> + gen . hasVoidReturn = returnType = = gen . SGM . Types . getEmptyTupleType ( ) ; <nl> + if ( returnType . isAddressOnly ( ) ) { <nl> + returnValue = gen . IndirectReturnAddress <nl> + = gen . getBufferForExprResult ( body , returnType , C ) ; <nl> + <nl> + / / FIXME : Pretend to do a void return . <nl> + new ( gen . F . getModule ( ) ) SILArgument ( gen . SGM . Types . getEmptyTupleType ( ) , <nl> + returnBB ) ; <nl> + } else { <nl> + gen . IndirectReturnAddress = SILValue ( ) ; <nl> + returnValue <nl> + = new ( gen . F . getModule ( ) ) SILArgument ( returnType , returnBB ) ; <nl> + } <nl> + <nl> + gen . inlineReturnBBStack . push_back ( returnBB ) ; <nl> + <nl> + / / Create a cleanups scope . <nl> + cleanupScope . emplace ( gen . Cleanups ) ; <nl> + } <nl> + <nl> + ManagedValue pop ( ) { <nl> + SILBasicBlock * returnBB = gen . inlineReturnBBStack . back ( ) ; <nl> + <nl> + / / Terminate the inner function , either with an implicit void return or <nl> + / / an unreachable if there is a value return . <nl> + if ( gen . B . hasValidInsertionPoint ( ) ) { <nl> + if ( gen . hasVoidReturn ) <nl> + gen . Cleanups . 
emitReturnAndCleanups ( SILLocation ( ) , <nl> + gen . emitEmptyTuple ( SILLocation ( ) ) ) ; <nl> + else <nl> + gen . B . createUnreachable ( ) ; <nl> + } <nl> + <nl> + assert ( ! gen . B . hasValidInsertionPoint ( ) & & <nl> + " did not terminate inline function " ) ; <nl> + <nl> + / / Unwind cleanups . <nl> + cleanupScope . reset ( ) ; <nl> + <nl> + / / Emit the continuation block . <nl> + / / FIXME : If we have a single predecessor , we should fuse to it . <nl> + / / FIXME : If we have no predecessors , then the function never returns and <nl> + / / we ' re in dead code . Early exit from SILGen ? <nl> + gen . B . emitBlock ( returnBB ) ; <nl> + <nl> + / / Restore the outer return state . <nl> + gen . inlineReturnBBStack . pop_back ( ) ; <nl> + gen . hasVoidReturn = savedHasVoidReturn ; <nl> + gen . IndirectReturnAddress = savedIndirectReturnAddress ; <nl> + popped = true ; <nl> + <nl> + return gen . emitManagedRValueWithCleanup ( returnValue ) ; <nl> + } <nl> + <nl> + ~ InliningScope ( ) { <nl> + assert ( popped & & " did not pop ! " ) ; <nl> + } <nl> + } ; <nl> + } <nl> + <nl> + ManagedValue SILGenFunction : : emitInlineFunction ( FuncExpr * body , Expr * args , <nl> + SGFContext C ) { <nl> + assert ( body - > getNumParamPatterns ( ) = = 1 & & <nl> + " can ' t inline curried functions yet " ) ; <nl> + <nl> + / / Create a scope for the inline function . <nl> + InliningScope scope ( * this , body , C ) ; <nl> + <nl> + / / Emit the inlined function ' s argument variables . <nl> + / / NB : We use ' InitializationForPattern : : Var ' because we really want new <nl> + / / local variables for all the arguments — their values haven ' t been evaluated <nl> + / / yet . <nl> + InitializationPtr argInit <nl> + = InitializationForPattern ( * this , InitializationForPattern : : Var ) <nl> + . visit ( body - > getBodyParamPatterns ( ) [ 0 ] ) ; <nl> + / / Emit the arguments into the variables . <nl> + emitExprInto ( args , argInit . get ( ) ) ; <nl> + <nl> + / / Emit the function body . <nl> + visit ( body - > getBody ( ) ) ; <nl> + <nl> + / / Clean up and return the result as an RValue . <nl> + return scope . pop ( ) ; <nl> + } <nl> + <nl> + ManagedValue SILGenFunction : : emitInlineFunction ( FuncExpr * body , RValue & & args , <nl> + SGFContext C ) { <nl> + assert ( body - > getNumParamPatterns ( ) = = 1 & & <nl> + " can ' t inline curried functions yet " ) ; <nl> + <nl> + / / Create a scope for the inline function . <nl> + InliningScope scope ( * this , body , C ) ; <nl> + <nl> + / / Emit the inlined function ' s argument variables . <nl> + / / NB : We use ' InitializationForPattern : : Var ' because we really want new <nl> + / / local variables for all the arguments — their values haven ' t been evaluated <nl> + / / yet . <nl> + InitializationPtr argInit <nl> + = InitializationForPattern ( * this , InitializationForPattern : : Var ) <nl> + . visit ( body - > getBodyParamPatterns ( ) [ 0 ] ) ; <nl> + / / Emit the arguments into the variables . <nl> + std : : move ( args ) . forwardInto ( * this , argInit . get ( ) ) ; <nl> + <nl> + / / Emit the function body . <nl> + visit ( body - > getBody ( ) ) ; <nl> + <nl> + / / Clean up and return the result as an RValue . <nl> + return scope . pop ( ) ; <nl> + <nl> } <nl> \ No newline at end of file <nl> mmm a / lib / SIL / SILGen / Scope . h <nl> ppp b / lib / SIL / SILGen / Scope . h <nl> namespace Lowering { <nl> <nl> / / / A Scope is a RAII object recording that a scope ( e . g . a brace <nl> / / / statement ) has been entered . 
<nl> - class LLVM_LIBRARY_VISIBILITY Scope { <nl> + template < CleanupsDepth CleanupManager : : * SCOPE > <nl> + class LLVM_LIBRARY_VISIBILITY ScopeImpl { <nl> CleanupManager & Cleanups ; <nl> CleanupsDepth Depth ; <nl> - CleanupsDepth SavedInnermostScope ; <nl> + CleanupsDepth SavedScope ; <nl> <nl> void popImpl ( ) { <nl> Cleanups . Stack . checkIterator ( Depth ) ; <nl> - Cleanups . Stack . checkIterator ( Cleanups . InnermostScope ) ; <nl> - assert ( Cleanups . InnermostScope = = Depth & & " popping scopes out of order " ) ; <nl> + Cleanups . Stack . checkIterator ( Cleanups . * SCOPE ) ; <nl> + assert ( Cleanups . * SCOPE = = Depth & & " popping scopes out of order " ) ; <nl> <nl> - Cleanups . InnermostScope = SavedInnermostScope ; <nl> + Cleanups . * SCOPE = SavedScope ; <nl> Cleanups . endScope ( Depth ) ; <nl> - Cleanups . Stack . checkIterator ( Cleanups . InnermostScope ) ; <nl> + Cleanups . Stack . checkIterator ( Cleanups . * SCOPE ) ; <nl> } <nl> <nl> public : <nl> - explicit Scope ( CleanupManager & Cleanups ) <nl> + explicit ScopeImpl ( CleanupManager & Cleanups ) <nl> : Cleanups ( Cleanups ) , Depth ( Cleanups . getCleanupsDepth ( ) ) , <nl> - SavedInnermostScope ( Cleanups . InnermostScope ) { <nl> + SavedScope ( Cleanups . * SCOPE ) { <nl> assert ( Depth . isValid ( ) ) ; <nl> - Cleanups . Stack . checkIterator ( Cleanups . InnermostScope ) ; <nl> - Cleanups . InnermostScope = Depth ; <nl> + Cleanups . Stack . checkIterator ( Cleanups . * SCOPE ) ; <nl> + Cleanups . * SCOPE = Depth ; <nl> } <nl> <nl> void pop ( ) { <nl> class LLVM_LIBRARY_VISIBILITY Scope { <nl> Depth = CleanupsDepth : : invalid ( ) ; <nl> } <nl> <nl> - ~ Scope ( ) { <nl> + ~ ScopeImpl ( ) { <nl> if ( Depth . isValid ( ) ) popImpl ( ) ; <nl> } <nl> + <nl> } ; <nl> <nl> + using Scope = CleanupManager : : Scope ; <nl> + <nl> / / / A FullExpr is a RAII object recording that a full - expression has <nl> / / / been entered . A full - expression is essentially a very small scope <nl> / / / for the temporaries in an expression , with the added complexity <nl> new file mode 100644 <nl> index 000000000000 . . a947e5ceff6f <nl> mmm / dev / null <nl> ppp b / test / SIL / force_inline . swift <nl> <nl> + / / RUN : % swift - emit - sil % s | FileCheck % s <nl> + <nl> + func [ force_inline ] easy ( x : String ) - > String { <nl> + return x <nl> + } <nl> + <nl> + func [ force_inline ] nested ( x : String ) - > String { <nl> + return easy ( x ) <nl> + } <nl> + <nl> + class C { func foo ( x : Int ) - > Int { return x } } <nl> + <nl> + func [ force_inline ] has_cleanups ( x : Int ) - > Int { <nl> + var c = C ( ) <nl> + return c . foo ( x ) <nl> + } <nl> + <nl> + protocol Ansible { } <nl> + <nl> + func [ force_inline ] address_only_return ( x : Ansible ) - > Ansible { <nl> + return x <nl> + } <nl> + <nl> + func [ force_inline ] void_return ( b : Bool ) { <nl> + if b { <nl> + return <nl> + } <nl> + } <nl> + <nl> + / / CHECK : sil @ _T12force_inline3fooFT1aSS1bSi1cPS_7Ansible_TSSSi_ <nl> + func foo ( a : String , b : Int , c : Ansible ) - > ( String , Int ) { <nl> + / / CHECK - NOT : function_ref { { . * } } @ _T12force_inline4easyFT1xSS_SS <nl> + var a2 = easy ( a ) <nl> + <nl> + / / CHECK - NOT : function_ref { { . * } } @ _T12force_inline6nestedFT1xSS_SS <nl> + nested ( a ) <nl> + / / CHECK : br <nl> + <nl> + / / Can use force - inline functions as values and they get called normally . <nl> + var f = easy <nl> + f ( a ) <nl> + <nl> + / / CHECK - NOT : function_ref { { . 
* } } @ _T12force_inline12has_cleanupsFT1xSi_Si <nl> + var b2 = has_cleanups ( b ) <nl> + <nl> + / / CHECK - NOT : function_ref { { . * } } @ _T12force_inline19address_only_returnFT1xPS_7Ansible_S0_ <nl> + var c2 = address_only_return ( c ) <nl> + <nl> + / / CHECK - NOT : function_ref { { . * } } @ _T12force_inline19address_only_returnFT1xPS_7Ansible_S0_ <nl> + void_return ( true ) <nl> + <nl> + return ( a2 , b2 ) <nl> + } <nl> | SILGen : Implement basic force - inlining . | apple/swift | e40d2f67691a9c62670b84a793f18a24d9c5e015 | 2013-05-04T00:46:51Z |
mmm a / cocos / 2d / CCActionInterval . h <nl> ppp b / cocos / 2d / CCActionInterval . h <nl> class CC_DLL JumpTo : public JumpBy <nl> CC_CONSTRUCTOR_ACCESS : <nl> JumpTo ( ) { } <nl> virtual ~ JumpTo ( ) { } <nl> + <nl> + private : <nl> CC_DISALLOW_COPY_AND_ASSIGN ( JumpTo ) ; <nl> } ; <nl> <nl> | move JumpTo ' s copy constructor and operator = to private | cocos2d/cocos2d-x | 30574af6c53968e1746e870be2e4ad14a720ed0e | 2014-07-10T06:30:08Z |
mmm a / src / inspector / value - mirror . cc <nl> ppp b / src / inspector / value - mirror . cc <nl> bool doesAttributeHaveObservableSideEffectOnGet ( v8 : : Local < v8 : : Context > context , <nl> } <nl> return false ; <nl> } <nl> - template < typename ArrayView , typename ArrayBuffer > <nl> - void addTypedArrayView ( v8 : : Local < v8 : : Context > context , <nl> - v8 : : Local < ArrayBuffer > buffer , size_t length , <nl> - const char * name , <nl> - ValueMirror : : PropertyAccumulator * accumulator ) { <nl> - accumulator - > Add ( PropertyMirror { <nl> - String16 ( name ) , false , false , false , true , false , <nl> - ValueMirror : : create ( context , ArrayView : : New ( buffer , 0 , length ) ) , nullptr , <nl> - nullptr , nullptr , nullptr } ) ; <nl> - } <nl> - <nl> - template < typename ArrayBuffer > <nl> - void addTypedArrayViews ( v8 : : Local < v8 : : Context > context , <nl> - v8 : : Local < ArrayBuffer > buffer , <nl> - ValueMirror : : PropertyAccumulator * accumulator ) { <nl> - / / TODO ( alph ) : these should be internal properties . <nl> - / / TODO ( v8 : 9308 ) : Reconsider how large arrays are previewed . <nl> - const size_t byte_length = buffer - > ByteLength ( ) ; <nl> - <nl> - size_t length = byte_length ; <nl> - if ( length > v8 : : TypedArray : : kMaxLength ) return ; <nl> - <nl> - addTypedArrayView < v8 : : Int8Array > ( context , buffer , length , " [ [ Int8Array ] ] " , <nl> - accumulator ) ; <nl> - addTypedArrayView < v8 : : Uint8Array > ( context , buffer , length , " [ [ Uint8Array ] ] " , <nl> - accumulator ) ; <nl> - <nl> - length = byte_length / 2 ; <nl> - if ( length > v8 : : TypedArray : : kMaxLength | | ( byte_length % 2 ) ! = 0 ) return ; <nl> - <nl> - addTypedArrayView < v8 : : Int16Array > ( context , buffer , length , " [ [ Int16Array ] ] " , <nl> - accumulator ) ; <nl> <nl> - length = byte_length / 4 ; <nl> - if ( length > v8 : : TypedArray : : kMaxLength | | ( byte_length % 4 ) ! = 0 ) return ; <nl> - <nl> - addTypedArrayView < v8 : : Int32Array > ( context , buffer , length , " [ [ Int32Array ] ] " , <nl> - accumulator ) ; <nl> - } <nl> } / / anonymous namespace <nl> <nl> ValueMirror : : ~ ValueMirror ( ) = default ; <nl> bool ValueMirror : : getProperties ( v8 : : Local < v8 : : Context > context , <nl> <nl> bool formatAccessorsAsProperties = <nl> clientFor ( context ) - > formatAccessorsAsProperties ( object ) ; <nl> - <nl> - if ( object - > IsArrayBuffer ( ) ) { <nl> - addTypedArrayViews ( context , object . As < v8 : : ArrayBuffer > ( ) , accumulator ) ; <nl> - } <nl> - if ( object - > IsSharedArrayBuffer ( ) ) { <nl> - addTypedArrayViews ( context , object . As < v8 : : SharedArrayBuffer > ( ) , <nl> - accumulator ) ; <nl> - } <nl> - <nl> for ( auto iterator = v8 : : debug : : PropertyIterator : : Create ( object ) ; <nl> ! iterator - > Done ( ) ; iterator - > Advance ( ) ) { <nl> bool isOwn = iterator - > is_own ( ) ; <nl> mmm a / src / runtime / runtime - debug . cc <nl> ppp b / src / runtime / runtime - debug . 
cc <nl> MaybeHandle < JSArray > Runtime : : GetInternalProperties ( Isolate * isolate , <nl> return factory - > NewJSArrayWithElements ( result ) ; <nl> } else if ( object - > IsJSArrayBuffer ( ) ) { <nl> Handle < JSArrayBuffer > js_array_buffer = Handle < JSArrayBuffer > : : cast ( object ) ; <nl> - Handle < FixedArray > result = factory - > NewFixedArray ( 1 * 2 ) ; <nl> - <nl> - Handle < String > is_detached_str = <nl> - factory - > NewStringFromAsciiChecked ( " [ [ IsDetached ] ] " ) ; <nl> - result - > set ( 0 , * is_detached_str ) ; <nl> - result - > set ( 1 , isolate - > heap ( ) - > ToBoolean ( js_array_buffer - > was_detached ( ) ) ) ; <nl> - return factory - > NewJSArrayWithElements ( result ) ; <nl> + if ( js_array_buffer - > was_detached ( ) ) { <nl> + / / Mark a detached JSArrayBuffer and such and don ' t even try to <nl> + / / create views for it , since the TypedArray constructors will <nl> + / / throw a TypeError when the underlying buffer is detached . <nl> + Handle < FixedArray > result = factory - > NewFixedArray ( 1 * 2 ) ; <nl> + Handle < String > is_detached_str = <nl> + factory - > NewStringFromAsciiChecked ( " [ [ IsDetached ] ] " ) ; <nl> + result - > set ( 0 , * is_detached_str ) ; <nl> + result - > set ( 1 , isolate - > heap ( ) - > ToBoolean ( true ) ) ; <nl> + return factory - > NewJSArrayWithElements ( result , PACKED_ELEMENTS ) ; <nl> + } <nl> + const size_t byte_length = js_array_buffer - > byte_length ( ) ; <nl> + static const ExternalArrayType kTypes [ ] = { <nl> + kExternalInt8Array , <nl> + kExternalUint8Array , <nl> + kExternalInt16Array , <nl> + kExternalInt32Array , <nl> + } ; <nl> + Handle < FixedArray > result = factory - > NewFixedArray ( arraysize ( kTypes ) * 2 ) ; <nl> + int index = 0 ; <nl> + for ( auto type : kTypes ) { <nl> + switch ( type ) { <nl> + # define TYPED_ARRAY_CASE ( Type , type , TYPE , ctype ) \ <nl> + case kExternal # # Type # # Array : { \ <nl> + if ( ( byte_length % sizeof ( ctype ) ) ! = 0 ) continue ; \ <nl> + Handle < String > typed_array_str = \ <nl> + factory - > NewStringFromStaticChars ( " [ [ " # Type " Array ] ] " ) ; \ <nl> + Handle < JSTypedArray > js_typed_array = \ <nl> + factory - > NewJSTypedArray ( kExternal # # Type # # Array , js_array_buffer , 0 , \ <nl> + byte_length / sizeof ( ctype ) ) ; \ <nl> + result - > set ( index + + , * typed_array_str ) ; \ <nl> + result - > set ( index + + , * js_typed_array ) ; \ <nl> + break ; \ <nl> + } <nl> + TYPED_ARRAYS ( TYPED_ARRAY_CASE ) <nl> + # undef TYPED_ARRAY_CASE <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + return factory - > NewJSArrayWithElements ( result , PACKED_ELEMENTS , index ) ; <nl> } <nl> return factory - > NewJSArray ( 0 ) ; <nl> } <nl> mmm a / test / inspector / debugger / get - properties - paused - expected . txt <nl> ppp b / test / inspector / debugger / get - properties - paused - expected . 
txt <nl> Running test : testTypedArrayWithoutLength <nl> __proto__ own object undefined <nl> <nl> Running test : testArrayBuffer <nl> - [ [ Int8Array ] ] <nl> - 0 own number 1 <nl> - 1 own number 1 <nl> - 2 own number 1 <nl> - 3 own number 1 <nl> - 4 own number 1 <nl> - 5 own number 1 <nl> - 6 own number 1 <nl> - 7 own number 1 <nl> - __proto__ own object undefined <nl> - [ [ Uint8Array ] ] <nl> - 0 own number 1 <nl> - 1 own number 1 <nl> - 2 own number 1 <nl> - 3 own number 1 <nl> - 4 own number 1 <nl> - 5 own number 1 <nl> - 6 own number 1 <nl> - 7 own number 1 <nl> - __proto__ own object undefined <nl> - [ [ Int16Array ] ] <nl> - 0 own number 257 <nl> - 1 own number 257 <nl> - 2 own number 257 <nl> - 3 own number 257 <nl> - __proto__ own object undefined <nl> - [ [ Int32Array ] ] <nl> - 0 own number 16843009 <nl> - 1 own number 16843009 <nl> - __proto__ own object undefined <nl> <nl> Running test : testArrayBufferWithBrokenUintCtor <nl> - [ [ Int8Array ] ] own object undefined <nl> - [ [ Uint8Array ] ] own object undefined <nl> __proto__ own object undefined <nl> Internal properties <nl> - [ [ IsDetached ] ] boolean false <nl> + [ [ Int8Array ] ] object undefined <nl> + [ [ Uint8Array ] ] object undefined <nl> mmm a / test / inspector / runtime / get - properties - expected . txt <nl> ppp b / test / inspector / runtime / get - properties - expected . txt <nl> Running test : testArrayBuffer <nl> 0 own number 16843009 <nl> 1 own number 16843009 <nl> __proto__ own object undefined <nl> - [ [ IsDetached ] ] false <nl> <nl> Running test : testDetachedArrayBuffer <nl> - [ [ Int8Array ] ] <nl> - __proto__ own object undefined <nl> - [ [ Uint8Array ] ] <nl> - __proto__ own object undefined <nl> - [ [ Int16Array ] ] <nl> - __proto__ own object undefined <nl> - [ [ Int32Array ] ] <nl> - __proto__ own object undefined <nl> [ [ IsDetached ] ] true <nl> <nl> Running test : testArrayBufferWithBrokenUintCtor <nl> - [ [ Int8Array ] ] own object undefined <nl> - [ [ Uint8Array ] ] own object undefined <nl> __proto__ own object undefined <nl> Internal properties <nl> - [ [ IsDetached ] ] boolean false <nl> + [ [ Int8Array ] ] object undefined <nl> + [ [ Uint8Array ] ] object undefined <nl> mmm a / test / inspector / runtime / get - properties . js <nl> ppp b / test / inspector / runtime / get - properties . js <nl> InspectorTest . runAsyncTestSuite ( [ <nl> await logGetPropertiesResult ( prop . value . objectId ) ; <nl> } <nl> for ( let prop of props . result . internalProperties ) { <nl> - InspectorTest . log ( prop . name + ' ' + prop . value . value ) ; <nl> + InspectorTest . log ( prop . name ) ; <nl> + await logGetPropertiesResult ( prop . value . objectId ) ; <nl> } <nl> } , <nl> <nl> | [ debug ] Make JSArrayBuffer ( pre ) views into internal properties . | v8/v8 | 2cab7ae90ebfee9a2c1507bc0d22f90094f4b614 | 2020-12-29T11:00:53Z |
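A note on the runtime-debug change above: TYPED_ARRAYS ( TYPED_ARRAY_CASE ) is an X-macro expansion, where a single macro holding the list of typed-array types stamps out one switch case per entry. Below is a minimal, self-contained sketch of the same technique; the names ELEMENT_TYPES, Kind, and ElementSize are illustrative, not V8's actual macros.

    #include <cstddef>
    #include <cstdio>

    // One list of (Name, ctype) pairs drives every expansion site.
    #define ELEMENT_TYPES(V) \
      V(Int8, signed char)   \
      V(Int16, short)        \
      V(Int32, int)

    enum class Kind { Int8, Int16, Int32 };

    // Expansion site: CASE is expanded once per list entry, giving one
    // switch case per element type, just like TYPED_ARRAY_CASE above.
    std::size_t ElementSize(Kind k) {
      switch (k) {
    #define CASE(Name, ctype) \
      case Kind::Name:        \
        return sizeof(ctype);
        ELEMENT_TYPES(CASE)
    #undef CASE
      }
      return 0;
    }

    int main() {
      std::printf("%zu\n", ElementSize(Kind::Int16));  // prints 2
    }

The payoff of the pattern, visible in the diff, is that adding a new element type to the one list updates every switch that expands it.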
mmm a / dlib / CMakeLists . txt <nl> ppp b / dlib / CMakeLists . txt <nl> if ( NOT TARGET dlib ) <nl> if ( DLIB_USE_CUDA ) <nl> find_package ( CUDA 7 . 5 ) <nl> <nl> + if ( CUDA_FOUND AND MSVC AND NOT CUDA_CUBLAS_LIBRARIES AND " $ { CMAKE_SIZEOF_VOID_P } " EQUAL " 4 " ) <nl> + message ( WARNING " You have CUDA installed , but we can ' t use it unless you put visual studio in 64bit mode . " ) <nl> + set ( CUDA_FOUND 0 ) <nl> + endif ( ) <nl> <nl> if ( CUDA_FOUND AND COMPILER_CAN_DO_CPP_11 ) <nl> <nl> | Added warning message about trying to use cuda from a 32 bit visual studio project . | davisking/dlib | 1e35214de2c017855aae66a6e0a394d632193f6d | 2016-10-08T18:28:21Z |
mmm a / hphp / hack / src / hhbc / emit_expression . rs <nl> ppp b / hphp / hack / src / hhbc / emit_expression . rs <nl> <nl> / / This source code is licensed under the MIT license found in the <nl> / / LICENSE file in the " hack " directory of this source tree . <nl> use ast_scope_rust : : Scope ; <nl> + use emit_type_constant_rust as emit_type_constant ; <nl> use env : : Env ; <nl> use hhbc_ast_rust : : * ; <nl> use instruction_sequence_rust : : InstrSeq ; <nl> use local_rust as local ; <nl> use naming_special_names_rust : : special_idents ; <nl> - use oxidized : : local_id ; <nl> + use options : : Options ; <nl> + use oxidized : : { aast , local_id } ; <nl> + <nl> + use std : : collections : : BTreeMap ; <nl> <nl> pub struct EmitJmpResult { <nl> / / generated instruction sequence <nl> mod inout_locals { <nl> } <nl> } <nl> } <nl> + <nl> + pub fn get_type_structure_for_hint ( <nl> + opts : & Options , <nl> + tparams : & [ & str ] , <nl> + targ_map : & BTreeMap < & str , i64 > , <nl> + hint : aast : : Hint , <nl> + ) - > InstrSeq { <nl> + let _tv = emit_type_constant : : hint_to_type_constant ( opts , tparams , targ_map , hint ) ; <nl> + unimplemented ! ( " TODO ( hrust ) after porting most of emit_adata " ) <nl> + } <nl> mmm a / hphp / hack / src / hhbc / emit_statement . rs <nl> ppp b / hphp / hack / src / hhbc / emit_statement . rs <nl> fn get_level < Ex , Fb , En , Hi > ( <nl> <nl> / / Wrapper functions <nl> <nl> - fn emit_return ( e : & Emitter , env : & Env ) - > InstrSeq { <nl> - let ctx = e . emit_state ( ) ; <nl> - tfr : : emit_return ( <nl> - e , <nl> - & ctx . verify_return , <nl> - & ctx . verify_out , <nl> - ctx . num_out , <nl> - false , <nl> - env , <nl> - ) <nl> + fn emit_return ( e : & mut Emitter , env : & mut Env ) - > InstrSeq { <nl> + tfr : : emit_return ( e , false , env ) <nl> } <nl> <nl> fn emit_break ( e : & mut Emitter , env : & mut Env , pos : & Pos ) - > InstrSeq { <nl> mmm a / hphp / hack / src / hhbc / try_finally_rewriter . rs <nl> ppp b / hphp / hack / src / hhbc / try_finally_rewriter . rs <nl> <nl> # ! 
[ allow ( dead_code ) ] <nl> <nl> use crate : : emit_statement : : { LazyState , Level } ; <nl> + use crate : : reified_generics_helpers as reified ; <nl> <nl> use ast_scope_rust as ast_scope ; <nl> + use emit_expression_rust as emit_expression ; <nl> use emit_fatal_rust as emit_fatal ; <nl> use emit_pos_rust : : emit_pos ; <nl> use env : : { emitter : : Emitter , iterator : : Iter , jump_targets as jt , Env } ; <nl> - use hhbc_ast_rust as hhbc_ast ; <nl> + use hhbc_ast_rust : : { self as hhbc_ast , Instruct } ; <nl> use instruction_sequence_rust : : InstrSeq ; <nl> use label : : Label ; <nl> use label_rust as label ; <nl> use local_rust as local ; <nl> - use oxidized : : { aast as a , pos : : Pos } ; <nl> + use oxidized : : pos : : Pos ; <nl> <nl> use bitflags : : bitflags ; <nl> <nl> use std : : { borrow : : Cow , collections : : BTreeMap } ; <nl> <nl> - type LabelMap < ' a > = BTreeMap < label : : Id , & ' a hhbc_ast : : Instruct > ; <nl> + type LabelMap < ' a > = BTreeMap < label : : Id , & ' a Instruct > ; <nl> <nl> pub ( super ) struct JumpInstructions < ' a > ( LabelMap < ' a > ) ; <nl> impl JumpInstructions < ' _ > { <nl> impl JumpInstructions < ' _ > { <nl> } <nl> <nl> / / / Delete Ret * , Break / Continue / Jmp ( Named ) instructions from the try body <nl> - <nl> pub ( super ) fn cleanup_try_body ( is : & InstrSeq ) - > InstrSeq { <nl> use hhbc_ast : : Instruct : : * ; <nl> use hhbc_ast : : InstructControlFlow : : { RetC , RetCSuspended , RetM } ; <nl> fn emit_goto ( <nl> } <nl> } <nl> <nl> - pub ( super ) fn emit_return ( <nl> - _e : & Emitter , <nl> - _verify_return : & Option < a : : Hint > , <nl> - _verify_out : & InstrSeq , <nl> - _num_out : usize , <nl> - _in_finally_epilogue : bool , <nl> - _env : & Env , <nl> - ) - > InstrSeq { <nl> - unimplemented ! ( " TODO ( hrust ) port reified_generics_helpers first " ) <nl> + pub ( super ) fn emit_return ( e : & mut Emitter , in_finally_epilogue : bool , env : & mut Env ) - > InstrSeq { <nl> + / / check if there is a try / finally region <nl> + let jt_gen = & env . jump_targets_gen ; <nl> + match jt_gen . jump_targets ( ) . get_closest_enclosing_finally_label ( ) { <nl> + None = > { <nl> + / / no finally blocks , but there might be some iterators that should be <nl> + / / released before exit - do it <nl> + let ctx = e . emit_state ( ) ; <nl> + let num_out = ctx . num_out ; <nl> + let verify_out = ctx . verify_out . clone ( ) ; <nl> + let verify_return = ctx . verify_return . clone ( ) ; <nl> + let release_iterators_instr = InstrSeq : : gather ( <nl> + jt_gen <nl> + . jump_targets ( ) <nl> + . iterators ( ) <nl> + . cloned ( ) <nl> + . map ( InstrSeq : : make_iterfree ) <nl> + . collect ( ) , <nl> + ) ; <nl> + let mut instrs = Vec : : with_capacity ( 5 ) ; <nl> + if in_finally_epilogue { <nl> + let load_retval_instr = <nl> + InstrSeq : : make_cgetl ( e . local_gen_mut ( ) . get_retval ( ) . clone ( ) ) ; <nl> + instrs . push ( load_retval_instr ) ; <nl> + } <nl> + let verify_return_instr = verify_return . map_or_else ( <nl> + | | InstrSeq : : make_empty ( ) , <nl> + | h | { <nl> + use reified : : ReificationLevel ; <nl> + let h = reified : : convert_awaitable ( env , h . clone ( ) ) ; <nl> + let h = reified : : remove_erased_generics ( env , h ) ; <nl> + match reified : : has_reified_type_constraint ( env , & h ) { <nl> + ReificationLevel : : Unconstrained = > InstrSeq : : make_empty ( ) , <nl> + ReificationLevel : : Not = > InstrSeq : : make_verify_ret_type_c ( ) , <nl> + ReificationLevel : : Maybe = > InstrSeq : : gather ( vec !
[ <nl> + emit_expression : : get_type_structure_for_hint ( <nl> + e . options ( ) , <nl> + & [ ] , <nl> + & BTreeMap : : new ( ) , <nl> + h , <nl> + ) , <nl> + InstrSeq : : make_verify_ret_type_ts ( ) , <nl> + ] ) , <nl> + ReificationLevel : : Definitely = > { <nl> + let check = InstrSeq : : gather ( vec ! [ <nl> + InstrSeq : : make_dup ( ) , <nl> + InstrSeq : : make_istypec ( hhbc_ast : : IstypeOp : : OpNull ) , <nl> + ] ) ; <nl> + reified : : simplify_verify_type ( <nl> + env , <nl> + & Pos : : make_none ( ) , <nl> + check , <nl> + & h , <nl> + InstrSeq : : make_verify_ret_type_ts ( ) , <nl> + e . label_gen_mut ( ) , <nl> + ) <nl> + } <nl> + } <nl> + } , <nl> + ) ; <nl> + instrs . extend ( vec ! [ <nl> + verify_return_instr , <nl> + verify_out , <nl> + release_iterators_instr , <nl> + if num_out ! = 0 { <nl> + InstrSeq : : make_retm ( num_out + 1 ) <nl> + } else { <nl> + InstrSeq : : make_retc ( ) <nl> + } , <nl> + ] ) ; <nl> + InstrSeq : : gather ( instrs ) <nl> + } <nl> + / / ret is in finally block and there might be iterators to release - <nl> + / / jump to finally block via Jmp <nl> + Some ( ( target_label , iterators_to_release ) ) = > { <nl> + let preamble = if in_finally_epilogue { <nl> + InstrSeq : : make_empty ( ) <nl> + } else { <nl> + let jt_gen = & mut env . jump_targets_gen ; <nl> + let save_state = emit_save_label_id ( e . local_gen_mut ( ) , jt_gen . get_id_for_return ( ) ) ; <nl> + let save_retval = InstrSeq : : gather ( vec ! [ <nl> + InstrSeq : : make_setl ( e . local_gen_mut ( ) . get_retval ( ) . clone ( ) ) , <nl> + InstrSeq : : make_popc ( ) , <nl> + ] ) ; <nl> + InstrSeq : : gather ( vec ! [ save_state , save_retval ] ) <nl> + } ; <nl> + InstrSeq : : gather ( vec ! [ <nl> + preamble , <nl> + emit_jump_to_label ( target_label , iterators_to_release ) , <nl> + / / emit ret instr as an indicator for try / finally rewriter to generate <nl> + / / finally epilogue , try / finally rewriter will remove it . <nl> + InstrSeq : : make_retc ( ) , <nl> + ] ) <nl> + } <nl> + } <nl> } <nl> <nl> bitflags ! { <nl> pub ( super ) fn emit_break_or_continue ( <nl> } <nl> <nl> fn emit_finally_epilogue ( <nl> - e : & Emitter , <nl> - _env : & Env , <nl> - _jump_instrs : ( ) , <nl> - _finally_end : Label , <nl> - ) - > InstrSeq { <nl> - let ctx = e . emit_state ( ) ; <nl> - let _verify_return = & ctx . verify_return ; <nl> - unimplemented ! ( " TODO ( hrust ) blocked on porting emit_return " ) <nl> + e : & mut Emitter , <nl> + env : & mut Env , <nl> + pos : & Pos , <nl> + jump_instrs : JumpInstructions , <nl> + finally_end : Label , <nl> + ) - > Result < InstrSeq , emit_fatal : : Error > { <nl> + fn emit_instr ( <nl> + e : & mut Emitter , <nl> + env : & mut Env , <nl> + pos : & Pos , <nl> + i : & Instruct , <nl> + ) - > Result < InstrSeq , emit_fatal : : Error > { <nl> + use hhbc_ast : : Instruct : : * ; <nl> + use hhbc_ast : : InstructControlFlow : : { RetC , RetCSuspended , RetM } ; <nl> + use hhbc_ast : : InstructSpecialFlow : : { Break , Continue , Goto } ; <nl> + let fail = | | { <nl> + panic ! 
( " unexpected instruction : only Ret * or Break / Continue / Jmp ( Named ) are expected " ) <nl> + } ; <nl> + match i { <nl> + & IContFlow ( ref cont_flow ) = > match cont_flow { <nl> + RetC | RetCSuspended | RetM ( _ ) = > Ok ( emit_return ( e , true , env ) ) , <nl> + _ = > fail ( ) , <nl> + } , <nl> + & ISpecialFlow ( Break ( level ) ) = > Ok ( emit_break_or_continue ( <nl> + e , <nl> + EmitBreakOrContinueFlags : : IS_BREAK | EmitBreakOrContinueFlags : : IN_FINALLY_EPILOGUE , <nl> + env , <nl> + pos , <nl> + level as Level , <nl> + ) ) , <nl> + & ISpecialFlow ( Continue ( level ) ) = > Ok ( emit_break_or_continue ( <nl> + e , <nl> + EmitBreakOrContinueFlags : : IN_FINALLY_EPILOGUE , <nl> + env , <nl> + pos , <nl> + level as Level , <nl> + ) ) , <nl> + & ISpecialFlow ( Goto ( ref label ) ) = > { <nl> + emit_goto ( true , label . clone ( ) , env , e . local_gen_mut ( ) ) <nl> + } <nl> + _ = > fail ( ) , <nl> + } <nl> + } ; <nl> + Ok ( if jump_instrs . 0 . is_empty ( ) { <nl> + InstrSeq : : make_empty ( ) <nl> + } else if jump_instrs . 0 . len ( ) = = 1 { <nl> + let ( _ , instr ) = jump_instrs . 0 . iter ( ) . next ( ) . unwrap ( ) ; <nl> + InstrSeq : : gather ( vec ! [ <nl> + emit_pos ( e , pos ) , <nl> + InstrSeq : : make_issetl ( e . local_gen_mut ( ) . get_label ( ) . clone ( ) ) , <nl> + InstrSeq : : make_jmpz ( finally_end ) , <nl> + emit_instr ( e , env , pos , instr ) ? , <nl> + ] ) <nl> + } else { <nl> + / / mimic HHVM behavior : <nl> + / / in some cases ids can be non - consequtive - this might happen i . e . return statement <nl> + / / appear in the block and it was assigned a high id before . <nl> + / / ( ( 3 , Return ) , ( 1 , Break ) , ( 0 , Continue ) ) <nl> + / / In thid case generate switch as <nl> + / / switch ( id ) { <nl> + / / L0 - > handler for continue <nl> + / / L1 - > handler for break <nl> + / / FinallyEnd - > empty <nl> + / / L3 - > handler for return <nl> + / / } <nl> + / / <nl> + / / This function builds a list of labels and jump targets for switch . <nl> + / / It is possible that cases ids are not consequtive <nl> + / / [ L1 , L2 , L4 ] . Vector of labels in switch should be dense so we need to <nl> + / / fill holes with a label that points to the end of finally block <nl> + / / [ End , L1 , L2 , End , L4 ] <nl> + let ( max_id , _ ) = jump_instrs . 0 . iter ( ) . next_back ( ) . unwrap ( ) ; <nl> + let ( mut labels , mut bodies ) = ( vec ! [ ] , vec ! [ ] ) ; <nl> + let mut n : isize = * max_id as isize ; <nl> + / / lst is already sorted - BTreeMap / IMap bindings took care of it <nl> + / / TODO : add is_sorted assert to make sure this behavior is preserved for labels <nl> + for ( id , instr ) in jump_instrs . 0 . into_iter ( ) . rev ( ) { <nl> + let mut done = false ; <nl> + while ! done { <nl> + / / NOTE ( hrust ) looping is equivalent to recursing without consuming instr <nl> + done = ( id as isize ) = = n ; <nl> + let ( label , body ) = if done { <nl> + let label = e . label_gen_mut ( ) . next_regular ( ) ; <nl> + let body = InstrSeq : : gather ( vec ! [ <nl> + InstrSeq : : make_label ( label . clone ( ) ) , <nl> + emit_instr ( e , env , pos , instr ) ? , <nl> + ] ) ; <nl> + ( label , body ) <nl> + } else { <nl> + ( finally_end . clone ( ) , InstrSeq : : make_empty ( ) ) <nl> + } ; <nl> + labels . push ( label ) ; <nl> + bodies . push ( body ) ; <nl> + n - = 1 ; <nl> + } <nl> + } <nl> + / / NOTE ( hrust ) : base case when empty and n > = 0 <nl> + for _ in 0 . . = n { <nl> + labels . push ( finally_end . clone ( ) ) ; <nl> + bodies . 
push ( InstrSeq : : make_empty ( ) ) ; <nl> + } <nl> + InstrSeq : : gather ( vec ! [ <nl> + emit_pos ( e , pos ) , <nl> + InstrSeq : : make_issetl ( e . local_gen_mut ( ) . get_label ( ) . clone ( ) ) , <nl> + InstrSeq : : make_jmpz ( finally_end ) , <nl> + InstrSeq : : make_cgetl ( e . local_gen_mut ( ) . get_label ( ) . clone ( ) ) , <nl> + InstrSeq : : make_switch ( labels ) , <nl> + InstrSeq : : gather ( bodies ) , <nl> + ] ) <nl> + } ) <nl> } <nl> <nl> / / TODO : This codegen is unnecessarily complex . Basically we are generating <nl> | Port emit_statement [ 3 / ? ] : rest of try_finally_rewriter | facebook/hhvm | 700df48d90984f637d8a096ea17aa0263298d94c | 2019-12-31T00:09:27Z |
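The comment block in emit_finally_epilogue above describes padding a sparse set of case ids into a dense switch table, with the holes pointing at the end of the finally block. Here is a small stand-alone sketch of just that padding step, in plain C++ with illustrative names; the real emitter builds InstrSeq label/body pairs, not strings.

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // Sparse case ids -> handlers, e.g. ((0, Continue), (1, Break), (3, Return)).
    // The switch table must be dense, so ids without a handler jump to the
    // "finally end" target instead: [Continue, Break, End, Return].
    std::vector<std::string> BuildDenseTargets(
        const std::map<int, std::string>& handlers,
        const std::string& finally_end) {
      if (handlers.empty()) return {};
      const int max_id = handlers.rbegin()->first;  // map keys are sorted
      std::vector<std::string> targets(max_id + 1, finally_end);
      for (const auto& entry : handlers) targets[entry.first] = entry.second;
      return targets;
    }

    int main() {
      const auto targets = BuildDenseTargets(
          {{0, "Continue"}, {1, "Break"}, {3, "Return"}}, "End");
      for (const auto& t : targets) std::printf("%s\n", t.c_str());
      // Prints: Continue, Break, End, Return (id 2 falls through to End)
    }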
mmm a / test / cpp / interop / interop_client . h <nl> ppp b / test / cpp / interop / interop_client . h <nl> namespace grpc { <nl> namespace testing { <nl> <nl> / / Function pointer for custom checks . <nl> - using CheckerFn = <nl> - std : : function < void ( const InteropClientContextInspector & , <nl> - const SimpleRequest * , const SimpleResponse * ) > ; <nl> + typedef std : : function < void ( const InteropClientContextInspector & , <nl> + const SimpleRequest * , const SimpleResponse * ) > <nl> + CheckerFn ; <nl> <nl> class InteropClient { <nl> public : <nl> | Switch out a using with a typedef | grpc/grpc | 9aa5f23645b624f22ef66150600bd1772c8d2654 | 2016-06-15T17:50:23Z |
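For context on the one-line change above: for a non-template alias, the using declaration and the typedef name exactly the same type, so the swap is purely syntactic. The commit message gives no motive; typedef also parsing as pre-C++11 code is a plausible but unstated one. A minimal check, where Req and Resp stand in for the test proto messages:

    #include <functional>
    #include <type_traits>

    struct Req {};
    struct Resp {};

    // C++11 alias-declaration, the spelling the diff removed:
    using CheckerFnUsing = std::function<void(const Req*, const Resp*)>;

    // typedef, the spelling the diff kept:
    typedef std::function<void(const Req*, const Resp*)> CheckerFnTypedef;

    // Both declarations introduce the very same type.
    static_assert(std::is_same<CheckerFnUsing, CheckerFnTypedef>::value,
                  "two spellings, one type");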
mmm a / . travis . yml <nl> ppp b / . travis . yml <nl> before_install : <nl> # whitelist <nl> branches : <nl> only : <nl> - - v3 . 11 <nl> + - v3 <nl> | compile v3 | cocos2d/cocos2d-x | 2df2e71fc9a84eb45f8fee31778972277b3f2592 | 2016-05-30T09:21:12Z |
mmm a / src / zone / accounting - allocator . h <nl> ppp b / src / zone / accounting - allocator . h <nl> class Zone ; <nl> class V8_EXPORT_PRIVATE AccountingAllocator { <nl> public : <nl> AccountingAllocator ( ) ; <nl> + AccountingAllocator ( const AccountingAllocator & ) = delete ; <nl> + AccountingAllocator & operator = ( const AccountingAllocator & ) = delete ; <nl> virtual ~ AccountingAllocator ( ) ; <nl> <nl> / / Allocates a new segment . Returns nullptr on failed allocation . <nl> class V8_EXPORT_PRIVATE AccountingAllocator { <nl> <nl> std : : unique_ptr < VirtualMemory > reserved_area_ ; <nl> std : : unique_ptr < base : : BoundedPageAllocator > bounded_page_allocator_ ; <nl> - <nl> - DISALLOW_COPY_AND_ASSIGN ( AccountingAllocator ) ; <nl> } ; <nl> <nl> } / / namespace internal <nl> mmm a / src / zone / zone - chunk - list . h <nl> ppp b / src / zone / zone - chunk - list . h <nl> class ZoneChunkList : public ZoneObject { <nl> } <nl> } <nl> <nl> + ZoneChunkList ( const ZoneChunkList & ) = delete ; <nl> + ZoneChunkList & operator = ( const ZoneChunkList & ) = delete ; <nl> + <nl> size_t size ( ) const { return size_ ; } <nl> bool is_empty ( ) const { return size ( ) = = 0 ; } <nl> <nl> class ZoneChunkList : public ZoneObject { <nl> size_t size_ = 0 ; <nl> Chunk * front_ = nullptr ; <nl> Chunk * back_ = nullptr ; <nl> - <nl> - DISALLOW_COPY_AND_ASSIGN ( ZoneChunkList ) ; <nl> } ; <nl> <nl> template < typename T , bool backwards , bool modifiable > <nl> mmm a / src / zone / zone - list . h <nl> ppp b / src / zone / zone - list . h <nl> class ZoneList final : public ZoneObject { <nl> <nl> ZoneList ( ZoneList < T > & & other ) V8_NOEXCEPT { * this = std : : move ( other ) ; } <nl> <nl> + ZoneList ( const ZoneList & ) = delete ; <nl> + ZoneList & operator = ( const ZoneList & ) = delete ; <nl> + <nl> / / The ZoneList objects are usually allocated as a fields in other <nl> / / zone - allocated objects for which destructors are not called anyway , so <nl> / / we are not going to clear the memory here as well . <nl> class ZoneList final : public ZoneObject { <nl> <nl> / / Resize the list . <nl> void Resize ( int new_capacity , Zone * zone ) ; <nl> - <nl> - DISALLOW_COPY_AND_ASSIGN ( ZoneList ) ; <nl> } ; <nl> <nl> } / / namespace internal <nl> | [ zone ] [ cleanup ] Remove uses of DISALLOW_COPY_AND_ASSIGN | v8/v8 | c1d85f5359c9973a4c8c94916b21d24cb37a8da7 | 2020-11-11T10:03:27Z |
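The zone cleanup above is the usual modernization away from a DISALLOW_COPY_AND_ASSIGN macro toward explicitly deleted members placed next to the constructors. A before/after sketch; the macro body shown is the classic Chromium-style definition and may differ in detail from V8's:

    // Old style: a macro in the private section hides the deleted members.
    #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
      TypeName(const TypeName&) = delete;      \
      TypeName& operator=(const TypeName&) = delete

    class OldStyle {
     public:
      OldStyle() = default;

     private:
      DISALLOW_COPY_AND_ASSIGN(OldStyle);
    };

    // New style: the deletions are spelled out beside the other special
    // members, which is what the diff does for AccountingAllocator,
    // ZoneChunkList, and ZoneList.
    class NewStyle {
     public:
      NewStyle() = default;
      NewStyle(const NewStyle&) = delete;
      NewStyle& operator=(const NewStyle&) = delete;
    };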
mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> tf_cc_test ( <nl> " / / tensorflow / cc : cc_ops " , <nl> " / / tensorflow / core / kernels : bcast_ops " , <nl> " / / tensorflow / core / kernels : cast_op " , <nl> + " / / tensorflow / core / kernels : concat_op " , <nl> " / / tensorflow / core / kernels : identity_op " , <nl> " / / tensorflow / core / kernels : matmul_op " , <nl> " / / third_party / eigen3 " , <nl> mmm a / tensorflow / core / common_runtime / constant_folding . cc <nl> ppp b / tensorflow / core / common_runtime / constant_folding . cc <nl> bool ReplaceTensorWithConstant ( Graph * graph , Device * partition_device , <nl> / / constraint , do not replace it . <nl> / / 3 ) If the constant op created does not have a kernel implementation <nl> / / for the device , do not use it . <nl> + / / 4 ) If the size of the constant in bytes is too large ( > 10M ) , do not <nl> + / / replace it . This prevents the size of the Graph from growing too large . <nl> / / TODO ( keveman ) : Consider adding a new constant op that has a kernel <nl> / / implementation for all types , but with HostMemory constraint on it ' s <nl> / / output . <nl> bool ReplaceTensorWithConstant ( Graph * graph , Device * partition_device , <nl> return false ; <nl> } <nl> } <nl> + if ( constant . TotalBytes ( ) > 10 * 1024 * 1024 ) { <nl> + return false ; <nl> + } <nl> + <nl> Node * n = tensor . first ; <nl> std : : vector < const Edge * > edges_to_remove ; <nl> mmm a / tensorflow / core / common_runtime / constant_folding_test . cc <nl> ppp b / tensorflow / core / common_runtime / constant_folding_test . cc <nl> class ConstantFoldingTest : public : : testing : : Test { <nl> return test : : graph : : Constant ( g_ . get ( ) , test : : AsTensor ( values , shape ) ) ; <nl> } <nl> <nl> + template < typename T > <nl> + Node * Constant ( T v ) { <nl> + return test : : graph : : Constant ( g_ . get ( ) , test : : AsScalar ( v ) ) ; <nl> + } <nl> + <nl> template < typename T > <nl> void ExpectNodeClose ( const Node * n , gtl : : ArraySlice < T > values , <nl> TensorShape shape ) { <nl> TEST_F ( ConstantFoldingTest , TestNoReplaceOnGPU ) { <nl> # endif / / GOOGLE_CUDA <nl> } <nl> <nl> + TEST_F ( ConstantFoldingTest , TestNoReplaceLargeConstant ) { <nl> + Reset ( ) ; <nl> + Graph * g = g_ . get ( ) ; <nl> + Node * s0 = <nl> + Constant < int > ( std : : vector < int > ( 5 * 1024 * 256 , 0 ) , { 5 * 1024 * 256 } ) ; <nl> + Node * s1 = Constant < int > ( std : : vector < int > ( 5 * 1024 * 256 + 1 , 0 ) , <nl> + { 5 * 1024 * 256 + 1 } ) ; <nl> + Node * concat_dim = Constant < int > ( 0 ) ; <nl> + g - > AddControlEdge ( g - > source_node ( ) , s0 ) ; <nl> + g - > AddControlEdge ( g - > source_node ( ) , s1 ) ; <nl> + / / Concat s0 and s1 . The resulting tensor would be just over 10M bytes <nl> + Node * concat = test : : graph : : Concat ( g , concat_dim , { s0 , s1 } ) ; <nl> + Node * concat_send = <nl> + test : : graph : : Send ( g , concat , " concat_send " , " sender " , 0 , " receiver " ) ; <nl> + g - > AddControlEdge ( concat_send , g - > sink_node ( ) ) ; <nl> + <nl> + / / The above concat should not have been constant folded . <nl> + EXPECT_FALSE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , g ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / graph / testlib . 
cc <nl> Node * Merge ( Graph * g , Node * in0 , gtl : : ArraySlice < string > remaining_in ) { <nl> return ret ; <nl> } <nl> <nl> + Node * Concat ( Graph * g , Node * concat_dim , gtl : : ArraySlice < Node * > tensors ) { <nl> + std : : vector < NodeBuilder : : NodeOut > nodeouts ; <nl> + nodeouts . reserve ( tensors . size ( ) ) ; <nl> + for ( auto const t : tensors ) { <nl> + nodeouts . emplace_back ( t ) ; <nl> + } <nl> + Node * ret ; <nl> + TF_CHECK_OK ( NodeBuilder ( g - > NewName ( " n " ) , " Concat " ) <nl> + . Input ( concat_dim ) <nl> + . Input ( nodeouts ) <nl> + . Finalize ( g , & ret ) ) ; <nl> + return ret ; <nl> + } <nl> + <nl> Node * Next ( Graph * g , const string & name , Node * input ) { <nl> Node * ret ; <nl> TF_CHECK_OK ( <nl> mmm a / tensorflow / core / graph / testlib . h <nl> ppp b / tensorflow / core / graph / testlib . h <nl> Node * BroadcastGradientArgs ( Graph * g , Node * s0 , Node * s1 ) ; <nl> / / Gets a tensor stored in the session state . <nl> Node * GetSessionTensor ( Graph * g , Node * in ) ; <nl> <nl> + / / Adds a Concat node in " g " . The first input is " concat_dim " , the <nl> + / / dimension to concatenate on , and the tensors to concatenate are <nl> + / / given in " tensors " . <nl> + Node * Concat ( Graph * g , Node * concat_dim , gtl : : ArraySlice < Node * > tensors ) ; <nl> + <nl> } / / end namespace graph <nl> } / / end namespace test <nl> } / / end namespace tensorflow <nl> | Do not add large constants ( > 10M ) to the graph while constant folding . | tensorflow/tensorflow | 73d026e588ce6deffa399ecb662d70b092127ede | 2016-04-16T03:32:43Z |
mmm a / src / video_core / shader / glsl_decompiler . cpp <nl> ppp b / src / video_core / shader / glsl_decompiler . cpp <nl> class GLSLDecompiler final { <nl> <nl> std : : string HNegate ( Operation operation ) { <nl> const auto GetNegate = [ & ] ( std : : size_t index ) - > std : : string { <nl> - if ( const auto pred = std : : get_if < PredicateNode > ( operation [ index ] ) ) { <nl> - if ( ! pred - > IsNegated ( ) ) { <nl> - switch ( pred - > GetIndex ( ) ) { <nl> - case Tegra : : Shader : : Pred : : UnusedIndex : <nl> - return " - 1 " ; <nl> - case Tegra : : Shader : : Pred : : NeverExecute : <nl> - return " 1 " ; <nl> - } <nl> - } <nl> - } <nl> return VisitOperand ( operation , index , Type : : Bool ) + " ? - 1 : 1 " ; <nl> } ; <nl> const std : : string value = ' ( ' + VisitOperand ( operation , 0 , Type : : HalfFloat ) + " * vec2 ( " + <nl> | glsl_decompiler : Remove HNegate inlining | yuzu-emu/yuzu | d6f76307febaa2deb05112bb2c29ed667210ee2b | 2019-01-15T20:54:52Z |
new file mode 100644 <nl> index 000000000000 . . e9255a9580ce <nl> mmm / dev / null <nl> ppp b / jstests / sharding / mongos_quiesce_mode . js <nl> <nl> + / * * <nl> + * Tests the behavior of quiesce mode on mongos , which is entered during shutdown . <nl> + * During quiesce mode , existing operations are allowed to continue and new operations are <nl> + * accepted . However , isMaster requests return a ShutdownInProgress error , so that clients can <nl> + * begin re - routing operations . <nl> + * @ tags : [ requires_fcv_46 ] <nl> + * / <nl> + <nl> + ( function ( ) { <nl> + " use strict " ; <nl> + <nl> + load ( " jstests / libs / parallel_shell_helpers . js " ) ; <nl> + load ( " jstests / libs / fail_point_util . js " ) ; <nl> + <nl> + const st = new ShardingTest ( { shards : [ { nodes : 1 } ] , mongos : 1 } ) ; <nl> + const mongos = st . s ; <nl> + const mongodPrimary = st . rs0 . getPrimary ( ) ; <nl> + <nl> + const dbName = " test " ; <nl> + const collName = " coll " ; <nl> + const mongosDB = mongos . getDB ( dbName ) ; <nl> + assert . commandWorked ( mongosDB . coll . insert ( [ { _id : 0 } , { _id : 1 } , { _id : 2 } , { _id : 3 } ] ) ) ; <nl> + <nl> + function runAwaitableIsMaster ( topologyVersionField ) { <nl> + assert . commandFailedWithCode ( db . runCommand ( { <nl> + isMaster : 1 , <nl> + topologyVersion : topologyVersionField , <nl> + maxAwaitTimeMS : 99999999 , <nl> + } ) , <nl> + ErrorCodes . ShutdownInProgress ) ; <nl> + } <nl> + <nl> + function runFind ( ) { <nl> + assert . eq ( 4 , db . getSiblingDB ( " test " ) . coll . find ( ) . itcount ( ) ) ; <nl> + } <nl> + <nl> + function runInsert ( ) { <nl> + assert . commandWorked ( db . getSiblingDB ( " test " ) . coll . insert ( { _id : 4 } ) ) ; <nl> + } <nl> + <nl> + jsTestLog ( " Create a cursor via mongos . " ) ; <nl> + let res = assert . commandWorked ( mongosDB . runCommand ( { find : collName , batchSize : 2 } ) ) ; <nl> + assert . eq ( 2 , res . cursor . firstBatch . length , res ) ; <nl> + let cursorId = res . cursor . id ; <nl> + <nl> + jsTestLog ( " Create a hanging read operation via mongos . " ) ; <nl> + let findCmdFailPoint = configureFailPoint ( mongos , " waitInFindBeforeMakingBatch " ) ; <nl> + let findCmd = startParallelShell ( runFind , mongos . port ) ; <nl> + findCmdFailPoint . wait ( ) ; <nl> + <nl> + / / Hanging the write operation on mongod should be fine since mongos will not return to <nl> + / / the client until it finishes . <nl> + jsTestLog ( " Create a hanging write operation via mongos . " ) ; <nl> + let insertCmdFailPoint = configureFailPoint ( mongodPrimary , " hangAfterCollectionInserts " ) ; <nl> + let insertCmd = startParallelShell ( runInsert , mongodPrimary . port ) ; <nl> + insertCmdFailPoint . wait ( ) ; <nl> + <nl> + jsTestLog ( " Create a hanging isMaster via mongos . " ) ; <nl> + res = assert . commandWorked ( mongos . adminCommand ( { isMaster : 1 } ) ) ; <nl> + assert ( res . hasOwnProperty ( " topologyVersion " ) , res ) ; <nl> + let topologyVersionField = res . topologyVersion ; <nl> + let isMasterFailPoint = configureFailPoint ( mongos , " waitForIsMasterResponse " ) ; <nl> + let isMaster = <nl> + startParallelShell ( funWithArgs ( runAwaitableIsMaster , topologyVersionField ) , mongos . port ) ; <nl> + isMasterFailPoint . wait ( ) ; <nl> + assert . eq ( 1 , mongos . getDB ( " admin " ) . serverStatus ( ) . connections . awaitingTopologyChanges ) ; <nl> + <nl> + jsTestLog ( " Transition mongos to quiesce mode . 
" ) ; <nl> + let quiesceModeFailPoint = configureFailPoint ( mongos , " hangDuringQuiesceMode " ) ; <nl> + / / We must skip validation due to the failpoint that hangs find commands . <nl> + st . stopMongos ( 0 / * mongos index * / , undefined / * opts * / , { waitpid : false } ) ; <nl> + quiesceModeFailPoint . wait ( ) ; <nl> + <nl> + jsTestLog ( " The waiting isMaster returns a ShutdownInProgress error . " ) ; <nl> + isMaster ( ) ; <nl> + <nl> + / / Test operation behavior during quiesce mode . <nl> + jsTestLog ( " The running read operation is allowed to finish . " ) ; <nl> + findCmdFailPoint . off ( ) ; <nl> + findCmd ( ) ; <nl> + <nl> + jsTestLog ( " getMores on existing cursors are allowed . " ) ; <nl> + res = assert . commandWorked ( mongosDB . runCommand ( { getMore : cursorId , collection : collName } ) ) ; <nl> + assert . eq ( 2 , res . cursor . nextBatch . length , res ) ; <nl> + <nl> + jsTestLog ( " The running write operation is allowed to finish . " ) ; <nl> + insertCmdFailPoint . off ( ) ; <nl> + insertCmd ( ) ; <nl> + <nl> + jsTestLog ( " New reads are allowed . " ) ; <nl> + assert . eq ( 5 , mongosDB . coll . find ( ) . itcount ( ) ) ; <nl> + <nl> + jsTestLog ( " New writes are allowed . " ) ; <nl> + assert . commandWorked ( mongosDB . coll . insert ( { _id : 5 } ) ) ; <nl> + <nl> + / / Restart mongos <nl> + quiesceModeFailPoint . off ( ) ; <nl> + st . restartMongos ( 0 ) ; <nl> + <nl> + st . stop ( ) ; <nl> + } ) ( ) ; <nl> mmm a / src / mongo / SConscript <nl> ppp b / src / mongo / SConscript <nl> mongos = env . Program ( <nl> ' s / committed_optime_metadata_hook ' , <nl> ' s / coreshard ' , <nl> ' s / is_mongos ' , <nl> + ' s / mongos_topology_coordinator ' , <nl> ' s / query / cluster_cursor_cleanup_job ' , <nl> ' s / sessions_collection_sharded ' , <nl> ' s / sharding_egress_metadata_hook_for_mongos ' , <nl> mmm a / src / mongo / s / mongos_topology_coordinator . cpp <nl> ppp b / src / mongo / s / mongos_topology_coordinator . cpp <nl> MONGO_INITIALIZER ( GenerateMongosInstanceId ) ( InitializerContext * ) { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + / / Signals that an isMaster request has started waiting . <nl> + MONGO_FAIL_POINT_DEFINE ( waitForIsMasterResponse ) ; <nl> / / Awaitable isMaster requests with the proper topologyVersions are expected to wait for <nl> / / maxAwaitTimeMS on mongos . When set , this failpoint will hang right before waiting on a <nl> / / topology change . <nl> MONGO_FAIL_POINT_DEFINE ( hangWhileWaitingForIsMasterResponse ) ; <nl> + / / Failpoint for hanging during quiesce mode on mongos . <nl> + MONGO_FAIL_POINT_DEFINE ( hangDuringQuiesceMode ) ; <nl> <nl> template < typename T > <nl> StatusOrStatusWith < T > futureGetNoThrowWithDeadline ( OperationContext * opCtx , <nl> std : : shared_ptr < const MongosIsMasterResponse > MongosTopologyCoordinator : : awaitIs <nl> IsMasterMetrics : : get ( opCtx ) - > incrementNumAwaitingTopologyChanges ( ) ; <nl> lk . unlock ( ) ; <nl> <nl> + if ( MONGO_unlikely ( waitForIsMasterResponse . shouldFail ( ) ) ) { <nl> + / / Used in tests that wait for this failpoint to be entered before shutting down mongos , <nl> + / / which is the only action that triggers a topology change . <nl> + LOGV2 ( 4695704 , " waitForIsMasterResponse failpoint enabled " ) ; <nl> + } <nl> + <nl> if ( MONGO_unlikely ( hangWhileWaitingForIsMasterResponse . shouldFail ( ) ) ) { <nl> LOGV2 ( 4695501 , " hangWhileWaitingForIsMasterResponse failpoint enabled " ) ; <nl> hangWhileWaitingForIsMasterResponse . 
pauseWhileSet ( opCtx ) ; <nl> void MongosTopologyCoordinator : : enterQuiesceMode ( ) { <nl> IsMasterMetrics : : get ( getGlobalServiceContext ( ) ) - > resetNumAwaitingTopologyChanges ( ) ; <nl> } <nl> <nl> + void MongosTopologyCoordinator : : enterQuiesceModeAndWait ( OperationContext * opCtx ) { <nl> + enterQuiesceMode ( ) ; <nl> + <nl> + if ( MONGO_unlikely ( hangDuringQuiesceMode . shouldFail ( ) ) ) { <nl> + LOGV2 ( 4695700 , " hangDuringQuiesceMode failpoint enabled " ) ; <nl> + hangDuringQuiesceMode . pauseWhileSet ( opCtx ) ; <nl> + } <nl> + <nl> + / / TODO SERVER - 46958 : Determine what the quiesce time should be by checking the <nl> + / / shutdownTimeoutMillisForSignaledShutdown mongos server parameter . <nl> + auto timeout = Milliseconds ( 100 ) ; <nl> + LOGV2 ( 4695701 , " Entering quiesce mode for mongos shutdown " , " quiesceTime " _attr = timeout ) ; <nl> + opCtx - > sleepFor ( timeout ) ; <nl> + LOGV2 ( 4695702 , " Exiting quiesce mode for mongos shutdown " ) ; <nl> + } <nl> + <nl> } / / namespace mongo <nl> mmm a / src / mongo / s / mongos_topology_coordinator . h <nl> ppp b / src / mongo / s / mongos_topology_coordinator . h <nl> class MongosTopologyCoordinator { <nl> <nl> / * * <nl> * We only enter quiesce mode during the shutdown process , which means the <nl> - * MongosTopologyCoordinator will never need to exit quiesce mode . While in quiesce mode , we <nl> - * allow operations to continue and accept new operations , but we fail isMaster requests with <nl> - * ShutdownInProgress . This function causes us to increment the topologyVersion and start <nl> - * failing isMaster requests with ShutdownInProgress . <nl> + * MongosTopologyCoordinator will never need to exit quiesce mode . This function causes us to <nl> + * increment the topologyVersion and start failing isMaster requests with ShutdownInProgress . <nl> * / <nl> void enterQuiesceMode ( ) ; <nl> <nl> + / * * <nl> + * While in quiesce mode , we will sleep for 100ms . This allows short running operations to <nl> + * continue . We will also accept new operations , but we fail isMaster requests with <nl> + * ShutdownInProgress . <nl> + * TODO SERVER - 46958 : Modify comment with correct timeout value . <nl> + * / <nl> + void enterQuiesceModeAndWait ( OperationContext * opCtx ) ; <nl> + <nl> TopologyVersion getTopologyVersion ( ) const { <nl> stdx : : lock_guard lk ( _mutex ) ; <nl> return _topologyVersion ; <nl> mmm a / src / mongo / s / mongos_topology_coordinator_test . cpp <nl> ppp b / src / mongo / s / mongos_topology_coordinator_test . cpp <nl> class MongosTopoCoordTest : public ServiceContextTest { <nl> public : <nl> virtual void setUp ( ) { <nl> _topo = std : : make_unique < MongosTopologyCoordinator > ( ) ; <nl> + <nl> + getServiceContext ( ) - > setFastClockSource ( std : : make_unique < ClockSourceMock > ( ) ) ; <nl> + _fastClock = dynamic_cast < ClockSourceMock * > ( getServiceContext ( ) - > getFastClockSource ( ) ) ; <nl> + <nl> getServiceContext ( ) - > setPreciseClockSource ( std : : make_unique < ClockSourceMock > ( ) ) ; <nl> + _preciseClock = <nl> + dynamic_cast < ClockSourceMock * > ( getServiceContext ( ) - > getPreciseClockSource ( ) ) ; <nl> + } <nl> + <nl> + virtual void tearDown ( ) { <nl> + _fastClock = nullptr ; <nl> + _preciseClock = nullptr ; <nl> } <nl> <nl> protected : <nl> class MongosTopoCoordTest : public ServiceContextTest { <nl> } <nl> <nl> / * * <nl> - * Gets the clock used by MongosTopologyCoordinator . <nl> + * Advance the time by millis on both clock source mocks . 
<nl> + * / <nl> + void advanceTime ( Milliseconds millis ) { <nl> + _fastClock - > advance ( millis ) ; <nl> + _preciseClock - > advance ( millis ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Assumes that the times on both clock source mocks is the same . <nl> * / <nl> - ClockSourceMock * getClock ( ) { <nl> - return dynamic_cast < ClockSourceMock * > ( getServiceContext ( ) - > getPreciseClockSource ( ) ) ; <nl> + Date_t now ( ) { <nl> + invariant ( _fastClock - > now ( ) = = _preciseClock - > now ( ) ) ; <nl> + return _fastClock - > now ( ) ; <nl> } <nl> <nl> private : <nl> unique_ptr < MongosTopologyCoordinator > _topo ; <nl> + / / The fast clock is used by OperationContext : : hasDeadlineExpired . <nl> + ClockSourceMock * _fastClock ; <nl> + / / The precise clock is used by waitForConditionOrInterruptNoAssertUntil . <nl> + ClockSourceMock * _preciseClock ; <nl> } ; <nl> <nl> TEST_F ( MongosTopoCoordTest , MongosTopologyVersionCounterInitializedAtStartup ) { <nl> TEST_F ( MongosTopoCoordTest , AwaitIsMasterResponseReturnsCurrentMongosTopologyVer <nl> auto opCtx = makeOperationContext ( ) ; <nl> auto maxAwaitTime = Milliseconds ( 5000 ) ; <nl> auto halfwayToMaxAwaitTime = maxAwaitTime / 2 ; <nl> - auto halfwayToDeadline = getClock ( ) - > now ( ) + halfwayToMaxAwaitTime ; <nl> - auto deadline = getClock ( ) - > now ( ) + maxAwaitTime ; <nl> + auto halfwayToDeadline = now ( ) + halfwayToMaxAwaitTime ; <nl> + auto deadline = now ( ) + maxAwaitTime ; <nl> <nl> / / isMaster request with the current TopologyVersion should attempt to wait for maxAwaitTimeMS . <nl> auto currentTopologyVersion = getTopoCoord ( ) . getTopologyVersion ( ) ; <nl> TEST_F ( MongosTopoCoordTest , AwaitIsMasterResponseReturnsCurrentMongosTopologyVer <nl> ASSERT_EQUALS ( topologyVersion . getProcessId ( ) , currentTopologyVersion . getProcessId ( ) ) ; <nl> } ) ; <nl> <nl> - / / Advance the clock halfway and make sure awaitIsMasterResponse did not return yet . <nl> - getClock ( ) - > advance ( halfwayToMaxAwaitTime ) ; <nl> - ASSERT_EQUALS ( halfwayToDeadline , getClock ( ) - > now ( ) ) ; <nl> + / / Advance the clocks halfway and make sure awaitIsMasterResponse did not return yet . <nl> + advanceTime ( halfwayToMaxAwaitTime ) ; <nl> + ASSERT_EQUALS ( halfwayToDeadline , now ( ) ) ; <nl> ASSERT_FALSE ( isMasterReturned ) ; <nl> <nl> - / / Advance the clock the rest of the way so that awaitIsMasterResponse times out . <nl> - getClock ( ) - > advance ( halfwayToMaxAwaitTime ) ; <nl> - ASSERT_EQUALS ( deadline , getClock ( ) - > now ( ) ) ; <nl> + / / Advance the clocks the rest of the way so that awaitIsMasterResponse times out . <nl> + advanceTime ( halfwayToMaxAwaitTime ) ; <nl> + ASSERT_EQUALS ( deadline , now ( ) ) ; <nl> getIsMasterThread . join ( ) ; <nl> ASSERT_TRUE ( isMasterReturned ) ; <nl> } <nl> TEST_F ( MongosTopoCoordTest , AwaitIsMasterResponseReturnsCurrentMongosTopologyVer <nl> TEST_F ( MongosTopoCoordTest , AwaitIsMasterErrorsWithHigherCounterAndSameProcessID ) { <nl> auto opCtx = makeOperationContext ( ) ; <nl> auto maxAwaitTime = Milliseconds ( 5000 ) ; <nl> - auto deadline = getClock ( ) - > now ( ) + maxAwaitTime ; <nl> + auto deadline = now ( ) + maxAwaitTime ; <nl> <nl> auto currentTopologyVersion = getTopoCoord ( ) . 
getTopologyVersion ( ) ; <nl> <nl> TEST_F ( MongosTopoCoordTest , AwaitIsMasterErrorsWithHigherCounterAndSameProcessID <nl> TEST_F ( MongosTopoCoordTest , AwaitIsMasterReturnsImmediatelyWithHigherCounterAndDifferentProcessID ) { <nl> auto opCtx = makeOperationContext ( ) ; <nl> auto maxAwaitTime = Milliseconds ( 5000 ) ; <nl> - auto deadline = getClock ( ) - > now ( ) + maxAwaitTime ; <nl> + auto deadline = now ( ) + maxAwaitTime ; <nl> <nl> auto currentTopologyVersion = getTopoCoord ( ) . getTopologyVersion ( ) ; <nl> <nl> TEST_F ( MongosTopoCoordTest , <nl> AwaitIsMasterReturnsImmediatelyWithCurrentCounterAndDifferentProcessID ) { <nl> auto opCtx = makeOperationContext ( ) ; <nl> auto maxAwaitTime = Milliseconds ( 5000 ) ; <nl> - auto deadline = getClock ( ) - > now ( ) + maxAwaitTime ; <nl> + auto deadline = now ( ) + maxAwaitTime ; <nl> <nl> auto currentTopologyVersion = getTopoCoord ( ) . getTopologyVersion ( ) ; <nl> <nl> TEST_F ( MongosTopoCoordTest , AwaitIsMasterReturnsImmediatelyWithNoTopologyVersion <nl> <nl> TEST_F ( MongosTopoCoordTest , IsMasterReturnsErrorInQuiesceMode ) { <nl> auto currentTopologyVersion = getTopoCoord ( ) . getTopologyVersion ( ) ; <nl> + auto opCtx = makeOperationContext ( ) ; <nl> + auto maxAwaitTime = Milliseconds ( 5000 ) ; <nl> + auto deadline = now ( ) + maxAwaitTime ; <nl> + <nl> getTopoCoord ( ) . enterQuiesceMode ( ) ; <nl> + <nl> ASSERT_EQUALS ( currentTopologyVersion . getCounter ( ) + 1 , <nl> getTopoCoord ( ) . getTopologyVersion ( ) . getCounter ( ) ) ; <nl> <nl> - auto opCtx = makeOperationContext ( ) ; <nl> - auto maxAwaitTime = Milliseconds ( 5000 ) ; <nl> - auto deadline = getClock ( ) - > now ( ) + maxAwaitTime ; <nl> - <nl> / / The following isMaster requests should fail immediately with ShutdownInProgress errors <nl> / / instead of following the usual error precedence . <nl> <nl> TEST_F ( MongosTopoCoordTest , IsMasterReturnsErrorOnEnteringQuiesceMode ) { <nl> auto opCtx = makeOperationContext ( ) ; <nl> auto currentTopologyVersion = getTopoCoord ( ) . getTopologyVersion ( ) ; <nl> auto maxAwaitTime = Milliseconds ( 5000 ) ; <nl> + auto deadline = now ( ) + maxAwaitTime ; <nl> <nl> / / This will cause the isMaster request to hang . <nl> auto waitForIsMasterFailPoint = <nl> TEST_F ( MongosTopoCoordTest , IsMasterReturnsErrorOnEnteringQuiesceMode ) { <nl> auto timesEnteredFailPoint = waitForIsMasterFailPoint - > setMode ( FailPoint : : alwaysOn ) ; <nl> ON_BLOCK_EXIT ( [ & ] { waitForIsMasterFailPoint - > setMode ( FailPoint : : off , 0 ) ; } ) ; <nl> stdx : : thread getIsMasterThread ( [ & ] { <nl> - auto maxAwaitTime = Milliseconds ( 5000 ) ; <nl> - auto deadline = getClock ( ) - > now ( ) + maxAwaitTime ; <nl> - <nl> ASSERT_THROWS_CODE ( <nl> getTopoCoord ( ) . awaitIsMasterResponse ( opCtx . get ( ) , currentTopologyVersion , deadline ) , <nl> AssertionException , <nl> TEST_F ( MongosTopoCoordTest , IsMasterReturnsErrorOnEnteringQuiesceMode ) { <nl> ASSERT_EQUALS ( currentTopologyVersion . getCounter ( ) + 1 , <nl> getTopoCoord ( ) . getTopologyVersion ( ) . getCounter ( ) ) ; <nl> waitForIsMasterFailPoint - > setMode ( FailPoint : : off ) ; <nl> - getClock ( ) - > advance ( maxAwaitTime ) ; <nl> + advanceTime ( maxAwaitTime ) ; <nl> getIsMasterThread . join ( ) ; <nl> } <nl> <nl> mmm a / src / mongo / s / server . cpp <nl> ppp b / src / mongo / s / server . cpp <nl> <nl> # include " mongo / s / grid . h " <nl> # include " mongo / s / is_mongos . h " <nl> # include " mongo / s / mongos_options . 
h " <nl> + # include " mongo / s / mongos_topology_coordinator . h " <nl> # include " mongo / s / query / cluster_cursor_cleanup_job . h " <nl> # include " mongo / s / query / cluster_cursor_manager . h " <nl> # include " mongo / s / service_entry_point_mongos . h " <nl> void cleanupTask ( ServiceContext * serviceContext ) { <nl> opCtx = uniqueTxn . get ( ) ; <nl> } <nl> <nl> + / / Enter quiesce mode so that existing and new short operations are allowed to finish . <nl> + / / At this point , we will start responding to any isMaster request with ShutdownInProgress <nl> + / / so that clients can re - route their operations . <nl> + if ( auto mongosTopCoord = MongosTopologyCoordinator : : get ( opCtx ) ) { <nl> + mongosTopCoord - > enterQuiesceModeAndWait ( opCtx ) ; <nl> + } <nl> + <nl> / / Shutdown the TransportLayer so that new connections aren ' t accepted <nl> if ( auto tl = serviceContext - > getTransportLayer ( ) ) { <nl> LOGV2_OPTIONS ( <nl> mmm a / src / mongo / shell / shardingtest . js <nl> ppp b / src / mongo / shell / shardingtest . js <nl> var ShardingTest = function ( params ) { <nl> <nl> / * * <nl> * Kills the mongos with index n . <nl> + * <nl> + * @ param { boolean } [ extraOptions . waitPid = true ] if true , we will wait for the process to <nl> + * terminate after stopping it . <nl> * / <nl> - this . stopMongos = function ( n , opts ) { <nl> + this . stopMongos = function ( n , opts , { <nl> + waitpid : waitpid = true , <nl> + } = { } ) { <nl> if ( otherParams . useBridge ) { <nl> - MongoRunner . stopMongos ( unbridgedMongos [ n ] , undefined , opts ) ; <nl> + MongoRunner . stopMongos ( unbridgedMongos [ n ] , undefined , opts , waitpid ) ; <nl> this [ " s " + n ] . stop ( ) ; <nl> } else { <nl> - MongoRunner . stopMongos ( this [ " s " + n ] , undefined , opts ) ; <nl> + MongoRunner . stopMongos ( this [ " s " + n ] , undefined , opts , waitpid ) ; <nl> } <nl> } ; <nl> <nl> | SERVER - 46957 Implement Quiesce Mode for mongos | mongodb/mongo | 3b4e9894e2971c2b42b83c336a48068810c91a9c | 2020-05-13T16:23:43Z |
mmm a / src / python / grpcio / grpc / __init__ . py <nl> ppp b / src / python / grpcio / grpc / __init__ . py <nl> def stop ( self , grace ) : <nl> " " " Stops this Server . <nl> <nl> This method immediately stop service of new RPCs in all cases . <nl> + <nl> If a grace period is specified , this method returns immediately <nl> and all RPCs active at the end of the grace period are aborted . <nl> - <nl> - If a grace period is not specified , then all existing RPCs are <nl> - teriminated immediately and the this method blocks until the last <nl> - RPC handler terminates . <nl> + If a grace period is not specified ( by passing None for ` grace ` ) , <nl> + all existing RPCs are aborted immediately and this method <nl> + blocks until the last RPC handler terminates . <nl> <nl> This method is idempotent and may be called at any time . <nl> - Passing a smaller grace value in subsequent call will have <nl> - the effect of stopping the Server sooner . Passing a larger <nl> - grace value in subsequent call * will not * have the effect of <nl> - stopping the server later ( i . e . the most restrictive grace <nl> - value is used ) . <nl> + Passing a smaller grace value in a subsequent call will have <nl> + the effect of stopping the Server sooner ( passing None will <nl> + have the effect of stopping the server immediately ) . Passing <nl> + a larger grace value in a subsequent call * will not * have the <nl> + effect of stopping the server later ( i . e . the most restrictive <nl> + grace value is used ) . <nl> <nl> Args : <nl> grace : A duration of time in seconds or None . <nl> | Merge pull request from durin42 / patch - 1 | grpc/grpc | ba4a4995327cd02de4e882615beacd78009b818a | 2018-06-01T18:14:05Z |
mmm a / src / mongo / db / initialize_server_global_state . cpp <nl> ppp b / src / mongo / db / initialize_server_global_state . cpp <nl> namespace mongo { <nl> } <nl> # endif <nl> <nl> - bool initializeServerGlobalState ( bool isMongodShutdownSpecialCase ) { <nl> + bool initializeServerGlobalState ( ) { <nl> <nl> Listener : : globalTicketHolder . resize ( cmdLine . maxConns ) ; <nl> <nl> namespace mongo { <nl> Logstream : : useSyslog ( sb . str ( ) . c_str ( ) ) ; <nl> } <nl> # endif <nl> - if ( ! cmdLine . logpath . empty ( ) & & ! isMongodShutdownSpecialCase ) { <nl> + if ( ! cmdLine . logpath . empty ( ) ) { <nl> fassert ( 16448 , ! cmdLine . logWithSyslog ) ; <nl> string absoluteLogpath = boost : : filesystem : : absolute ( <nl> cmdLine . logpath , cmdLine . cwd ) . string ( ) ; <nl> mmm a / src / mongo / db / initialize_server_global_state . h <nl> ppp b / src / mongo / db / initialize_server_global_state . h <nl> namespace mongo { <nl> * Perform initialization activity common across all mongo server types . <nl> * <nl> * Set up logging , daemonize the process , configure SSL , etc . <nl> - * <nl> - * If isMongodShutdownSpecialCase , perform this processing knowing that <nl> - * we ' re only bringing this process up to kill another mongod . <nl> - * <nl> - * TODO : Untie the knot that requires the isMongodShutdownSpecialCase parameter . <nl> * / <nl> - bool initializeServerGlobalState ( bool isMongodShutdownSpecialCase = false ) ; <nl> + bool initializeServerGlobalState ( ) ; <nl> <nl> void setupCoreSignals ( ) ; <nl> <nl> | Eliminate unused parameter to initializeServerGlobalState ( ) . | mongodb/mongo | edcded80dff8ef25eebf95cebf2099ab7da29cc0 | 2013-07-01T18:21:18Z |
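Beyond dropping the parameter, the surviving branch in the record above still does one subtle thing: a relative logpath is resolved against the cwd captured in cmdLine.cwd, presumably so the path stays stable across daemonization. A sketch of that resolution with std::filesystem standing in for boost::filesystem (assumed equivalent here):

    #include <filesystem>
    #include <string>

    // Resolve a configured logpath against the cwd captured at startup,
    // mirroring boost::filesystem::absolute(logpath, cwd) in the diff.
    std::string absoluteLogpath(const std::string& logpath,
                                const std::string& cwd) {
        std::filesystem::path p(logpath);
        if (p.is_absolute()) return p.string();
        return (std::filesystem::path(cwd) / p).lexically_normal().string();
    }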
mmm a / src / core / lib / iomgr / closure . h <nl> ppp b / src / core / lib / iomgr / closure . h <nl> grpc_closure * grpc_closure_create ( grpc_iomgr_cb_func cb , void * cb_arg ) ; <nl> # define GRPC_CLOSURE_LIST_INIT \ <nl> { NULL , NULL } <nl> <nl> + void grpc_closure_list_init ( grpc_closure_list * list ) ; <nl> + <nl> / * * add \ a closure to the end of \ a list <nl> and set \ a closure ' s result to \ a error * / <nl> void grpc_closure_list_append ( grpc_closure_list * list , grpc_closure * closure , <nl> mmm a / src / core / lib / iomgr / combiner . c <nl> ppp b / src / core / lib / iomgr / combiner . c <nl> struct grpc_combiner { <nl> / / lower bit - zero if orphaned <nl> / / other bits - number of items queued on the lock <nl> gpr_atm state ; <nl> + bool take_async_break_before_final_list ; <nl> + grpc_closure_list final_list ; <nl> grpc_closure continue_finishing ; <nl> } ; <nl> <nl> - static void continue_finishing ( grpc_exec_ctx * exec_ctx , void * arg , <nl> - grpc_error * error ) ; <nl> - <nl> grpc_combiner * grpc_combiner_create ( grpc_workqueue * optional_workqueue ) { <nl> grpc_combiner * lock = gpr_malloc ( sizeof ( * lock ) ) ; <nl> lock - > optional_workqueue = optional_workqueue ; <nl> gpr_atm_no_barrier_store ( & lock - > state , 1 ) ; <nl> gpr_mpscq_init ( & lock - > queue ) ; <nl> - grpc_closure_init ( & lock - > continue_finishing , continue_finishing , lock ) ; <nl> + lock - > take_async_break_before_final_list = false ; <nl> + grpc_closure_list_init ( & lock - > final_list ) ; <nl> return lock ; <nl> } <nl> <nl> void grpc_combiner_destroy ( grpc_combiner * lock ) { <nl> } <nl> } <nl> <nl> + static bool maybe_finish_one ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock ) ; <nl> + static void finish ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock ) ; <nl> + <nl> + static void continue_finishing_mainline ( grpc_exec_ctx * exec_ctx , void * arg , <nl> + grpc_error * error ) { <nl> + if ( maybe_finish_one ( exec_ctx , arg ) ) finish ( exec_ctx , arg ) ; <nl> + } <nl> + <nl> + static void execute_final ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock ) { <nl> + grpc_closure * c = lock - > final_list . head ; <nl> + grpc_closure_list_init ( & lock - > final_list ) ; <nl> + while ( c ! = NULL ) { <nl> + grpc_closure * next = c - > next_data . 
next ; <nl> + grpc_error * error = c - > error ; <nl> + c - > cb ( exec_ctx , c - > cb_arg , error ) ; <nl> + GRPC_ERROR_UNREF ( error ) ; <nl> + c = next ; <nl> + } <nl> + } <nl> + <nl> + static void continue_executing_final ( grpc_exec_ctx * exec_ctx , void * arg , <nl> + grpc_error * error ) { <nl> + execute_final ( exec_ctx , arg ) ; <nl> + finish ( exec_ctx , arg ) ; <nl> + } <nl> + <nl> + static bool start_execute_final ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock ) { <nl> + if ( lock - > take_async_break_before_final_list ) { <nl> + grpc_closure_init ( & lock - > continue_finishing , continue_executing_final , <nl> + lock ) ; <nl> + grpc_exec_ctx_sched ( exec_ctx , & lock - > continue_finishing , GRPC_ERROR_NONE , <nl> + lock - > optional_workqueue ) ; <nl> + return false ; <nl> + } else { <nl> + execute_final ( exec_ctx , lock ) ; <nl> + return true ; <nl> + } <nl> + } <nl> + <nl> static bool maybe_finish_one ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock ) { <nl> gpr_mpscq_node * n = gpr_mpscq_pop ( & lock - > queue ) ; <nl> if ( n = = NULL ) { <nl> / / queue is in an inconsistant state : use this as a cue that we should <nl> / / go off and do something else for a while ( and come back later ) <nl> + grpc_closure_init ( & lock - > continue_finishing , continue_finishing_mainline , <nl> + lock ) ; <nl> grpc_exec_ctx_sched ( exec_ctx , & lock - > continue_finishing , GRPC_ERROR_NONE , <nl> lock - > optional_workqueue ) ; <nl> return false ; <nl> static bool maybe_finish_one ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock ) { <nl> } <nl> <nl> static void finish ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock ) { <nl> + bool ( * executor ) ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock ) = <nl> + maybe_finish_one ; <nl> do { <nl> switch ( gpr_atm_full_fetch_add ( & lock - > state , - 2 ) ) { <nl> + case 5 : / / we ' re down to one queued item : if it ' s the final list we <nl> + case 4 : / / should do that <nl> + if ( ! grpc_closure_list_empty ( lock - > final_list ) ) { <nl> + executor = start_execute_final ; <nl> + } <nl> + break ; <nl> case 3 : / / had one count , one unorphaned - - > unlocked unorphaned <nl> return ; <nl> case 2 : / / and one count , one orphaned - - > unlocked and orphaned <nl> static void finish ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock ) { <nl> / / deleted lock <nl> GPR_UNREACHABLE_CODE ( return ) ; <nl> } <nl> - } while ( maybe_finish_one ( exec_ctx , lock ) ) ; <nl> - } <nl> - <nl> - static void continue_finishing ( grpc_exec_ctx * exec_ctx , void * arg , <nl> - grpc_error * error ) { <nl> - if ( maybe_finish_one ( exec_ctx , arg ) ) finish ( exec_ctx , arg ) ; <nl> + } while ( executor ( exec_ctx , lock ) ) ; <nl> } <nl> <nl> void grpc_combiner_execute ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock , <nl> void grpc_combiner_execute ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock , <nl> gpr_mpscq_push ( & lock - > queue , & cl - > next_data . atm_next ) ; <nl> } <nl> } <nl> + <nl> + void grpc_combiner_execute_finally ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock , <nl> + grpc_closure * closure , grpc_error * error , <nl> + bool force_async_break ) { <nl> + if ( force_async_break ) { <nl> + lock - > take_async_break_before_final_list = true ; <nl> + } <nl> + if ( grpc_closure_list_empty ( lock - > final_list ) ) { <nl> + gpr_atm_full_fetch_add ( & lock - > state , 2 ) ; <nl> + } <nl> + grpc_closure_list_append ( & lock - > final_list , closure , error ) ; <nl> + } <nl> mmm a / src / core / lib / iomgr / combiner . 
h <nl> ppp b / src / core / lib / iomgr / combiner . h <nl> void grpc_combiner_destroy ( grpc_combiner * lock ) ; <nl> / / Execute \ a action within the lock . <nl> void grpc_combiner_execute ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock , <nl> grpc_closure * closure , grpc_error * error ) ; <nl> + / / Execute \ a action within the lock just prior to unlocking . <nl> + / / if \ a force_async_break is additionally set , the combiner is forced to trip <nl> + / / through the workqueue between finishing the primary queue of combined <nl> + / / closures and executing the finally list . <nl> + / / Can only be called from within a closure scheduled by grpc_combiner_execute <nl> + void grpc_combiner_execute_finally ( grpc_exec_ctx * exec_ctx , grpc_combiner * lock , <nl> + grpc_closure * closure , grpc_error * error , <nl> + bool force_async_break ) ; <nl> <nl> # endif / * GRPC_CORE_LIB_IOMGR_COMBINER_H * / <nl> mmm a / test / core / iomgr / combiner_test . c <nl> ppp b / test / core / iomgr / combiner_test . c <nl> static void test_execute_many ( void ) { <nl> grpc_combiner_destroy ( lock ) ; <nl> } <nl> <nl> + static void test_execute_finally ( void ) { <nl> + gpr_log ( GPR_DEBUG , " test_execute_finally " ) ; <nl> + <nl> + grpc_combiner * lock = grpc_combiner_create ( NULL ) ; <nl> + bool done = false ; <nl> + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT ; <nl> + grpc_combiner_execute ( & exec_ctx , lock , grpc_closure_create ( add_finally , lock ) , <nl> + GRPC_ERROR_NONE ) ; <nl> + grpc_exec_ctx_finish ( & exec_ctx ) ; <nl> + GPR_ASSERT ( done ) ; <nl> + grpc_combiner_destroy ( lock ) ; <nl> + } <nl> + <nl> int main ( int argc , char * * argv ) { <nl> grpc_test_init ( argc , argv ) ; <nl> grpc_init ( ) ; <nl> test_no_op ( ) ; <nl> test_execute_one ( ) ; <nl> test_execute_many ( ) ; <nl> + test_execute_finally ( ) ; <nl> grpc_shutdown ( ) ; <nl> <nl> return 0 ; <nl> | Progress on a finalization list | grpc/grpc | a36857da24b126300234f86450a61b7b2a7d0daa | 2016-07-08T23:57:42Z |
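The counter juggling in the combiner's finish() above is easier to follow once the encoding is explicit: state = 2 * queued_items + alive_bit, so the lock starts at 1 (empty, alive), each queued closure adds 2, and the switch cases 5/4, 3 and 2 fall out of subtracting 2 — which is also why grpc_combiner_execute_finally adds 2 only when the final list was empty. A toy model of that encoding (hypothetical names, not the grpc_combiner API):

    #include <atomic>
    #include <cstdint>

    // state = 2 * queued_items + alive_bit; starts at 1: empty and alive.
    std::atomic<std::int64_t> state{1};

    // Queueing a closure adds 2; whoever moves state off 1 owns the lock.
    bool enqueue_takes_lock() {
        return state.fetch_add(2, std::memory_order_acq_rel) == 1;
    }

    // Finishing one closure subtracts 2.  Observing 3 means "drained, still
    // alive"; observing 2 means "drained and orphaned" (safe to destroy).
    int finish_one() {
        return static_cast<int>(
            state.fetch_add(-2, std::memory_order_acq_rel));
    }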
mmm a / src / library . js <nl> ppp b / src / library . js <nl> LibraryManager . library = { <nl> / / http : / / pubs . opengroup . org / onlinepubs / 009695399 / functions / times . html <nl> / / NOTE : This is fake , since we can ' t calculate real CPU time usage in JS . <nl> if ( buffer ! = = 0 ) { <nl> - memset ( buffer , 0 , ___tms_struct_layout . __size__ ) ; <nl> + _memset ( buffer , 0 , ___tms_struct_layout . __size__ ) ; <nl> } <nl> return 0 ; <nl> } , <nl> | fix for sys / times ( ) implementation | emscripten-core/emscripten | bf6bb1fa210dc0b6b93976dd0b4b4395f40a441d | 2012-09-09T10:34:02Z |
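The shim in the record above is a deliberate fake — CPU time cannot be measured from JS, so times() zero-fills the struct and reports success; the patch only corrects the mangled name, since Emscripten's JS-side libc functions carry a leading underscore (hence _memset). The native equivalent of what the shim does, as a sketch (returning 0 to match the shim rather than a real tick count):

    #include <cstring>
    #include <sys/times.h>

    // Fake times(): no real CPU accounting, so zero the output struct and
    // report success, exactly what the patched JS shim does.
    clock_t fake_times(struct tms* buffer) {
        if (buffer != nullptr) {
            std::memset(buffer, 0, sizeof(struct tms));
        }
        return 0;
    }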
mmm a / include / swift / Frontend / Frontend . h <nl> ppp b / include / swift / Frontend / Frontend . h <nl> class CompilerInstance { <nl> void freeContextAndSIL ( ) ; <nl> <nl> private : <nl> + SourceFile : : ImplicitModuleImportKind createSILModuleIfNecessary ( const FrontendOptions & options , <nl> + const std : : vector < unsigned > & BufferIDs , <nl> + unsigned MainBufferID , <nl> + const InputFileKind Kind ) ; <nl> + <nl> void parseALibraryFile ( unsigned BufferID , <nl> SourceFile : : ImplicitModuleImportKind modImpKind , <nl> ModuleDecl * underlying , <nl> mmm a / lib / Frontend / Frontend . cpp <nl> ppp b / lib / Frontend / Frontend . cpp <nl> void CompilerInstance : : performSema ( ) { <nl> const InputFileKind Kind = Invocation . getInputKind ( ) ; <nl> Context - > LoadedModules [ MainModule - > getName ( ) ] = getMainModule ( ) ; <nl> <nl> - auto modImpKind = SourceFile : : ImplicitModuleImportKind : : Stdlib ; <nl> - <nl> - if ( Kind = = InputFileKind : : IFK_SIL ) { <nl> - assert ( BufferIDs . size ( ) = = 1 ) ; <nl> - assert ( MainBufferID ! = NO_SUCH_BUFFER ) ; <nl> - / / Assume WMO , if a - primary - file option was not provided . <nl> - createSILModule ( ! options . PrimaryInput . hasValue ( ) ) ; <nl> - modImpKind = SourceFile : : ImplicitModuleImportKind : : None ; <nl> - } else if ( Invocation . getParseStdlib ( ) ) { <nl> - modImpKind = SourceFile : : ImplicitModuleImportKind : : Builtin ; <nl> - } <nl> + const auto modImpKind = createSILModuleIfNecessary ( options , BufferIDs , MainBufferID , Kind ) ; <nl> <nl> switch ( modImpKind ) { <nl> case SourceFile : : ImplicitModuleImportKind : : None : <nl> void CompilerInstance : : performSema ( ) { <nl> finishTypeCheckingMainModule ( ) ; <nl> } <nl> <nl> + SourceFile : : ImplicitModuleImportKind CompilerInstance : : createSILModuleIfNecessary ( const FrontendOptions & options , <nl> + const std : : vector < unsigned > & BufferIDs , <nl> + unsigned MainBufferID , <nl> + const InputFileKind Kind ) { <nl> + if ( Kind = = InputFileKind : : IFK_SIL ) { <nl> + assert ( BufferIDs . size ( ) = = 1 ) ; <nl> + assert ( MainBufferID ! = NO_SUCH_BUFFER ) ; <nl> + / / Assume WMO , if a - primary - file option was not provided . <nl> + createSILModule ( ! options . PrimaryInput . hasValue ( ) ) ; <nl> + return SourceFile : : ImplicitModuleImportKind : : None ; <nl> + } else if ( Invocation . getParseStdlib ( ) ) { <nl> + return SourceFile : : ImplicitModuleImportKind : : Builtin ; <nl> + } <nl> + return SourceFile : : ImplicitModuleImportKind : : Stdlib ; <nl> + } <nl> + <nl> void CompilerInstance : : parseALibraryFile ( unsigned BufferID , <nl> SourceFile : : ImplicitModuleImportKind modImpKind , <nl> ModuleDecl * underlying , <nl> | Pull out createSILModuleIfNecessary so modImpKind can be clearly constant . | apple/swift | 3566ad73f449a78639e13f4bcd67d55218a1d76b | 2017-09-14T23:24:01Z |
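The Swift refactor above is a pure constness move: once the branchy initialization lives in a helper that returns a value, the call site can bind it const and readers no longer scan performSema() for later mutations. The same shape in a generic sketch (hypothetical names):

    enum class ImportKind { None, Builtin, Stdlib };

    // After the refactor: all branching lives in one pure helper...
    ImportKind computeImportKind(bool isSIL, bool parseStdlib) {
        if (isSIL) return ImportKind::None;
        if (parseStdlib) return ImportKind::Builtin;
        return ImportKind::Stdlib;
    }

    void performSema(bool isSIL, bool parseStdlib) {
        // ...so the call site can make the value immutable at a glance.
        const ImportKind modImpKind = computeImportKind(isSIL, parseStdlib);
        (void)modImpKind;
    }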
mmm a / modules / gdscript / gdscript_editor . cpp <nl> ppp b / modules / gdscript / gdscript_editor . cpp <nl> static void _find_call_arguments ( GDScriptParser : : CompletionContext & p_context , c <nl> <nl> if ( GDScriptParser : : get_builtin_function ( call - > function_name ) < GDScriptFunctions : : FUNC_MAX ) { <nl> MethodInfo info = GDScriptFunctions : : get_info ( GDScriptParser : : get_builtin_function ( call - > function_name ) ) ; <nl> - <nl> - if ( ( info . name = = " load " | | info . name = = " preload " ) & & bool ( EditorSettings : : get_singleton ( ) - > get ( " text_editor / completion / complete_file_paths " ) ) ) { <nl> - _get_directory_contents ( EditorFileSystem : : get_singleton ( ) - > get_filesystem ( ) , r_result ) ; <nl> - } <nl> - <nl> r_arghint = _make_arguments_hint ( info , p_argidx ) ; <nl> return ; <nl> } else if ( GDScriptParser : : get_builtin_type ( call - > function_name ) < Variant : : VARIANT_MAX ) { <nl> mmm a / modules / gdscript / gdscript_parser . cpp <nl> ppp b / modules / gdscript / gdscript_parser . cpp <nl> GDScriptParser : : ExpressionNode * GDScriptParser : : parse_call ( ExpressionNode * p_pre <nl> } <nl> } <nl> <nl> - if ( ! check ( GDScriptTokenizer : : Token : : PARENTHESIS_CLOSE ) ) { <nl> - / / Arguments . <nl> - push_completion_call ( call ) ; <nl> - make_completion_context ( COMPLETION_CALL_ARGUMENTS , call , 0 , true ) ; <nl> - int argument_index = 0 ; <nl> - do { <nl> - make_completion_context ( COMPLETION_CALL_ARGUMENTS , call , argument_index + + , true ) ; <nl> - if ( check ( GDScriptTokenizer : : Token : : PARENTHESIS_CLOSE ) ) { <nl> - / / Allow for trailing comma . <nl> - break ; <nl> - } <nl> - ExpressionNode * argument = parse_expression ( false ) ; <nl> - if ( argument = = nullptr ) { <nl> - push_error ( R " ( Expected expression as the function argument . ) " ) ; <nl> - } else { <nl> - call - > arguments . push_back ( argument ) ; <nl> - } <nl> - } while ( match ( GDScriptTokenizer : : Token : : COMMA ) ) ; <nl> - pop_completion_call ( ) ; <nl> + / / Arguments . <nl> + CompletionType ct = COMPLETION_CALL_ARGUMENTS ; <nl> + if ( get_builtin_function ( call - > function_name ) = = GDScriptFunctions : : RESOURCE_LOAD ) { <nl> + ct = COMPLETION_RESOURCE_PATH ; <nl> } <nl> + push_completion_call ( call ) ; <nl> + int argument_index = 0 ; <nl> + do { <nl> + make_completion_context ( ct , call , argument_index + + , true ) ; <nl> + if ( check ( GDScriptTokenizer : : Token : : PARENTHESIS_CLOSE ) ) { <nl> + / / Allow for trailing comma . <nl> + break ; <nl> + } <nl> + ExpressionNode * argument = parse_expression ( false ) ; <nl> + if ( argument = = nullptr ) { <nl> + push_error ( R " ( Expected expression as the function argument . ) " ) ; <nl> + } else { <nl> + call - > arguments . push_back ( argument ) ; <nl> + } <nl> + ct = COMPLETION_CALL_ARGUMENTS ; <nl> + } while ( match ( GDScriptTokenizer : : Token : : COMMA ) ) ; <nl> + pop_completion_call ( ) ; <nl> <nl> pop_multiline ( ) ; <nl> consume ( GDScriptTokenizer : : Token : : PARENTHESIS_CLOSE , R " * ( Expected closing " ) " after call arguments . ) * " ) ; <nl> | Fix completion for built - in load function | godotengine/godot | 0ddd4097a649bb0be66924cb3105936390e30917 | 2020-11-10T11:00:08Z |
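The parser in the record above now assigns a completion context per argument slot: argument 0 of a resource_load call completes as a resource path, and the context snaps back to ordinary call arguments for everything after (which also makes the old load/preload special case in the editor dead code, hence its removal). A stripped-down sketch of that loop (hypothetical types, not the GDScript parser):

    #include <vector>

    enum class CompletionType { CallArguments, ResourcePath };

    // Tag each argument slot with a completion context; only argument 0 of
    // a load-style call offers file paths, later ones use the default.
    std::vector<CompletionType> tagArguments(bool isResourceLoad,
                                             int argCount) {
        std::vector<CompletionType> tags;
        CompletionType ct = isResourceLoad ? CompletionType::ResourcePath
                                           : CompletionType::CallArguments;
        for (int i = 0; i < argCount; ++i) {
            tags.push_back(ct);
            ct = CompletionType::CallArguments;  // reset after first argument
        }
        return tags;
    }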
mmm a / ports / abseil / CMakeLists . txt <nl> ppp b / ports / abseil / CMakeLists . txt <nl> add_sublibrary ( algorithm ) <nl> add_sublibrary ( base ) <nl> add_sublibrary ( container ) <nl> add_sublibrary ( debugging ) <nl> + add_sublibrary ( hash ) <nl> add_sublibrary ( memory ) <nl> add_sublibrary ( meta ) <nl> add_sublibrary ( numeric ) <nl> add_sublibrary ( utility ) <nl> target_link_public_libraries ( algorithm base meta ) <nl> target_link_public_libraries ( container algorithm base memory ) <nl> target_link_public_libraries ( debugging base ) <nl> + target_link_public_libraries ( hash base ) <nl> target_link_public_libraries ( memory meta ) <nl> target_link_public_libraries ( meta base ) <nl> target_link_public_libraries ( numeric base ) <nl> | Update CMakeLists . txt to include hash / * | microsoft/vcpkg | 0dcfc13003da120b1365fedf0fb9c5e0f137aab0 | 2018-11-30T18:25:05Z |
mmm a / contracts / test_api_multi_index / test_multi_index . cpp <nl> ppp b / contracts / test_api_multi_index / test_multi_index . cpp <nl> void test_multi_index : : idx_long_double_general ( uint64_t receiver , uint64_t code , <nl> <nl> auto secidx = table . get_index < N ( bysecondary ) > ( ) ; <nl> <nl> - long double tolerance = 1 . 0l ; / / std : : numeric_limits < double > : : epsilon ( ) ; <nl> - auto * ptr = & tolerance ; <nl> - print ( ( uint64_t ) ptr , " \ n " ) ; <nl> + long double tolerance = std : : min ( static_cast < long double > ( std : : numeric_limits < double > : : epsilon ( ) ) , <nl> + std : : numeric_limits < long double > : : epsilon ( ) * 1e7l ) ; <nl> print ( " tolerance = " , tolerance , " \ n " ) ; <nl> <nl> long double f = 1 . 0l ; <nl> void test_multi_index : : idx_long_double_general ( uint64_t receiver , uint64_t code , <nl> <nl> print ( " id = " , obj . id , " , sec = " , obj . sec , " , sec * id = " , prod , " \ n " ) ; <nl> <nl> - auto difference = std : : abs ( prod - expected_product ) ; <nl> - print ( " difference = " , difference , " \ n " ) ; <nl> - <nl> - bool test1 = difference < tolerance ; <nl> - print ( " is difference < tolerance ? " , test1 , " \ n " ) ; <nl> - bool test2 = tolerance > difference ; <nl> - print ( " is tolerance > difference ? " , test2 , " \ n " ) ; <nl> - bool test3 = difference < = tolerance ; <nl> - print ( " is difference < = tolerance ? " , test3 , " \ n " ) ; <nl> - bool test4 = tolerance > = difference ; <nl> - print ( " is tolerance > = difference ? " , test4 , " \ n " ) ; <nl> - <nl> - eosio_assert ( difference < tolerance , <nl> + eosio_assert ( std : : abs ( prod - expected_product ) < = tolerance , <nl> " idx_long_double_general - product of secondary and id not equal to expected_product within tolerance " ) ; <nl> <nl> - - expected_key ; <nl> void test_multi_index : : idx_long_double_general ( uint64_t receiver , uint64_t code , <nl> <nl> { <nl> auto itr = secidx . lower_bound ( expected_product / 5 . 5l ) ; <nl> - eosio_assert ( std : : abs ( 1 . 0l / itr - > sec - 5000000 . 0l ) < tolerance , " idx_long_double_general - lower_bound " ) ; <nl> + eosio_assert ( std : : abs ( 1 . 0l / itr - > sec - 5000000 . 0l ) < = tolerance , " idx_long_double_general - lower_bound " ) ; <nl> <nl> itr = secidx . upper_bound ( expected_product / 5 . 0l ) ; <nl> - eosio_assert ( std : : abs ( 1 . 0l / itr - > sec - 4000000 . 0l ) > tolerance , " idx_long_double_general - upper_bound " ) ; <nl> + eosio_assert ( std : : abs ( 1 . 0l / itr - > sec - 4000000 . 0l ) < = tolerance , " idx_long_double_general - upper_bound " ) ; <nl> <nl> } <nl> } <nl> mmm a / libraries / chain / wasm_interface . cpp <nl> ppp b / libraries / chain / wasm_interface . 
cpp <nl> class compiler_builtins : public context_aware_api { <nl> float128_t b = { { lb , hb } } ; <nl> ret = f128_div ( a , b ) ; <nl> } <nl> - int __eqtf2 ( uint64_t la , uint64_t ha , uint64_t lb , uint64_t hb ) { <nl> + int ___cmptf2 ( uint64_t la , uint64_t ha , uint64_t lb , uint64_t hb , int return_value_if_nan ) { <nl> float128_t a = { { la , ha } } ; <nl> float128_t b = { { lb , hb } } ; <nl> - return f128_eq ( a , b ) ; <nl> + if ( __unordtf2 ( la , ha , lb , hb ) ) <nl> + return return_value_if_nan ; <nl> + if ( f128_lt ( a , b ) ) <nl> + return - 1 ; <nl> + if ( f128_eq ( a , b ) ) <nl> + return 0 ; <nl> + return 1 ; <nl> + } <nl> + int __eqtf2 ( uint64_t la , uint64_t ha , uint64_t lb , uint64_t hb ) { <nl> + return ___cmptf2 ( la , ha , lb , hb , 1 ) ; <nl> } <nl> int __netf2 ( uint64_t la , uint64_t ha , uint64_t lb , uint64_t hb ) { <nl> - float128_t a = { { la , ha } } ; <nl> - float128_t b = { { lb , hb } } ; <nl> - return ! f128_eq ( a , b ) ; <nl> + return ___cmptf2 ( la , ha , lb , hb , 1 ) ; <nl> } <nl> int __getf2 ( uint64_t la , uint64_t ha , uint64_t lb , uint64_t hb ) { <nl> - float128_t a = { { la , ha } } ; <nl> - float128_t b = { { lb , hb } } ; <nl> - auto res = ! f128_lt ( a , b ) ; <nl> - idump ( ( la ) ( ha ) ( lb ) ( hb ) ( res ) ) ; <nl> - return res ; <nl> + return ___cmptf2 ( la , ha , lb , hb , - 1 ) ; <nl> } <nl> int __gttf2 ( uint64_t la , uint64_t ha , uint64_t lb , uint64_t hb ) { <nl> - float128_t a = { { la , ha } } ; <nl> - float128_t b = { { lb , hb } } ; <nl> - auto res = ! f128_lt ( a , b ) & & ! f128_eq ( a , b ) ; <nl> - idump ( ( la ) ( ha ) ( lb ) ( hb ) ( res ) ) ; <nl> - return res ; <nl> + return ___cmptf2 ( la , ha , lb , hb , 0 ) ; <nl> } <nl> int __letf2 ( uint64_t la , uint64_t ha , uint64_t lb , uint64_t hb ) { <nl> - float128_t a = { { la , ha } } ; <nl> - float128_t b = { { lb , hb } } ; <nl> - auto res = f128_le ( a , b ) ; <nl> - idump ( ( la ) ( ha ) ( lb ) ( hb ) ( res ) ) ; <nl> - return res ; <nl> + return ___cmptf2 ( la , ha , lb , hb , 1 ) ; <nl> } <nl> int __lttf2 ( uint64_t la , uint64_t ha , uint64_t lb , uint64_t hb ) { <nl> - float128_t a = { { la , ha } } ; <nl> - float128_t b = { { lb , hb } } ; <nl> - auto res = f128_lt ( a , b ) ; <nl> - idump ( ( la ) ( ha ) ( lb ) ( hb ) ( res ) ) ; <nl> - return res ; <nl> + return ___cmptf2 ( la , ha , lb , hb , 0 ) ; <nl> } <nl> int __cmptf2 ( uint64_t la , uint64_t ha , uint64_t lb , uint64_t hb ) { <nl> - float128_t a = { { la , ha } } ; <nl> - float128_t b = { { lb , hb } } ; <nl> - if ( f128_lt ( a , b ) ) <nl> - return - 1 ; <nl> - if ( f128_eq ( a , b ) ) <nl> - return 0 ; <nl> - return 1 ; <nl> + return ___cmptf2 ( la , ha , lb , hb , 1 ) ; <nl> } <nl> int __unordtf2 ( uint64_t la , uint64_t ha , uint64_t lb , uint64_t hb ) { <nl> float128_t a = { { la , ha } } ; <nl> float128_t b = { { lb , hb } } ; <nl> - if ( f128_isSignalingNaN ( a ) | | f128_isSignalingNaN ( b ) ) <nl> + if ( f128M_isSignalingNaN ( & a ) | | f128M_isSignalingNaN ( & b ) ) / / TODO / QUESTION : What about quiet NaNs ? Would prefer to use softfloat_api : : is_nan but there is a linker error regarding symbol _softfloat_isNaNF128M <nl> return 1 ; <nl> return 0 ; <nl> } <nl> | fix compiler_builtins long double comparisons to fix test for | EOSIO/eos | bbfb575fdd11e0fceae9775cbbd3bd065e954e3b | 2018-04-19T01:40:32Z |
mmm a / jstests / noPassthrough / router_transactions_metrics . js <nl> ppp b / jstests / noPassthrough / router_transactions_metrics . js <nl> <nl> const expectedFields = [ <nl> " totalStarted " , <nl> " totalAborted " , <nl> + " abortCause " , <nl> " totalCommitted " , <nl> " totalContactedParticipants " , <nl> " totalParticipantsAtCommit " , <nl> <nl> } <nl> } <nl> <nl> + class ExpectedAbortCause { <nl> + constructor ( ) { <nl> + } <nl> + } <nl> + <nl> class ExpectedTransactionServerStatus { <nl> constructor ( ) { <nl> this . totalStarted = 0 ; <nl> this . totalAborted = 0 ; <nl> + this . abortCause = new ExpectedAbortCause ( ) ; <nl> this . totalCommitted = 0 ; <nl> this . totalContactedParticipants = 0 ; <nl> this . totalParticipantsAtCommit = 0 ; <nl> <nl> " unexpected successful for " + commitType + " , commit types : " + <nl> tojson ( commitTypes ) ) ; <nl> } ) ; <nl> + <nl> + const abortCause = res . transactions . abortCause ; <nl> + Object . keys ( abortCause ) . forEach ( ( cause ) = > { <nl> + assert . eq ( expectedStats . abortCause [ cause ] , <nl> + abortCause [ cause ] , <nl> + " unexpected abortCause for " + cause + " , res : " + tojson ( stats ) ) ; <nl> + } ) ; <nl> + <nl> + assert . eq ( Object . keys ( abortCause ) . length , <nl> + Object . keys ( expectedStats . abortCause ) . length , <nl> + " the ' transactions ' field had an unexpected number of abort causes , res : " + <nl> + tojson ( stats ) ) ; <nl> } <nl> <nl> function abortFromUnderneath ( st , session ) { <nl> <nl> ErrorCodes . NoSuchTransaction ) ; <nl> <nl> expectedStats . totalAborted + = 1 ; <nl> + expectedStats . abortCause [ " NoSuchTransaction " ] = 1 ; <nl> expectedStats . commitTypes . singleShard . initiated + = 1 ; <nl> expectedStats . totalParticipantsAtCommit + = 1 ; <nl> / / The one shard is targeted for the commit then the implicit abort . <nl> <nl> ErrorCodes . NoSuchTransaction ) ; <nl> <nl> expectedStats . totalAborted + = 1 ; <nl> + expectedStats . abortCause [ " NoSuchTransaction " ] + = 1 ; <nl> expectedStats . commitTypes . singleWriteShard . initiated + = 1 ; <nl> expectedStats . totalParticipantsAtCommit + = 2 ; <nl> / / In a single write shard commit , all read shards are committed first , then the <nl> <nl> ErrorCodes . NoSuchTransaction ) ; <nl> <nl> expectedStats . totalAborted + = 1 ; <nl> + expectedStats . abortCause [ " NoSuchTransaction " ] + = 1 ; <nl> expectedStats . commitTypes . readOnly . initiated + = 1 ; <nl> expectedStats . totalParticipantsAtCommit + = 2 ; <nl> / / Both shards are targeted for the commit then the implicit abort . <nl> <nl> ErrorCodes . NoSuchTransaction ) ; <nl> <nl> expectedStats . totalAborted + = 1 ; <nl> + expectedStats . abortCause [ " NoSuchTransaction " ] + = 1 ; <nl> expectedStats . commitTypes . twoPhaseCommit . initiated + = 1 ; <nl> expectedStats . totalParticipantsAtCommit + = 2 ; <nl> / / There are no implicit aborts after two phase commit , so the coordinator is targeted once . <nl> <nl> <nl> expectedStats . totalStarted + = 1 ; <nl> expectedStats . totalAborted + = 1 ; <nl> + expectedStats . abortCause [ " NoSuchTransaction " ] + = 1 ; <nl> expectedStats . commitTypes . recoverWithToken . initiated + = 1 ; <nl> / / The participant stats shouldn ' t increase if we ' re recovering commit . <nl> / / There are no implicit aborts during commit recovery , so the recovery shard is targeted <nl> <nl> assert . commandWorked ( session . abortTransaction_forTesting ( ) ) ; <nl> <nl> expectedStats . totalAborted + = 1 ; <nl> + expectedStats . 
abortCause [ " abort " ] = 1 ; <nl> expectedStats . totalRequestsTargeted + = 1 ; <nl> verifyServerStatusValues ( st , expectedStats ) ; <nl> } ) ( ) ; <nl> <nl> <nl> expectedStats . totalStarted + = 1 ; <nl> expectedStats . totalAborted + = 1 ; <nl> + expectedStats . abortCause [ " DuplicateKey " ] = 1 ; <nl> expectedStats . totalContactedParticipants + = 1 ; <nl> expectedStats . totalRequestsTargeted + = 2 ; / / Plus one for the implicit abort . <nl> verifyServerStatusValues ( st , expectedStats ) ; <nl> <nl> assert . commandWorked ( session . abortTransaction_forTesting ( ) ) ; <nl> <nl> expectedStats . totalAborted + = 1 ; <nl> + expectedStats . abortCause [ " abort " ] + = 1 ; <nl> expectedStats . totalRequestsTargeted + = 1 ; <nl> verifyServerStatusValues ( st , expectedStats ) ; <nl> } ) ( ) ; <nl> mmm a / src / mongo / s / router_transactions_metrics . cpp <nl> ppp b / src / mongo / s / router_transactions_metrics . cpp <nl> void RouterTransactionsMetrics : : incrementCommitSuccessful ( <nl> } <nl> } <nl> <nl> + void RouterTransactionsMetrics : : incrementAbortCauseMap ( std : : string abortCause ) { <nl> + invariant ( ! abortCause . empty ( ) ) ; <nl> + <nl> + stdx : : lock_guard < stdx : : mutex > lock ( _abortCauseMutex ) ; <nl> + auto it = _abortCauseMap . find ( abortCause ) ; <nl> + if ( it = = _abortCauseMap . end ( ) ) { <nl> + _abortCauseMap . emplace ( std : : pair < std : : string , std : : int64_t > ( std : : move ( abortCause ) , 1 ) ) ; <nl> + } else { <nl> + it - > second + + ; <nl> + } <nl> + } <nl> + <nl> CommitTypeStats RouterTransactionsMetrics : : _constructCommitTypeStats ( const CommitStats & stats ) { <nl> CommitTypeStats commitStats ; <nl> commitStats . setInitiated ( stats . initiated . load ( ) ) ; <nl> void RouterTransactionsMetrics : : updateStats ( RouterTransactionsStats * stats ) { <nl> commitTypes . setTwoPhaseCommit ( _constructCommitTypeStats ( _twoPhaseCommitStats ) ) ; <nl> commitTypes . setRecoverWithToken ( _constructCommitTypeStats ( _recoverWithTokenCommitStats ) ) ; <nl> stats - > setCommitTypes ( commitTypes ) ; <nl> + <nl> + BSONObjBuilder bob ; <nl> + { <nl> + stdx : : lock_guard < stdx : : mutex > lock ( _abortCauseMutex ) ; <nl> + for ( auto const & abortCauseEntry : _abortCauseMap ) { <nl> + bob . append ( abortCauseEntry . first , abortCauseEntry . second ) ; <nl> + } <nl> + } <nl> + stats - > setAbortCause ( bob . obj ( ) ) ; <nl> } <nl> <nl> } / / namespace mongo <nl> mmm a / src / mongo / s / router_transactions_metrics . h <nl> ppp b / src / mongo / s / router_transactions_metrics . h <nl> class RouterTransactionsMetrics { <nl> void incrementCommitInitiated ( TransactionRouter : : CommitType commitType ) ; <nl> void incrementCommitSuccessful ( TransactionRouter : : CommitType commitType ) ; <nl> <nl> + void incrementAbortCauseMap ( std : : string abortCause ) ; <nl> + <nl> / * * <nl> * Appends the accumulated stats to a sharded transactions stats object for reporting . <nl> * / <nl> class RouterTransactionsMetrics { <nl> CommitStats _readOnlyCommitStats ; <nl> CommitStats _twoPhaseCommitStats ; <nl> CommitStats _recoverWithTokenCommitStats ; <nl> + <nl> + / / Mutual exclusion for _abortCauseMap <nl> + stdx : : mutex _abortCauseMutex ; <nl> + <nl> + / / Map tracking the total number of each abort cause for any multi - statement transaction that <nl> + / / was aborted through this router . 
<nl> + std : : map < std : : string , std : : int64_t > _abortCauseMap ; <nl> } ; <nl> <nl> } / / namespace mongo <nl> mmm a / src / mongo / s / router_transactions_stats . idl <nl> ppp b / src / mongo / s / router_transactions_stats . idl <nl> structs : <nl> totalAborted : <nl> type : long <nl> default : 0 <nl> + abortCause : <nl> + type : object <nl> totalContactedParticipants : <nl> type : long <nl> default : 0 <nl> mmm a / src / mongo / s / transaction_router . cpp <nl> ppp b / src / mongo / s / transaction_router . cpp <nl> void TransactionRouter : : _endTransactionTrackingIfNecessary ( OperationContext * opC <nl> auto routerTxnMetrics = RouterTransactionsMetrics : : get ( opCtx ) ; <nl> if ( terminationCause = = TerminationCause : : kAborted ) { <nl> routerTxnMetrics - > incrementTotalAborted ( ) ; <nl> + routerTxnMetrics - > incrementAbortCauseMap ( _abortCause ) ; <nl> } else { <nl> routerTxnMetrics - > incrementTotalCommitted ( ) ; <nl> routerTxnMetrics - > incrementCommitSuccessful ( _commitType ) ; <nl> | SERVER - 41373 Add abortCause to mongos transactions serverStatus output | mongodb/mongo | db2e286c92904fb48fa1414b486782a4e1a654a7 | 2019-06-11T21:58:03Z |
mmm a / src / core / surface / server . c <nl> ppp b / src / core / surface / server . c <nl> void grpc_server_setup_transport ( grpc_server * s , grpc_transport * transport , <nl> op . set_accept_stream_user_data = chand ; <nl> op . on_connectivity_state_change = & chand - > channel_connectivity_changed ; <nl> op . connectivity_state = & chand - > connectivity_state ; <nl> + op . disconnect = gpr_atm_acq_load ( & s - > shutdown_flag ) ; <nl> grpc_transport_perform_op ( transport , & op ) ; <nl> } <nl> <nl> | Reject incoming calls if the server is already shutting down | grpc/grpc | 98371d9ced65eda7fc216d51a4913e907791e9b9 | 2015-07-30T23:00:47Z |
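The one-liner above works because transport setup can happen after shutdown has already begun: the acquire load of shutdown_flag is mirrored into op.disconnect, so a transport accepted during shutdown is torn down as part of its own setup instead of serving calls. The flag discipline in isolation (hypothetical names, not the grpc transport API):

    #include <atomic>

    std::atomic<bool> shutdown_flag{false};

    struct TransportOp { bool disconnect = false; };

    // When wiring up a freshly accepted transport, mirror the server's
    // shutdown state into the op so late arrivals disconnect immediately.
    TransportOp make_setup_op() {
        TransportOp op;
        op.disconnect = shutdown_flag.load(std::memory_order_acquire);
        return op;
    }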
mmm a / tensorflow / compiler / mlir / xla / tests / legalize - tf . mlir <nl> ppp b / tensorflow / compiler / mlir / xla / tests / legalize - tf . mlir <nl> func @ assert ( % arg0 : tensor < i1 > , % arg1 : tensor < * xf32 > ) { <nl> / / tf . Unpack legalization <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - / / TODO ( b / 156340000 ) : Re - enable when fixed . <nl> - / / / / C - HECK - LABEL : @ unpack <nl> - / / func @ unpack ( % input : tensor < 4x3x6xf32 > ) - > ( tensor < 4x ? xf32 > , tensor < 4x6xf32 > , tensor < 4x6xf32 > ) { <nl> - / / / / C - HECK : % [ [ SLICE1 : . * ] ] = " mhlo . slice " ( % { { . * } } ) { limit_indices = dense < [ 4 , 1 , 6 ] > : tensor < 3xi64 > , start_indices = dense < 0 > : tensor < 3xi64 > , strides = dense < 1 > : tensor < 3xi64 > } : ( tensor < 4x3x6xf32 > ) - > tensor < 4x1x6xf32 > <nl> - / / / / C - HECK : % [ [ RES1 : . * ] ] = " mhlo . reshape " ( % [ [ SLICE1 ] ] ) : ( tensor < 4x1x6xf32 > ) - > tensor < 4x ? xf32 > <nl> - / / / / C - HECK : % [ [ SLICE2 : . * ] ] = " mhlo . slice " ( % { { . * } } ) { limit_indices = dense < [ 4 , 2 , 6 ] > : tensor < 3xi64 > , start_indices = dense < [ 0 , 1 , 0 ] > : tensor < 3xi64 > , strides = dense < 1 > : tensor < 3xi64 > } : ( tensor < 4x3x6xf32 > ) - > tensor < 4x1x6xf32 > <nl> - / / / / C - HECK : % [ [ RES2 : . * ] ] = " mhlo . reshape " ( % [ [ SLICE2 ] ] ) : ( tensor < 4x1x6xf32 > ) - > tensor < 4x6xf32 > <nl> - / / / / C - HECK : % [ [ SLICE3 : . * ] ] = " mhlo . slice " ( % { { . * } } ) { limit_indices = dense < [ 4 , 3 , 6 ] > : tensor < 3xi64 > , start_indices = dense < [ 0 , 2 , 0 ] > : tensor < 3xi64 > , strides = dense < 1 > : tensor < 3xi64 > } : ( tensor < 4x3x6xf32 > ) - > tensor < 4x1x6xf32 > <nl> - / / / / C - HECK : % [ [ RES3 : . * ] ] = " mhlo . reshape " ( % [ [ SLICE3 ] ] ) : ( tensor < 4x1x6xf32 > ) - > tensor < 4x6xf32 > <nl> - <nl> - / / % 0 : 3 = " tf . Unpack " ( % input ) { axis = 1 } : ( tensor < 4x3x6xf32 > ) - > ( tensor < 4x ? xf32 > , tensor < 4x6xf32 > , tensor < 4x6xf32 > ) <nl> - / / / / return % [ [ RES1 ] ] , % [ [ RES2 ] ] , % [ [ RES3 ] ] <nl> - / / return % 0 # 0 , % 0 # 1 , % 0 # 2 : tensor < 4x ? xf32 > , tensor < 4x6xf32 > , tensor < 4x6xf32 > <nl> - / / } <nl> - <nl> - / / / / C - HECK - LABEL : @ unpack_dynamic <nl> - / / func @ unpack_dynamic ( % input : tensor < ? x ? x2xf32 > ) - > ( tensor < ? x ? xf32 > , tensor < ? x ? xf32 > ) { <nl> - / / / / C - HECK : % [ [ SLICE1 : . * ] ] = " mhlo . slice " ( % { { . * } } ) { limit_indices = dense < [ - 1 , - 1 , 1 ] > : tensor < 3xi64 > , start_indices = dense < 0 > : tensor < 3xi64 > , strides = dense < 1 > : tensor < 3xi64 > } : ( tensor < ? x ? x2xf32 > ) - > tensor < ? x ? x1xf32 > <nl> - / / / / C - HECK : " mhlo . reshape " ( % [ [ SLICE1 ] ] ) : ( tensor < ? x ? x1xf32 > ) - > tensor < ? x ? xf32 > <nl> - / / / / C - HECK : % [ [ SLICE2 : . * ] ] = " mhlo . slice " ( % { { . * } } ) { limit_indices = dense < [ - 1 , - 1 , 2 ] > : tensor < 3xi64 > , start_indices = dense < [ 0 , 0 , 1 ] > : tensor < 3xi64 > , strides = dense < 1 > : tensor < 3xi64 > } : ( tensor < ? x ? x2xf32 > ) - > tensor < ? x ? x1xf32 > <nl> - / / / / C - HECK : " mhlo . reshape " ( % [ [ SLICE2 ] ] ) : ( tensor < ? x ? x1xf32 > ) - > tensor < ? x ? xf32 > <nl> - <nl> - / / % 0 : 2 = " tf . Unpack " ( % input ) { axis = - 1 } : ( tensor < ? x ? x2xf32 > ) - > ( tensor < ? x ? xf32 > , tensor < ? x ? xf32 > ) <nl> - / / return % 0 # 0 , % 0 # 1 : tensor < ? x ? 
xf32 > , tensor < ? x ? xf32 > <nl> - / / } <nl> + / / CHECK - LABEL : @ unpack <nl> + func @ unpack ( % input : tensor < 4x3x6xf32 > ) - > ( tensor < 4x6xf32 > , tensor < 4x6xf32 > , tensor < 4x6xf32 > ) { <nl> + / / CHECK : % [ [ SLICE1 : . * ] ] = " mhlo . slice " ( % { { . * } } ) { limit_indices = dense < [ 4 , 1 , 6 ] > : tensor < 3xi64 > , start_indices = dense < 0 > : tensor < 3xi64 > , strides = dense < 1 > : tensor < 3xi64 > } : ( tensor < 4x3x6xf32 > ) - > tensor < 4x1x6xf32 > <nl> + / / CHECK : % [ [ RES1 : . * ] ] = " mhlo . reshape " ( % [ [ SLICE1 ] ] ) : ( tensor < 4x1x6xf32 > ) - > tensor < 4x6xf32 > <nl> + / / CHECK : % [ [ SLICE2 : . * ] ] = " mhlo . slice " ( % { { . * } } ) { limit_indices = dense < [ 4 , 2 , 6 ] > : tensor < 3xi64 > , start_indices = dense < [ 0 , 1 , 0 ] > : tensor < 3xi64 > , strides = dense < 1 > : tensor < 3xi64 > } : ( tensor < 4x3x6xf32 > ) - > tensor < 4x1x6xf32 > <nl> + / / CHECK : % [ [ RES2 : . * ] ] = " mhlo . reshape " ( % [ [ SLICE2 ] ] ) : ( tensor < 4x1x6xf32 > ) - > tensor < 4x6xf32 > <nl> + / / CHECK : % [ [ SLICE3 : . * ] ] = " mhlo . slice " ( % { { . * } } ) { limit_indices = dense < [ 4 , 3 , 6 ] > : tensor < 3xi64 > , start_indices = dense < [ 0 , 2 , 0 ] > : tensor < 3xi64 > , strides = dense < 1 > : tensor < 3xi64 > } : ( tensor < 4x3x6xf32 > ) - > tensor < 4x1x6xf32 > <nl> + / / CHECK : % [ [ RES3 : . * ] ] = " mhlo . reshape " ( % [ [ SLICE3 ] ] ) : ( tensor < 4x1x6xf32 > ) - > tensor < 4x6xf32 > <nl> + <nl> + % 0 : 3 = " tf . Unpack " ( % input ) { axis = 1 } : ( tensor < 4x3x6xf32 > ) - > ( tensor < 4x6xf32 > , tensor < 4x6xf32 > , tensor < 4x6xf32 > ) <nl> + / / return % [ [ RES1 ] ] , % [ [ RES2 ] ] , % [ [ RES3 ] ] <nl> + return % 0 # 0 , % 0 # 1 , % 0 # 2 : tensor < 4x6xf32 > , tensor < 4x6xf32 > , tensor < 4x6xf32 > <nl> + } <nl> + <nl> + / / CHECK - LABEL : @ unpack_dynamic <nl> + func @ unpack_dynamic ( % input : tensor < ? x ? x2xf32 > ) - > ( tensor < ? x ? xf32 > , tensor < ? x ? xf32 > ) { <nl> + <nl> + / / CHECK : tf . Unpack <nl> + % 0 : 2 = " tf . Unpack " ( % input ) { axis = - 1 } : ( tensor < ? x ? x2xf32 > ) - > ( tensor < ? x ? xf32 > , tensor < ? x ? xf32 > ) <nl> + return % 0 # 0 , % 0 # 1 : tensor < ? x ? xf32 > , tensor < ? x ? xf32 > <nl> + } <nl> + <nl> + / / CHECK - LABEL : @ unpack_unranked <nl> + func @ unpack_unranked ( % input : tensor < * xf32 > ) - > ( tensor < ? x ? xf32 > , tensor < ? x ? xf32 > ) { <nl> + <nl> + / / CHECK : tf . Unpack <nl> + % 0 : 2 = " tf . Unpack " ( % input ) { axis = - 1 } : ( tensor < * xf32 > ) - > ( tensor < ? x ? xf32 > , tensor < ? x ? xf32 > ) <nl> + return % 0 # 0 , % 0 # 1 : tensor < ? x ? xf32 > , tensor < ? x ? xf32 > <nl> + } <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / tf . UnsortedSegment { Max | Min | Prod | Sum } legalization <nl> mmm a / tensorflow / compiler / mlir / xla / transforms / legalize_tf . cc <nl> ppp b / tensorflow / compiler / mlir / xla / transforms / legalize_tf . cc <nl> class ConvertUnpackOp : public OpRewritePattern < TF : : UnpackOp > { <nl> <nl> LogicalResult matchAndRewrite ( TF : : UnpackOp op , <nl> PatternRewriter & rewriter ) const override { <nl> - auto value_type = op . value ( ) . getType ( ) . cast < RankedTensorType > ( ) ; <nl> + auto value_type = op . value ( ) . getType ( ) . dyn_cast < RankedTensorType > ( ) ; <nl> if ( ! value_type ) return failure ( ) ; <nl> <nl> int64_t value_rank = value_type . 
getRank ( ) ; <nl> class ConvertUnpackOp : public OpRewritePattern < TF : : UnpackOp > { <nl> auto end_indices = llvm : : to_vector < 4 > ( value_type . getShape ( ) ) ; <nl> SmallVector < int64_t , 4 > strides ( value_rank , 1 ) ; <nl> <nl> - / / All HLO slice + reshape results used to replace the original tf . Unpack op . <nl> + / / All HLO slice + squeeze results used to replace the original tf . Unpack op . <nl> SmallVector < Value , 4 > results ; <nl> results . reserve ( op . getNumResults ( ) ) ; <nl> <nl> class ConvertUnpackOp : public OpRewritePattern < TF : : UnpackOp > { <nl> GetI64ElementsAttr ( end_indices , & rewriter ) , <nl> GetI64ElementsAttr ( strides , & rewriter ) ) ; <nl> / / Reshape to drop the axis dimension . <nl> - auto reshape_op = rewriter . create < mhlo : : ReshapeOp > ( <nl> - op . getLoc ( ) , op . getType ( i ) , slice_op ) ; <nl> - results . push_back ( reshape_op ) ; <nl> + auto result = <nl> + rewriter . create < TF : : SqueezeOp > ( op . getLoc ( ) , op . getType ( i ) , slice_op , <nl> + rewriter . getI64ArrayAttr ( op . axis ( ) ) ) ; <nl> + results . push_back ( result ) ; <nl> } <nl> <nl> rewriter . replaceOp ( op , results ) ; <nl> | Fix Unpack lowering to not crash for unranked inputs and generate valid reshape ops | tensorflow/tensorflow | 9b898e62d3ce614a1fb9f7db8795a69d8f7ef189 | 2020-11-19T21:57:14Z |
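Mechanically, the re-enabled lowering above carves one width-1 slice per output along the unpack axis and squeezes that axis away (tf.Squeeze rather than a raw reshape, and unranked inputs now simply fail to match instead of crashing); the only per-iteration state is the start/limit index pair. A shape-only sketch of that computation (plain C++, hypothetical helper, no MLIR):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // For output i of an unpack along `axis`, the slice spans [i, i+1) on
    // that axis and the full extent everywhere else; strides are all 1.
    std::pair<std::vector<int64_t>, std::vector<int64_t>>
    unpackSliceBounds(const std::vector<int64_t>& shape, int axis,
                      int64_t i) {
        std::vector<int64_t> start(shape.size(), 0);
        std::vector<int64_t> limit = shape;
        start[axis] = i;
        limit[axis] = i + 1;
        return {start, limit};
    }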
mmm a / . travis . yml <nl> ppp b / . travis . yml <nl> <nl> sudo : required <nl> <nl> + # TODO : gcc 5 , 7 <nl> + <nl> language : cpp <nl> matrix : <nl> include : <nl> - os : linux <nl> env : PYTHON = 3 . 5 CPP = 14 GCC = 6 <nl> - <nl> - - os : linux <nl> - env : PYTHON = 3 . 5 CPP = 14 GCC = 7 <nl> + addons : <nl> + apt : <nl> + sources : <nl> + - ubuntu - toolchain - r - test <nl> + packages : <nl> + - g + + - 6 <nl> + env : <nl> + - MATRIX_EVAL = " CC = gcc - 6 & & CXX = g + + - 6 " <nl> <nl> - os : osx <nl> osx_image : xcode9 <nl> mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> <nl> # Taichi - A Computer Graphics Library <nl> # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> <nl> - cmake_minimum_required ( VERSION 2 . 8 ) <nl> + cmake_minimum_required ( VERSION 3 . 0 ) <nl> <nl> project ( taichi ) <nl> <nl> mmm a / cmake / TaichiCXXFlags . cmake <nl> ppp b / cmake / TaichiCXXFlags . cmake <nl> if ( MSVC ) <nl> " $ { CMAKE_CXX_FLAGS } / MP / Z7 / D \ " _CRT_SECURE_NO_WARNINGS \ " / arch : AVX2 - DGL_DO_NOT_WARN_IF_MULTI_GL_VERSION_HEADERS_INCLUDED / std : c + + 14 " ) <nl> else ( ) <nl> set ( CMAKE_CXX_FLAGS <nl> - " $ { CMAKE_CXX_FLAGS } - std = c + + 14 - march = native - DGL_DO_NOT_WARN_IF_MULTI_GL_VERSION_HEADERS_INCLUDED - Wall " ) <nl> + " $ { CMAKE_CXX_FLAGS } - std = c + + 14 - march = native \ <nl> + - DGL_DO_NOT_WARN_IF_MULTI_GL_VERSION_HEADERS_INCLUDED - Wall " ) <nl> endif ( ) <nl> <nl> set ( CMAKE_CXX_FLAGS " $ { CMAKE_CXX_FLAGS } - DTC_PASS_EXCEPTION_TO_PYTHON " ) <nl> mmm a / install . py <nl> ppp b / install . py <nl> def run ( self ) : <nl> execute_command ( ' python3 get - pip . py - - user ' ) <nl> execute_command ( ' rm get - pip . py ' ) <nl> <nl> + <nl> + execute_command ( ' cmake - - version ' ) <nl> if get_os_name ( ) = = ' osx ' : <nl> # Check command existence <nl> check_command_existence ( ' git ' ) <nl> | print cmake version | taichi-dev/taichi | 7494159c9bc5a711115910aab554c55aa6130dab | 2018-02-24T15:17:11Z |
mmm a / docs / api / file - object . md <nl> ppp b / docs / api / file - object . md <nl> Example on getting a real path from a dragged - onto - the - app file : <nl> } ; <nl> holder . ondrop = ( e ) = > { <nl> e . preventDefault ( ) ; <nl> - const file = e . dataTransfer . files [ 0 ] ; <nl> - console . log ( ' File you dragged here is ' , file . path ) ; <nl> + for ( let f of e . dataTransfer . files ) { <nl> + console . log ( ' File ( s ) you dragged here : ' , f . path ) ; <nl> + } <nl> return false ; <nl> } ; <nl> < / script > <nl> | Update file - object . md | electron/electron | 0e24d148fdbe527cbd6c5b315adbeaf21c6112ec | 2016-07-20T04:51:58Z |
mmm a / src / core / client_config / lb_policies / round_robin . c <nl> ppp b / src / core / client_config / lb_policies / round_robin . c <nl> static void remove_disconnected_sc_locked ( round_robin_lb_policy * p , <nl> gpr_free ( node ) ; <nl> } <nl> <nl> + static void del_interested_parties_locked ( round_robin_lb_policy * p , <nl> + const size_t subchannel_idx ) { <nl> + pending_pick * pp ; <nl> + for ( pp = p - > pending_picks ; pp ; pp = pp - > next ) { <nl> + grpc_subchannel_del_interested_party ( p - > subchannels [ subchannel_idx ] , <nl> + pp - > pollset ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> void rr_destroy ( grpc_lb_policy * pol ) { <nl> round_robin_lb_policy * p = ( round_robin_lb_policy * ) pol ; <nl> size_t i ; <nl> ready_list * elem ; <nl> + for ( i = 0 ; i < p - > num_subchannels ; i + + ) { <nl> + del_interested_parties_locked ( p , i ) ; <nl> + } <nl> for ( i = 0 ; i < p - > num_subchannels ; i + + ) { <nl> GRPC_SUBCHANNEL_UNREF ( p - > subchannels [ i ] , " round_robin " ) ; <nl> } <nl> void rr_destroy ( grpc_lb_policy * pol ) { <nl> } <nl> <nl> void rr_shutdown ( grpc_lb_policy * pol ) { <nl> + size_t i ; <nl> round_robin_lb_policy * p = ( round_robin_lb_policy * ) pol ; <nl> pending_pick * pp ; <nl> gpr_mu_lock ( & p - > mu ) ; <nl> <nl> + for ( i = 0 ; i < p - > num_subchannels ; i + + ) { <nl> + del_interested_parties_locked ( p , i ) ; <nl> + } <nl> + <nl> p - > shutdown = 1 ; <nl> while ( ( pp = p - > pending_picks ) ) { <nl> p - > pending_picks = pp - > next ; <nl> static void rr_connectivity_changed ( void * arg , int iomgr_success ) { <nl> " [ RR CONN CHANGED ] TARGET < - - SUBCHANNEL % p ( NODE % p ) " , <nl> selected - > subchannel , selected ) ; <nl> } <nl> - grpc_subchannel_del_interested_party ( selected - > subchannel , pp - > pollset ) ; <nl> + grpc_subchannel_del_interested_party ( selected - > subchannel , <nl> + pp - > pollset ) ; <nl> grpc_iomgr_add_delayed_callback ( pp - > on_complete , 1 ) ; <nl> gpr_free ( pp ) ; <nl> } <nl> static void rr_connectivity_changed ( void * arg , int iomgr_success ) { <nl> & p - > connectivity_changed_cbs [ this_idx ] ) ; <nl> break ; <nl> case GRPC_CHANNEL_TRANSIENT_FAILURE : <nl> - grpc_connectivity_state_set ( & p - > state_tracker , <nl> - GRPC_CHANNEL_TRANSIENT_FAILURE , <nl> - " connecting_transient_failure " ) ; <nl> - <nl> + del_interested_parties_locked ( p , this_idx ) ; <nl> / * renew state notification * / <nl> grpc_subchannel_notify_on_state_change ( <nl> p - > subchannels [ this_idx ] , this_connectivity , <nl> & p - > connectivity_changed_cbs [ this_idx ] ) ; <nl> <nl> - / * remove for ready list if still present * / <nl> + / * remove from ready list if still present * / <nl> if ( p - > subchannel_index_to_readylist_node [ this_idx ] ! = NULL ) { <nl> remove_disconnected_sc_locked ( p , p - > subchannel_index_to_readylist_node [ this_idx ] ) ; <nl> p - > subchannel_index_to_readylist_node [ this_idx ] = NULL ; <nl> } <nl> + grpc_connectivity_state_set ( & p - > state_tracker , <nl> + GRPC_CHANNEL_TRANSIENT_FAILURE , <nl> + " connecting_transient_failure " ) ; <nl> break ; <nl> case GRPC_CHANNEL_FATAL_FAILURE : <nl> + del_interested_parties_locked ( p , this_idx ) ; <nl> if ( p - > subchannel_index_to_readylist_node [ this_idx ] ! = NULL ) { <nl> remove_disconnected_sc_locked ( p , p - > subchannel_index_to_readylist_node [ this_idx ] ) ; <nl> p - > subchannel_index_to_readylist_node [ this_idx ] = NULL ; <nl> mmm a / src / core / client_config / resolvers / sockaddr_resolver . 
c <nl> ppp b / src / core / client_config / resolvers / sockaddr_resolver . c <nl> static int parse_ipv6 ( grpc_uri * uri , struct sockaddr_storage * addr , int * len ) { <nl> <nl> static void do_nothing ( void * ignored ) { } <nl> static grpc_resolver * sockaddr_create ( <nl> - grpc_uri * uri , const char * lb_policy_name , <nl> + grpc_uri * uri , const char * default_lb_policy_name , <nl> grpc_subchannel_factory * subchannel_factory , <nl> int parse ( grpc_uri * uri , struct sockaddr_storage * dst , int * len ) ) { <nl> size_t i ; <nl> static grpc_resolver * sockaddr_create ( <nl> r = gpr_malloc ( sizeof ( sockaddr_resolver ) ) ; <nl> memset ( r , 0 , sizeof ( * r ) ) ; <nl> <nl> + r - > lb_policy_name = NULL ; <nl> + if ( 0 ! = strcmp ( uri - > query , " " ) ) { <nl> + gpr_slice query_slice ; <nl> + gpr_slice_buffer query_parts ; <nl> + <nl> + query_slice = gpr_slice_new ( uri - > query , strlen ( uri - > query ) , do_nothing ) ; <nl> + gpr_slice_buffer_init ( & query_parts ) ; <nl> + gpr_slice_split ( query_slice , " = " , & query_parts ) ; <nl> + GPR_ASSERT ( query_parts . count = = 2 ) ; <nl> + if ( 0 = = gpr_slice_str_cmp ( query_parts . slices [ 0 ] , " lb_policy " ) ) { <nl> + r - > lb_policy_name = gpr_dump_slice ( query_parts . slices [ 1 ] , GPR_DUMP_ASCII ) ; <nl> + } <nl> + gpr_slice_buffer_destroy ( & query_parts ) ; <nl> + gpr_slice_unref ( query_slice ) ; <nl> + } <nl> + if ( r - > lb_policy_name = = NULL ) { <nl> + r - > lb_policy_name = gpr_strdup ( default_lb_policy_name ) ; <nl> + } <nl> + <nl> path_slice = gpr_slice_new ( uri - > path , strlen ( uri - > path ) , do_nothing ) ; <nl> gpr_slice_buffer_init ( & path_parts ) ; <nl> <nl> static grpc_resolver * sockaddr_create ( <nl> gpr_mu_init ( & r - > mu ) ; <nl> grpc_resolver_init ( & r - > base , & sockaddr_resolver_vtable ) ; <nl> r - > subchannel_factory = subchannel_factory ; <nl> - r - > lb_policy_name = gpr_strdup ( lb_policy_name ) ; <nl> <nl> grpc_subchannel_factory_ref ( subchannel_factory ) ; <nl> return & r - > base ; <nl> static void sockaddr_factory_unref ( grpc_resolver_factory * factory ) { } <nl> static grpc_resolver * name # # _factory_create_resolver ( \ <nl> grpc_resolver_factory * factory , grpc_uri * uri , \ <nl> grpc_subchannel_factory * subchannel_factory ) { \ <nl> - return sockaddr_create ( uri , " round_robin " , \ <nl> + return sockaddr_create ( uri , " pick_first " , \ <nl> subchannel_factory , parse_ # # name ) ; \ <nl> } \ <nl> static const grpc_resolver_factory_vtable name # # _factory_vtable = { \ <nl> mmm a / test / core / client_config / lb_policies_test . c <nl> ppp b / test / core / client_config / lb_policies_test . c <nl> <nl> * <nl> * / <nl> <nl> + # include < stdarg . h > <nl> # include < string . h > <nl> <nl> # include < grpc / grpc . h > <nl> # include < grpc / support / alloc . h > <nl> # include < grpc / support / host_port . h > <nl> # include < grpc / support / log . h > <nl> + # include < grpc / support / time . h > <nl> # include < grpc / support / string_util . h > <nl> <nl> # include " src / core / channel / channel_stack . h " <nl> # include " src / core / surface / channel . h " <nl> # include " src / core / channel / client_channel . h " <nl> - # include " src / core / surface / server . h " <nl> # include " src / core / support / string . h " <nl> + # include " src / core / surface / server . h " <nl> # include " test / core / util / test_config . h " <nl> # include " test / core / util / port . h " <nl> # include " test / core / end2end / cq_verifier . 
h " <nl> typedef struct test_spec { <nl> int * * kill_at ; <nl> int * * revive_at ; <nl> <nl> + const char * description ; <nl> + <nl> verifier_fn verifier ; <nl> <nl> } test_spec ; <nl> int * perform_request ( servers_fixture * f , grpc_channel * client , <nl> } <nl> <nl> static void assert_channel_connectivity ( <nl> - grpc_channel * ch , grpc_connectivity_state expected_conn_state ) { <nl> + grpc_channel * ch , size_t num_accepted_conn_states , <nl> + grpc_connectivity_state accepted_conn_states , . . . ) { <nl> + size_t i ; <nl> grpc_channel_stack * client_stack ; <nl> grpc_channel_element * client_channel_filter ; <nl> grpc_connectivity_state actual_conn_state ; <nl> + va_list ap ; <nl> <nl> client_stack = grpc_channel_get_channel_stack ( ch ) ; <nl> client_channel_filter = grpc_channel_stack_last_element ( client_stack ) ; <nl> + <nl> actual_conn_state = grpc_client_channel_check_connectivity_state ( <nl> client_channel_filter , 0 / * don ' t try to connect * / ) ; <nl> - GPR_ASSERT ( actual_conn_state = = expected_conn_state ) ; <nl> + va_start ( ap , accepted_conn_states ) ; <nl> + for ( i = 0 ; i < num_accepted_conn_states ; i + + ) { <nl> + va_arg ( ap , grpc_connectivity_state ) ; <nl> + if ( actual_conn_state = = accepted_conn_states ) { <nl> + break ; <nl> + } <nl> + } <nl> + va_end ( ap ) ; <nl> + if ( i = = num_accepted_conn_states ) { <nl> + char * * accepted_strs = gpr_malloc ( sizeof ( char * ) * num_accepted_conn_states ) ; <nl> + char * accepted_str_joined ; <nl> + va_start ( ap , accepted_conn_states ) ; <nl> + for ( i = 0 ; i < num_accepted_conn_states ; i + + ) { <nl> + va_arg ( ap , grpc_connectivity_state ) ; <nl> + GPR_ASSERT ( gpr_asprintf ( & accepted_strs [ i ] , " % d " , accepted_conn_states ) > <nl> + 0 ) ; <nl> + } <nl> + va_end ( ap ) ; <nl> + accepted_str_joined = gpr_strjoin_sep ( ( const char * * ) accepted_strs , <nl> + num_accepted_conn_states , " , " , NULL ) ; <nl> + gpr_log ( <nl> + GPR_ERROR , <nl> + " Channel connectivity assertion failed : expected < one of [ % s ] > , got % d " , <nl> + accepted_str_joined , actual_conn_state ) ; <nl> + <nl> + for ( i = 0 ; i < num_accepted_conn_states ; i + + ) { <nl> + gpr_free ( accepted_strs [ i ] ) ; <nl> + } <nl> + gpr_free ( accepted_strs ) ; <nl> + gpr_free ( accepted_str_joined ) ; <nl> + abort ( ) ; <nl> + } <nl> } <nl> <nl> void run_spec ( const test_spec * spec ) { <nl> void run_spec ( const test_spec * spec ) { <nl> / * Create client . * / <nl> servers_hostports_str = gpr_strjoin_sep ( ( const char * * ) f - > servers_hostports , <nl> f - > num_servers , " , " , NULL ) ; <nl> - gpr_asprintf ( & client_hostport , " ipv4 : % s " , servers_hostports_str ) ; <nl> + gpr_asprintf ( & client_hostport , " ipv4 : % s ? 
lb_policy = round_robin " , <nl> + servers_hostports_str ) ; <nl> client = grpc_insecure_channel_create ( client_hostport , NULL , NULL ) ; <nl> <nl> - gpr_log ( GPR_INFO , " Testing with servers = % s client = % s " , <nl> + gpr_log ( GPR_INFO , " Testing ' % s ' with servers = % s client = % s " , spec - > description , <nl> servers_hostports_str , client_hostport ) ; <nl> <nl> actual_connection_sequence = perform_request ( f , client , spec ) ; <nl> static void verify_vanilla_round_robin ( const servers_fixture * f , <nl> abort ( ) ; <nl> } <nl> } <nl> - assert_channel_connectivity ( client , GRPC_CHANNEL_READY ) ; <nl> + assert_channel_connectivity ( client , 1 , GRPC_CHANNEL_READY ) ; <nl> <nl> gpr_free ( expected_connection_sequence ) ; <nl> } <nl> static void verify_vanishing_floor_round_robin ( <nl> expected_seq_length * sizeof ( int ) ) ; <nl> <nl> / * first three elements of the sequence should be [ < 1st > , - 1 ] * / <nl> - GPR_ASSERT ( actual_connection_sequence [ 0 ] = = expected_connection_sequence [ 0 ] ) ; <nl> + if ( actual_connection_sequence [ 0 ] ! = expected_connection_sequence [ 0 ] ) { <nl> + gpr_log ( GPR_ERROR , " FAILURE : expected % d , actual % d at iter % d " , <nl> + expected_connection_sequence [ 0 ] , actual_connection_sequence [ 0 ] , 0 ) ; <nl> + print_failed_expectations ( expected_connection_sequence , <nl> + actual_connection_sequence , expected_seq_length , <nl> + 1 ) ; <nl> + abort ( ) ; <nl> + } <nl> + <nl> GPR_ASSERT ( actual_connection_sequence [ 1 ] = = - 1 ) ; <nl> <nl> + <nl> for ( i = 2 ; i < num_iters ; i + + ) { <nl> const int actual = actual_connection_sequence [ i ] ; <nl> const int expected = expected_connection_sequence [ i % expected_seq_length ] ; <nl> static void verify_total_carnage_round_robin ( <nl> <nl> / * even though we know all the servers are dead , the client is still trying <nl> * retrying , believing it ' s in a transient failure situation * / <nl> - assert_channel_connectivity ( client , GRPC_CHANNEL_TRANSIENT_FAILURE ) ; <nl> + assert_channel_connectivity ( client , 2 , GRPC_CHANNEL_TRANSIENT_FAILURE , <nl> + GRPC_CHANNEL_CONNECTING ) ; <nl> } <nl> <nl> static void verify_partial_carnage_round_robin ( <nl> static void verify_partial_carnage_round_robin ( <nl> <nl> / * even though we know all the servers are dead , the client is still trying <nl> * retrying , believing it ' s in a transient failure situation * / <nl> - assert_channel_connectivity ( client , GRPC_CHANNEL_TRANSIENT_FAILURE ) ; <nl> + assert_channel_connectivity ( client , 2 , GRPC_CHANNEL_TRANSIENT_FAILURE , <nl> + GRPC_CHANNEL_CONNECTING ) ; <nl> gpr_free ( expected_connection_sequence ) ; <nl> } <nl> <nl> static void verify_rebirth_round_robin ( const servers_fixture * f , <nl> / * first iteration succeeds * / <nl> GPR_ASSERT ( actual_connection_sequence [ 0 ] ! = - 1 ) ; <nl> <nl> - / * back up on the third iteration * / <nl> - for ( i = 3 ; i < num_iters ; i + + ) { <nl> + / * back up on the third ( or maybe fourth ) iteration * / <nl> + i = 3 ; <nl> + if ( actual_connection_sequence [ i ] = = - 1 ) { <nl> + i = 4 ; <nl> + } <nl> + for ( ; i < num_iters ; i + + ) { <nl> const int actual = actual_connection_sequence [ i ] ; <nl> const int expected = expected_connection_sequence [ i % expected_seq_length ] ; <nl> if ( actual ! 
= expected ) { <nl> static void verify_rebirth_round_robin ( const servers_fixture * f , <nl> } <nl> <nl> / * things are fine once the servers are brought back up * / <nl> - assert_channel_connectivity ( client , GRPC_CHANNEL_READY ) ; <nl> + assert_channel_connectivity ( client , 1 , GRPC_CHANNEL_READY ) ; <nl> gpr_free ( expected_connection_sequence ) ; <nl> } <nl> <nl> int main ( int argc , char * * argv ) { <nl> / * everything is fine , all servers stay up the whole time and life ' s peachy * / <nl> spec = test_spec_create ( NUM_ITERS , NUM_SERVERS ) ; <nl> spec - > verifier = verify_vanilla_round_robin ; <nl> - gpr_log ( GPR_DEBUG , " test_all_server_up " ) ; <nl> - run_spec ( spec ) ; <nl> + spec - > description = " test_all_server_up " ; <nl> + / * run_spec ( spec ) ; * / <nl> <nl> / * Kill all servers first thing in the morning * / <nl> test_spec_reset ( spec ) ; <nl> spec - > verifier = verify_total_carnage_round_robin ; <nl> + spec - > description = " test_kill_all_server " ; <nl> for ( i = 0 ; i < NUM_SERVERS ; i + + ) { <nl> spec - > kill_at [ 0 ] [ i ] = 1 ; <nl> } <nl> - gpr_log ( GPR_DEBUG , " test_kill_all_server " ) ; <nl> run_spec ( spec ) ; <nl> <nl> / * at the start of the 2nd iteration , kill all but the first and last servers . <nl> * This should knock down the server bound to be selected next * / <nl> test_spec_reset ( spec ) ; <nl> spec - > verifier = verify_vanishing_floor_round_robin ; <nl> + spec - > description = " test_kill_all_server_at_2nd_iteration " ; <nl> for ( i = 1 ; i < NUM_SERVERS - 1 ; i + + ) { <nl> spec - > kill_at [ 1 ] [ i ] = 1 ; <nl> } <nl> - gpr_log ( GPR_DEBUG , " test_kill_all_server_at_2nd_iteration " ) ; <nl> - run_spec ( spec ) ; <nl> + / * run_spec ( spec ) ; * / <nl> <nl> / * Midway , kill all servers . * / <nl> test_spec_reset ( spec ) ; <nl> spec - > verifier = verify_partial_carnage_round_robin ; <nl> + spec - > description = " test_kill_all_server_midway " ; <nl> for ( i = 0 ; i < NUM_SERVERS ; i + + ) { <nl> spec - > kill_at [ spec - > num_iters / 2 ] [ i ] = 1 ; <nl> } <nl> - gpr_log ( GPR_DEBUG , " test_kill_all_server_midway " ) ; <nl> - run_spec ( spec ) ; <nl> - <nl> + / * run_spec ( spec ) ; * / <nl> <nl> / * After first iteration , kill all servers . On the third one , bring them all <nl> * back up . * / <nl> test_spec_reset ( spec ) ; <nl> spec - > verifier = verify_rebirth_round_robin ; <nl> + spec - > description = " test_kill_all_server_after_1st_resurrect_at_3rd " ; <nl> for ( i = 0 ; i < NUM_SERVERS ; i + + ) { <nl> spec - > kill_at [ 1 ] [ i ] = 1 ; <nl> spec - > revive_at [ 3 ] [ i ] = 1 ; <nl> } <nl> - gpr_log ( GPR_DEBUG , " test_kill_all_server_after_1st_resurrect_at_3rd " ) ; <nl> - run_spec ( spec ) ; <nl> + / * run_spec ( spec ) ; * / <nl> <nl> test_spec_destroy ( spec ) ; <nl> <nl> mmm a / vsprojects / vcxproj / test / lb_policies_test / lb_policies_test . vcxproj <nl> ppp b / vsprojects / vcxproj / test / lb_policies_test / lb_policies_test . vcxproj <nl> <nl> < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> < Project DefaultTargets = " Build " ToolsVersion = " 12 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < Import Project = " . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies . openssl . props " Condition = " Exists ( ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . 1 . 0 . 2 . 3 \ build \ native \ 1 . 0 . 2 . 3 . 
props ' ) " / > <nl> < ItemGroup Label = " ProjectConfigurations " > <nl> < ProjectConfiguration Include = " Debug | Win32 " > <nl> < Configuration > Debug < / Configuration > <nl> <nl> < Import Project = " $ ( UserRootDir ) \ Microsoft . Cpp . $ ( Platform ) . user . props " Condition = " exists ( ' $ ( UserRootDir ) \ Microsoft . Cpp . $ ( Platform ) . user . props ' ) " Label = " LocalAppDataPlatform " / > <nl> < Import Project = " . . \ . . \ . . \ . . \ vsprojects \ global . props " / > <nl> < Import Project = " . . \ . . \ . . \ . . \ vsprojects \ openssl . props " / > <nl> - < Import Project = " . . \ . . \ . . \ . . \ vsprojects \ protobuf . props " / > <nl> < Import Project = " . . \ . . \ . . \ . . \ vsprojects \ winsock . props " / > <nl> < Import Project = " . . \ . . \ . . \ . . \ vsprojects \ zlib . props " / > <nl> < / ImportGroup > <nl> < PropertyGroup Label = " UserMacros " / > <nl> < PropertyGroup Condition = " ' $ ( Configuration ) ' = = ' Debug ' " > <nl> < TargetName > lb_policies_test < / TargetName > <nl> + < Linkage - grpc_dependencies_zlib > static < / Linkage - grpc_dependencies_zlib > <nl> + < Configuration - grpc_dependencies_zlib > Debug < / Configuration - grpc_dependencies_zlib > <nl> + < Configuration - grpc_dependencies_openssl > Debug < / Configuration - grpc_dependencies_openssl > <nl> < / PropertyGroup > <nl> < PropertyGroup Condition = " ' $ ( Configuration ) ' = = ' Release ' " > <nl> < TargetName > lb_policies_test < / TargetName > <nl> + < Linkage - grpc_dependencies_zlib > static < / Linkage - grpc_dependencies_zlib > <nl> + < Configuration - grpc_dependencies_zlib > Debug < / Configuration - grpc_dependencies_zlib > <nl> + < Configuration - grpc_dependencies_openssl > Debug < / Configuration - grpc_dependencies_openssl > <nl> < / PropertyGroup > <nl> < ItemDefinitionGroup Condition = " ' $ ( Configuration ) | $ ( Platform ) ' = = ' Debug | Win32 ' " > <nl> < ClCompile > <nl> <nl> < Project > { B23D3D1A - 9438 - 4EDA - BEB6 - 9A0A03D17792 } < / Project > <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> + < ItemGroup > <nl> + < None Include = " packages . config " / > <nl> + < / ItemGroup > <nl> < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . targets " / > <nl> < ImportGroup Label = " ExtensionTargets " > <nl> + < Import Project = " . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . zlib . redist . 1 . 2 . 8 . 9 \ build \ native \ grpc . dependencies . zlib . redist . targets " Condition = " Exists ( ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . zlib . redist . 1 . 2 . 8 . 9 \ build \ native \ grpc . dependencies \ grpc . dependencies . zlib . targets ' ) " / > <nl> + < Import Project = " . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . zlib . 1 . 2 . 8 . 9 \ build \ native \ grpc . dependencies . zlib . targets " Condition = " Exists ( ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . zlib . 1 . 2 . 8 . 9 \ build \ native \ grpc . dependencies \ grpc . dependencies . zlib . targets ' ) " / > <nl> + < Import Project = " . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . redist . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies . openssl . redist . targets " Condition = " Exists ( ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . redist . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies \ grpc . dependencies . openssl . targets ' ) " / > <nl> + < Import Project = " . . \ . . \ . 
. \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies . openssl . targets " Condition = " Exists ( ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies \ grpc . dependencies . openssl . targets ' ) " / > <nl> < / ImportGroup > <nl> < Target Name = " EnsureNuGetPackageBuildImports " BeforeTargets = " PrepareForBuild " > <nl> < PropertyGroup > <nl> < ErrorText > This project references NuGet package ( s ) that are missing on this computer . Enable NuGet Package Restore to download them . For more information , see http : / / go . microsoft . com / fwlink / ? LinkID = 322105 . The missing file is { 0 } . < / ErrorText > <nl> < / PropertyGroup > <nl> + < Error Condition = " ! Exists ( ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . zlib . redist . 1 . 2 . 8 . 9 \ build \ native \ grpc . dependencies . zlib . redist . targets ' ) " Text = " $ ( [ System . String ] : : Format ( ' $ ( ErrorText ) ' , ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . zlib . redist . 1 . 2 . 8 . 9 \ build \ native \ grpc . dependencies . zlib . redist . targets ' ) " / > <nl> + < Error Condition = " ! Exists ( ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . zlib . 1 . 2 . 8 . 9 \ build \ native \ grpc . dependencies . zlib . targets ' ) " Text = " $ ( [ System . String ] : : Format ( ' $ ( ErrorText ) ' , ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . zlib . 1 . 2 . 8 . 9 \ build \ native \ grpc . dependencies . zlib . targets ' ) " / > <nl> + < Error Condition = " ! Exists ( ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . redist . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies . openssl . redist . targets ' ) " Text = " $ ( [ System . String ] : : Format ( ' $ ( ErrorText ) ' , ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . redist . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies . openssl . redist . targets ' ) " / > <nl> + < Error Condition = " ! Exists ( ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies . openssl . props ' ) " Text = " $ ( [ System . String ] : : Format ( ' $ ( ErrorText ) ' , ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies . openssl . props ' ) " / > <nl> + < Error Condition = " ! Exists ( ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies . openssl . targets ' ) " Text = " $ ( [ System . String ] : : Format ( ' $ ( ErrorText ) ' , ' . . \ . . \ . . \ . . \ vsprojects \ packages \ grpc . dependencies . openssl . 1 . 0 . 2 . 3 \ build \ native \ grpc . dependencies . openssl . targets ' ) " / > <nl> < / Target > <nl> < / Project > <nl> <nl> | Usage of ? lb_policy = xxx in sockaddr_resolver | grpc/grpc | fe7a6368fc154e291dd91b002e22bca4970ef00a | 2015-09-10T18:06:46Z |
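The commit above appends a `?lb_policy=round_robin` query parameter to the sockaddr-resolver target that `lb_policies_test` builds from the server host:port list, and threads a per-spec `description` through the verifiers. A minimal sketch of pointing a client channel at such a target follows; the `ipv4:` scheme, the sample addresses, and the comma-separated list syntax are assumptions inferred from how the test composes `servers_hostports_str`, not shown verbatim in this diff:

```cpp
#include <stdio.h>
#include <grpc/grpc.h>

// Sketch: build a sockaddr-resolver target that pins the round_robin LB
// policy, the mechanism lb_policies_test exercises above. The backend
// addresses are hypothetical; the "ipv4:<addrs>?lb_policy=<name>" form is
// assumed from the test's usage.
int main(void) {
  grpc_init();
  char target[256];
  snprintf(target, sizeof(target), "ipv4:%s?lb_policy=round_robin",
           "127.0.0.1:10001,127.0.0.1:10002,127.0.0.1:10003");
  // Same creation call as in the test: no channel args, no reserved ptr.
  grpc_channel* client = grpc_insecure_channel_create(target, NULL, NULL);
  // ... issue RPCs here; round_robin should rotate across the backends ...
  grpc_channel_destroy(client);
  grpc_shutdown();
  return 0;
}
```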
mmm a / BUILD . gn <nl> ppp b / BUILD . gn <nl> config ( " internal_config " ) { <nl> include_dirs = [ " . " ] <nl> <nl> if ( is_component_build ) { <nl> - defines = [ <nl> - " V8_SHARED " , <nl> - " BUILDING_V8_SHARED " , <nl> - ] <nl> + defines = [ " BUILDING_V8_SHARED " ] <nl> } <nl> } <nl> <nl> config ( " libsampler_config " ) { <nl> # itself . <nl> config ( " external_config " ) { <nl> if ( is_component_build ) { <nl> - defines = [ <nl> - " V8_SHARED " , <nl> - " USING_V8_SHARED " , <nl> - ] <nl> + defines = [ " USING_V8_SHARED " ] <nl> } <nl> include_dirs = [ " include " ] <nl> if ( v8_enable_inspector_override ) { <nl> if ( is_component_build ) { <nl> <nl> v8_executable ( " d8 " ) { <nl> sources = [ <nl> + " $ target_gen_dir / d8 - js . cc " , <nl> " src / d8 . cc " , <nl> " src / d8 . h " , <nl> ] <nl> v8_executable ( " d8 " ) { <nl> sources + = [ " src / d8 - windows . cc " ] <nl> } <nl> <nl> - if ( ! is_component_build ) { <nl> - sources + = [ " $ target_gen_dir / d8 - js . cc " ] <nl> - } <nl> if ( v8_enable_i18n_support ) { <nl> deps + = [ " / / third_party / icu " ] <nl> } <nl> mmm a / include / v8 . h <nl> ppp b / include / v8 . h <nl> <nl> # else / / V8_OS_WIN <nl> <nl> / / Setup for Linux shared library export . <nl> - # if V8_HAS_ATTRIBUTE_VISIBILITY & & defined ( V8_SHARED ) <nl> + # if V8_HAS_ATTRIBUTE_VISIBILITY <nl> # ifdef BUILDING_V8_SHARED <nl> # define V8_EXPORT __attribute__ ( ( visibility ( " default " ) ) ) <nl> # else <nl> mmm a / src / allocation . h <nl> ppp b / src / allocation . h <nl> namespace internal { <nl> / / Called when allocation routines fail to allocate . <nl> / / This function should not return , but should terminate the current <nl> / / processing . <nl> - void FatalProcessOutOfMemory ( const char * message ) ; <nl> + V8_EXPORT_PRIVATE void FatalProcessOutOfMemory ( const char * message ) ; <nl> <nl> / / Superclass for classes managed with new & delete . <nl> - class Malloced { <nl> + class V8_EXPORT_PRIVATE Malloced { <nl> public : <nl> void * operator new ( size_t size ) { return New ( size ) ; } <nl> void operator delete ( void * p ) { Delete ( p ) ; } <nl> void DeleteArray ( T * array ) { <nl> / / The normal strdup functions use malloc . These versions of StrDup <nl> / / and StrNDup uses new and calls the FatalProcessOutOfMemory handler <nl> / / if allocation fails . <nl> - char * StrDup ( const char * str ) ; <nl> + V8_EXPORT_PRIVATE char * StrDup ( const char * str ) ; <nl> char * StrNDup ( const char * str , int n ) ; <nl> <nl> <nl> mmm a / src / basic - block - profiler . h <nl> ppp b / src / basic - block - profiler . h <nl> <nl> # include < vector > <nl> <nl> # include " src / base / macros . h " <nl> + # include " src / globals . 
h " <nl> <nl> namespace v8 { <nl> namespace internal { <nl> class BasicBlockProfiler { <nl> const DataList * data_list ( ) { return & data_list_ ; } <nl> <nl> private : <nl> - friend std : : ostream & operator < < ( std : : ostream & os , <nl> - const BasicBlockProfiler & s ) ; <nl> + friend V8_EXPORT_PRIVATE std : : ostream & operator < < ( <nl> + std : : ostream & os , const BasicBlockProfiler & s ) ; <nl> <nl> DataList data_list_ ; <nl> <nl> DISALLOW_COPY_AND_ASSIGN ( BasicBlockProfiler ) ; <nl> } ; <nl> <nl> - std : : ostream & operator < < ( std : : ostream & os , const BasicBlockProfiler & s ) ; <nl> + V8_EXPORT_PRIVATE std : : ostream & operator < < ( std : : ostream & os , <nl> + const BasicBlockProfiler & s ) ; <nl> std : : ostream & operator < < ( std : : ostream & os , const BasicBlockProfiler : : Data & s ) ; <nl> <nl> } / / namespace internal <nl> mmm a / src / d8 . cc <nl> ppp b / src / d8 . cc <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - <nl> - / / Defined when linking against shared lib on Windows . <nl> - # if defined ( USING_V8_SHARED ) & & ! defined ( V8_SHARED ) <nl> - # define V8_SHARED <nl> - # endif <nl> - <nl> # include < errno . h > <nl> # include < stdlib . h > <nl> # include < string . h > <nl> # include < sys / stat . h > <nl> <nl> - # ifdef V8_SHARED <nl> - # include < assert . h > <nl> - # endif / / V8_SHARED <nl> - <nl> - # ifndef V8_SHARED <nl> # include < algorithm > <nl> # include < fstream > <nl> # include < vector > <nl> - # endif / / ! V8_SHARED <nl> - <nl> - # ifdef V8_SHARED <nl> - # include " include / v8 - testing . h " <nl> - # endif / / V8_SHARED <nl> <nl> # ifdef ENABLE_VTUNE_JIT_INTERFACE <nl> # include " src / third_party / vtune / v8 - vtune . h " <nl> <nl> <nl> # include " include / libplatform / libplatform . h " <nl> # include " include / libplatform / v8 - tracing . h " <nl> - # ifndef V8_SHARED <nl> # include " src / api . h " <nl> # include " src / base / cpu . h " <nl> # include " src / base / debug / stack_trace . h " <nl> <nl> # include " src / snapshot / natives . h " <nl> # include " src / utils . h " <nl> # include " src / v8 . h " <nl> - # endif / / ! V8_SHARED <nl> <nl> # if ! defined ( _WIN32 ) & & ! defined ( _WIN64 ) <nl> # include < unistd . h > / / NOLINT <nl> namespace v8 { <nl> namespace { <nl> <nl> const int MB = 1024 * 1024 ; <nl> - # ifndef V8_SHARED <nl> const int kMaxWorkers = 50 ; <nl> - # endif <nl> <nl> <nl> class ShellArrayBufferAllocator : public v8 : : ArrayBuffer : : Allocator { <nl> class MockArrayBufferAllocator : public v8 : : ArrayBuffer : : Allocator { <nl> } ; <nl> <nl> <nl> - # ifndef V8_SHARED <nl> / / Predictable v8 : : Platform implementation . All background and foreground <nl> / / tasks are run immediately , delayed tasks are not executed at all . <nl> class PredictablePlatform : public Platform { <nl> class PredictablePlatform : public Platform { <nl> <nl> DISALLOW_COPY_AND_ASSIGN ( PredictablePlatform ) ; <nl> } ; <nl> - # endif / / ! V8_SHARED <nl> <nl> <nl> v8 : : Platform * g_platform = NULL ; <nl> static Local < Value > Throw ( Isolate * isolate , const char * message ) { <nl> } <nl> <nl> <nl> - # ifndef V8_SHARED <nl> bool FindInObjectList ( Local < Object > object , const Shell : : ObjectList & list ) { <nl> for ( int i = 0 ; i < list . 
length ( ) ; + + i ) { <nl> if ( list [ i ] - > StrictEquals ( object ) ) { <nl> Worker * GetWorkerFromInternalField ( Isolate * isolate , Local < Object > object ) { <nl> <nl> return worker ; <nl> } <nl> - # endif / / ! V8_SHARED <nl> <nl> <nl> } / / namespace <nl> class PerIsolateData { <nl> } ; <nl> <nl> <nl> - # ifndef V8_SHARED <nl> CounterMap * Shell : : counter_map_ ; <nl> base : : OS : : MemoryMappedFile * Shell : : counters_file_ = NULL ; <nl> CounterCollection Shell : : local_counters_ ; <nl> base : : LazyMutex Shell : : workers_mutex_ ; <nl> bool Shell : : allow_new_workers_ = true ; <nl> i : : List < Worker * > Shell : : workers_ ; <nl> i : : List < SharedArrayBuffer : : Contents > Shell : : externalized_shared_contents_ ; <nl> - # endif / / ! V8_SHARED <nl> <nl> Global < Context > Shell : : evaluation_context_ ; <nl> ArrayBuffer : : Allocator * Shell : : array_buffer_allocator ; <nl> ShellOptions Shell : : options ; <nl> base : : OnceType Shell : : quit_once_ = V8_ONCE_INIT ; <nl> <nl> - # ifndef V8_SHARED <nl> bool CounterMap : : Match ( void * key1 , void * key2 ) { <nl> const char * name1 = reinterpret_cast < const char * > ( key1 ) ; <nl> const char * name2 = reinterpret_cast < const char * > ( key2 ) ; <nl> return strcmp ( name1 , name2 ) = = 0 ; <nl> } <nl> - # endif / / ! V8_SHARED <nl> <nl> <nl> / / Converts a V8 value to a C string . <nl> bool Shell : : ExecuteString ( Isolate * isolate , Local < String > source , <nl> } <nl> DCHECK ( ! try_catch . HasCaught ( ) ) ; <nl> if ( print_result ) { <nl> - # if ! defined ( V8_SHARED ) <nl> if ( options . test_shell ) { <nl> - # endif <nl> if ( ! result - > IsUndefined ( ) ) { <nl> / / If all went well and the result wasn ' t undefined then print <nl> / / the returned value . <nl> bool Shell : : ExecuteString ( Isolate * isolate , Local < String > source , <nl> fwrite ( * str , sizeof ( * * str ) , str . length ( ) , stdout ) ; <nl> printf ( " \ n " ) ; <nl> } <nl> - # if ! defined ( V8_SHARED ) <nl> } else { <nl> v8 : : String : : Utf8Value str ( Stringify ( isolate , result ) ) ; <nl> fwrite ( * str , sizeof ( * * str ) , str . length ( ) , stdout ) ; <nl> printf ( " \ n " ) ; <nl> } <nl> - # endif <nl> } <nl> return true ; <nl> } <nl> int PerIsolateData : : RealmIndexOrThrow ( <nl> } <nl> <nl> <nl> - # ifndef V8_SHARED <nl> / / performance . now ( ) returns a time stamp as double , measured in milliseconds . <nl> / / When FLAG_verify_predictable mode is enabled it returns result of <nl> / / v8 : : Platform : : MonotonicallyIncreasingTime ( ) . <nl> void Shell : : PerformanceNow ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> args . GetReturnValue ( ) . Set ( delta . InMillisecondsF ( ) ) ; <nl> } <nl> } <nl> - # endif / / ! V8_SHARED <nl> <nl> <nl> / / Realm . current ( ) returns the index of the currently active realm . <nl> void Shell : : Load ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> } <nl> <nl> <nl> - # ifndef V8_SHARED <nl> void Shell : : WorkerNew ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> Isolate * isolate = args . GetIsolate ( ) ; <nl> HandleScope handle_scope ( isolate ) ; <nl> void Shell : : WorkerTerminate ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> <nl> worker - > Terminate ( ) ; <nl> } <nl> - # endif / / ! 
V8_SHARED <nl> <nl> <nl> void Shell : : QuitOnce ( v8 : : FunctionCallbackInfo < v8 : : Value > * args ) { <nl> int exit_code = ( * args ) [ 0 ] <nl> - > Int32Value ( args - > GetIsolate ( ) - > GetCurrentContext ( ) ) <nl> . FromMaybe ( 0 ) ; <nl> - # ifndef V8_SHARED <nl> CleanupWorkers ( ) ; <nl> - # endif / / ! V8_SHARED <nl> OnExit ( args - > GetIsolate ( ) ) ; <nl> Exit ( exit_code ) ; <nl> } <nl> void Shell : : Version ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> <nl> void Shell : : ReportException ( Isolate * isolate , v8 : : TryCatch * try_catch ) { <nl> HandleScope handle_scope ( isolate ) ; <nl> - # ifndef V8_SHARED <nl> Local < Context > context ; <nl> bool enter_context = ! isolate - > InContext ( ) ; <nl> if ( enter_context ) { <nl> context = Local < Context > : : New ( isolate , evaluation_context_ ) ; <nl> context - > Enter ( ) ; <nl> } <nl> - # endif / / ! V8_SHARED <nl> v8 : : String : : Utf8Value exception ( try_catch - > Exception ( ) ) ; <nl> const char * exception_string = ToCString ( exception ) ; <nl> Local < Message > message = try_catch - > Message ( ) ; <nl> void Shell : : ReportException ( Isolate * isolate , v8 : : TryCatch * try_catch ) { <nl> } <nl> } <nl> printf ( " \ n " ) ; <nl> - # ifndef V8_SHARED <nl> if ( enter_context ) context - > Exit ( ) ; <nl> - # endif / / ! V8_SHARED <nl> } <nl> <nl> <nl> - # ifndef V8_SHARED <nl> int32_t * Counter : : Bind ( const char * name , bool is_histogram ) { <nl> int i ; <nl> for ( i = 0 ; i < kMaxNameSize - 1 & & name [ i ] ; i + + ) <nl> Local < String > Shell : : Stringify ( Isolate * isolate , Local < Value > value ) { <nl> if ( result . IsEmpty ( ) ) return String : : Empty ( isolate ) ; <nl> return result . ToLocalChecked ( ) . As < String > ( ) ; <nl> } <nl> - # endif / / ! V8_SHARED <nl> <nl> <nl> Local < ObjectTemplate > Shell : : CreateGlobalTemplate ( Isolate * isolate ) { <nl> Local < ObjectTemplate > Shell : : CreateGlobalTemplate ( Isolate * isolate ) { <nl> . ToLocalChecked ( ) , <nl> realm_template ) ; <nl> <nl> - # ifndef V8_SHARED <nl> Local < ObjectTemplate > performance_template = ObjectTemplate : : New ( isolate ) ; <nl> performance_template - > Set ( <nl> String : : NewFromUtf8 ( isolate , " now " , NewStringType : : kNormal ) <nl> Local < ObjectTemplate > Shell : : CreateGlobalTemplate ( Isolate * isolate ) { <nl> String : : NewFromUtf8 ( isolate , " Worker " , NewStringType : : kNormal ) <nl> . ToLocalChecked ( ) , <nl> worker_fun_template ) ; <nl> - # endif / / ! V8_SHARED <nl> <nl> Local < ObjectTemplate > os_templ = ObjectTemplate : : New ( isolate ) ; <nl> AddOSMethods ( isolate , os_templ ) ; <nl> static void EmptyMessageCallback ( Local < Message > message , Local < Value > error ) { <nl> } <nl> <nl> void Shell : : Initialize ( Isolate * isolate ) { <nl> - # ifndef V8_SHARED <nl> / / Set up counters <nl> if ( i : : StrLength ( i : : FLAG_map_counters ) ! = 0 ) <nl> MapCounters ( isolate , i : : FLAG_map_counters ) ; <nl> - # endif / / ! V8_SHARED <nl> / / Disable default message reporting . <nl> isolate - > AddMessageListener ( EmptyMessageCallback ) ; <nl> } <nl> <nl> <nl> Local < Context > Shell : : CreateEvaluationContext ( Isolate * isolate ) { <nl> - # ifndef V8_SHARED <nl> / / This needs to be a critical section since this is not thread - safe <nl> base : : LockGuard < base : : Mutex > lock_guard ( context_mutex_ . Pointer ( ) ) ; <nl> - # endif / / ! 
V8_SHARED <nl> / / Initialize the global objects <nl> Local < ObjectTemplate > global_template = CreateGlobalTemplate ( isolate ) ; <nl> EscapableHandleScope handle_scope ( isolate ) ; <nl> Local < Context > Shell : : CreateEvaluationContext ( Isolate * isolate ) { <nl> DCHECK ( ! context . IsEmpty ( ) ) ; <nl> Context : : Scope scope ( context ) ; <nl> <nl> - # ifndef V8_SHARED <nl> i : : Factory * factory = reinterpret_cast < i : : Isolate * > ( isolate ) - > factory ( ) ; <nl> i : : JSArguments js_args = i : : FLAG_js_arguments ; <nl> i : : Handle < i : : FixedArray > arguments_array = <nl> Local < Context > Shell : : CreateEvaluationContext ( Isolate * isolate ) { <nl> . ToLocalChecked ( ) , <nl> Utils : : ToLocal ( arguments_jsarray ) ) <nl> . FromJust ( ) ; <nl> - # endif / / ! V8_SHARED <nl> return handle_scope . Escape ( context ) ; <nl> } <nl> <nl> void Shell : : Exit ( int exit_code ) { <nl> } <nl> <nl> <nl> - # ifndef V8_SHARED <nl> struct CounterAndKey { <nl> Counter * counter ; <nl> const char * key ; <nl> void Shell : : WriteIgnitionDispatchCountersFile ( v8 : : Isolate * isolate ) { <nl> JSON : : Stringify ( context , dispatch_counters ) . ToLocalChecked ( ) ) ; <nl> } <nl> <nl> - # endif / / ! V8_SHARED <nl> - <nl> <nl> void Shell : : OnExit ( v8 : : Isolate * isolate ) { <nl> - # ifndef V8_SHARED <nl> if ( i : : FLAG_dump_counters ) { <nl> int number_of_counters = 0 ; <nl> for ( CounterMap : : Iterator i ( counter_map_ ) ; i . More ( ) ; i . Next ( ) ) { <nl> void Shell : : OnExit ( v8 : : Isolate * isolate ) { <nl> <nl> delete counters_file_ ; <nl> delete counter_map_ ; <nl> - # endif / / ! V8_SHARED <nl> } <nl> <nl> <nl> void Shell : : RunShell ( Isolate * isolate ) { <nl> <nl> <nl> SourceGroup : : ~ SourceGroup ( ) { <nl> - # ifndef V8_SHARED <nl> delete thread_ ; <nl> thread_ = NULL ; <nl> - # endif / / ! V8_SHARED <nl> } <nl> <nl> <nl> Local < String > SourceGroup : : ReadFile ( Isolate * isolate , const char * name ) { <nl> } <nl> <nl> <nl> - # ifndef V8_SHARED <nl> base : : Thread : : Options SourceGroup : : GetThreadOptions ( ) { <nl> / / On some systems ( OSX 10 . 6 ) the stack size default is 0 . 5Mb or less <nl> / / which is not enough to parse the big literal expressions used in tests . <nl> void Worker : : PostMessageOut ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> delete data ; <nl> } <nl> } <nl> - # endif / / ! V8_SHARED <nl> <nl> <nl> void SetFlagsFromString ( const char * flags ) { <nl> bool Shell : : SetOptions ( int argc , char * argv [ ] ) { <nl> / / JavaScript engines . <nl> continue ; <nl> } else if ( strcmp ( argv [ i ] , " - - isolate " ) = = 0 ) { <nl> - # ifdef V8_SHARED <nl> - printf ( " D8 with shared library does not support multi - threading \ n " ) ; <nl> - return false ; <nl> - # endif / / V8_SHARED <nl> options . num_isolates + + ; <nl> } else if ( strcmp ( argv [ i ] , " - - dump - heap - constants " ) = = 0 ) { <nl> - # ifdef V8_SHARED <nl> - printf ( " D8 with shared library does not support constant dumping \ n " ) ; <nl> - return false ; <nl> - # else <nl> options . dump_heap_constants = true ; <nl> argv [ i ] = NULL ; <nl> - # endif / / V8_SHARED <nl> } else if ( strcmp ( argv [ i ] , " - - throws " ) = = 0 ) { <nl> options . expected_to_throw = true ; <nl> argv [ i ] = NULL ; <nl> } else if ( strncmp ( argv [ i ] , " - - icu - data - file = " , 16 ) = = 0 ) { <nl> options . 
icu_data_file = argv [ i ] + 16 ; <nl> argv [ i ] = NULL ; <nl> - # ifdef V8_SHARED <nl> - } else if ( strcmp ( argv [ i ] , " - - dump - counters " ) = = 0 ) { <nl> - printf ( " D8 with shared library does not include counters \ n " ) ; <nl> - return false ; <nl> - # endif / / V8_SHARED <nl> # ifdef V8_USE_EXTERNAL_STARTUP_DATA <nl> } else if ( strncmp ( argv [ i ] , " - - natives_blob = " , 15 ) = = 0 ) { <nl> options . natives_blob = argv [ i ] + 15 ; <nl> bool Shell : : SetOptions ( int argc , char * argv [ ] ) { <nl> <nl> <nl> int Shell : : RunMain ( Isolate * isolate , int argc , char * argv [ ] , bool last_run ) { <nl> - # ifndef V8_SHARED <nl> for ( int i = 1 ; i < options . num_isolates ; + + i ) { <nl> options . isolate_sources [ i ] . StartExecuteInThread ( ) ; <nl> } <nl> - # endif / / ! V8_SHARED <nl> { <nl> HandleScope scope ( isolate ) ; <nl> Local < Context > context = CreateEvaluationContext ( isolate ) ; <nl> int Shell : : RunMain ( Isolate * isolate , int argc , char * argv [ ] , bool last_run ) { <nl> } <nl> } <nl> CollectGarbage ( isolate ) ; <nl> - # ifndef V8_SHARED <nl> for ( int i = 1 ; i < options . num_isolates ; + + i ) { <nl> if ( last_run ) { <nl> options . isolate_sources [ i ] . JoinThread ( ) ; <nl> int Shell : : RunMain ( Isolate * isolate , int argc , char * argv [ ] , bool last_run ) { <nl> } <nl> } <nl> CleanupWorkers ( ) ; <nl> - # endif / / ! V8_SHARED <nl> return 0 ; <nl> } <nl> <nl> void Shell : : CollectGarbage ( Isolate * isolate ) { <nl> <nl> <nl> void Shell : : EmptyMessageQueues ( Isolate * isolate ) { <nl> - # ifndef V8_SHARED <nl> if ( ! i : : FLAG_verify_predictable ) { <nl> - # endif <nl> while ( v8 : : platform : : PumpMessageLoop ( g_platform , isolate ) ) continue ; <nl> - # ifndef V8_SHARED <nl> } <nl> - # endif <nl> } <nl> <nl> <nl> - # ifndef V8_SHARED <nl> bool Shell : : SerializeValue ( Isolate * isolate , Local < Value > value , <nl> const ObjectList & to_transfer , <nl> ObjectList * seen_objects , <nl> static void DumpHeapConstants ( i : : Isolate * isolate ) { <nl> printf ( " } \ n " ) ; <nl> # undef ROOT_LIST_CASE <nl> } <nl> - # endif / / ! V8_SHARED <nl> <nl> <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> std : : ofstream trace_file ; <nl> - # ifndef V8_SHARED <nl> v8 : : base : : debug : : EnableInProcessStackDumping ( ) ; <nl> - # endif <nl> # if ( defined ( _WIN32 ) | | defined ( _WIN64 ) ) <nl> UINT new_flags = <nl> SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX ; <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> # endif / / defined ( _WIN32 ) | | defined ( _WIN64 ) <nl> if ( ! SetOptions ( argc , argv ) ) return 1 ; <nl> v8 : : V8 : : InitializeICUDefaultLocation ( argv [ 0 ] , options . icu_data_file ) ; <nl> - # ifndef V8_SHARED <nl> g_platform = i : : FLAG_verify_predictable <nl> ? new PredictablePlatform ( ) <nl> : v8 : : platform : : CreateDefaultPlatform ( ) ; <nl> - # else <nl> - g_platform = v8 : : platform : : CreateDefaultPlatform ( ) ; <nl> - # endif / / ! V8_SHARED <nl> <nl> v8 : : V8 : : InitializePlatform ( g_platform ) ; <nl> v8 : : V8 : : Initialize ( ) ; <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> # ifdef ENABLE_VTUNE_JIT_INTERFACE <nl> create_params . code_event_handler = vTune : : GetVtuneCodeEventHandler ( ) ; <nl> # endif <nl> - # ifndef V8_SHARED <nl> create_params . constraints . 
ConfigureDefaults ( <nl> base : : SysInfo : : AmountOfPhysicalMemory ( ) , <nl> base : : SysInfo : : AmountOfVirtualMemory ( ) ) ; <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> create_params . create_histogram_callback = CreateHistogram ; <nl> create_params . add_histogram_sample_callback = AddHistogramSample ; <nl> } <nl> - # endif <nl> Isolate * isolate = Isolate : : New ( create_params ) ; <nl> { <nl> Isolate : : Scope scope ( isolate ) ; <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> } <nl> tracing_controller - > Initialize ( trace_buffer ) ; <nl> tracing_controller - > StartTracing ( trace_config ) ; <nl> - # ifndef V8_SHARED <nl> if ( ! i : : FLAG_verify_predictable ) { <nl> platform : : SetTracingController ( g_platform , tracing_controller ) ; <nl> } <nl> - # else <nl> - platform : : SetTracingController ( g_platform , tracing_controller ) ; <nl> - # endif <nl> } <nl> <nl> - # ifndef V8_SHARED <nl> if ( options . dump_heap_constants ) { <nl> DumpHeapConstants ( reinterpret_cast < i : : Isolate * > ( isolate ) ) ; <nl> return 0 ; <nl> } <nl> - # endif <nl> <nl> if ( options . stress_opt | | options . stress_deopt ) { <nl> Testing : : SetStressRunType ( options . stress_opt <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> } <nl> printf ( " = = = = = = = = Full Deoptimization = = = = = = = \ n " ) ; <nl> Testing : : DeoptimizeAll ( isolate ) ; <nl> - # if ! defined ( V8_SHARED ) <nl> } else if ( i : : FLAG_stress_runs > 0 ) { <nl> options . stress_runs = i : : FLAG_stress_runs ; <nl> for ( int i = 0 ; i < options . stress_runs & & result = = 0 ; i + + ) { <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> bool last_run = i = = options . stress_runs - 1 ; <nl> result = RunMain ( isolate , argc , argv , last_run ) ; <nl> } <nl> - # endif <nl> } else { <nl> bool last_run = true ; <nl> result = RunMain ( isolate , argc , argv , last_run ) ; <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> RunShell ( isolate ) ; <nl> } <nl> <nl> - # ifndef V8_SHARED <nl> if ( i : : FLAG_ignition & & i : : FLAG_trace_ignition_dispatches & & <nl> i : : FLAG_trace_ignition_dispatches_output_file ! = nullptr ) { <nl> WriteIgnitionDispatchCountersFile ( isolate ) ; <nl> } <nl> - # endif <nl> <nl> / / Shut down contexts and collect garbage . <nl> evaluation_context_ . Reset ( ) ; <nl> - # ifndef V8_SHARED <nl> stringify_function_ . Reset ( ) ; <nl> - # endif / / ! V8_SHARED <nl> CollectGarbage ( isolate ) ; <nl> } <nl> OnExit ( isolate ) ; <nl> - # ifndef V8_SHARED <nl> / / Dump basic block profiling data . <nl> if ( i : : BasicBlockProfiler * profiler = <nl> reinterpret_cast < i : : Isolate * > ( isolate ) - > basic_block_profiler ( ) ) { <nl> i : : OFStream os ( stdout ) ; <nl> os < < * profiler ; <nl> } <nl> - # endif / / ! V8_SHARED <nl> isolate - > Dispose ( ) ; <nl> V8 : : Dispose ( ) ; <nl> V8 : : ShutdownPlatform ( ) ; <nl> mmm a / src / d8 . gyp <nl> ppp b / src / d8 . gyp <nl> <nl> ' sources ' : [ <nl> ' d8 . h ' , <nl> ' d8 . cc ' , <nl> + ' < ( SHARED_INTERMEDIATE_DIR ) / d8 - js . 
cc ' , <nl> ] , <nl> ' conditions ' : [ <nl> [ ' want_separate_host_toolset = = 1 ' , { <nl> ' toolsets ' : [ ' target ' , ] , <nl> + ' dependencies ' : [ <nl> + ' d8_js2c # host ' , <nl> + ] , <nl> + } , { <nl> + ' dependencies ' : [ <nl> + ' d8_js2c ' , <nl> + ] , <nl> } ] , <nl> [ ' ( OS = = " linux " or OS = = " mac " or OS = = " freebsd " or OS = = " netbsd " \ <nl> or OS = = " openbsd " or OS = = " solaris " or OS = = " android " \ <nl> <nl> ' sources ' : [ ' d8 - windows . cc ' , ] <nl> } ] , <nl> [ ' component ! = " shared_library " ' , { <nl> - ' sources ' : [ <nl> - ' < ( SHARED_INTERMEDIATE_DIR ) / d8 - js . cc ' , <nl> - ] , <nl> ' conditions ' : [ <nl> - [ ' want_separate_host_toolset = = 1 ' , { <nl> - ' dependencies ' : [ <nl> - ' d8_js2c # host ' , <nl> - ] , <nl> - } , { <nl> - ' dependencies ' : [ <nl> - ' d8_js2c ' , <nl> - ] , <nl> - } ] , <nl> [ ' v8_postmortem_support = = " true " ' , { <nl> ' xcode_settings ' : { <nl> ' OTHER_LDFLAGS ' : [ <nl> mmm a / src / d8 . h <nl> ppp b / src / d8 . h <nl> <nl> # ifndef V8_D8_H_ <nl> # define V8_D8_H_ <nl> <nl> - # ifndef V8_SHARED <nl> # include " src / allocation . h " <nl> # include " src / base / hashmap . h " <nl> # include " src / base / platform / time . h " <nl> # include " src / list . h " <nl> - # else <nl> - # include " include / v8 . h " <nl> - # include " src / base / compiler - specific . h " <nl> - # endif / / ! V8_SHARED <nl> <nl> # include " src / base / once . h " <nl> <nl> <nl> namespace v8 { <nl> <nl> <nl> - # ifndef V8_SHARED <nl> / / A single counter in a counter collection . <nl> class Counter { <nl> public : <nl> class CounterMap { <nl> static bool Match ( void * key1 , void * key2 ) ; <nl> base : : HashMap hash_map_ ; <nl> } ; <nl> - # endif / / ! V8_SHARED <nl> <nl> <nl> class SourceGroup { <nl> public : <nl> SourceGroup ( ) : <nl> - # ifndef V8_SHARED <nl> next_semaphore_ ( 0 ) , <nl> done_semaphore_ ( 0 ) , <nl> thread_ ( NULL ) , <nl> - # endif / / ! V8_SHARED <nl> argv_ ( NULL ) , <nl> begin_offset_ ( 0 ) , <nl> end_offset_ ( 0 ) { } <nl> class SourceGroup { <nl> <nl> void Execute ( Isolate * isolate ) ; <nl> <nl> - # ifndef V8_SHARED <nl> void StartExecuteInThread ( ) ; <nl> void WaitForThread ( ) ; <nl> void JoinThread ( ) ; <nl> class SourceGroup { <nl> base : : Semaphore next_semaphore_ ; <nl> base : : Semaphore done_semaphore_ ; <nl> base : : Thread * thread_ ; <nl> - # endif / / ! V8_SHARED <nl> <nl> void ExitShell ( int exit_code ) ; <nl> Local < String > ReadFile ( Isolate * isolate , const char * name ) ; <nl> class SourceGroup { <nl> int end_offset_ ; <nl> } ; <nl> <nl> - # ifndef V8_SHARED <nl> enum SerializationTag { <nl> kSerializationTagUndefined , <nl> kSerializationTagNull , <nl> class Worker { <nl> char * script_ ; <nl> base : : Atomic32 running_ ; <nl> } ; <nl> - # endif / / ! V8_SHARED <nl> <nl> <nl> class ShellOptions { <nl> class ShellOptions { <nl> const char * trace_config ; <nl> } ; <nl> <nl> - # ifdef V8_SHARED <nl> - class Shell { <nl> - # else <nl> class Shell : public i : : AllStatic { <nl> - # endif / / V8_SHARED <nl> - <nl> public : <nl> enum SourceType { SCRIPT , MODULE } ; <nl> <nl> class Shell : public i : : AllStatic { <nl> static void CollectGarbage ( Isolate * isolate ) ; <nl> static void EmptyMessageQueues ( Isolate * isolate ) ; <nl> <nl> - # ifndef V8_SHARED <nl> / / TODO ( binji ) : stupid implementation for now . Is there an easy way to hash an <nl> / / object for use in base : : HashMap ? By pointer ? 
<nl> typedef i : : List < Local < Object > > ObjectList ; <nl> class Shell : public i : : AllStatic { <nl> static void MapCounters ( v8 : : Isolate * isolate , const char * name ) ; <nl> <nl> static void PerformanceNow ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) ; <nl> - # endif / / ! V8_SHARED <nl> <nl> static void RealmCurrent ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) ; <nl> static void RealmOwner ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) ; <nl> class Shell : public i : : AllStatic { <nl> private : <nl> static Global < Context > evaluation_context_ ; <nl> static base : : OnceType quit_once_ ; <nl> - # ifndef V8_SHARED <nl> static Global < Function > stringify_function_ ; <nl> static CounterMap * counter_map_ ; <nl> / / We statically allocate a set of local counters to be used if we <nl> class Shell : public i : : AllStatic { <nl> static void WriteIgnitionDispatchCountersFile ( v8 : : Isolate * isolate ) ; <nl> static Counter * GetCounter ( const char * name , bool is_histogram ) ; <nl> static Local < String > Stringify ( Isolate * isolate , Local < Value > value ) ; <nl> - # endif / / ! V8_SHARED <nl> static void Initialize ( Isolate * isolate ) ; <nl> static void RunShell ( Isolate * isolate ) ; <nl> static bool SetOptions ( int argc , char * argv [ ] ) ; <nl> mmm a / src / factory . h <nl> ppp b / src / factory . h <nl> class Factory final { <nl> byte kind ) ; <nl> <nl> / / Allocates a fixed array initialized with undefined values . <nl> - Handle < FixedArray > NewFixedArray ( <nl> - int size , <nl> - PretenureFlag pretenure = NOT_TENURED ) ; <nl> + V8_EXPORT_PRIVATE Handle < FixedArray > NewFixedArray ( <nl> + int size , PretenureFlag pretenure = NOT_TENURED ) ; <nl> <nl> / / Allocate a new fixed array with non - existing entries ( the hole ) . <nl> Handle < FixedArray > NewFixedArrayWithHoles ( <nl> class Factory final { <nl> <nl> / / UTF8 strings are pretenured when used for regexp literal patterns and <nl> / / flags in the parser . <nl> - MUST_USE_RESULT MaybeHandle < String > NewStringFromUtf8 ( <nl> - Vector < const char > str , <nl> - PretenureFlag pretenure = NOT_TENURED ) ; <nl> + MUST_USE_RESULT V8_EXPORT_PRIVATE MaybeHandle < String > NewStringFromUtf8 ( <nl> + Vector < const char > str , PretenureFlag pretenure = NOT_TENURED ) ; <nl> <nl> MUST_USE_RESULT MaybeHandle < String > NewStringFromTwoByte ( <nl> Vector < const uc16 > str , <nl> class Factory final { <nl> } <nl> <nl> / / Create a JSArray with the given elements . <nl> - Handle < JSArray > NewJSArrayWithElements ( Handle < FixedArrayBase > elements , <nl> - ElementsKind elements_kind , int length , <nl> - PretenureFlag pretenure = NOT_TENURED ) ; <nl> + V8_EXPORT_PRIVATE Handle < JSArray > NewJSArrayWithElements ( <nl> + Handle < FixedArrayBase > elements , ElementsKind elements_kind , int length , <nl> + PretenureFlag pretenure = NOT_TENURED ) ; <nl> <nl> - Handle < JSArray > NewJSArrayWithElements ( <nl> + V8_EXPORT_PRIVATE Handle < JSArray > NewJSArrayWithElements ( <nl> Handle < FixedArrayBase > elements , <nl> ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND , <nl> PretenureFlag pretenure = NOT_TENURED ) { <nl> mmm a / src / flag - definitions . h <nl> ppp b / src / flag - definitions . h <nl> <nl> / / this will just be an extern declaration , but for a readonly flag we let the <nl> / / compiler make better optimizations by giving it the value . 
<nl> # if defined ( FLAG_MODE_DECLARE ) <nl> - # define FLAG_FULL ( ftype , ctype , nam , def , cmt ) extern ctype FLAG_ # # nam ; <nl> + # define FLAG_FULL ( ftype , ctype , nam , def , cmt ) \ <nl> + V8_EXPORT_PRIVATE extern ctype FLAG_ # # nam ; <nl> # define FLAG_READONLY ( ftype , ctype , nam , def , cmt ) \ <nl> static ctype const FLAG_ # # nam = def ; <nl> <nl> / / We want to supply the actual storage and value for the flag variable in the <nl> / / . cc file . We only do this for writable flags . <nl> # elif defined ( FLAG_MODE_DEFINE ) <nl> - # define FLAG_FULL ( ftype , ctype , nam , def , cmt ) ctype FLAG_ # # nam = def ; <nl> + # ifdef USING_V8_SHARED <nl> + # define FLAG_FULL ( ftype , ctype , nam , def , cmt ) \ <nl> + V8_EXPORT_PRIVATE extern ctype FLAG_ # # nam ; <nl> + # else <nl> + # define FLAG_FULL ( ftype , ctype , nam , def , cmt ) \ <nl> + V8_EXPORT_PRIVATE ctype FLAG_ # # nam = def ; <nl> + # endif <nl> <nl> / / We need to define all of our default values so that the Flag structure can <nl> / / access them by pointer . These are just used internally inside of one . cc , <nl> mmm a / src / globals . h <nl> ppp b / src / globals . h <nl> <nl> # include " src / base / logging . h " <nl> # include " src / base / macros . h " <nl> <nl> + # ifdef V8_OS_WIN <nl> + <nl> + / / Setup for Windows shared library export . <nl> + # ifdef BUILDING_V8_SHARED <nl> + # define V8_EXPORT_PRIVATE __declspec ( dllexport ) <nl> + # elif USING_V8_SHARED <nl> + # define V8_EXPORT_PRIVATE __declspec ( dllimport ) <nl> + # else <nl> + # define V8_EXPORT_PRIVATE <nl> + # endif / / BUILDING_V8_SHARED <nl> + <nl> + # else / / V8_OS_WIN <nl> + <nl> + / / Setup for Linux shared library export . <nl> + # if V8_HAS_ATTRIBUTE_VISIBILITY <nl> + # ifdef BUILDING_V8_SHARED <nl> + # define V8_EXPORT_PRIVATE __attribute__ ( ( visibility ( " default " ) ) ) <nl> + # else <nl> + # define V8_EXPORT_PRIVATE <nl> + # endif <nl> + # else <nl> + # define V8_EXPORT_PRIVATE <nl> + # endif <nl> + <nl> + # endif / / V8_OS_WIN <nl> + <nl> / / Unfortunately , the INFINITY macro cannot be used with the ' - pedantic ' <nl> / / warning flag and certain versions of GCC due to a bug : <nl> / / http : / / gcc . gnu . org / bugzilla / show_bug . cgi ? id = 11931 <nl> mmm a / src / heap / heap . h <nl> ppp b / src / heap / heap . h <nl> class AllSpaces BASE_EMBEDDED { <nl> <nl> / / Space iterator for iterating over all old spaces of the heap : Old space <nl> / / and code space . Returns each space in turn , and null when it is done . <nl> - class OldSpaces BASE_EMBEDDED { <nl> + class V8_EXPORT_PRIVATE OldSpaces BASE_EMBEDDED { <nl> public : <nl> explicit OldSpaces ( Heap * heap ) : heap_ ( heap ) , counter_ ( OLD_SPACE ) { } <nl> OldSpace * next ( ) ; <nl> mmm a / src / heap / incremental - marking . h <nl> ppp b / src / heap / incremental - marking . h <nl> class IncrementalMarking { <nl> INLINE ( void RecordWriteOfCodeEntry ( JSFunction * host , Object * * slot , <nl> Code * value ) ) ; <nl> <nl> - <nl> - void RecordWriteSlow ( HeapObject * obj , Object * * slot , Object * value ) ; <nl> + V8_EXPORT_PRIVATE void RecordWriteSlow ( HeapObject * obj , Object * * slot , <nl> + Object * value ) ; <nl> void RecordWriteIntoCodeSlow ( Code * host , RelocInfo * rinfo , Object * value ) ; <nl> void RecordWriteOfCodeEntrySlow ( JSFunction * host , Object * * slot , Code * value ) ; <nl> void RecordCodeTargetPatch ( Code * host , Address pc , HeapObject * value ) ; <nl> mmm a / src / heap / mark - compact . 
h <nl> ppp b / src / heap / mark - compact . h <nl> class EvacuationScope BASE_EMBEDDED { <nl> MarkCompactCollector * collector_ ; <nl> } ; <nl> <nl> - <nl> - const char * AllocationSpaceName ( AllocationSpace space ) ; <nl> + V8_EXPORT_PRIVATE const char * AllocationSpaceName ( AllocationSpace space ) ; <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / heap / spaces . h <nl> ppp b / src / heap / spaces . h <nl> class MemoryChunk { <nl> } <nl> inline LocalArrayBufferTracker * local_tracker ( ) { return local_tracker_ ; } <nl> <nl> - void AllocateOldToNewSlots ( ) ; <nl> + V8_EXPORT_PRIVATE void AllocateOldToNewSlots ( ) ; <nl> void ReleaseOldToNewSlots ( ) ; <nl> void AllocateOldToOldSlots ( ) ; <nl> void ReleaseOldToOldSlots ( ) ; <nl> class MemoryAllocator { <nl> / / method which is used to avoid using virtual functions <nl> / / iterating a specific space . <nl> <nl> - class ObjectIterator : public Malloced { <nl> + class V8_EXPORT_PRIVATE ObjectIterator : public Malloced { <nl> public : <nl> virtual ~ ObjectIterator ( ) { } <nl> virtual HeapObject * Next ( ) = 0 ; <nl> class PageRange { <nl> / / If objects are allocated in the page during iteration the iterator may <nl> / / or may not iterate over those objects . The caller must create a new <nl> / / iterator in order to be sure to visit these new objects . <nl> - class HeapObjectIterator : public ObjectIterator { <nl> + class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator { <nl> public : <nl> / / Creates a new object iterator in a given space . <nl> explicit HeapObjectIterator ( PagedSpace * space ) ; <nl> mmm a / src / inspector / BUILD . gn <nl> ppp b / src / inspector / BUILD . gn <nl> config ( " inspector_config " ) { <nl> ] <nl> } <nl> if ( is_component_build ) { <nl> - defines = [ <nl> - " V8_SHARED " , <nl> - " BUILDING_V8_SHARED " , <nl> - ] <nl> + defines = [ " BUILDING_V8_SHARED " ] <nl> } <nl> } <nl> <nl> mmm a / src / interpreter / interpreter . h <nl> ppp b / src / interpreter / interpreter . h <nl> class Interpreter { <nl> void TraceCodegen ( Handle < Code > code ) ; <nl> const char * LookupNameOfBytecodeHandler ( Code * code ) ; <nl> <nl> - Local < v8 : : Object > GetDispatchCountersObject ( ) ; <nl> + V8_EXPORT_PRIVATE Local < v8 : : Object > GetDispatchCountersObject ( ) ; <nl> <nl> Address dispatch_table_address ( ) { <nl> return reinterpret_cast < Address > ( & dispatch_table_ [ 0 ] ) ; <nl> mmm a / src / ostreams . h <nl> ppp b / src / ostreams . h <nl> <nl> <nl> # include " include / v8config . h " <nl> # include " src / base / macros . h " <nl> + # include " src / globals . h " <nl> <nl> namespace v8 { <nl> namespace internal { <nl> class OFStreamBase : public std : : streambuf { <nl> <nl> <nl> / / An output stream writing to a file . <nl> - class OFStream : public std : : ostream { <nl> + class V8_EXPORT_PRIVATE OFStream : public std : : ostream { <nl> public : <nl> explicit OFStream ( FILE * f ) ; <nl> virtual ~ OFStream ( ) ; <nl> mmm a / src / snapshot / natives . h <nl> ppp b / src / snapshot / natives . h <nl> enum NativeType { <nl> TEST <nl> } ; <nl> <nl> + / / Extra handling for V8_EXPORT_PRIVATE in combination with USING_V8_SHARED <nl> + / / since definition of methods of classes marked as dllimport is not allowed . 
<nl> template < NativeType type > <nl> + # ifdef USING_V8_SHARED <nl> class NativesCollection { <nl> + # else <nl> + class V8_EXPORT_PRIVATE NativesCollection { <nl> + # endif / / USING_V8_SHARED <nl> + <nl> public : <nl> / / The following methods are implemented in js2c - generated code : <nl> <nl> mmm a / src / utils . h <nl> ppp b / src / utils . h <nl> void init_memcopy_functions ( Isolate * isolate ) ; <nl> const int kMinComplexMemCopy = 64 ; <nl> <nl> / / Copy memory area . No restrictions . <nl> - void MemMove ( void * dest , const void * src , size_t size ) ; <nl> + V8_EXPORT_PRIVATE void MemMove ( void * dest , const void * src , size_t size ) ; <nl> typedef void ( * MemMoveFunction ) ( void * dest , const void * src , size_t size ) ; <nl> <nl> / / Keep the distinction of " move " vs . " copy " for the benefit of other <nl> V8_INLINE void MemCopy ( void * dest , const void * src , size_t size ) { <nl> ( * memcopy_uint8_function ) ( reinterpret_cast < uint8_t * > ( dest ) , <nl> reinterpret_cast < const uint8_t * > ( src ) , size ) ; <nl> } <nl> - V8_INLINE void MemMove ( void * dest , const void * src , size_t size ) { <nl> + V8_EXPORT_PRIVATE V8_INLINE void MemMove ( void * dest , const void * src , <nl> + size_t size ) { <nl> memmove ( dest , src , size ) ; <nl> } <nl> <nl> V8_INLINE void MemCopy ( void * dest , const void * src , size_t size ) { <nl> ( * memcopy_uint8_function ) ( reinterpret_cast < uint8_t * > ( dest ) , <nl> reinterpret_cast < const uint8_t * > ( src ) , size ) ; <nl> } <nl> - V8_INLINE void MemMove ( void * dest , const void * src , size_t size ) { <nl> + V8_EXPORT_PRIVATE V8_INLINE void MemMove ( void * dest , const void * src , <nl> + size_t size ) { <nl> memmove ( dest , src , size ) ; <nl> } <nl> # else <nl> V8_INLINE void MemMove ( void * dest , const void * src , size_t size ) { <nl> V8_INLINE void MemCopy ( void * dest , const void * src , size_t size ) { <nl> memcpy ( dest , src , size ) ; <nl> } <nl> - V8_INLINE void MemMove ( void * dest , const void * src , size_t size ) { <nl> + V8_EXPORT_PRIVATE V8_INLINE void MemMove ( void * dest , const void * src , <nl> + size_t size ) { <nl> memmove ( dest , src , size ) ; <nl> } <nl> const int kMinComplexMemCopy = 16 * kPointerSize ; <nl> mmm a / src / v8 . gyp <nl> ppp b / src / v8 . gyp <nl> <nl> ' . . ' , <nl> ] , <nl> ' defines ' : [ <nl> - ' V8_SHARED ' , <nl> ' BUILDING_V8_SHARED ' , <nl> ] , <nl> ' direct_dependent_settings ' : { <nl> ' defines ' : [ <nl> - ' V8_SHARED ' , <nl> ' USING_V8_SHARED ' , <nl> ] , <nl> } , <nl> <nl> } ] , <nl> [ ' component = = " shared_library " ' , { <nl> ' defines ' : [ <nl> - ' V8_SHARED ' , <nl> ' BUILDING_V8_SHARED ' , <nl> ] , <nl> ' direct_dependent_settings ' : { <nl> ' defines ' : [ <nl> - ' V8_SHARED ' , <nl> ' USING_V8_SHARED ' , <nl> ] , <nl> } , <nl> <nl> [ ' component = = " shared_library " ' , { <nl> ' defines ' : [ <nl> ' BUILDING_V8_SHARED ' , <nl> - ' V8_SHARED ' , <nl> ] , <nl> } ] , <nl> ] <nl> <nl> } ] , <nl> [ ' component = = " shared_library " ' , { <nl> ' defines ' : [ <nl> - ' V8_SHARED ' , <nl> ' BUILDING_V8_SHARED ' , <nl> ] , <nl> ' direct_dependent_settings ' : { <nl> ' defines ' : [ <nl> - ' V8_SHARED ' , <nl> ' USING_V8_SHARED ' , <nl> ] , <nl> } , <nl> <nl> [ ' component = = " shared_library " ' , { <nl> ' defines ' : [ <nl> ' BUILDING_V8_SHARED ' , <nl> - ' V8_SHARED ' , <nl> ] , <nl> } ] , <nl> [ ' v8_postmortem_support = = " true " ' , { <nl> mmm a / src / wasm / wasm - js . h <nl> ppp b / src / wasm / wasm - js . 
h <nl> <nl> # ifndef V8_WASM_JS_H_ <nl> # define V8_WASM_JS_H_ <nl> <nl> - # ifndef V8_SHARED <nl> # include " src / allocation . h " <nl> # include " src / base / hashmap . h " <nl> - # else <nl> - # include " include / v8 . h " <nl> - # include " src / base / compiler - specific . h " <nl> - # endif / / ! V8_SHARED <nl> <nl> namespace v8 { <nl> namespace internal { <nl> | [ d8 ] Fix the shared - library build | v8/v8 | 2c10ca8086a4d595ecf9aa843d2031b068470d65 | 2016-09-19T12:47:22Z |
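The substance of the d8 fix is the `V8_EXPORT_PRIVATE` macro added to `src/globals.h`: instead of compiling the embedder with a global `V8_SHARED` flag, the individual internal symbols d8 needs (flags, `MemMove`, `OFStream`, the heap iterators, and so on) are annotated for export. A header-style sketch of the same pattern for a generic library; the `MYLIB_*` names and sample declarations are placeholders, not V8 code:

```cpp
#include <cstddef>  // std::size_t
#include <cstdio>   // FILE, for the sample declaration below

// Sketch of the export-macro pattern the commit introduces: one macro,
// expanded differently when building the shared library, consuming it,
// or building statically.
#if defined(_WIN32)
# if defined(BUILDING_MYLIB_SHARED)
#  define MYLIB_EXPORT __declspec(dllexport)
# elif defined(USING_MYLIB_SHARED)
#  define MYLIB_EXPORT __declspec(dllimport)
# else
#  define MYLIB_EXPORT
# endif
#else  // ELF / Mach-O: only the producing side needs an annotation.
# if defined(BUILDING_MYLIB_SHARED)
#  define MYLIB_EXPORT __attribute__((visibility("default")))
# else
#  define MYLIB_EXPORT
# endif
#endif

// Annotate exactly the internals an out-of-library embedder needs,
// mirroring the V8_EXPORT_PRIVATE additions in factory.h, utils.h, etc.
MYLIB_EXPORT void MemMoveLike(void* dest, const void* src, std::size_t n);

class MYLIB_EXPORT FileStreamLike {
 public:
  explicit FileStreamLike(FILE* f) : file_(f) {}
 private:
  FILE* file_;
};
```

On non-Windows toolchains the `visibility("default")` attribute only has an effect when the library is built with hidden default visibility (e.g. `-fvisibility=hidden`), which is why the consuming side needs no counterpart there.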
mmm a / lib / IRGen / ClassMetadataVisitor . h <nl> ppp b / lib / IRGen / ClassMetadataVisitor . h <nl> template < class Impl > class ClassMetadataVisitor <nl> / / isn ' t necessary . <nl> / / FIXME : Figure out what can be removed altogether in non - objc - interop <nl> / / mode and remove it . rdar : / / problem / 18801263 <nl> - asImpl ( ) . addSuperClass ( ) ; <nl> + asImpl ( ) . addSuperclass ( ) ; <nl> asImpl ( ) . addClassCacheData ( ) ; <nl> asImpl ( ) . addClassDataPointer ( ) ; <nl> <nl> class ClassMetadataScanner : public ClassMetadataVisitor < Impl > { <nl> void addIVarDestroyer ( ) { addPointer ( ) ; } <nl> void addValueWitnessTable ( ) { addPointer ( ) ; } <nl> void addDestructorFunction ( ) { addPointer ( ) ; } <nl> - void addSuperClass ( ) { addPointer ( ) ; } <nl> + void addSuperclass ( ) { addPointer ( ) ; } <nl> void addClassFlags ( ) { addInt32 ( ) ; } <nl> void addInstanceAddressPoint ( ) { addInt32 ( ) ; } <nl> void addInstanceSize ( ) { addInt32 ( ) ; } <nl> mmm a / lib / IRGen / ForeignClassMetadataVisitor . h <nl> ppp b / lib / IRGen / ForeignClassMetadataVisitor . h <nl> class ForeignClassMetadataVisitor <nl> super : : layout ( ) ; <nl> asImpl ( ) . addNominalTypeDescriptor ( ) ; <nl> asImpl ( ) . noteStartOfSuperClass ( ) ; <nl> - asImpl ( ) . addSuperClass ( ) ; <nl> + asImpl ( ) . addSuperclass ( ) ; <nl> asImpl ( ) . addReservedWord ( ) ; <nl> asImpl ( ) . addReservedWord ( ) ; <nl> asImpl ( ) . addReservedWord ( ) ; <nl> class ForeignClassMetadataScanner : public ForeignClassMetadataVisitor < Impl > { <nl> void addMetadataFlags ( ) { addPointer ( ) ; } <nl> void addValueWitnessTable ( ) { addPointer ( ) ; } <nl> void addNominalTypeDescriptor ( ) { addPointer ( ) ; } <nl> - void addSuperClass ( ) { addPointer ( ) ; } <nl> + void addSuperclass ( ) { addPointer ( ) ; } <nl> void addReservedWord ( ) { addPointer ( ) ; } <nl> <nl> private : <nl> mmm a / lib / IRGen / GenMeta . cpp <nl> ppp b / lib / IRGen / GenMeta . cpp <nl> namespace { <nl> AddressPoint = B . getNextOffsetFromGlobal ( ) ; <nl> } <nl> <nl> - void addSuperClass ( ) { <nl> + void addSuperclass ( ) { <nl> / / If this is a root class , use SwiftObject as our formal parent . <nl> if ( ! Target - > hasSuperclass ( ) ) { <nl> / / This is only required for ObjC interoperation . <nl> namespace { <nl> <nl> void noteStartOfSuperClass ( ) { } <nl> <nl> - void addSuperClass ( ) { <nl> + void addSuperclass ( ) { <nl> auto superclassDecl = Target - > getSuperclassDecl ( ) ; <nl> if ( ! superclassDecl | | ! superclassDecl - > isForeign ( ) ) { <nl> B . addNullPointer ( IGM . TypeMetadataPtrTy ) ; <nl> | IRGen : Rename addSuperClass ( ) = > addSuperclass ( ) for consistency | apple/swift | e3ff63a289a439ef2850e8d132af0eb1b95c1533 | 2018-07-10T06:56:25Z |
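The rename itself is mechanical, but it has to land in visitor, scanner, and builder at once because the metadata visitors use the CRTP: the base `ClassMetadataVisitor` calls `asImpl().addSuperclass()`, so every implementer must spell the hook identically. A self-contained sketch of that dispatch pattern with hypothetical names:

```cpp
// Sketch of the CRTP layout-visitor pattern behind ClassMetadataVisitor:
// the base class fixes the layout order and dispatches each slot to the
// derived class through asImpl(), so a hook rename must be applied to
// every derived class simultaneously.
template <class Impl>
class MetadataVisitor {
 public:
  Impl& asImpl() { return *static_cast<Impl*>(this); }
  void layout() {
    asImpl().addSuperclass();  // the renamed hook
    asImpl().addClassFlags();
  }
};

class MetadataScanner : public MetadataVisitor<MetadataScanner> {
 public:
  void addSuperclass() { size += sizeof(void*); }
  void addClassFlags() { size += 4; }
  unsigned size = 0;
};

int main() {
  MetadataScanner s;
  s.layout();
  return static_cast<int>(s.size);  // 12 on a typical 64-bit target
}
```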
mmm a / tensorflow / core / common_runtime / executor . cc <nl> ppp b / tensorflow / core / common_runtime / executor . cc <nl> limitations under the License . <nl> # include " absl / strings / string_view . h " <nl> # include " tensorflow / core / common_runtime / costmodel_manager . h " <nl> # include " tensorflow / core / common_runtime / executor_factory . h " <nl> + # include " tensorflow / core / common_runtime / metrics . h " <nl> # include " tensorflow / core / common_runtime / pending_counts . h " <nl> # include " tensorflow / core / common_runtime / renamed_device . h " <nl> # include " tensorflow / core / common_runtime / step_stats_collector . h " <nl> Status ExecutorImpl : : Initialize ( const Graph & graph ) { <nl> TF_RETURN_IF_ERROR ( GetNodeAttr ( n - > attrs ( ) , " frame_name " , & enter_name ) ) ; <nl> EnsureFrameInfo ( enter_name ) - > input_count + + ; <nl> } <nl> + <nl> + / / Record information about whether each output of the op is used . <nl> + std : : vector < bool > used_outputs ( n - > num_outputs ( ) , false ) ; <nl> + for ( const Edge * e : n - > out_edges ( ) ) { <nl> + if ( e - > src_output ( ) > = 0 ) { <nl> + used_outputs [ e - > src_output ( ) ] = true ; <nl> + } <nl> + } <nl> + for ( bool used_output : used_outputs ) { <nl> + if ( ! used_output ) { <nl> + metrics : : RecordUnusedOutput ( n - > type_string ( ) ) ; <nl> + } <nl> + } <nl> } <nl> <nl> / / Initialize PendingCounts only after item - > pending_id is initialized for <nl> mmm a / tensorflow / core / common_runtime / metrics . cc <nl> ppp b / tensorflow / core / common_runtime / metrics . cc <nl> auto * graph_run_output_tensor_bytes = monitoring : : Sampler < 0 > : : New ( <nl> / / Power of 2 with bucket count 14 ( 256G ) <nl> { monitoring : : Buckets : : Exponential ( 1 , 4 , 14 ) } ) ; <nl> <nl> + auto * graph_unused_outputs = monitoring : : Counter < 1 > : : New ( <nl> + " / tensorflow / core / graph_unused_outputs " , <nl> + " The number of unused outputs for ops of a given type . " , " name " ) ; <nl> + <nl> auto * tf_data_autotune_counter = monitoring : : Counter < 1 > : : New ( <nl> " / tensorflow / data / autotune " , " tf . data autotuning " , " name " ) ; <nl> <nl> void IncrementMLIRImportFailureCount ( ) { <nl> mlir_import_failure_count_cell - > IncrementBy ( 1 ) ; <nl> } <nl> <nl> + void RecordUnusedOutput ( const string & op_name ) { <nl> + graph_unused_outputs - > GetCell ( op_name ) - > IncrementBy ( 1 ) ; <nl> + } <nl> + <nl> } / / namespace metrics <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / common_runtime / metrics . h <nl> ppp b / tensorflow / core / common_runtime / metrics . h <nl> void RecordGraphOutputTensors ( const size_t size ) ; <nl> <nl> void UpdateGraphExecTime ( const uint64 running_time_usecs ) ; <nl> <nl> + / / Records that one output of an op of type ` op_name ` was unused . <nl> + void RecordUnusedOutput ( const string & op_name ) ; <nl> + <nl> / / Updates the metrics stored about time spent building graphs . <nl> / / <nl> / / By " GraphBuild " , we refer to building a client graph , which is a sub - graph of <nl> | [ Metrics ] Add a monitoring counter for unused outputs of nodes . | tensorflow/tensorflow | 746c75c381461bf4212e9e9304e08abec26fd4b5 | 2019-12-31T17:37:34Z |
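The executor-side scan in the commit above is self-contained: mark each output index reached by a data out-edge, then count the gaps (a negative `src_output` denotes a control edge in TensorFlow). The same logic restated with toy stand-ins for `Node`/`Edge`:

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Toy stand-ins for TF's Node/Edge, just to restate the scan above.
struct Edge { int src_output; };  // < 0 means control edge
struct Node {
  int num_outputs;
  std::string type_string;
  std::vector<Edge> out_edges;
};

// How many outputs of `n` no data edge ever consumes: the quantity the
// commit feeds into the /tensorflow/core/graph_unused_outputs counter,
// keyed by op type.
int CountUnusedOutputs(const Node& n) {
  std::vector<bool> used(n.num_outputs, false);
  for (const Edge& e : n.out_edges)
    if (e.src_output >= 0) used[e.src_output] = true;
  int unused = 0;
  for (bool u : used)
    if (!u) ++unused;
  return unused;
}

int main() {
  // An op with 3 outputs whose outputs 0 and 2 are consumed: 1 unused.
  Node n{3, "MyOp", {{0}, {2}, {-1}}};
  std::printf("%s: %d unused output(s)\n", n.type_string.c_str(),
              CountUnusedOutputs(n));
  return 0;
}
```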
mmm a / lib / Sema / CSGen . cpp <nl> ppp b / lib / Sema / CSGen . cpp <nl> namespace { <nl> if ( ! optTy ) <nl> return Type ( ) ; <nl> <nl> - CS . addConstraint ( ConstraintKind : : Conversion , <nl> - expr - > getSubExpr ( ) - > getType ( ) , optTy , <nl> + CS . addConstraint ( ConstraintKind : : OptionalObject , <nl> + optTy , expr - > getSubExpr ( ) - > getType ( ) , <nl> CS . getConstraintLocator ( expr ) ) ; <nl> return optTy ; <nl> } <nl> mmm a / test / Parse / try . swift <nl> ppp b / test / Parse / try . swift <nl> func callThrowingClosureWithoutTry ( closure : Int throws - > Int ) rethrows { <nl> closure ( 0 ) / / expected - error { { call can throw but is not marked with ' try ' } } <nl> } <nl> <nl> + func producesOptional ( ) throws - > Int ? { return nil } <nl> + let doubleOptional = try ? producesOptional ( ) <nl> + let _ : String = doubleOptional / / expected - error { { cannot convert value of type ' Int ? ? ' to specified type ' String ' } } <nl> | ' try ? ' is supposed to stack optionals , not merge them . | apple/swift | 185326755c91ed4926fba286c87b35829ed03a21 | 2015-08-06T21:02:40Z |
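The regression test pins the intended semantics: `try? producesOptional()` has type `Int??`, because the new `OptionalObject` constraint wraps the sub-expression's type in a fresh optional instead of converting into one. For readers outside Swift, the same stack-don't-merge distinction can be restated with C++17 optionals; this is purely illustrative, not code from the commit:

```cpp
#include <cstdio>
#include <optional>

// The callee may legitimately produce "no value" as a non-error result.
std::optional<int> produces_optional() { return std::nullopt; }

// A "try?"-style wrapper keeps two distinct layers: the outer optional
// says whether the call itself failed, the inner one is the callee's own
// result. Flattening to optional<int> (the pre-fix Conversion behavior)
// would conflate "call threw" with "call returned nil".
std::optional<std::optional<int>> try_call() {
  try {
    return produces_optional();  // outer engaged, inner may be empty
  } catch (...) {
    return std::nullopt;         // call failed: outer layer empty
  }
}

int main() {
  auto r = try_call();
  if (!r)        std::puts("call failed");
  else if (!*r)  std::puts("call succeeded, returned no value");
  else           std::printf("value: %d\n", **r);
  return 0;
}
```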
mmm a / tensorflow / compiler / mlir / tensorflow / tests / tpu_cluster_formation . mlir <nl> ppp b / tensorflow / compiler / mlir / tensorflow / tests / tpu_cluster_formation . mlir <nl> func @ cluster_nested_op_using_resource ( ) { <nl> / / CHECK : " tf . opA " ( ) ( { <nl> / / CHECK : " tf . AssignAddVariableOp " ( [ [ VAR ] ] , [ [ CONST ] ] ) <nl> <nl> + <nl> + / / mmm - - <nl> + <nl> + <nl> + ! tf_res = type tensor < * x ! tf . resource < tensor < f32 > > > <nl> + <nl> + / / Test multiple replicated clusters interleaved and uses resource variables . <nl> + / / CHECK - LABEL : func @ multiple_replicated_interleaved <nl> + func @ multiple_replicated_interleaved ( % arg0 : ! tf_res ) { <nl> + " tf . TPUReplicateMetadata " ( ) { _tpu_replicate = " a " , num_replicas = 2 , topology = " topology " } : ( ) - > ( ) <nl> + " tf . TPUReplicateMetadata " ( ) { _tpu_replicate = " b " , num_replicas = 2 , topology = " topology " } : ( ) - > ( ) <nl> + " tf . TPUReplicateMetadata " ( ) { _tpu_replicate = " c " , num_replicas = 2 , topology = " topology " } : ( ) - > ( ) <nl> + % 0 = " tf . TPUReplicatedInput " ( % arg0 , % arg0 ) : ( ! tf_res , ! tf_res ) - > ! tf_res <nl> + % 1 = " tf . TPUReplicatedInput " ( % arg0 , % arg0 ) : ( ! tf_res , ! tf_res ) - > ! tf_res <nl> + % 2 = " tf . TPUReplicatedInput " ( % arg0 , % arg0 ) : ( ! tf_res , ! tf_res ) - > ! tf_res <nl> + % 3 = " tf . ReadVariableOp " ( % 0 ) { _tpu_replicate = " a " } : ( ! tf_res ) - > tensor < f32 > <nl> + % 4 = " tf . ReadVariableOp " ( % 1 ) { _tpu_replicate = " b " } : ( ! tf_res ) - > tensor < f32 > <nl> + % 5 = " tf . ReadVariableOp " ( % 2 ) { _tpu_replicate = " c " } : ( ! tf_res ) - > tensor < f32 > <nl> + % 6 = " tf . Identity " ( % 3 ) { _tpu_replicate = " a " } : ( tensor < f32 > ) - > tensor < f32 > <nl> + % 7 = " tf . Identity " ( % 4 ) { _tpu_replicate = " b " } : ( tensor < f32 > ) - > tensor < f32 > <nl> + % 8 = " tf . Identity " ( % 5 ) { _tpu_replicate = " c " } : ( tensor < f32 > ) - > tensor < f32 > <nl> + % 9 : 2 = " tf . TPUReplicatedOutput " ( % 6 ) : ( tensor < f32 > ) - > ( tensor < f32 > , tensor < f32 > ) <nl> + % 10 : 2 = " tf . TPUReplicatedOutput " ( % 7 ) : ( tensor < f32 > ) - > ( tensor < f32 > , tensor < f32 > ) <nl> + % 11 : 2 = " tf . TPUReplicatedOutput " ( % 8 ) : ( tensor < f32 > ) - > ( tensor < f32 > , tensor < f32 > ) <nl> + return <nl> + } <nl> + <nl> + / / CHECK : tf_device . replicate <nl> + / / CHECK : tf_device . replicate <nl> + / / CHECK : tf_device . replicate <nl> + <nl> + <nl> / / mmm - - <nl> <nl> <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / tpu_cluster_formation . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / tpu_cluster_formation . cc <nl> bool ShouldMoveOpAfterCluster ( <nl> const llvm : : SmallSetVector < Operation * , 8 > & preceding_users , <nl> const TF : : ResourceAliasAnalysis : : Info & resource_alias_analysis , <nl> const llvm : : SmallDenseSet < int64_t > & observed_resource_ids ) { <nl> + const bool is_replicate = llvm : : isa < tf_device : : ReplicateOp > ( op ) ; <nl> auto result = op - > walk ( [ & ] ( Operation * inner_op ) { <nl> for ( Value operand : inner_op - > getOperands ( ) ) { <nl> Operation * def = operand . getDefiningOp ( ) ; <nl> bool ShouldMoveOpAfterCluster ( <nl> } <nl> } <nl> <nl> + / / Don ' t visit replicate op inner op operands as new resource <nl> + / / values / arguments may have been created but are not known in <nl> + / / ` resource_alias_analysis ` . 
<nl> + if ( is_replicate & & inner_op ! = op ) return WalkResult : : advance ( ) ; <nl> + <nl> / / Check for uses of any resource in or after cluster . <nl> for ( Value operand : TF : : filter_resources ( inner_op - > getOperands ( ) ) ) { <nl> if ( resource_alias_analysis . IsUnknownResource ( operand ) ) continue ; <nl> | Update TPU cluster formation to not check replicate inner ops resource operands . | tensorflow/tensorflow | 4d1287cfbb032fa9ba6c4c56011d6d38bfbb9f6c | 2020-10-15T15:55:05Z |
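
The inline comment in this diff carries the key reasoning: ops nested inside a `tf_device.replicate` body may use replicate-created resource arguments that `ResourceAliasAnalysis` never saw, so only the replicate op itself gets its operands inspected. A minimal control-flow sketch of that early-exit walk pattern, assuming the standard MLIR walk API (this is just the shape of the check, not the actual pass):

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Visitors.h"

    // Visit 'op' and everything nested in it, but advance past the operand
    // checks for ops inside a replicate body, whose resource operands may be
    // values the alias analysis does not know about.
    void VisitClusterUser(mlir::Operation *op, bool is_replicate) {
      op->walk([&](mlir::Operation *inner_op) {
        if (is_replicate && inner_op != op)
          return mlir::WalkResult::advance();  // skip nested-op operand checks
        // ... the pass's resource-operand checks would go here ...
        return mlir::WalkResult::advance();
      });
    }
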
mmm a / imgui . h <nl> ppp b / imgui . h <nl> struct ImDrawChannel <nl> ImVector < ImDrawIdx > _IdxBuffer ; <nl> } ; <nl> <nl> + <nl> / / Split / Merge functions are used to split the draw list into different layers which can be drawn into out of order . <nl> - / / This is used by the Columns api , so items of each column can be batched together in a same draw call . <nl> + / / This is used by the Columns / Tables API , so items of each column can be batched together in a same draw call . <nl> struct ImDrawListSplitter <nl> { <nl> int _Current ; / / Current channel number ( 0 ) <nl> mmm a / imgui_demo . cpp <nl> ppp b / imgui_demo . cpp <nl> static void ShowDemoWindowWidgets ( ) <nl> ImGui : : TableNextColumn ( ) ; <nl> ImGui : : Selectable ( label , & selected [ i ] ) ; / / FIXME - TABLE : Selection overlap <nl> } <nl> - ImGui : : EndTable ( ) ; <nl> + ImGui : : EndTable ( ) ; <nl> } <nl> ImGui : : Separator ( ) ; <nl> if ( ImGui : : BeginTable ( " split2 " , 3 , ImGuiTableFlags_Resizable | ImGuiTableFlags_NoSavedSettings ) ) <nl> static void ShowDemoWindowTables ( ) <nl> <nl> if ( open_action ! = - 1 ) <nl> ImGui : : SetNextItemOpen ( open_action ! = 0 ) ; <nl> - if ( ImGui : : TreeNode ( " Vertical scrolling " ) ) <nl> + if ( ImGui : : TreeNode ( " Vertical scrolling , with clipping " ) ) <nl> { <nl> HelpMarker ( " Here we activate ScrollY , which will create a child window container to allow hosting scrollable contents . \ n \ nWe also demonstrate using ImGuiListClipper to virtualize the submission of many items . " ) ; <nl> static ImGuiTableFlags flags = ImGuiTableFlags_ScrollY | ImGuiTableFlags_RowBg | ImGuiTableFlags_BordersOuter | ImGuiTableFlags_BordersV | ImGuiTableFlags_Resizable | ImGuiTableFlags_Reorderable | ImGuiTableFlags_Hideable ; <nl> static void ShowDemoWindowTables ( ) <nl> ImGui : : TableSetupColumn ( " Two " , ImGuiTableColumnFlags_None ) ; <nl> ImGui : : TableSetupColumn ( " Three " , ImGuiTableColumnFlags_None ) ; <nl> ImGui : : TableHeadersRow ( ) ; <nl> + <nl> + / / Demonstrate using clipper for large vertical lists <nl> ImGuiListClipper clipper ; <nl> clipper . Begin ( 1000 ) ; <nl> while ( clipper . Step ( ) ) <nl> static void ShowDemoWindowTables ( ) <nl> ImGui : : TableSetupColumn ( " Action " , ImGuiTableColumnFlags_NoSort | ImGuiTableColumnFlags_WidthFixed , - 1 . 0f , MyItemColumnID_Action ) ; <nl> ImGui : : TableSetupColumn ( " Quantity " , ImGuiTableColumnFlags_PreferSortDescending | ImGuiTableColumnFlags_WidthStretch , - 1 . 0f , MyItemColumnID_Quantity ) ; <nl> ImGui : : TableSetupScrollFreeze ( 0 , 1 ) ; / / Make row always visible <nl> + ImGui : : TableHeadersRow ( ) ; <nl> <nl> / / Sort our data if sort specs have been changed ! <nl> if ( ImGuiTableSortSpecs * sorts_specs = ImGui : : TableGetSortSpecs ( ) ) <nl> static void ShowDemoWindowTables ( ) <nl> sorts_specs - > SpecsDirty = false ; <nl> } <nl> <nl> - / / Display data <nl> - ImGui : : TableHeadersRow ( ) ; <nl> + / / Demonstrate using clipper for large vertical lists <nl> ImGuiListClipper clipper ; <nl> clipper . Begin ( items . Size ) ; <nl> while ( clipper . Step ( ) ) <nl> for ( int row_n = clipper . DisplayStart ; row_n < clipper . 
DisplayEnd ; row_n + + ) <nl> { <nl> + / / Display a data item <nl> MyItem * item = & items [ row_n ] ; <nl> ImGui : : PushID ( item - > ID ) ; <nl> ImGui : : TableNextRow ( ) ; <nl> static void ShowDemoWindowTables ( ) <nl> static int freeze_cols = 1 ; <nl> static int freeze_rows = 1 ; <nl> static int items_count = IM_ARRAYSIZE ( template_items_names ) ; <nl> - static ImVec2 outer_size_value = ImVec2 ( 0 , TEXT_BASE_HEIGHT * 15 ) ; <nl> + static ImVec2 outer_size_value = ImVec2 ( 0 , TEXT_BASE_HEIGHT * 12 ) ; <nl> static float row_min_height = 0 . 0f ; / / Auto <nl> static float inner_width_with_scroll = 0 . 0f ; / / Auto - extend <nl> static bool outer_size_enabled = true ; <nl> static void ShowDemoWindowTables ( ) <nl> / / FIXME - TABLE FIXME - NAV : How we can get decent up / down even though we have the buttons here ? <nl> ImGui : : PushButtonRepeat ( true ) ; <nl> # if 1 <nl> + / / Demonstrate using clipper for large vertical lists <nl> ImGuiListClipper clipper ; <nl> clipper . Begin ( items . Size ) ; <nl> while ( clipper . Step ( ) ) <nl> { <nl> for ( int row_n = clipper . DisplayStart ; row_n < clipper . DisplayEnd ; row_n + + ) <nl> # else <nl> + / / Without clipper <nl> { <nl> - for ( int row_n = 0 ; row_n < items_count ; row_n + + ) <nl> + for ( int row_n = 0 ; row_n < items . Size ; row_n + + ) <nl> # endif <nl> { <nl> MyItem * item = & items [ row_n ] ; <nl> static void ShowDemoWindowTables ( ) <nl> } <nl> ImGui : : PopButtonRepeat ( ) ; <nl> <nl> + / / Store some info to display debug details below <nl> table_scroll_cur = ImVec2 ( ImGui : : GetScrollX ( ) , ImGui : : GetScrollY ( ) ) ; <nl> table_scroll_max = ImVec2 ( ImGui : : GetScrollMaxX ( ) , ImGui : : GetScrollMaxY ( ) ) ; <nl> table_draw_list = ImGui : : GetWindowDrawList ( ) ; <nl> static void ShowDemoWindowTables ( ) <nl> ImGui : : SameLine ( 0 . 0f , 0 . 0f ) ; <nl> const int table_draw_list_draw_cmd_count = table_draw_list - > CmdBuffer . Size ; <nl> if ( table_draw_list = = parent_draw_list ) <nl> - ImGui : : Text ( " : DrawCmd : + % d ( in same window ) " , table_draw_list_draw_cmd_count - parent_draw_list_draw_cmd_count ) ; <nl> + ImGui : : Text ( " : DrawCmd : + % d ( in same window ) " , <nl> + table_draw_list_draw_cmd_count - parent_draw_list_draw_cmd_count ) ; <nl> else <nl> ImGui : : Text ( " : DrawCmd : + % d ( in child window ) , Scroll : ( % . f / % . f ) ( % . f / % . f ) " , <nl> table_draw_list_draw_cmd_count - 1 , table_scroll_cur . x , table_scroll_max . x , table_scroll_cur . y , table_scroll_max . y ) ; <nl> static void ShowDemoWindowColumns ( ) <nl> ImVec2 child_size = ImVec2 ( 0 , ImGui : : GetFontSize ( ) * 20 . 0f ) ; <nl> ImGui : : BeginChild ( " # # ScrollingRegion " , child_size , false , ImGuiWindowFlags_HorizontalScrollbar ) ; <nl> ImGui : : Columns ( 10 ) ; <nl> + <nl> + / / Also demonstrate using clipper for large vertical lists <nl> int ITEMS_COUNT = 2000 ; <nl> - ImGuiListClipper clipper ; / / Also demonstrate using the clipper for large list <nl> + ImGuiListClipper clipper ; <nl> clipper . Begin ( ITEMS_COUNT ) ; <nl> while ( clipper . Step ( ) ) <nl> { <nl> mmm a / imgui_internal . h <nl> ppp b / imgui_internal . h <nl> struct ImGuiTableColumn <nl> bool IsVisible ; / / Is the column not marked Hidden by the user ? ( could be clipped by scrolling , etc ) . <nl> bool IsVisibleNextFrame ; <nl> bool IsClipped ; / / Set when not overlapping the host window clipping rectangle . 
<nl> - bool SkipItems ; <nl> - ImGuiNavLayer NavLayerCurrent ; <nl> + bool IsSkipItems ; <nl> + ImS8 NavLayerCurrent ; / / ImGuiNavLayer in 1 byte <nl> ImS8 DisplayOrder ; / / Index within Table ' s IndexToDisplayOrder [ ] ( column may be reordered by users ) <nl> ImS8 IndexWithinVisibleSet ; / / Index within visible set ( < = IndexToDisplayOrder ) <nl> ImS8 PrevVisibleColumn ; / / Index of prev visible column within Columns [ ] , - 1 if first visible column <nl> mmm a / imgui_tables . cpp <nl> ppp b / imgui_tables . cpp <nl> void ImGui : : TableUpdateLayout ( ImGuiTable * table ) <nl> const int column_n = table - > DisplayOrderToIndex [ order_n ] ; <nl> ImGuiTableColumn * column = & table - > Columns [ column_n ] ; <nl> <nl> - column - > NavLayerCurrent = ( table - > FreezeRowsCount > 0 | | column_n < table - > FreezeColumnsCount ) ? ImGuiNavLayer_Menu : ImGuiNavLayer_Main ; <nl> + column - > NavLayerCurrent = ( ImS8 ) ( ( table - > FreezeRowsCount > 0 | | column_n < table - > FreezeColumnsCount ) ? ImGuiNavLayer_Menu : ImGuiNavLayer_Main ) ; <nl> <nl> if ( table - > FreezeColumnsCount > 0 & & table - > FreezeColumnsCount = = visible_n ) <nl> offset_x + = work_rect . Min . x - table - > OuterRect . Min . x ; <nl> void ImGui : : TableUpdateLayout ( ImGuiTable * table ) <nl> column - > ClipRect . Max . x = offset_x ; <nl> column - > ClipRect . Max . y = FLT_MAX ; <nl> column - > ClipRect . ClipWithFull ( host_clip_rect ) ; <nl> - column - > IsClipped = column - > SkipItems = true ; <nl> + column - > IsClipped = column - > IsSkipItems = true ; <nl> continue ; <nl> } <nl> <nl> void ImGui : : TableUpdateLayout ( ImGuiTable * table ) <nl> if ( column - > IsClipped ) <nl> table - > VisibleUnclippedMaskByIndex & = ~ ( ( ImU64 ) 1 < < column_n ) ; / / Columns with the _WidthAlwaysAutoResize sizing policy will never be updated then . <nl> <nl> - column - > SkipItems = ! column - > IsVisible | | table - > HostSkipItems ; <nl> + column - > IsSkipItems = ! column - > IsVisible | | table - > HostSkipItems ; <nl> <nl> / / Detect hovered column <nl> if ( is_hovering_table & & g . IO . MousePos . x > = column - > ClipRect . Min . x & & g . IO . MousePos . x < column - > ClipRect . Max . x ) <nl> void ImGui : : TableUpdateDrawChannels ( ImGuiTable * table ) <nl> const int channels_for_dummy = ( table - > ColumnsVisibleCount < table - > ColumnsCount | | table - > VisibleUnclippedMaskByIndex ! = table - > VisibleMaskByIndex ) ? + 1 : 0 ; <nl> const int channels_total = channels_for_bg + ( channels_for_row * freeze_row_multiplier ) + channels_for_dummy ; <nl> table - > DrawSplitter . Split ( table - > InnerWindow - > DrawList , channels_total ) ; <nl> - table - > DummyDrawChannel = ( channels_for_dummy > 0 ) ? ( ImU8 ) ( channels_total - 1 ) : - 1 ; <nl> + table - > DummyDrawChannel = ( ImU8 ) ( ( channels_for_dummy > 0 ) ? channels_total - 1 : - 1 ) ; <nl> table - > BgDrawChannelUnfrozen = ( ImU8 ) ( ( table - > FreezeRowsCount > 0 ) ? channels_for_row + 1 : 0 ) ; <nl> <nl> int draw_channel_current = 1 ; <nl> void ImGui : : TableEndRow ( ImGuiTable * table ) <nl> for ( int column_n = 0 ; column_n < table - > ColumnsCount ; column_n + + ) <nl> { <nl> ImGuiTableColumn * column = & table - > Columns [ column_n ] ; <nl> - column - > NavLayerCurrent = ( column_n < table - > FreezeColumnsCount ) ? ImGuiNavLayer_Menu : ImGuiNavLayer_Main ; <nl> + column - > NavLayerCurrent = ( ImS8 ) ( ( column_n < table - > FreezeColumnsCount ) ? 
ImGuiNavLayer_Menu : ImGuiNavLayer_Main ) ; <nl> } <nl> if ( unfreeze_rows_actual ) <nl> { <nl> void ImGui : : TableBeginCell ( ImGuiTable * table , int column_n ) <nl> window - > DC . ColumnsOffset . x = start_x - window - > Pos . x - window - > DC . Indent . x ; / / FIXME - WORKRECT <nl> window - > DC . CurrLineTextBaseOffset = table - > RowTextBaseline ; <nl> window - > DC . LastItemId = 0 ; <nl> - window - > DC . NavLayerCurrent = column - > NavLayerCurrent ; <nl> + window - > DC . NavLayerCurrent = ( ImGuiNavLayer ) column - > NavLayerCurrent ; <nl> <nl> window - > WorkRect . Min . y = window - > DC . CursorPos . y ; <nl> window - > WorkRect . Min . x = column - > MinX + table - > CellPaddingX + table - > CellSpacingX1 ; <nl> void ImGui : : TableBeginCell ( ImGuiTable * table , int column_n ) <nl> if ( ! column - > IsVisible ) <nl> window - > DC . CursorPos . y = ImMax ( window - > DC . CursorPos . y , table - > RowPosY2 ) ; <nl> <nl> - window - > SkipItems = column - > SkipItems ; <nl> + window - > SkipItems = column - > IsSkipItems ; <nl> if ( table - > Flags & ImGuiTableFlags_NoClip ) <nl> { <nl> table - > DrawSplitter . SetCurrentChannel ( window - > DrawList , 1 ) ; <nl> | Tables : Additionally commentary about clipper in the demo + minor padding tweak . | ocornut/imgui | fe6131168a397b5b91fe429a38708238452734f4 | 2020-12-04T18:15:25Z |
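
Several of the demo changes in this entry only label an existing pattern ("Demonstrate using clipper for large vertical lists"); for reference, the `ImGuiListClipper` idiom they annotate looks like this in isolation — a sketch assuming an active ImGui frame, with the item count and label as placeholders:

    #include "imgui.h"

    // Must be called between ImGui::Begin()/ImGui::End() on an active frame.
    static void ShowLargeList()
    {
        ImGuiListClipper clipper;
        clipper.Begin(10000);  // total number of virtual items
        while (clipper.Step())  // Step() computes the visible row range
            for (int row_n = clipper.DisplayStart; row_n < clipper.DisplayEnd; row_n++)
                ImGui::Text("Item %d", row_n);  // only visible rows are submitted
    }
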
mmm a / lib / Basics / Result . cpp <nl> ppp b / lib / Basics / Result . cpp <nl> <nl> <nl> # include " Result . h " <nl> <nl> + # include " Basics / StaticStrings . h " <nl> # include " Basics / error . h " <nl> # include " Basics / voc - errors . h " <nl> <nl> <nl> <nl> using namespace arangodb ; <nl> <nl> - Result : : Result ( ) : _errorNumber ( TRI_ERROR_NO_ERROR ) { } <nl> + Result : : Result ( ) noexcept ( noexcept ( std : : allocator < char > ( ) ) ) <nl> + : _errorNumber ( TRI_ERROR_NO_ERROR ) { } <nl> <nl> - Result : : Result ( int errorNumber ) : _errorNumber ( errorNumber ) { } <nl> + Result : : Result ( int errorNumber ) noexcept ( noexcept ( std : : allocator < char > ( ) ) ) <nl> + : _errorNumber ( errorNumber ) { } <nl> <nl> Result : : Result ( int errorNumber , std : : string const & errorMessage ) <nl> : _errorNumber ( errorNumber ) , _errorMessage ( errorMessage ) { } <nl> std : : ostream & operator < < ( std : : ostream & out , arangodb : : Result const & result ) { <nl> VPackBuilder dump ; <nl> { <nl> VPackObjectBuilder b ( & dump ) ; <nl> - dump . add ( " errorNumber " , VPackValue ( result . errorNumber ( ) ) ) ; <nl> - dump . add ( " errorMessage " , VPackValue ( result . errorMessage ( ) ) ) ; <nl> + dump . add ( StaticStrings : : ErrorNum , VPackValue ( result . errorNumber ( ) ) ) ; <nl> + dump . add ( StaticStrings : : ErrorMessage , VPackValue ( result . errorMessage ( ) ) ) ; <nl> } <nl> out < < dump . slice ( ) . toJson ( ) ; <nl> return out ; <nl> mmm a / lib / Basics / Result . h <nl> ppp b / lib / Basics / Result . h <nl> <nl> # define ARANGODB_BASICS_RESULT_H 1 <nl> <nl> # include < iosfwd > <nl> + # include < memory > <nl> # include < string > <nl> - # include < utility > <nl> <nl> namespace arangodb { <nl> class Result final { <nl> public : <nl> - Result ( ) ; <nl> - <nl> Result ( bool / * avoidCastingErrors * / ) = delete ; <nl> <nl> + Result ( ) noexcept ( noexcept ( std : : allocator < char > ( ) ) ) ; <nl> + <nl> / / cppcheck - suppress noExplicitConstructor <nl> - / * implicit * / Result ( int errorNumber ) ; <nl> + / * implicit * / Result ( int errorNumber ) noexcept ( noexcept ( std : : allocator < char > ( ) ) ) ; <nl> <nl> Result ( int errorNumber , std : : string const & errorMessage ) ; <nl> <nl> | Bug fix / make result ctor noexcept ( ) | arangodb/arangodb | 064577777fb986db371ee27dbad67ebfaf41e129 | 2019-10-22T07:39:35Z |
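
The `noexcept ( noexcept ( std : : allocator < char > ( ) ) )` spelling in this entry is the conditional-noexcept idiom: the constructor is declared non-throwing exactly when default-constructing a `char` allocator is non-throwing, which is the same condition `std::string`'s default constructor (used for the `_errorMessage` member) is specified against. A minimal sketch of the idiom outside this class, with hypothetical names:

    #include <memory>
    #include <string>

    struct Example {
      // Non-throwing exactly when default-constructing the char allocator
      // is non-throwing -- the condition std::string's default constructor
      // (used for 'message') hinges on.
      Example() noexcept(noexcept(std::allocator<char>())) {}
      std::string message;  // default-constructed empty string, no allocation
    };

    int main() {
      Example e;
      (void)e;
      return noexcept(Example()) ? 0 : 1;  // 0 on mainstream standard libraries
    }
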
mmm a / test / api - digester / Inputs / cake . swift <nl> ppp b / test / api - digester / Inputs / cake . swift <nl> public struct S1 { <nl> internal func foo3 ( ) { } <nl> private func foo4 ( ) { } <nl> fileprivate func foo5 ( ) { } <nl> + public func foo6 ( ) - > Void { } <nl> } <nl> <nl> public class C1 { <nl> mmm a / test / api - digester / Outputs / cake . json <nl> ppp b / test / api - digester / Outputs / cake . json <nl> <nl> } <nl> ] <nl> } , <nl> + { <nl> + " kind " : " Function " , <nl> + " name " : " foo6 " , <nl> + " printedName " : " foo6 ( ) " , <nl> + " declKind " : " Func " , <nl> + " usr " : " s : FV4cake2S14foo6FT_T_ " , <nl> + " location " : " " , <nl> + " moduleName " : " cake " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNameAlias " , <nl> + " name " : " Void " , <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> + } , <nl> { <nl> " kind " : " Constructor " , <nl> " name " : " init " , <nl> mmm a / test / api - digester / stdlib - stable . json <nl> ppp b / test / api - digester / stdlib - stable . json <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UIntMax " , <nl> - " printedName " : " UIntMax " <nl> + " printedName " : " UIntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt64 " , <nl> + " printedName " : " UInt64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UIntMax " , <nl> - " printedName " : " UIntMax " <nl> + " printedName " : " UIntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt64 " , <nl> + " printedName " : " UInt64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " 
IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . 
Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " EmptyCollection . IndexDistance " <nl> + " printedName " : " EmptyCollection . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " EmptyCollection . Index ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " EmptyCollection . IndexDistance " <nl> + " printedName " : " EmptyCollection . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . 
Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " EmptyCollection . IndexDistance " <nl> + " printedName " : " EmptyCollection . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " EmptyCollection . Index " <nl> + " printedName " : " EmptyCollection . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " Float " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " RawSignificand " , <nl> - " printedName " : " Float . RawSignificand " <nl> + " printedName " : " Float . RawSignificand " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt32 " , <nl> + " printedName " : " UInt32 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UIntMax " , <nl> - " printedName " : " UIntMax " <nl> + " printedName " : " UIntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt64 " , <nl> + " printedName " : " UInt64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " Double " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " RawSignificand " , <nl> - " printedName " : " Double . RawSignificand " <nl> + " printedName " : " Double . 
RawSignificand " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt64 " , <nl> + " printedName " : " UInt64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " ( UTF8 . CodeUnit ) " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CodeUnit " , <nl> - " printedName " : " UTF8 . CodeUnit " <nl> + " printedName " : " UTF8 . CodeUnit " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt8 " , <nl> + " printedName " : " UInt8 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " printedName " : " Bool " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CodeUnit " , <nl> - " printedName " : " UTF8 . CodeUnit " <nl> + " printedName " : " UTF8 . 
CodeUnit " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt8 " , <nl> + " printedName " : " UInt8 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " ( UTF16 . CodeUnit ) " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CodeUnit " , <nl> - " printedName " : " UTF16 . CodeUnit " <nl> + " printedName " : " UTF16 . CodeUnit " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt16 " , <nl> + " printedName " : " UInt16 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " static " : true , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CodeUnit " , <nl> - " printedName " : " UTF16 . CodeUnit " <nl> + " printedName " : " UTF16 . CodeUnit " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt16 " , <nl> + " printedName " : " UInt16 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " static " : true , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CodeUnit " , <nl> - " printedName " : " UTF16 . CodeUnit " <nl> + " printedName " : " UTF16 . CodeUnit " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt16 " , <nl> + " printedName " : " UInt16 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " Bool " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CodeUnit " , <nl> - " printedName " : " UTF16 . CodeUnit " <nl> + " printedName " : " UTF16 . CodeUnit " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt16 " , <nl> + " printedName " : " UInt16 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " Bool " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CodeUnit " , <nl> - " printedName " : " UTF16 . CodeUnit " <nl> + " printedName " : " UTF16 . 
CodeUnit " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt16 " , <nl> + " printedName " : " UInt16 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " ( UTF32 . CodeUnit ) " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CodeUnit " , <nl> - " printedName " : " UTF32 . CodeUnit " <nl> + " printedName " : " UTF32 . CodeUnit " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt32 " , <nl> + " printedName " : " UInt32 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " inout AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " inout AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] 
<nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " 
name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " AnyCollection < Mirror . Child > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Child " , <nl> - " printedName " : " Mirror . Child " <nl> + " printedName " : " Mirror . Child " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Tuple " , <nl> + " printedName " : " ( label : Optional < String > , value : Any ) " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Optional " , <nl> + " printedName " : " Optional < String > " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " String " , <nl> + " printedName " : " String " <nl> + } <nl> + ] <nl> + } , <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " ProtocolComposition " , <nl> + " printedName " : " Any " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Children " , <nl> - " printedName " : " Mirror . Children " <nl> + " printedName " : " Mirror . Children " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " AnyCollection " , <nl> + " printedName " : " AnyCollection < ( label : Optional < String > , value : Any ) > " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Tuple " , <nl> + " printedName " : " ( label : Optional < String > , value : Any ) " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Optional " , <nl> + " printedName " : " Optional < String > " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " String " , <nl> + " printedName " : " String " <nl> + } <nl> + ] <nl> + } , <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " ProtocolComposition " , <nl> + " printedName " : " Any " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Children " , <nl> - " printedName " : " Mirror . Children " <nl> + " printedName " : " Mirror . 
Children " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " AnyCollection " , <nl> + " printedName " : " AnyCollection < ( label : Optional < String > , value : Any ) > " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Tuple " , <nl> + " printedName " : " ( label : Optional < String > , value : Any ) " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Optional " , <nl> + " printedName " : " Optional < String > " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " String " , <nl> + " printedName " : " String " <nl> + } <nl> + ] <nl> + } , <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " ProtocolComposition " , <nl> + " printedName " : " Any " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " AnyObject " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " AnyClass " , <nl> - " printedName " : " AnyClass " <nl> + " printedName " : " AnyClass " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " ExistentialMetatype " , <nl> + " printedName " : " AnyObject . Type " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " AnyObject " , <nl> + " printedName " : " AnyObject " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " OpaquePointer " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " RawPointer " , <nl> - " printedName " : " RawPointer " <nl> + " printedName " : " RawPointer " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " BuiltinRawPointer " , <nl> + " printedName " : " Builtin . 
RawPointer " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " ( Float32 ) " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float32 " , <nl> - " printedName " : " Float32 " <nl> + " printedName " : " Float32 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Float " , <nl> + " printedName " : " Float " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " printedName " : " ( Float64 ) " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float64 " , <nl> - " printedName " : " Float64 " <nl> + " printedName " : " Float64 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Double " , <nl> + " printedName " : " Double " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " printedName " : " ( Float64 , Float64 , Float64 , Float64 ) " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float64 " , <nl> - " printedName " : " Float64 " <nl> + " printedName " : " Float64 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Double " , <nl> + " printedName " : " Double " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float64 " , <nl> - " printedName " : " Float64 " <nl> + " printedName " : " Float64 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Double " , <nl> + " printedName " : " Double " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float64 " , <nl> - " printedName " : " Float64 " <nl> + " printedName " : " Float64 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Double " , <nl> + " printedName " : " Double " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float64 " , <nl> - " printedName " : " Float64 " <nl> + " printedName " : " Float64 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Double " , <nl> + " printedName " : " Double " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " printedName " : " ( Float64 , Float64 ) " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float64 " , <nl> - " printedName " : " Float64 " <nl> + " printedName " : " Float64 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Double " , <nl> + " printedName " : " Double " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float64 " , <nl> - " printedName " : " Float64 " <nl> + " printedName " : " Float64 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Double " , <nl> + " printedName " : " Double " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " printedName " : " ( Float64 , Float64 ) " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float64 " , <nl> - " printedName " : " Float64 " <nl> + " printedName " : " Float64 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " 
name " : " Double " , <nl> + " printedName " : " Double " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float64 " , <nl> - " printedName " : " Float64 " <nl> + " printedName " : " Float64 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Double " , <nl> + " printedName " : " Double " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " AnyClass " , <nl> - " printedName " : " AnyClass " <nl> + " printedName " : " AnyClass " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " ExistentialMetatype " , <nl> + " printedName " : " AnyObject . Type " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " AnyObject " , <nl> + " printedName " : " AnyObject " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " AnyClass " , <nl> - " printedName " : " AnyClass " <nl> + " printedName " : " AnyClass " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " ExistentialMetatype " , <nl> + " printedName " : " AnyObject . Type " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " AnyObject " , <nl> + " printedName " : " AnyObject " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " Repeated . Index " <nl> + " printedName " : " Repeated . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " Repeated . Index " <nl> + " printedName " : " Repeated . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " Repeated . Index " <nl> + " printedName " : " Repeated . 
Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " Repeated . Index " <nl> + " printedName " : " Repeated . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UIntMax " , <nl> - " printedName " : " UIntMax " <nl> + " printedName " : " UIntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt64 " , <nl> + " printedName " : " UInt64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UIntMax " , <nl> - " printedName " : " UIntMax " <nl> + " printedName " : " UIntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt64 " , <nl> + " printedName " : " UInt64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UIntMax " , <nl> - " printedName " : " UIntMax " <nl> + " printedName " : " UIntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt64 " , <nl> + " printedName " : " UInt64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> 
} <nl> ] <nl> } , <nl> <nl> " printedName " : " AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " inout AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " inout AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " Float80 " <nl> } , <nl> { <nl> - " kind " : " 
TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " RawSignificand " , <nl> - " printedName " : " Float80 . RawSignificand " <nl> + " printedName " : " Float80 . RawSignificand " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt64 " , <nl> + " printedName " : " UInt64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Indices " , <nl> - " printedName " : " UnsafeBufferPointer . Indices " <nl> + " printedName " : " UnsafeBufferPointer . Indices " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " CountableRange " , <nl> + " printedName " : " CountableRange < Int > " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Indices " , <nl> - " printedName " : " UnsafeBufferPointer . Indices " <nl> + " printedName " : " UnsafeBufferPointer . Indices " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " CountableRange " , <nl> + " printedName " : " CountableRange < Int > " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Indices " , <nl> - " printedName " : " UnsafeMutableBufferPointer . Indices " <nl> + " printedName " : " UnsafeMutableBufferPointer . Indices " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " CountableRange " , <nl> + " printedName " : " CountableRange < Int > " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Indices " , <nl> - " printedName " : " UnsafeMutableBufferPointer . Indices " <nl> + " printedName " : " UnsafeMutableBufferPointer . Indices " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " CountableRange " , <nl> + " printedName " : " CountableRange < Int > " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UTF8Index " , <nl> - " printedName " : " String . UTF8Index " <nl> + " printedName " : " String . UTF8Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF8View . 
Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " String . UTF16View . Index " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UnicodeScalarIndex " , <nl> - " printedName " : " String . UnicodeScalarIndex " <nl> + " printedName " : " String . UnicodeScalarIndex " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UnicodeScalarView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " String . UTF16View . Index " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " String . UnicodeScalarIndex ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UnicodeScalarIndex " , <nl> - " printedName " : " String . UnicodeScalarIndex " <nl> + " printedName " : " String . UnicodeScalarIndex " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UnicodeScalarView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " String . Index ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . UTF16View . IndexDistance " <nl> + " printedName " : " String . UTF16View . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . 
UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Indices " , <nl> - " printedName " : " String . UTF16View . Indices . Indices " <nl> + " printedName " : " String . UTF16View . Indices . Indices " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Indices " , <nl> + " printedName " : " String . UTF16View . Indices " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Indices " , <nl> - " printedName " : " String . UTF16View . Indices . Indices " <nl> + " printedName " : " String . UTF16View . Indices . Indices " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Indices " , <nl> + " printedName " : " String . UTF16View . Indices " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . 
Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . UTF16View . Indices . IndexDistance " <nl> + " printedName " : " String . UTF16View . Indices . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " String . UTF16View . Indices . Index ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . UTF16View . Indices . IndexDistance " <nl> + " printedName " : " String . UTF16View . Indices . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . 
Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . UTF16View . Indices . IndexDistance " <nl> + " printedName " : " String . UTF16View . Indices . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . UTF16View . Indices . Index " <nl> + " printedName " : " String . UTF16View . Indices . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " String . UTF16View . Index " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . UTF16View . IndexDistance " <nl> + " printedName " : " String . UTF16View . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " String . UTF16View . Index " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . UTF16View . IndexDistance " <nl> + " printedName " : " String . UTF16View . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . UTF16View . IndexDistance " <nl> + " printedName " : " String . UTF16View . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UTF16Index " , <nl> - " printedName " : " String . UTF16Index " <nl> + " printedName " : " String . UTF16Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . 
Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " String . UTF8View . Index " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UnicodeScalarIndex " , <nl> - " printedName " : " String . UnicodeScalarIndex " <nl> + " printedName " : " String . UnicodeScalarIndex " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UnicodeScalarView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " String . UTF8View . Index " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " String . UnicodeScalarIndex ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UnicodeScalarIndex " , <nl> - " printedName " : " String . UnicodeScalarIndex " <nl> + " printedName " : " String . UnicodeScalarIndex " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UnicodeScalarView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " String . Index ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " ContiguousArray < CChar > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CChar " , <nl> - " printedName " : " CChar " <nl> + " printedName " : " CChar " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int8 " , <nl> + " printedName " : " Int8 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " ContiguousArray < CChar > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CChar " , <nl> - " printedName " : " CChar " <nl> + " printedName " : " CChar " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int8 " , <nl> + " printedName " : " Int8 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " String . Index ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . 
Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UnicodeScalarIndex " , <nl> - " printedName " : " String . UnicodeScalarIndex " <nl> + " printedName " : " String . UnicodeScalarIndex " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UnicodeScalarView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " String . Index ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UTF16Index " , <nl> - " printedName " : " String . UTF16Index " <nl> + " printedName " : " String . UTF16Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " String . Index ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UTF8Index " , <nl> - " printedName " : " String . UTF8Index " <nl> + " printedName " : " String . UTF8Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF8View . 
Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " UnsafePointer < CChar > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CChar " , <nl> - " printedName " : " CChar " <nl> + " printedName " : " CChar " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int8 " , <nl> + " printedName " : " Int8 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " printedName " : " UnsafePointer < CChar > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " CChar " , <nl> - " printedName " : " CChar " <nl> + " printedName " : " CChar " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int8 " , <nl> + " printedName " : " Int8 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " printedName " : " String " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float32 " , <nl> - " printedName " : " Float32 " <nl> + " printedName " : " Float32 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Float " , <nl> + " printedName " : " Float " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " String " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Float64 " , <nl> - " printedName " : " Float64 " <nl> + " printedName " : " Float64 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Double " , <nl> + " printedName " : " Double " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . CharacterView . IndexDistance " <nl> + " printedName " : " String . CharacterView . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . 
Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . IndexDistance " <nl> + " printedName " : " String . 
IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " String . Index ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . IndexDistance " <nl> + " printedName " : " String . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IndexDistance " , <nl> - " printedName " : " String . IndexDistance " <nl> + " printedName " : " String . IndexDistance " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int " , <nl> + " printedName " : " Int " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " Range < String . Index > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . 
Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " Range < String . Index > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " ClosedRange < String . Index > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " ClosedRange < String . Index > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " Character " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " Character " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " Range < String . Index > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " printedName " : " ClosedRange < String . Index > " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . 
Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } <nl> <nl> " printedName " : " String . UnicodeScalarIndex ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UnicodeScalarIndex " , <nl> - " printedName " : " String . UnicodeScalarIndex " <nl> + " printedName " : " String . UnicodeScalarIndex " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UnicodeScalarView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UTF16Index " , <nl> - " printedName " : " String . UTF16Index " <nl> + " printedName " : " String . UTF16Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF16View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " String . UnicodeScalarIndex ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UnicodeScalarIndex " , <nl> - " printedName " : " String . UnicodeScalarIndex " <nl> + " printedName " : " String . UnicodeScalarIndex " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UnicodeScalarView . Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UTF8Index " , <nl> - " printedName " : " String . UTF8Index " <nl> + " printedName " : " String . UTF8Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UTF8View . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UnicodeScalarIndex " , <nl> - " printedName " : " String . UnicodeScalarIndex " <nl> + " printedName " : " String . UnicodeScalarIndex " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . UnicodeScalarView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . Index " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " String . Index ? " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Index " , <nl> - " printedName " : " String . Index " <nl> + " printedName " : " String . Index " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Index " , <nl> + " printedName " : " String . CharacterView . 
Index " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " inout AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " inout AnyIndex " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " Getter " , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> ] , <nl> " children " : [ <nl> { <nl> 
- " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Void " , <nl> - " printedName " : " Void " <nl> + " printedName " : " Void " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Void " , <nl> + " printedName " : " ( ) " <nl> + } <nl> + ] <nl> } , <nl> { <nl> " kind " : " TypeNominal " , <nl> <nl> " printedName " : " StaticString " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Int32 " , <nl> - " printedName " : " Int32 " <nl> + " printedName " : " Int32 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " BuiltinInteger " , <nl> + " printedName " : " Builtin . Int32 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " UIntMax " , <nl> - " printedName " : " UIntMax " <nl> + " printedName " : " UIntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " UInt64 " , <nl> + " printedName " : " UInt64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " moduleName " : " Swift " , <nl> " children " : [ <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " IntMax " , <nl> - " printedName " : " IntMax " <nl> + " printedName " : " IntMax " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " Int64 " , <nl> + " printedName " : " Int64 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> <nl> " printedName " : " Bool " <nl> } , <nl> { <nl> - " kind " : " TypeNominal " , <nl> + " kind " : " TypeNameAlias " , <nl> " name " : " Int1 " , <nl> - " printedName " : " Int1 " <nl> + " printedName " : " Int1 " , <nl> + " children " : [ <nl> + { <nl> + " kind " : " TypeNominal " , <nl> + " name " : " BuiltinInteger " , <nl> + " printedName " : " Builtin . Int1 " <nl> + } <nl> + ] <nl> } <nl> ] <nl> } , <nl> mmm a / tools / swift - api - digester / DigesterEnums . def <nl> ppp b / tools / swift - api - digester / DigesterEnums . def <nl> NODE_KIND ( Root ) <nl> NODE_KIND ( TypeDecl ) <nl> NODE_KIND ( TypeNominal ) <nl> NODE_KIND ( TypeFunc ) <nl> + NODE_KIND ( TypeNameAlias ) <nl> NODE_KIND ( Function ) <nl> NODE_KIND ( Constructor ) <nl> NODE_KIND ( Getter ) <nl> mmm a / tools / swift - api - digester / swift - api - digester . cpp <nl> ppp b / tools / swift - api - digester / swift - api - digester . 
cpp <nl> bool SDKNodeType : : classof ( const SDKNode * N ) { <nl> switch ( N - > getKind ( ) ) { <nl> case SDKNodeKind : : TypeNominal : <nl> case SDKNodeKind : : TypeFunc : <nl> + case SDKNodeKind : : TypeNameAlias : <nl> return true ; <nl> default : <nl> return false ; <nl> class SDKNodeTypeFunc : public SDKNodeType { <nl> static bool classof ( const SDKNode * N ) ; <nl> } ; <nl> <nl> + class SDKNodeTypeNameAlias : public SDKNodeType { <nl> + public : <nl> + SDKNodeTypeNameAlias ( SDKNodeInitInfo Info ) : SDKNodeType ( Info , <nl> + SDKNodeKind : : TypeNameAlias ) { } <nl> + static bool classof ( const SDKNode * N ) ; <nl> + } ; <nl> + <nl> template < typename T > const T * <nl> SDKNode : : getAs ( ) const { <nl> if ( T : : classof ( this ) ) <nl> bool SDKNodeDecl : : classof ( const SDKNode * N ) { <nl> case SDKNodeKind : : Root : <nl> case SDKNodeKind : : TypeNominal : <nl> case SDKNodeKind : : TypeFunc : <nl> + case SDKNodeKind : : TypeNameAlias : <nl> return false ; <nl> } <nl> } <nl> bool SDKNode : : operator = = ( const SDKNode & Other ) const { <nl> return false ; <nl> <nl> switch ( getKind ( ) ) { <nl> + case SDKNodeKind : : TypeNameAlias : <nl> case SDKNodeKind : : TypeNominal : <nl> case SDKNodeKind : : TypeFunc : { <nl> auto Left = this - > getAs < SDKNodeType > ( ) ; <nl> case SDKNodeKind : : X : \ <nl> static NodeUniquePtr constructTypeNode ( Type T ) { <nl> NodeUniquePtr Root = SDKNodeInitInfo ( T ) . createSDKNode ( SDKNodeKind : : TypeNominal ) ; <nl> <nl> - if ( isa < NameAliasType > ( T . getPointer ( ) ) ) <nl> + if ( auto NAT = dyn_cast < NameAliasType > ( T . getPointer ( ) ) ) { <nl> + NodeUniquePtr Root = SDKNodeInitInfo ( T ) . createSDKNode ( SDKNodeKind : : TypeNameAlias ) ; <nl> + Root - > addChild ( constructTypeNode ( NAT - > getCanonicalType ( ) ) ) ; <nl> return Root ; <nl> + } <nl> <nl> if ( auto Fun = T - > getAs < AnyFunctionType > ( ) ) { <nl> NodeUniquePtr Root = SDKNodeInitInfo ( T ) . createSDKNode ( SDKNodeKind : : TypeFunc ) ; <nl> class PrunePass : public MatchedNodeListener , public SDKTreeDiffPass { <nl> case SDKNodeKind : : Constructor : <nl> case SDKNodeKind : : TypeAlias : <nl> case SDKNodeKind : : TypeFunc : <nl> - case SDKNodeKind : : TypeNominal : { <nl> + case SDKNodeKind : : TypeNominal : <nl> + case SDKNodeKind : : TypeNameAlias : { <nl> / / If matched nodes are both function / var / TypeAlias decls , mapping their <nl> / / parameters sequentially . <nl> SequentialNodeMatcher ( Left - > getChildren ( ) , <nl> | swift - api - digester : consider name alias type as a standalone node kind . ( ) | apple/swift | 368247604fc9e0ed11413b35e0e2747a7e7d68ff | 2016-11-10T00:44:27Z |
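The hunks above give name-alias sugar its own node kind: instead of a bare TypeNominal carrying only the alias spelling, the digester now emits a TypeNameAlias node whose single child is the canonical type, which is exactly the shape repeated through the JSON hunks (IntMax wrapping Int64, Void wrapping ( ), and so on). A minimal C++ sketch of that wrapping pattern, using a hypothetical TypeNode stand-in rather than the digester's real SDKNode classes:

    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    // Illustrative stand-in for a serialized type node; not the real SDKNode.
    struct TypeNode {
      std::string kind;
      std::string printedName;
      std::vector<std::unique_ptr<TypeNode>> children;
    };

    // A sugared alias keeps its own printed name and records the canonical
    // type as a child, so a comparison tool sees both spellings.
    std::unique_ptr<TypeNode> makeAliasNode(std::string printedName,
                                            std::unique_ptr<TypeNode> canonical) {
      auto node = std::make_unique<TypeNode>();
      node->kind = "TypeNameAlias";
      node->printedName = std::move(printedName);     // e.g. "IntMax"
      node->children.push_back(std::move(canonical)); // e.g. an "Int64" node
      return node;
    }

Keeping the canonical type as a child, rather than replacing the alias with it, is what lets the matching pass in the same commit treat TypeNameAlias like any other type node and map its children sequentially.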
mmm a / script / create - dist . py <nl> ppp b / script / create - dist . py <nl> <nl> if sys . platform = = " win32 " : <nl> import _winreg <nl> <nl> - from lib . config import BASE_URL , PLATFORM , get_target_arch , get_zip_name <nl> + from lib . config import BASE_URL , PLATFORM , enable_verbose_mode , \ <nl> + get_target_arch , get_zip_name <nl> from lib . util import scoped_cwd , rm_rf , get_electron_version , make_zip , \ <nl> execute , electron_gyp <nl> <nl> <nl> <nl> <nl> def main ( ) : <nl> + args = parse_args ( ) <nl> + <nl> + if args . verbose : <nl> + enable_verbose_mode ( ) <nl> + <nl> rm_rf ( DIST_DIR ) <nl> os . makedirs ( DIST_DIR ) <nl> <nl> def main ( ) : <nl> copy_vcruntime_binaries ( ) <nl> copy_ucrt_binaries ( ) <nl> <nl> - args = parse_args ( ) <nl> - <nl> if PLATFORM ! = ' win32 ' and not args . no_api_docs : <nl> create_api_json_schema ( ) <nl> create_typescript_definitions ( ) <nl> def parse_args ( ) : <nl> parser . add_argument ( ' - - no_api_docs ' , <nl> action = ' store_true ' , <nl> help = ' Skip generating the Electron API Documentation ! ' ) <nl> + parser . add_argument ( ' - v ' , ' - - verbose ' , <nl> + action = ' store_true ' , <nl> + help = ' Prints the output of the subprocesses ' ) <nl> return parser . parse_args ( ) <nl> <nl> <nl> | Enable verbose mode | electron/electron | 2138c5bff086592ba22a3b05c13a29a71d5bfd8b | 2017-11-16T21:05:02Z |
mmm a / table / format . h <nl> ppp b / table / format . h <nl> class Footer { <nl> kEncodedLength = 2 * BlockHandle : : kMaxEncodedLength + 8 <nl> } ; <nl> <nl> - const uint64_t kInvalidTableMagicNumber = 0 ; <nl> + static const uint64_t kInvalidTableMagicNumber = 0 ; <nl> <nl> private : <nl> / / Set the table_magic_number only when it was not previously <nl> | Fix a member variable initialization order issue | facebook/rocksdb | aa734ce9abcab3ff6a7e54c3bab28f337e03078f | 2014-02-11T22:16:46Z |
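The one-word fix above hinges on C++ member semantics: a non-static const data member is stored in every object and participates in member-initialization order, while a static const integral member is a single class-level constant with no per-object initialization at all. A hedged sketch of the difference, with an illustrative class rather than the actual RocksDB Footer:

    #include <cstdint>

    class PerInstance {
      const uint64_t kMagic = 0;   // a copy in every object, initialized in
                                   // declaration order with the other members
      uint64_t value_;
     public:
      PerInstance() : value_(kMagic) {}  // order-sensitive: reads kMagic
    };

    class ClassLevel {
     public:
      static const uint64_t kMagic = 0;  // one constant for the whole class;
                                         // removed from the init-order problem
     private:
      uint64_t value_ = kMagic;
    };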
mmm a / Documentation / Books / Users / ReleaseNotes / UpgradingChanges30 . mdpp <nl> ppp b / Documentation / Books / Users / ReleaseNotes / UpgradingChanges30 . mdpp <nl> ids will now make the edge ids unique before returning the connected edges . This is <nl> desired anyway , as results will be returned only once per distinct input edge id . However , <nl> it may break client applications that rely on the old behavior . <nl> <nl> + ! SUBSECTION Databases API <nl> + <nl> + ` _listDatabases ( ) ` has been renamed to ` _databases ( ) ` ( making it consistent with ` _collections ( ) ` ) <nl> + <nl> ! SUBSECTION Collection API <nl> <nl> ! SUBSUBSECTION Example matching <nl> | Add note about databases API | arangodb/arangodb | 3b67b1be3eb04f712615f502d4289e779727d790 | 2016-05-11T15:55:54Z |
mmm a / src / jump - target . cc <nl> ppp b / src / jump - target . cc <nl> void JumpTarget : : ComputeEntryFrame ( int mergable_elements ) { <nl> entry_frame_ = new VirtualFrame ( cgen_ ) ; <nl> int index = 0 ; <nl> for ( ; index < entry_frame_ - > elements_ . length ( ) ; index + + ) { <nl> - / / If the element is determined , set it now and count registers . <nl> - / / Undetermined elements are initially recorded as if in memory . <nl> + / / If the element is determined , set it now . Count registers . Mark <nl> + / / elements as copied exactly when they have a copy . Undetermined <nl> + / / elements are initially recorded as if in memory . <nl> if ( elements [ index ] ! = NULL ) { <nl> entry_frame_ - > elements_ [ index ] = * elements [ index ] ; <nl> + entry_frame_ - > elements_ [ index ] . clear_copied ( ) ; <nl> if ( elements [ index ] - > is_register ( ) ) { <nl> entry_frame_ - > register_locations_ [ elements [ index ] - > reg ( ) . code ( ) ] = <nl> index ; <nl> + } else if ( elements [ index ] - > is_copy ( ) ) { <nl> + entry_frame_ - > elements_ [ elements [ index ] - > index ( ) ] . set_copied ( ) ; <nl> } <nl> } <nl> } <nl> void JumpTarget : : ComputeEntryFrame ( int mergable_elements ) { <nl> entry_frame_ - > elements_ . Add ( FrameElement : : MemoryElement ( ) ) ; <nl> } else { <nl> entry_frame_ - > elements_ . Add ( * elements [ index ] ) ; <nl> + entry_frame_ - > elements_ [ index ] . clear_copied ( ) ; <nl> if ( elements [ index ] - > is_register ( ) ) { <nl> entry_frame_ - > register_locations_ [ elements [ index ] - > reg ( ) . code ( ) ] = <nl> index ; <nl> + } else if ( elements [ index ] - > is_copy ( ) ) { <nl> + entry_frame_ - > elements_ [ elements [ index ] - > index ( ) ] . set_copied ( ) ; <nl> } <nl> } <nl> } <nl> void JumpTarget : : ComputeEntryFrame ( int mergable_elements ) { <nl> } <nl> } <nl> <nl> - / / If there was a register choice , use it . If not do nothing <nl> - / / ( the element is already recorded as in memory ) <nl> if ( best_reg_code ! = no_reg . code_ ) { <nl> + / / If there was a register choice , use it . Preserve the copied <nl> + / / flag on the element . <nl> + bool is_copied = entry_frame_ - > elements_ [ i ] . is_copied ( ) ; <nl> Register reg = { best_reg_code } ; <nl> entry_frame_ - > elements_ [ i ] = <nl> FrameElement : : RegisterElement ( reg , <nl> FrameElement : : NOT_SYNCED ) ; <nl> + if ( is_copied ) entry_frame_ - > elements_ [ i ] . set_copied ( ) ; <nl> entry_frame_ - > register_locations_ [ best_reg_code ] = i ; <nl> } <nl> + / / If there was no register found , the element is already <nl> + / / recorded as in memory . <nl> } <nl> } <nl> <nl> - / / Set the copied flags in the frame to be exact . This assumes that <nl> - / / the backing store of copies is always lower in the frame . <nl> + / / Set the static type of frame elements . <nl> for ( int i = 0 ; i < length ; i + + ) { <nl> FrameElement * current = & entry_frame_ - > elements_ [ i ] ; <nl> - current - > clear_copied ( ) ; <nl> - if ( current - > is_copy ( ) ) { <nl> - entry_frame_ - > elements_ [ current - > index ( ) ] . set_copied ( ) ; <nl> - } <nl> - <nl> if ( direction_ = = BIDIRECTIONAL & & i > = high_water_mark ) { <nl> current - > set_static_type ( StaticType : : unknown ( ) ) ; <nl> } else { <nl> | Modify JumpTarget : : ComputeEntryFrame to mark copied elements | v8/v8 | fc6834c5ea8e216d0b2d8dd5741e6be2d6bbe362 | 2009-05-11T15:02:46Z |
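The subtle part of the hunk above is flag preservation: when ComputeEntryFrame promotes a slot to a register element, the fresh element would otherwise lose the copied bit that earlier bookkeeping set on it. A simplified C++ sketch of the pattern, with stand-in types rather than V8's real FrameElement:

    // Stand-in for a virtual-frame slot; reg == -1 means "lives in memory".
    struct Element {
      int reg = -1;
      bool copied = false;  // set when some later slot is a copy of this one
    };

    // Overwriting a slot with a register element must carry the copied flag
    // across, or copies elsewhere in the frame would reference a slot that
    // no longer admits being their backing store.
    void assignRegister(Element& slot, int bestReg) {
      bool wasCopied = slot.copied;  // save before overwriting
      slot = Element{};              // fresh register element
      slot.reg = bestReg;
      slot.copied = wasCopied;       // reapply the preserved flag
    }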
mmm a / modules / ocl / src / hog . cpp <nl> ppp b / modules / ocl / src / hog . cpp <nl> void cv : : ocl : : device : : hog : : compute_hists ( int nbins , <nl> / block_stride_x ; <nl> int img_block_height = ( height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y ) <nl> / block_stride_y ; <nl> + int blocks_total = img_block_width * img_block_height ; <nl> <nl> int grad_quadstep = grad . step > > 2 ; <nl> int qangle_step = qangle . step ; <nl> void cv : : ocl : : device : : hog : : compute_hists ( int nbins , <nl> <nl> int hists_size = ( nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * 12 ) * sizeof ( float ) ; <nl> int final_hists_size = ( nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y ) * sizeof ( float ) ; <nl> - int smem = hists_size + final_hists_size ; <nl> <nl> - args . push_back ( std : : make_pair ( sizeof ( cl_int ) , ( void * ) & width ) ) ; <nl> + int smem = ( hists_size + final_hists_size ) * blocks_in_group ; <nl> + <nl> args . push_back ( std : : make_pair ( sizeof ( cl_int ) , ( void * ) & cblock_stride_x ) ) ; <nl> args . push_back ( std : : make_pair ( sizeof ( cl_int ) , ( void * ) & cblock_stride_y ) ) ; <nl> args . push_back ( std : : make_pair ( sizeof ( cl_int ) , ( void * ) & cnbins ) ) ; <nl> args . push_back ( std : : make_pair ( sizeof ( cl_int ) , ( void * ) & cblock_hist_size ) ) ; <nl> args . push_back ( std : : make_pair ( sizeof ( cl_int ) , ( void * ) & img_block_width ) ) ; <nl> + args . push_back ( std : : make_pair ( sizeof ( cl_int ) , ( void * ) & blocks_in_group ) ) ; <nl> + args . push_back ( std : : make_pair ( sizeof ( cl_int ) , ( void * ) & blocks_total ) ) ; <nl> args . push_back ( std : : make_pair ( sizeof ( cl_int ) , ( void * ) & grad_quadstep ) ) ; <nl> args . push_back ( std : : make_pair ( sizeof ( cl_int ) , ( void * ) & qangle_step ) ) ; <nl> args . push_back ( std : : make_pair ( sizeof ( cl_mem ) , ( void * ) & grad . data ) ) ; <nl> void cv : : ocl : : device : : hog : : normalize_hists ( int nbins , <nl> String kernelName ; <nl> <nl> int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y ; <nl> - int nthreads = power_2up ( block_hist_size ) ; <nl> - <nl> - int img_block_width = ( width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x ) / block_stride_x ; <nl> - int img_block_height = ( height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y ) / block_stride_y ; <nl> - size_t globalThreads [ 3 ] = { img_block_width * nthreads , img_block_height , 1 } ; <nl> - size_t localThreads [ 3 ] = { nthreads , 1 , 1 } ; <nl> + int img_block_width = ( width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x ) <nl> + / block_stride_x ; <nl> + int img_block_height = ( height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y ) <nl> + / block_stride_y ; <nl> + int nthreads ; <nl> + size_t globalThreads [ 3 ] = { 1 , 1 , 1 } ; <nl> + size_t localThreads [ 3 ] = { 1 , 1 , 1 } ; <nl> <nl> if ( nbins = = 9 ) <nl> { <nl> mmm a / modules / ocl / src / moments . cpp <nl> ppp b / modules / ocl / src / moments . cpp <nl> <nl> # include " precomp . hpp " <nl> # include < iostream > <nl> <nl> + # include " opencv2 / imgproc / types_c . h " <nl> + # include " opencv2 / imgproc / imgproc_c . 
h " <nl> <nl> namespace cv <nl> { <nl> namespace ocl <nl> { <nl> extern const char * moments ; <nl> <nl> - # if 0 <nl> / / The function calculates center of gravity and the central second order moments <nl> static void icvCompleteMomentState ( CvMoments * moments ) <nl> { <nl> static void ocl_cvMoments ( const void * array , CvMoments * mom , int binary ) <nl> openCLExecuteKernel ( Context : : getContext ( ) , & moments , " dst_sum " , globalThreadss , localThreadss , args_sum , - 1 , - 1 ) ; <nl> <nl> Mat dstsum ( sum ) ; <nl> - mom - > m00 = dstsum [ 0 ] ; <nl> - mom - > m10 = dstsum [ 1 ] ; <nl> - mom - > m01 = dstsum [ 2 ] ; <nl> - mom - > m20 = dstsum [ 3 ] ; <nl> - mom - > m11 = dstsum [ 4 ] ; <nl> - mom - > m02 = dstsum [ 5 ] ; <nl> - mom - > m30 = dstsum [ 6 ] ; <nl> - mom - > m21 = dstsum [ 7 ] ; <nl> - mom - > m12 = dstsum [ 8 ] ; <nl> - mom - > m03 = dstsum [ 9 ] ; <nl> + mom - > m00 = dstsum . at < double > ( 0 , 0 ) ; <nl> + mom - > m10 = dstsum . at < double > ( 0 , 1 ) ; <nl> + mom - > m01 = dstsum . at < double > ( 0 , 2 ) ; <nl> + mom - > m20 = dstsum . at < double > ( 0 , 3 ) ; <nl> + mom - > m11 = dstsum . at < double > ( 0 , 4 ) ; <nl> + mom - > m02 = dstsum . at < double > ( 0 , 5 ) ; <nl> + mom - > m30 = dstsum . at < double > ( 0 , 6 ) ; <nl> + mom - > m21 = dstsum . at < double > ( 0 , 7 ) ; <nl> + mom - > m12 = dstsum . at < double > ( 0 , 8 ) ; <nl> + mom - > m03 = dstsum . at < double > ( 0 , 9 ) ; <nl> <nl> icvCompleteMomentState ( mom ) ; <nl> } <nl> <nl> - # endif <nl> <nl> Moments ocl_moments ( InputArray _array , bool binaryImage ) <nl> { <nl> - # if 0 <nl> CvMoments om ; <nl> Mat arr = _array . getMat ( ) ; <nl> CvMat c_array = arr ; <nl> ocl_cvMoments ( & c_array , & om , binaryImage ) ; <nl> return om ; <nl> - # endif <nl> - CV_Error ( Error : : StsNotImplemented , " ocl_moments is not implemented " ) ; <nl> - ( void ) _array ; <nl> - ( void ) binaryImage ; <nl> - return Moments ( ) ; <nl> } <nl> <nl> } <nl> mmm a / modules / ocl / test / main . cpp <nl> ppp b / modules / ocl / test / main . cpp <nl> <nl> <nl> # ifdef HAVE_OPENCL <nl> <nl> - using namespace std ; <nl> using namespace cv ; <nl> using namespace cv : : ocl ; <nl> using namespace cvtest ; <nl> using namespace testing ; <nl> + using std : : cout ; <nl> + using std : : endl ; <nl> <nl> void print_info ( ) <nl> { <nl> int main ( int argc , char * * argv ) <nl> CommandLineParser cmd ( argc , argv , keys ) ; <nl> if ( cmd . get < string > ( " h " ) = = " true " ) <nl> { <nl> - cout < < " Avaible options besides goole test option : " < < endl ; <nl> + cout < < " Available options besides google test options : " < < endl ; <nl> cmd . printMessage ( ) ; <nl> return 0 ; <nl> } <nl> - string type = cmd . get < string > ( " t " ) ; <nl> + string type = cmd . get < String > ( " t " ) ; <nl> unsigned int pid = cmd . get < unsigned int > ( " p " ) ; <nl> int device = cmd . get < int > ( " d " ) ; <nl> <nl> mmm a / modules / ocl / test / test_moments . cpp <nl> ppp b / modules / ocl / test / test_moments . 
cpp <nl> using namespace cv ; <nl> using namespace cv : : ocl ; <nl> using namespace cvtest ; <nl> using namespace testing ; <nl> - using namespace std ; <nl> - extern string workdir ; <nl> + <nl> PARAM_TEST_CASE ( MomentsTest , MatType , bool ) <nl> { <nl> int type ; <nl> PARAM_TEST_CASE ( MomentsTest , MatType , bool ) <nl> } ; <nl> <nl> <nl> - TEST_P ( MomentsTest , Mat ) <nl> + TEST_P ( MomentsTest , DISABLED_Mat ) <nl> { <nl> bool binaryImage = 0 ; <nl> SetUp ( ) ; <nl> TEST_P ( MomentsTest , Mat ) <nl> cv : : Moments oclMom = cv : : ocl : : ocl_moments ( _array , binaryImage ) ; <nl> <nl> Compare ( CvMom , oclMom ) ; <nl> - <nl> } <nl> } <nl> INSTANTIATE_TEST_CASE_P ( OCL_ImgProc , MomentsTest , Combine ( <nl> Values ( CV_8UC1 , CV_16UC1 , CV_16SC1 , CV_64FC1 ) , Values ( true , false ) ) ) ; <nl> + <nl> # endif / / HAVE_OPENCL <nl> mmm a / modules / ocl / test / test_objdetect . cpp <nl> ppp b / modules / ocl / test / test_objdetect . cpp <nl> <nl> # include " test_precomp . hpp " <nl> # include " opencv2 / objdetect . hpp " <nl> <nl> - using namespace std ; <nl> using namespace cv ; <nl> using namespace testing ; <nl> <nl> # ifdef HAVE_OPENCL <nl> <nl> - extern string workdir ; <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / HOG / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> PARAM_TEST_CASE ( HOG , Size , int ) <nl> { <nl> PARAM_TEST_CASE ( Haar , int , CascadeName ) <nl> <nl> int flags ; <nl> std : : string cascadeName ; <nl> - vector < Rect > faces , oclfaces ; <nl> + std : : vector < Rect > faces , oclfaces ; <nl> Mat img ; <nl> ocl : : oclMat d_img ; <nl> <nl> virtual void SetUp ( ) <nl> { <nl> flags = GET_PARAM ( 0 ) ; <nl> - cascadeName = ( string ( cvtest : : TS : : ptr ( ) - > get_data_path ( ) ) + " cv / cascadeandhog / cascades / " ) . append ( GET_PARAM ( 1 ) ) ; <nl> + cascadeName = ( std : : string ( cvtest : : TS : : ptr ( ) - > get_data_path ( ) ) + " cv / cascadeandhog / cascades / " ) . append ( GET_PARAM ( 1 ) ) ; <nl> ASSERT_TRUE ( cascade . load ( cascadeName ) ) ; <nl> ASSERT_TRUE ( cpucascade . load ( cascadeName ) ) ; <nl> img = readImage ( " cv / shared / lena . png " , IMREAD_GRAYSCALE ) ; <nl> mmm a / modules / ocl / test / test_optflow . cpp <nl> ppp b / modules / ocl / test / test_optflow . cpp <nl> using namespace cv ; <nl> using namespace cv : : ocl ; <nl> using namespace cvtest ; <nl> using namespace testing ; <nl> - using namespace std ; <nl> - <nl> - extern string workdir ; <nl> - <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / GoodFeaturesToTrack <nl> PARAM_TEST_CASE ( TVL1 , bool ) <nl> <nl> } ; <nl> <nl> - TEST_P ( TVL1 , Accuracy ) <nl> + TEST_P ( TVL1 , DISABLED_Accuracy ) / / TODO implementations of TV1 in video module are different in 2 . 4 and master branches <nl> { <nl> cv : : Mat frame0 = readImage ( " gpu / opticalflow / rubberwhale1 . png " , cv : : IMREAD_GRAYSCALE ) ; <nl> ASSERT_FALSE ( frame0 . empty ( ) ) ; <nl> | OCL : tests : repair / disable failed tests in ocl module | opencv/opencv | b7b584984ec6ce34f326ec28d8f89e310cbed3c7 | 2013-08-22T13:06:47Z |
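Among the fixes above, the moments code stops indexing the result Mat like a plain array and reads each sum through the typed accessor at<double>(row, col). A minimal illustration of that accessor, assuming an OpenCV build is available (the header name may vary by version):

#include <opencv2/core.hpp>
#include <cassert>

int main() {
  // A 1x3 row of doubles, standing in for the reduced moment sums.
  cv::Mat sums = (cv::Mat_<double>(1, 3) << 1.0, 2.0, 3.0);
  double m00 = sums.at<double>(0, 0);  // row 0, column 0
  double m10 = sums.at<double>(0, 1);
  assert(m00 == 1.0 && m10 == 2.0);
  return 0;
}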
mmm a / samples / cpp / watershed . cpp <nl> ppp b / samples / cpp / watershed . cpp <nl> <nl> using namespace cv ; <nl> using namespace std ; <nl> <nl> + void help ( ) <nl> + { <nl> + cout < < " \ nThis program demonstrates the famous watershed segmentation algorithm in OpenCV : watershed ( ) \ n " <nl> + " Usage : \ n " <nl> + " . / watershed [ image_name - - default is fruits . jpg ] \ n " < < endl ; <nl> + <nl> + <nl> + cout < < " Hot keys : \ n " <nl> + " \ tESC - quit the program \ n " <nl> + " \ tr - restore the original image \ n " <nl> + " \ tw or SPACE - run watershed segmentation algorithm \ n " <nl> + " \ t \ t ( before running it , * roughly * mark the areas to segment on the image ) \ n " <nl> + " \ t ( before that , roughly outline several markers on the image ) \ n " ; <nl> + } <nl> Mat markerMask , img ; <nl> Point prevPt ( - 1 , - 1 ) ; <nl> <nl> void onMouse ( int event , int x , int y , int flags , void * ) <nl> } <nl> } <nl> <nl> - <nl> int main ( int argc , char * * argv ) <nl> { <nl> char * filename = argc > = 2 ? argv [ 1 ] : ( char * ) " fruits . jpg " ; <nl> int main ( int argc , char * * argv ) <nl> <nl> if ( img0 . empty ( ) ) <nl> { <nl> - cout < < " Usage : watershed < image_name > \ n " ; <nl> + cout < < " Couldn ' t open image " < < filename < < " . Usage : watershed < image_name > \ n " ; <nl> return 0 ; <nl> } <nl> - <nl> - cout < < " Hot keys : \ n " <nl> - " \ tESC - quit the program \ n " <nl> - " \ tr - restore the original image \ n " <nl> - " \ tw or SPACE - run watershed algorithm \ n " <nl> - " \ t \ t ( before running it , roughly mark the areas on the image ) \ n " <nl> - " \ t ( before that , roughly outline several markers on the image ) \ n " ; <nl> - <nl> + help ( ) ; <nl> namedWindow ( " image " , 1 ) ; <nl> <nl> img0 . copyTo ( img ) ; <nl> | revamped | opencv/opencv | 85c27e5a69c0782faca79412b9bf4156be765b7a | 2010-12-04T08:31:05Z
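The revamp mainly extracts the usage text into a help() function that is printed at startup and reused verbatim on the failure path. The skeleton of that pattern, with the sample's text shortened and a placeholder standing in for the image check:

#include <iostream>

// All usage text lives in one place and is reused wherever it is needed.
static void help() {
  std::cout << "Usage: ./watershed [image_name -- default is fruits.jpg]\n";
}

int main(int argc, char** argv) {
  const char* filename = argc >= 2 ? argv[1] : "fruits.jpg";
  bool opened = false;  // stands in for the !img0.empty() check
  if (!opened) {
    std::cout << "Couldn't open image " << filename << ". ";
    help();  // same text on the failure path
    return 0;
  }
  help();  // and unconditionally at startup
  return 0;
}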
mmm a / lib / AST / Verifier . cpp <nl> ppp b / lib / AST / Verifier . cpp <nl> namespace { <nl> [ & ] { D - > print ( Out ) ; } ) ; <nl> } <nl> <nl> - void checkSourceRanges ( FuncDecl * FD ) { <nl> - for ( auto P : FD - > getArgParamPatterns ( ) ) { <nl> - if ( ! P - > isImplicit ( ) & & ! isGoodSourceRange ( P - > getSourceRange ( ) ) ) { <nl> - Out < < " bad source range for arg param pattern : " ; <nl> - P - > print ( Out ) ; <nl> - Out < < " \ n " ; <nl> - abort ( ) ; <nl> - } <nl> - } <nl> - checkSourceRanges ( cast < Decl > ( FD ) ) ; <nl> - } <nl> - <nl> / / / \ brief Verify that the given source ranges is contained within the <nl> / / / parent ' s source range . <nl> void checkSourceRanges ( SourceRange Current , <nl> | AST Verifier : remove a redundant check ( now we verify all Patterns ) | apple/swift | f46dd5e91cc2668114c8440f5f18fa91e60ca5a3 | 2013-09-25T23:41:08Z |
mmm a / jstests / sharding / conf_server_write_concern_data . js <nl> ppp b / jstests / sharding / conf_server_write_concern_data . js <nl> <nl> var st = new ShardingTest ( { shards : 2 } ) ; <nl> var confDB = st . s . getDB ( ' config ' ) ; <nl> var confDBOnConfigPrimary = st . configRS . getPrimary ( ) . getDB ( ' config ' ) ; <nl> + var localDBOnConfigPrimary = st . configRS . getPrimary ( ) . getDB ( ' local ' ) ; <nl> <nl> - / / w : majority should work both through mongos and directly against the config server <nl> + / / w : majority should work both through mongos and directly against the config server for both <nl> + / / local and non - local database <nl> assert . writeOK ( confDB . settings . update ( <nl> { _id : ' balancer ' } , { $ set : { stopped : true } } , { writeConcern : { w : ' majority ' } } ) ) ; <nl> assert . writeOK ( confDBOnConfigPrimary . settings . update ( <nl> { _id : ' balancer ' } , { $ set : { stopped : true } } , { writeConcern : { w : ' majority ' } } ) ) ; <nl> + assert . writeOK ( localDBOnConfigPrimary . TestDB . update ( <nl> + { _id : ' TestColl ' } , { $ set : { TestField : ' TestValue ' } } , { writeConcern : { w : ' majority ' } } ) ) ; <nl> <nl> - / / w : 1 should never work when called directly against the config server <nl> + / / w : 1 should not work , unless it is for the local database <nl> assert . writeError ( confDBOnConfigPrimary . settings . update ( <nl> { _id : ' balancer ' } , { $ set : { stopped : true } } , { writeConcern : { w : 1 } } ) ) ; <nl> + assert . writeOK ( localDBOnConfigPrimary . TestDB . update ( <nl> + { _id : ' TestColl ' } , { $ set : { TestField : ' TestValue ' } } , { writeConcern : { w : 1 } } ) ) ; <nl> <nl> / / Write concerns other than w : 1 and w : majority should fail . <nl> assert . writeError ( <nl> mmm a / src / mongo / db / commands / get_last_error . cpp <nl> ppp b / src / mongo / db / commands / get_last_error . cpp <nl> class CmdGetLastError : public Command { <nl> / / Validate write concern no matter what , this matches 2 . 4 behavior <nl> / / <nl> if ( status . isOK ( ) ) { <nl> - / / Ensure options are valid for this host <nl> - status = validateWriteConcern ( txn , writeConcern ) ; <nl> + / / Ensure options are valid for this host . Since getLastError doesn ' t do writes itself , <nl> + / / treat it as if these are admin database writes , which need to be replicated so we do <nl> + / / the strictest checks write concern checks . <nl> + status = validateWriteConcern ( txn , writeConcern , NamespaceString : : kAdminDb ) ; <nl> } <nl> <nl> if ( ! status . isOK ( ) ) { <nl> mmm a / src / mongo / db / namespace_string . cpp <nl> ppp b / src / mongo / db / namespace_string . cpp <nl> bool legalClientSystemNS ( StringData ns , bool write ) { <nl> return false ; <nl> } <nl> <nl> + const StringData NamespaceString : : kAdminDb = " admin " _sd ; <nl> + const StringData NamespaceString : : kLocalDb = " local " _sd ; <nl> + <nl> const NamespaceString NamespaceString : : kConfigCollectionNamespace ( kConfigCollection ) ; <nl> <nl> bool NamespaceString : : isListCollectionsCursorNS ( ) const { <nl> mmm a / src / mongo / db / namespace_string . h <nl> ppp b / src / mongo / db / namespace_string . 
h <nl> class NamespaceString { <nl> public : <nl> / / Reserved system namespaces <nl> <nl> + / / Namespace for the admin database <nl> + static const StringData kAdminDb ; <nl> + <nl> + / / Namespace for the local database <nl> + static const StringData kLocalDb ; <nl> + <nl> / / Namespace for storing configuration data , which needs to be replicated if the server is <nl> / / running as a replica set . Documents in this collection should represent some configuration <nl> / / state of the server , which needs to be recovered / consulted at startup . Each document in this <nl> mmm a / src / mongo / db / write_concern . cpp <nl> ppp b / src / mongo / db / write_concern . cpp <nl> StatusWith < WriteConcernOptions > extractWriteConcern ( OperationContext * txn , <nl> } <nl> } else if ( supportsWriteConcern ) { <nl> / / If it supports writeConcern and does not use the default , validate the writeConcern . <nl> - Status wcStatus = validateWriteConcern ( txn , writeConcern ) ; <nl> + Status wcStatus = validateWriteConcern ( txn , writeConcern , dbName ) ; <nl> if ( ! wcStatus . isOK ( ) ) { <nl> return wcStatus ; <nl> } <nl> StatusWith < WriteConcernOptions > extractWriteConcern ( OperationContext * txn , <nl> return writeConcern ; <nl> } <nl> <nl> - Status validateWriteConcern ( OperationContext * txn , const WriteConcernOptions & writeConcern ) { <nl> + Status validateWriteConcern ( OperationContext * txn , <nl> + const WriteConcernOptions & writeConcern , <nl> + StringData dbName ) { <nl> if ( writeConcern . syncMode = = WriteConcernOptions : : SyncMode : : JOURNAL & & <nl> ! txn - > getServiceContext ( ) - > getGlobalStorageEngine ( ) - > isDurable ( ) ) { <nl> return Status ( ErrorCodes : : BadValue , <nl> Status validateWriteConcern ( OperationContext * txn , const WriteConcernOptions & wr <nl> / / logic ) should never be making non - majority writes against the config server , because sharding <nl> / / is not resilient against rollbacks of metadata writes . <nl> if ( serverGlobalParams . clusterRole = = ClusterRole : : ConfigServer & & <nl> - ! writeConcern . validForConfigServers ( ) ) { <nl> + dbName ! = NamespaceString : : kLocalDb & & ! writeConcern . validForConfigServers ( ) ) { <nl> / / The only cases where we allow non - majority writes are from within the config servers <nl> / / themselves , because these wait for write concern explicitly . <nl> if ( ! txn - > getClient ( ) - > isInDirectClient ( ) ) { <nl> Status waitForWriteConcern ( OperationContext * txn , <nl> const OpTime & replOpTime , <nl> const WriteConcernOptions & writeConcern , <nl> WriteConcernResult * result ) { <nl> - / / We assume all options have been validated earlier , if not , programming error <nl> - dassertOK ( validateWriteConcern ( txn , writeConcern ) ) ; <nl> - <nl> auto replCoord = repl : : ReplicationCoordinator : : get ( txn ) ; <nl> <nl> / / Next handle blocking on disk <nl> mmm a / src / mongo / db / write_concern . h <nl> ppp b / src / mongo / db / write_concern . h <nl> StatusWith < WriteConcernOptions > extractWriteConcern ( OperationContext * txn , <nl> const bool supportsWriteConcern ) ; <nl> <nl> / * * <nl> - * Verifies that a WriteConcern is valid for this particular host . <nl> + * Verifies that a WriteConcern is valid for this particular host and database . 
<nl> * / <nl> - Status validateWriteConcern ( OperationContext * txn , const WriteConcernOptions & writeConcern ) ; <nl> + Status validateWriteConcern ( OperationContext * txn , <nl> + const WriteConcernOptions & writeConcern , <nl> + StringData dbName ) ; <nl> <nl> struct WriteConcernResult { <nl> WriteConcernResult ( ) { <nl> | SERVER - 25204 Allow writeConcern w : 1 for ' local ' on CSRS config servers | mongodb/mongo | 8855c03bdf307ef74825e0274344b1ce8df0852b | 2016-07-25T19:16:17Z |
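The rule this commit introduces reduces to: on a config server, a write must use w:majority unless it targets the 'local' database. A dependency-free sketch of that predicate — the names are illustrative, not MongoDB's actual types:

#include <cassert>
#include <string>

// Simplified model of the validation rule: CSRS config servers reject
// non-majority write concerns except for the 'local' database.
bool writeConcernAllowed(bool isConfigServer, bool isMajority,
                         const std::string& dbName) {
  if (!isConfigServer) return true;
  return isMajority || dbName == "local";
}

int main() {
  assert(writeConcernAllowed(true, false, "local"));   // w:1 on local: ok
  assert(!writeConcernAllowed(true, false, "config")); // w:1 elsewhere: no
  assert(writeConcernAllowed(true, true, "config"));   // majority: ok
  return 0;
}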
mmm a / emcc <nl> ppp b / emcc <nl> try : <nl> prev = newargs [ i - 1 ] <nl> if prev in [ ' - MT ' , ' - install_name ' , ' - I ' , ' - L ' ] : continue # ignore this gcc - style argument <nl> <nl> - if not arg . startswith ( ' - ' ) and ( arg . endswith ( SOURCE_SUFFIXES + BITCODE_SUFFIXES + DYNAMICLIB_SUFFIXES + ASSEMBLY_SUFFIXES ) or shared . Building . is_ar ( arg ) ) : # we already removed - o < target > , so all these should be inputs <nl> + if not arg . startswith ( ' - ' ) and ( arg . endswith ( SOURCE_SUFFIXES + BITCODE_SUFFIXES + DYNAMICLIB_SUFFIXES + ASSEMBLY_SUFFIXES ) or shared . Building . is_ar ( arg ) or ( os . path . islink ( arg ) and os . path . realpath ( arg ) . endswith ( SOURCE_SUFFIXES + BITCODE_SUFFIXES + DYNAMICLIB_SUFFIXES + ASSEMBLY_SUFFIXES ) ) ) : # we already removed - o < target > , so all these should be inputs <nl> newargs [ i ] = ' ' <nl> + <nl> + if ( os . path . islink ( arg ) and os . path . realpath ( arg ) . endswith ( SOURCE_SUFFIXES + BITCODE_SUFFIXES + DYNAMICLIB_SUFFIXES + ASSEMBLY_SUFFIXES ) ) : <nl> + arg = os . path . realpath ( arg ) <nl> + <nl> if os . path . exists ( arg ) : <nl> if arg . endswith ( SOURCE_SUFFIXES ) : <nl> input_files . append ( arg ) <nl> | * Added symlink support . | emscripten-core/emscripten | ff5cad0ba476f74ee4743fb91bf4e256dfccde04 | 2013-04-27T08:49:40Z |
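emcc itself is Python (os.path.islink/os.path.realpath); the same resolve-then-check idea in C++17 std::filesystem, with an illustrative suffix list rather than emcc's real one:

#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Resolve a symlink to its target before testing the suffix, so a link
// pointing at "foo.bc" is still recognized as an input file.
bool looksLikeInput(fs::path p) {
  if (fs::is_symlink(p)) p = fs::weakly_canonical(p);
  const std::string ext = p.extension().string();
  return ext == ".c" || ext == ".cpp" || ext == ".bc" || ext == ".so";
}

int main(int argc, char** argv) {
  for (int i = 1; i < argc; ++i)
    std::cout << argv[i] << " -> " << looksLikeInput(argv[i]) << '\n';
  return 0;
}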
mmm a / tensorflow / g3doc / api_docs / python / index . md <nl> ppp b / tensorflow / g3doc / api_docs / python / index . md <nl> <nl> * [ ` initialize_all_variables ` ] ( . . / . . / api_docs / python / state_ops . md # initialize_all_variables ) <nl> * [ ` initialize_local_variables ` ] ( . . / . . / api_docs / python / state_ops . md # initialize_local_variables ) <nl> * [ ` initialize_variables ` ] ( . . / . . / api_docs / python / state_ops . md # initialize_variables ) <nl> + * [ ` is_variable_initialized ` ] ( . . / . . / api_docs / python / state_ops . md # is_variable_initialized ) <nl> * [ ` latest_checkpoint ` ] ( . . / . . / api_docs / python / state_ops . md # latest_checkpoint ) <nl> * [ ` local_variables ` ] ( . . / . . / api_docs / python / state_ops . md # local_variables ) <nl> * [ ` make_template ` ] ( . . / . . / api_docs / python / state_ops . md # make_template ) <nl> mmm a / tensorflow / g3doc / api_docs / python / state_ops . md <nl> ppp b / tensorflow / g3doc / api_docs / python / state_ops . md <nl> This is just a shortcut for ` initialize_variables ( local_variables ( ) ) ` <nl> An Op that initializes all local variables in the graph . <nl> <nl> <nl> + - - - <nl> + <nl> + # # # ` tf . is_variable_initialized ( variable ) ` { # is_variable_initialized } <nl> + <nl> + Returns an Op to check if a variable has been initialized . <nl> + <nl> + # # # # # Args : <nl> + <nl> + <nl> + * < b > ` variable ` < / b > : A ` Variable ` . <nl> + <nl> + # # # # # Returns : <nl> + <nl> + An operation to check whether a variable has been initialized . <nl> + <nl> + <nl> - - - <nl> <nl> # # # ` tf . assert_variables_initialized ( var_list = None ) ` { # assert_variables_initialized } <nl> | Update generated Python Op docs . | tensorflow/tensorflow | 6284e6ff51421a5425870128b92c5bbb3fe2625a | 2016-04-07T23:42:37Z |
mmm a / src / wasm / function - body - decoder - impl . h <nl> ppp b / src / wasm / function - body - decoder - impl . h <nl> class WasmDecoder : public Decoder { <nl> } <nl> <nl> inline bool Validate ( const byte * pc , CallIndirectImmediate < validate > & imm ) { <nl> - if ( ! VALIDATE ( module_ ! = nullptr & & ! module_ - > function_tables . empty ( ) ) ) { <nl> + if ( ! VALIDATE ( module_ ! = nullptr & & ! module_ - > tables . empty ( ) ) ) { <nl> error ( " function table has to exist to execute call_indirect " ) ; <nl> return false ; <nl> } <nl> mmm a / src / wasm / module - compiler . cc <nl> ppp b / src / wasm / module - compiler . cc <nl> MaybeHandle < WasmInstanceObject > InstanceBuilder : : Build ( ) { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Reserve the metadata for indirect function tables . <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - int function_table_count = static_cast < int > ( module_ - > function_tables . size ( ) ) ; <nl> - table_instances_ . reserve ( module_ - > function_tables . size ( ) ) ; <nl> - for ( int index = 0 ; index < function_table_count ; + + index ) { <nl> - table_instances_ . emplace_back ( ) ; <nl> - } <nl> + int table_count = static_cast < int > ( module_ - > tables . size ( ) ) ; <nl> + table_instances_ . resize ( table_count ) ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Process the imports for the module . <nl> MaybeHandle < WasmInstanceObject > InstanceBuilder : : Build ( ) { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Initialize the indirect tables . <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - if ( function_table_count > 0 ) { <nl> + if ( table_count > 0 ) { <nl> InitializeTables ( instance ) ; <nl> } <nl> <nl> MaybeHandle < WasmInstanceObject > InstanceBuilder : : Build ( ) { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Initialize the indirect function tables . <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - if ( function_table_count > 0 ) { <nl> + if ( table_count > 0 ) { <nl> LoadTableSegments ( instance ) ; <nl> } <nl> <nl> int InstanceBuilder : : ProcessImports ( Handle < WasmInstanceObject > instance ) { <nl> } <nl> uint32_t table_num = import . index ; <nl> DCHECK_EQ ( table_num , num_imported_tables ) ; <nl> - const WasmIndirectFunctionTable & table = <nl> - module_ - > function_tables [ table_num ] ; <nl> + const WasmTable & table = module_ - > tables [ table_num ] ; <nl> TableInstance & table_instance = table_instances_ [ table_num ] ; <nl> table_instance . table_object = Handle < WasmTableObject > : : cast ( value ) ; <nl> instance - > set_table_object ( * table_instance . table_object ) ; <nl> bool InstanceBuilder : : NeedsWrappers ( ) const { <nl> for ( auto & table_instance : table_instances_ ) { <nl> if ( ! table_instance . js_wrappers . is_null ( ) ) return true ; <nl> } <nl> - for ( auto & table : module_ - > function_tables ) { <nl> + for ( auto & table : module_ - > tables ) { <nl> if ( table . exported ) return true ; <nl> } <nl> return false ; <nl> void InstanceBuilder : : ProcessExports ( Handle < WasmInstanceObject > instance ) { <nl> case kExternalTable : { <nl> / / Export a table as a WebAssembly . Table object . 
<nl> TableInstance & table_instance = table_instances_ [ exp . index ] ; <nl> - const WasmIndirectFunctionTable & table = <nl> - module_ - > function_tables [ exp . index ] ; <nl> + const WasmTable & table = module_ - > tables [ exp . index ] ; <nl> if ( table_instance . table_object . is_null ( ) ) { <nl> uint32_t maximum = table . has_maximum_size ? table . maximum_size <nl> : FLAG_wasm_max_table_size ; <nl> void InstanceBuilder : : ProcessExports ( Handle < WasmInstanceObject > instance ) { <nl> } <nl> <nl> void InstanceBuilder : : InitializeTables ( Handle < WasmInstanceObject > instance ) { <nl> - size_t table_count = module_ - > function_tables . size ( ) ; <nl> + size_t table_count = module_ - > tables . size ( ) ; <nl> for ( size_t index = 0 ; index < table_count ; + + index ) { <nl> - const WasmIndirectFunctionTable & table = module_ - > function_tables [ index ] ; <nl> + const WasmTable & table = module_ - > tables [ index ] ; <nl> TableInstance & table_instance = table_instances_ [ index ] ; <nl> <nl> if ( ! instance - > has_indirect_function_table ( ) ) { <nl> void InstanceBuilder : : InitializeTables ( Handle < WasmInstanceObject > instance ) { <nl> <nl> void InstanceBuilder : : LoadTableSegments ( Handle < WasmInstanceObject > instance ) { <nl> NativeModule * native_module = module_object_ - > native_module ( ) ; <nl> - int function_table_count = static_cast < int > ( module_ - > function_tables . size ( ) ) ; <nl> - for ( int index = 0 ; index < function_table_count ; + + index ) { <nl> + int table_count = static_cast < int > ( module_ - > tables . size ( ) ) ; <nl> + for ( int index = 0 ; index < table_count ; + + index ) { <nl> TableInstance & table_instance = table_instances_ [ index ] ; <nl> <nl> / / TODO ( titzer ) : this does redundant work if there are multiple tables , <nl> mmm a / src / wasm / module - decoder . cc <nl> ppp b / src / wasm / module - decoder . cc <nl> class ModuleDecoderImpl : public Decoder { <nl> case kExternalTable : { <nl> / / = = = = = Imported table = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> if ( ! AddTable ( module_ . get ( ) ) ) break ; <nl> - import - > index = <nl> - static_cast < uint32_t > ( module_ - > function_tables . size ( ) ) ; <nl> - module_ - > function_tables . emplace_back ( ) ; <nl> - WasmIndirectFunctionTable * table = & module_ - > function_tables . back ( ) ; <nl> + import - > index = static_cast < uint32_t > ( module_ - > tables . size ( ) ) ; <nl> + module_ - > tables . emplace_back ( ) ; <nl> + WasmTable * table = & module_ - > tables . back ( ) ; <nl> table - > imported = true ; <nl> - expect_u8 ( " element type " , kWasmAnyFunctionTypeCode ) ; <nl> + expect_u8 ( " element type " , kLocalAnyFunc ) ; <nl> uint8_t flags = validate_table_flags ( " element count " ) ; <nl> consume_resizable_limits ( <nl> " element count " , " elements " , FLAG_wasm_max_table_size , <nl> class ModuleDecoderImpl : public Decoder { <nl> } <nl> <nl> void DecodeTableSection ( ) { <nl> - uint32_t table_count = consume_count ( " table count " , kV8MaxWasmTables ) ; <nl> + / / TODO ( ahaas ) : Set the correct limit to { kV8MaxWasmTables } once the <nl> + / / implementation of AnyRef landed . <nl> + uint32_t max_count = FLAG_experimental_wasm_anyref ? 10 : kV8MaxWasmTables ; <nl> + uint32_t table_count = consume_count ( " table count " , max_count ) ; <nl> <nl> for ( uint32_t i = 0 ; ok ( ) & & i < table_count ; i + + ) { <nl> if ( ! AddTable ( module_ . 
get ( ) ) ) break ; <nl> - module_ - > function_tables . emplace_back ( ) ; <nl> - WasmIndirectFunctionTable * table = & module_ - > function_tables . back ( ) ; <nl> - expect_u8 ( " table type " , kWasmAnyFunctionTypeCode ) ; <nl> + module_ - > tables . emplace_back ( ) ; <nl> + WasmTable * table = & module_ - > tables . back ( ) ; <nl> + table - > type = consume_reference_type ( ) ; <nl> uint8_t flags = validate_table_flags ( " table elements " ) ; <nl> consume_resizable_limits ( <nl> " table elements " , " elements " , FLAG_wasm_max_table_size , <nl> class ModuleDecoderImpl : public Decoder { <nl> break ; <nl> } <nl> case kExternalTable : { <nl> - WasmIndirectFunctionTable * table = nullptr ; <nl> + WasmTable * table = nullptr ; <nl> exp - > index = consume_table_index ( module_ . get ( ) , & table ) ; <nl> if ( table ) table - > exported = true ; <nl> break ; <nl> class ModuleDecoderImpl : public Decoder { <nl> uint32_t element_count = <nl> consume_count ( " element count " , FLAG_wasm_max_table_size ) ; <nl> <nl> - if ( element_count > 0 & & module_ - > function_tables . size ( ) = = 0 ) { <nl> + if ( element_count > 0 & & module_ - > tables . size ( ) = = 0 ) { <nl> error ( pc_ , " The element section requires a table " ) ; <nl> } <nl> for ( uint32_t i = 0 ; ok ( ) & & i < element_count ; + + i ) { <nl> class ModuleDecoderImpl : public Decoder { <nl> if ( table_index ! = 0 ) { <nl> errorf ( pos , " illegal table index % u ! = 0 " , table_index ) ; <nl> } <nl> - if ( table_index > = module_ - > function_tables . size ( ) ) { <nl> + if ( table_index > = module_ - > tables . size ( ) ) { <nl> errorf ( pos , " out of bounds table index % u " , table_index ) ; <nl> break ; <nl> } <nl> class ModuleDecoderImpl : public Decoder { <nl> } <nl> <nl> bool AddTable ( WasmModule * module ) { <nl> - if ( module - > function_tables . size ( ) > 0 ) { <nl> + if ( FLAG_experimental_wasm_anyref ) return true ; <nl> + if ( module - > tables . size ( ) > 0 ) { <nl> error ( " At most one table is supported " ) ; <nl> return false ; <nl> } else { <nl> class ModuleDecoderImpl : public Decoder { <nl> return consume_index ( " global index " , module - > globals , global ) ; <nl> } <nl> <nl> - uint32_t consume_table_index ( WasmModule * module , <nl> - WasmIndirectFunctionTable * * table ) { <nl> - return consume_index ( " table index " , module - > function_tables , table ) ; <nl> + uint32_t consume_table_index ( WasmModule * module , WasmTable * * table ) { <nl> + return consume_index ( " table index " , module - > tables , table ) ; <nl> } <nl> <nl> template < typename T > <nl> class ModuleDecoderImpl : public Decoder { <nl> } <nl> } <nl> <nl> + / / Reads a single 8 - bit integer , interpreting it as a reference type . <nl> + ValueType consume_reference_type ( ) { <nl> + byte val = consume_u8 ( " reference type " ) ; <nl> + ValueTypeCode t = static_cast < ValueTypeCode > ( val ) ; <nl> + switch ( t ) { <nl> + case kLocalAnyFunc : <nl> + return kWasmAnyFunc ; <nl> + case kLocalAnyRef : <nl> + if ( ! FLAG_experimental_wasm_anyref ) { <nl> + error ( pc_ - 1 , <nl> + " Invalid type . 
Set - - experimental - wasm - anyref to use ' AnyRef ' " ) ; <nl> + } <nl> + return kWasmAnyRef ; <nl> + default : <nl> + break ; <nl> + } <nl> + error ( pc_ - 1 , " invalid reference type " ) ; <nl> + return kWasmStmt ; <nl> + } <nl> + <nl> FunctionSig * consume_sig ( Zone * zone ) { <nl> constexpr bool has_return_values = true ; <nl> return consume_sig_internal ( zone , has_return_values ) ; <nl> mmm a / src / wasm / wasm - constants . h <nl> ppp b / src / wasm / wasm - constants . h <nl> enum ValueTypeCode : uint8_t { <nl> } ; <nl> / / Binary encoding of other types . <nl> constexpr uint8_t kWasmFunctionTypeCode = 0x60 ; <nl> - constexpr uint8_t kWasmAnyFunctionTypeCode = 0x70 ; <nl> <nl> / / Binary encoding of import / export kinds . <nl> enum ImportExportKindCode : uint8_t { <nl> mmm a / src / wasm / wasm - interpreter . cc <nl> ppp b / src / wasm / wasm - interpreter . cc <nl> class CodeMap { <nl> InterpreterCode * GetIndirectCode ( uint32_t table_index , uint32_t entry_index ) { <nl> uint32_t saved_index ; <nl> USE ( saved_index ) ; <nl> - if ( table_index > = module_ - > function_tables . size ( ) ) return nullptr ; <nl> + if ( table_index > = module_ - > tables . size ( ) ) return nullptr ; <nl> / / Mask table index for SSCA mitigation . <nl> saved_index = table_index ; <nl> - table_index & = <nl> - static_cast < int32_t > ( ( table_index - module_ - > function_tables . size ( ) ) & <nl> - ~ static_cast < int32_t > ( table_index ) ) > > <nl> - 31 ; <nl> + table_index & = static_cast < int32_t > ( ( table_index - module_ - > tables . size ( ) ) & <nl> + ~ static_cast < int32_t > ( table_index ) ) > > <nl> + 31 ; <nl> DCHECK_EQ ( table_index , saved_index ) ; <nl> - const WasmIndirectFunctionTable * table = <nl> - & module_ - > function_tables [ table_index ] ; <nl> + const WasmTable * table = & module_ - > tables [ table_index ] ; <nl> if ( entry_index > = table - > values . size ( ) ) return nullptr ; <nl> / / Mask entry_index for SSCA mitigation . <nl> saved_index = entry_index ; <nl> class ThreadImpl { <nl> code - > at ( pc ) ) ; <nl> uint32_t entry_index = Pop ( ) . to < uint32_t > ( ) ; <nl> / / Assume only one table for now . <nl> - DCHECK_LE ( module ( ) - > function_tables . size ( ) , 1u ) ; <nl> + DCHECK_LE ( module ( ) - > tables . size ( ) , 1u ) ; <nl> ExternalCallResult result = <nl> CallIndirectFunction ( 0 , entry_index , imm . sig_index ) ; <nl> switch ( result . type ) { <nl> mmm a / src / wasm / wasm - module - builder . cc <nl> ppp b / src / wasm / wasm - module - builder . cc <nl> void WasmModuleBuilder : : WriteTo ( ZoneBuffer & buffer ) const { <nl> if ( indirect_functions_ . size ( ) > 0 ) { <nl> size_t start = EmitSection ( kTableSectionCode , buffer ) ; <nl> buffer . write_u8 ( 1 ) ; / / table count <nl> - buffer . write_u8 ( kWasmAnyFunctionTypeCode ) ; <nl> + buffer . write_u8 ( kLocalAnyFunc ) ; <nl> buffer . write_u8 ( kHasMaximumFlag ) ; <nl> buffer . write_size ( indirect_functions_ . size ( ) ) ; <nl> buffer . write_size ( indirect_functions_ . size ( ) ) ; <nl> mmm a / src / wasm / wasm - module . cc <nl> ppp b / src / wasm / wasm - module . 
cc <nl> size_t EstimateWasmModuleSize ( const WasmModule * module ) { <nl> size_t estimate = <nl> sizeof ( WasmModule ) + VectorSize ( module - > signatures ) + <nl> VectorSize ( module - > signature_ids ) + VectorSize ( module - > functions ) + <nl> - VectorSize ( module - > data_segments ) + VectorSize ( module - > function_tables ) + <nl> + VectorSize ( module - > data_segments ) + VectorSize ( module - > tables ) + <nl> VectorSize ( module - > import_table ) + VectorSize ( module - > export_table ) + <nl> VectorSize ( module - > exceptions ) + VectorSize ( module - > table_inits ) ; <nl> / / TODO ( wasm ) : include names table and wire bytes in size estimate <nl> mmm a / src / wasm / wasm - module . h <nl> ppp b / src / wasm / wasm - module . h <nl> struct WasmDataSegment { <nl> } ; <nl> <nl> / / Static representation of a wasm indirect call table . <nl> - struct WasmIndirectFunctionTable { <nl> - MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS ( WasmIndirectFunctionTable ) ; <nl> - <nl> + struct WasmTable { <nl> + MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS ( WasmTable ) ; <nl> + ValueType type = kWasmStmt ; / / table type . <nl> uint32_t initial_size = 0 ; / / initial table size . <nl> uint32_t maximum_size = 0 ; / / maximum table size . <nl> bool has_maximum_size = false ; / / true if there is a maximum size . <nl> struct V8_EXPORT_PRIVATE WasmModule { <nl> std : : vector < uint32_t > signature_ids ; / / by signature index <nl> std : : vector < WasmFunction > functions ; <nl> std : : vector < WasmDataSegment > data_segments ; <nl> - std : : vector < WasmIndirectFunctionTable > function_tables ; <nl> + std : : vector < WasmTable > tables ; <nl> std : : vector < WasmImport > import_table ; <nl> std : : vector < WasmExport > export_table ; <nl> std : : vector < WasmException > exceptions ; <nl> mmm a / src / wasm / wasm - objects . cc <nl> ppp b / src / wasm / wasm - objects . cc <nl> size_t EstimateNativeAllocationsSize ( const WasmModule * module ) { <nl> size_t estimate = sizeof ( WasmInstanceNativeAllocations ) + <nl> ( 1 * kPointerSize * module - > num_imported_mutable_globals ) + <nl> ( 2 * kPointerSize * module - > num_imported_functions ) ; <nl> - for ( auto & table : module - > function_tables ) { <nl> + for ( auto & table : module - > tables ) { <nl> estimate + = 3 * kPointerSize * table . initial_size ; <nl> } <nl> return estimate ; <nl> mmm a / test / cctest / wasm / wasm - run - utils . cc <nl> ppp b / test / cctest / wasm / wasm - run - utils . cc <nl> Handle < JSFunction > TestingModuleBuilder : : WrapCode ( uint32_t index ) { <nl> <nl> void TestingModuleBuilder : : AddIndirectFunctionTable ( <nl> const uint16_t * function_indexes , uint32_t table_size ) { <nl> - test_module_ - > function_tables . emplace_back ( ) ; <nl> - WasmIndirectFunctionTable & table = test_module_ - > function_tables . back ( ) ; <nl> + test_module_ - > tables . emplace_back ( ) ; <nl> + WasmTable & table = test_module_ - > tables . back ( ) ; <nl> table . initial_size = table_size ; <nl> table . maximum_size = table_size ; <nl> table . has_maximum_size = true ; <nl> void TestingModuleBuilder : : PopulateIndirectFunctionTable ( ) { <nl> auto instance = instance_object ( ) ; <nl> uint32_t num_tables = 1 ; / / TODO ( titzer ) : multiple tables . 
<nl> for ( uint32_t i = 0 ; i < num_tables ; i + + ) { <nl> - WasmIndirectFunctionTable & table = test_module_ - > function_tables [ i ] ; <nl> + WasmTable & table = test_module_ - > tables [ i ] ; <nl> int table_size = static_cast < int > ( instance - > indirect_function_table_size ( ) ) ; <nl> for ( int j = 0 ; j < table_size ; j + + ) { <nl> WasmFunction & function = test_module_ - > functions [ table . values [ j ] ] ; <nl> mmm a / test / unittests / wasm / function - body - decoder - unittest . cc <nl> ppp b / test / unittests / wasm / function - body - decoder - unittest . cc <nl> class TestModuleBuilder { <nl> mod . maximum_pages = 100 ; <nl> } <nl> <nl> - void InitializeFunctionTable ( ) { mod . function_tables . emplace_back ( ) ; } <nl> + void InitializeTable ( ) { mod . tables . emplace_back ( ) ; } <nl> <nl> WasmModule * module ( ) { return & mod ; } <nl> <nl> TEST_F ( FunctionBodyDecoderTest , MultiReturnType ) { <nl> TEST_F ( FunctionBodyDecoderTest , SimpleIndirectCalls ) { <nl> FunctionSig * sig = sigs . i_i ( ) ; <nl> TestModuleBuilder builder ; <nl> - builder . InitializeFunctionTable ( ) ; <nl> + builder . InitializeTable ( ) ; <nl> module = builder . module ( ) ; <nl> <nl> byte f0 = builder . AddSignature ( sigs . i_v ( ) ) ; <nl> TEST_F ( FunctionBodyDecoderTest , SimpleIndirectCalls ) { <nl> TEST_F ( FunctionBodyDecoderTest , IndirectCallsOutOfBounds ) { <nl> FunctionSig * sig = sigs . i_i ( ) ; <nl> TestModuleBuilder builder ; <nl> - builder . InitializeFunctionTable ( ) ; <nl> + builder . InitializeTable ( ) ; <nl> module = builder . module ( ) ; <nl> <nl> EXPECT_FAILURE_S ( sig , WASM_CALL_INDIRECT0 ( 0 , WASM_ZERO ) ) ; <nl> TEST_F ( FunctionBodyDecoderTest , IndirectCallsOutOfBounds ) { <nl> TEST_F ( FunctionBodyDecoderTest , IndirectCallsWithMismatchedSigs3 ) { <nl> FunctionSig * sig = sigs . i_i ( ) ; <nl> TestModuleBuilder builder ; <nl> - builder . InitializeFunctionTable ( ) ; <nl> + builder . InitializeTable ( ) ; <nl> module = builder . module ( ) ; <nl> <nl> byte f0 = builder . AddFunction ( sigs . i_f ( ) ) ; <nl> TEST_F ( FunctionBodyDecoderTest , IndirectCallsWithoutTableCrash ) { <nl> TEST_F ( FunctionBodyDecoderTest , IncompleteIndirectCall ) { <nl> FunctionSig * sig = sigs . i_i ( ) ; <nl> TestModuleBuilder builder ; <nl> - builder . InitializeFunctionTable ( ) ; <nl> + builder . InitializeTable ( ) ; <nl> module = builder . module ( ) ; <nl> <nl> static byte code [ ] = { kExprCallIndirect } ; <nl> TEST_F ( FunctionBodyDecoderTest , IncompleteStore ) { <nl> FunctionSig * sig = sigs . i_i ( ) ; <nl> TestModuleBuilder builder ; <nl> builder . InitializeMemory ( ) ; <nl> - builder . InitializeFunctionTable ( ) ; <nl> + builder . InitializeTable ( ) ; <nl> module = builder . module ( ) ; <nl> <nl> static byte code [ ] = { kExprI32StoreMem } ; <nl> TEST_F ( FunctionBodyDecoderTest , IncompleteS8x16Shuffle ) { <nl> FunctionSig * sig = sigs . i_i ( ) ; <nl> TestModuleBuilder builder ; <nl> builder . InitializeMemory ( ) ; <nl> - builder . InitializeFunctionTable ( ) ; <nl> + builder . InitializeTable ( ) ; <nl> module = builder . module ( ) ; <nl> <nl> static byte code [ ] = { kSimdPrefix , <nl> mmm a / test / unittests / wasm / module - decoder - unittest . cc <nl> ppp b / test / unittests / wasm / module - decoder - unittest . 
cc <nl> TEST_F ( WasmModuleVerifyTest , OneIndirectFunction ) { <nl> / / funcs mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> ONE_EMPTY_FUNCTION , <nl> / / table declaration mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - SECTION ( Table , 4 ) , ENTRY_COUNT ( 1 ) , kWasmAnyFunctionTypeCode , 0 , 1 } ; <nl> + SECTION ( Table , 4 ) , ENTRY_COUNT ( 1 ) , kLocalAnyFunc , 0 , 1 } ; <nl> <nl> ModuleResult result = DecodeModule ( data , data + sizeof ( data ) ) ; <nl> EXPECT_OK ( result ) ; <nl> if ( result . ok ( ) ) { <nl> EXPECT_EQ ( 1u , result . val - > signatures . size ( ) ) ; <nl> EXPECT_EQ ( 1u , result . val - > functions . size ( ) ) ; <nl> - EXPECT_EQ ( 1u , result . val - > function_tables . size ( ) ) ; <nl> - EXPECT_EQ ( 1u , result . val - > function_tables [ 0 ] . initial_size ) ; <nl> + EXPECT_EQ ( 1u , result . val - > tables . size ( ) ) ; <nl> + EXPECT_EQ ( 1u , result . val - > tables [ 0 ] . initial_size ) ; <nl> } <nl> } <nl> <nl> TEST_F ( WasmModuleVerifyTest , ElementSectionWithInternalTable ) { <nl> static const byte data [ ] = { <nl> / / table mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - SECTION ( Table , 4 ) , ENTRY_COUNT ( 1 ) , kWasmAnyFunctionTypeCode , 0 , 1 , <nl> + SECTION ( Table , 4 ) , ENTRY_COUNT ( 1 ) , kLocalAnyFunc , 0 , 1 , <nl> / / elements mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> SECTION ( Element , 1 ) , <nl> 0 / / entry count <nl> TEST_F ( WasmModuleVerifyTest , ElementSectionWithImportedTable ) { <nl> static const byte data [ ] = { <nl> / / imports mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> SECTION ( Import , 9 ) , ENTRY_COUNT ( 1 ) , <nl> - NAME_LENGTH ( 1 ) , / / - - <nl> - ' m ' , / / module name <nl> - NAME_LENGTH ( 1 ) , / / - - <nl> - ' t ' , / / table name <nl> - kExternalTable , / / import kind <nl> - kWasmAnyFunctionTypeCode , / / elem_type <nl> - 0 , / / no maximum field <nl> - 1 , / / initial size <nl> + NAME_LENGTH ( 1 ) , / / - - <nl> + ' m ' , / / module name <nl> + NAME_LENGTH ( 1 ) , / / - - <nl> + ' t ' , / / table name <nl> + kExternalTable , / / import kind <nl> + kLocalAnyFunc , / / elem_type <nl> + 0 , / / no maximum field <nl> + 1 , / / initial size <nl> / / elements mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> SECTION ( Element , 1 ) , <nl> 0 / / entry count <nl> TEST_F ( WasmModuleVerifyTest , Regression_735887 ) { <nl> / / funcs mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> ONE_EMPTY_FUNCTION , <nl> / / table declaration mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - SECTION ( Table , 4 ) , ENTRY_COUNT ( 1 ) , kWasmAnyFunctionTypeCode , 0 , 1 , <nl> + SECTION ( Table , 4 ) , ENTRY_COUNT ( 1 ) , kLocalAnyFunc , 0 , 1 , <nl> / / elements mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> SECTION ( Element , 7 ) , <nl> 1 , / / entry count <nl> TEST_F ( WasmModuleVerifyTest , OneIndirectFunction_one_entry ) { <nl> / / funcs mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> ONE_EMPTY_FUNCTION , <nl> / / table declaration mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> - SECTION ( Table , 4 ) , ENTRY_COUNT ( 1 ) , kWasmAnyFunctionTypeCode , 0 , 1 , <nl> + SECTION ( Table , 4 ) , ENTRY_COUNT ( 1 ) , kLocalAnyFunc , 0 , 1 , <nl> / / elements mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> SECTION ( Element , 7 ) , <nl> 1 , / / entry count <nl> TEST_F ( WasmModuleVerifyTest , OneIndirectFunction_one_entry ) { 
<nl> if ( result . ok ( ) ) { <nl> EXPECT_EQ ( 1u , result . val - > signatures . size ( ) ) ; <nl> EXPECT_EQ ( 1u , result . val - > functions . size ( ) ) ; <nl> - EXPECT_EQ ( 1u , result . val - > function_tables . size ( ) ) ; <nl> - EXPECT_EQ ( 1u , result . val - > function_tables [ 0 ] . initial_size ) ; <nl> + EXPECT_EQ ( 1u , result . val - > tables . size ( ) ) ; <nl> + EXPECT_EQ ( 1u , result . val - > tables [ 0 ] . initial_size ) ; <nl> } <nl> } <nl> <nl> TEST_F ( WasmModuleVerifyTest , MultipleIndirectFunctions ) { <nl> / / funcs mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> FOUR_EMPTY_FUNCTIONS , <nl> / / table declaration mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> - SECTION ( Table , 4 ) , ENTRY_COUNT ( 1 ) , kWasmAnyFunctionTypeCode , 0 , 8 , <nl> + SECTION ( Table , 4 ) , ENTRY_COUNT ( 1 ) , kLocalAnyFunc , 0 , 8 , <nl> / / table elements mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> SECTION ( Element , 14 ) , <nl> 1 , / / entry count <nl> TEST_F ( WasmModuleVerifyTest , MultipleIndirectFunctions ) { <nl> if ( result . ok ( ) ) { <nl> EXPECT_EQ ( 2u , result . val - > signatures . size ( ) ) ; <nl> EXPECT_EQ ( 4u , result . val - > functions . size ( ) ) ; <nl> - EXPECT_EQ ( 1u , result . val - > function_tables . size ( ) ) ; <nl> - EXPECT_EQ ( 8u , result . val - > function_tables [ 0 ] . initial_size ) ; <nl> + EXPECT_EQ ( 1u , result . val - > tables . size ( ) ) ; <nl> + EXPECT_EQ ( 8u , result . val - > tables [ 0 ] . initial_size ) ; <nl> } <nl> } <nl> <nl> TEST_F ( WasmModuleVerifyTest , IndirectFunctionInvalidIndex ) { <nl> EXPECT_FAILURE ( data ) ; <nl> } <nl> <nl> + TEST_F ( WasmModuleVerifyTest , MultipleTablesWithoutFlag ) { <nl> + static const byte data [ ] = { <nl> + SECTION ( Table , 7 ) , / / table section <nl> + ENTRY_COUNT ( 2 ) , / / 2 tables <nl> + kLocalAnyFunc , / / table 1 : type <nl> + 0 , / / table 1 : no maximum <nl> + 10 , / / table 1 : minimum size <nl> + kLocalAnyFunc , / / table 2 : type <nl> + 0 , / / table 2 : no maximum <nl> + 10 , / / table 2 : minimum size <nl> + } ; <nl> + EXPECT_FAILURE ( data ) ; <nl> + } <nl> + <nl> + TEST_F ( WasmModuleVerifyTest , MultipleTablesWithFlag ) { <nl> + FlagScope < bool > flag_scope ( & FLAG_experimental_wasm_anyref , true ) ; <nl> + static const byte data [ ] = { <nl> + SECTION ( Table , 7 ) , / / table section <nl> + ENTRY_COUNT ( 2 ) , / / 2 tables <nl> + kLocalAnyFunc , / / table 1 : type <nl> + 0 , / / table 1 : no maximum <nl> + 10 , / / table 1 : minimum size <nl> + kLocalAnyRef , / / table 2 : type <nl> + 0 , / / table 2 : no maximum <nl> + 11 , / / table 2 : minimum size <nl> + } ; <nl> + <nl> + ModuleResult result = DecodeModule ( data , data + sizeof ( data ) ) ; <nl> + EXPECT_OK ( result ) ; <nl> + <nl> + EXPECT_EQ ( 2u , result . val - > tables . size ( ) ) ; <nl> + <nl> + EXPECT_EQ ( 10u , result . val - > tables [ 0 ] . initial_size ) ; <nl> + EXPECT_EQ ( kWasmAnyFunc , result . val - > tables [ 0 ] . type ) ; <nl> + <nl> + EXPECT_EQ ( 11u , result . val - > tables [ 1 ] . initial_size ) ; <nl> + EXPECT_EQ ( kWasmAnyRef , result . val - > tables [ 1 ] . type ) ; <nl> + } <nl> + <nl> class WasmSignatureDecodeTest : public TestWithZone { <nl> public : <nl> WasmSignatureDecodeTest ( ) <nl> | [ wasm ] [ anyref ] Allow tables of different reference types | v8/v8 | d87287bc4832aa43581cdd2edde823dafadc930e | 2018-07-10T13:50:36Z |
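The new consume_reference_type boils down to mapping a single byte to a reference type, with anyref gated behind the experimental flag. A distilled standalone sketch — the byte values 0x70 (anyfunc) and 0x6f (anyref) follow the wasm binary format of the time, and the enum is illustrative, not V8's ValueType:

#include <cassert>
#include <cstdint>

enum class RefType { kStmt, kAnyFunc, kAnyRef };

RefType consumeReferenceType(uint8_t code, bool anyref_enabled, bool* ok) {
  *ok = true;
  switch (code) {
    case 0x70: return RefType::kAnyFunc;
    case 0x6f:
      if (anyref_enabled) return RefType::kAnyRef;
      break;  // anyref requires the experimental flag
    default:
      break;
  }
  *ok = false;  // invalid reference type
  return RefType::kStmt;
}

int main() {
  bool ok = false;
  assert(consumeReferenceType(0x70, false, &ok) == RefType::kAnyFunc && ok);
  consumeReferenceType(0x6f, false, &ok);
  assert(!ok);  // anyref rejected when the flag is off
  return 0;
}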
mmm a / modules / dnn / test / test_layers . cpp <nl> ppp b / modules / dnn / test / test_layers . cpp <nl> TEST_P ( ConvolutionEltwiseActivationFusion , Accuracy ) <nl> if ( eltwiseOp ! = " sum " & & weightedEltwise ) <nl> throw SkipTestException ( " weighted eltwise not supported " ) ; <nl> LayerParams eltwiseParams ; <nl> - TestLayerFusion : : makeDefaultTestEltwiseLayer ( eltwiseParams , eltwiseOp , false ) ; <nl> + TestLayerFusion : : makeDefaultTestEltwiseLayer ( eltwiseParams , eltwiseOp , weightedEltwise ) ; <nl> <nl> std : : string actType = get < 3 > ( GetParam ( ) ) ; <nl> LayerParams activationParams ; <nl> TEST_P ( ConvolutionEltwiseActivationFusion , Accuracy ) <nl> Target targetId = get < 1 > ( get < 4 > ( GetParam ( ) ) ) ; <nl> <nl> / / bug : https : / / github . com / opencv / opencv / issues / 17945 <nl> - if ( eltwiseOp ! = " sum " & & backendId = = DNN_BACKEND_OPENCV & & ( targetId = = DNN_TARGET_OPENCL | | targetId = = DNN_TARGET_OPENCL_FP16 ) ) <nl> + if ( ( eltwiseOp ! = " sum " | | weightedEltwise ) & & backendId = = DNN_BACKEND_OPENCV & & ( targetId = = DNN_TARGET_OPENCL | | targetId = = DNN_TARGET_OPENCL_FP16 ) ) <nl> applyTestTag ( CV_TEST_TAG_DNN_SKIP_OPENCL ) ; <nl> <nl> / / bug : https : / / github . com / opencv / opencv / issues / 17953 <nl> TEST_P ( ConvolutionActivationEltwiseFusion , Accuracy ) <nl> if ( eltwiseOp ! = " sum " & & weightedEltwise ) <nl> throw SkipTestException ( " weighted eltwise not supported " ) ; <nl> LayerParams eltwiseParams ; <nl> - TestLayerFusion : : makeDefaultTestEltwiseLayer ( eltwiseParams , eltwiseOp , false ) ; <nl> + TestLayerFusion : : makeDefaultTestEltwiseLayer ( eltwiseParams , eltwiseOp , weightedEltwise ) ; <nl> <nl> Backend backendId = get < 0 > ( get < 4 > ( GetParam ( ) ) ) ; <nl> Target targetId = get < 1 > ( get < 4 > ( GetParam ( ) ) ) ; <nl> | fix typo in fusion tests | opencv/opencv | 1df533c914c282f4c99d42307b9ee3b86df34caa | 2020-09-02T08:55:36Z |
mmm a / xbmc / pvr / PVRActionListener . cpp <nl> ppp b / xbmc / pvr / PVRActionListener . cpp <nl> <nl> # include " messaging / ApplicationMessenger . h " <nl> # include " input / Key . h " <nl> # include " guilib / LocalizeStrings . h " <nl> + # include " guilib / GUIWindowManager . h " <nl> # include " dialogs / GUIDialogNumeric . h " <nl> # include " settings / AdvancedSettings . h " <nl> # include " settings / Settings . h " <nl> bool CPVRActionListener : : OnAction ( const CAction & action ) <nl> case REMOTE_8 : <nl> case REMOTE_9 : <nl> { <nl> - if ( g_application . IsFullScreen ( ) & & g_application . CurrentFileItem ( ) . IsLiveTV ( ) ) <nl> + if ( g_application . CurrentFileItem ( ) . IsLiveTV ( ) & & <nl> + ( g_windowManager . IsWindowActive ( WINDOW_FULLSCREEN_VIDEO ) | | <nl> + g_windowManager . IsWindowActive ( WINDOW_VISUALISATION ) ) ) <nl> { <nl> if ( g_PVRManager . IsPlaying ( ) ) <nl> { <nl> | [ pvr ] [ fix ] limit numeric dialog to fullscreen / visualisation window ( fixes ) | xbmc/xbmc | bb86468bcb3bfe29d3068411171f9da3af8a542e | 2015-08-02T11:40:54Z |
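The fixed condition distilled: numeric channel entry is handled only while live TV is playing in the fullscreen video or visualisation window, rather than in any fullscreen state. A dependency-free sketch with placeholder window ids, not XBMC's real constants:

#include <cassert>

enum Window { WINDOW_HOME, WINDOW_FULLSCREEN_VIDEO, WINDOW_VISUALISATION };

// Numeric remote keys switch channels only in these two playback windows.
bool handlesNumericInput(bool isLiveTV, Window active) {
  return isLiveTV && (active == WINDOW_FULLSCREEN_VIDEO ||
                      active == WINDOW_VISUALISATION);
}

int main() {
  assert(handlesNumericInput(true, WINDOW_FULLSCREEN_VIDEO));
  assert(!handlesNumericInput(true, WINDOW_HOME));
  assert(!handlesNumericInput(false, WINDOW_VISUALISATION));
  return 0;
}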
mmm a / third_party / mlir / include / mlir / IR / OpBase . td <nl> ppp b / third_party / mlir / include / mlir / IR / OpBase . td <nl> def TypeArrayAttr : TypedArrayAttrBase < TypeAttr , " type array attribute " > { <nl> let constBuilderCall = ? ; <nl> } <nl> <nl> + def I32ElementsAttr : Attr < <nl> + CPred < " $ _self . isa < DenseIntElementsAttr > ( ) & & " <nl> + " $ _self . cast < DenseIntElementsAttr > ( ) . getType ( ) . " <nl> + " getElementType ( ) . isInteger ( 32 ) " > , <nl> + " 32 - bit integer elements attribute " > { <nl> + let storageType = [ { DenseIntElementsAttr } ] ; <nl> + let returnType = [ { DenseIntElementsAttr } ] ; <nl> + let constBuilderCall = " $ _builder . getDenseElementsAttr ( " <nl> + " $ _builder . getTensorType ( { } , $ _builder . getIntegerType ( 32 ) ) , " <nl> + " { $ _builder . getI32IntegerAttr ( $ 0 ) } ) " ; <nl> + let convertFromStorage = " $ _self " ; <nl> + } <nl> + <nl> / / Attributes containing symbol references . <nl> def SymbolRefAttr : Attr < CPred < " $ _self . isa < SymbolRefAttr > ( ) " > , <nl> " symbol reference attribute " > { <nl> mmm a / third_party / mlir / test / lib / TestDialect / TestOps . td <nl> ppp b / third_party / mlir / test / lib / TestDialect / TestOps . td <nl> def SingleBlockImplicitTerminatorOp : TEST_Op < " SingleBlockImplicitTerminator " , <nl> let regions = ( region SizedRegion < 1 > : $ region ) ; <nl> } <nl> <nl> + def I32ElementsAttributesOp : TEST_Op < " i32ElementsAttr " > { <nl> + let arguments = ( ins I32ElementsAttr : $ attr ) ; <nl> + } <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / Test Patterns <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> | Add I32ElementsAttr to OpBase | tensorflow/tensorflow | f68e9d4016244489ce12f786cd7c099384dd1caa | 2019-08-23T03:06:42Z |
mmm a / atom . gyp <nl> ppp b / atom . gyp <nl> <nl> ' product_name % ' : ' Electron ' , <nl> ' company_name % ' : ' GitHub , Inc ' , <nl> ' company_abbr % ' : ' github ' , <nl> - ' version % ' : ' 0 . 37 . 0 ' , <nl> + ' version % ' : ' 0 . 37 . 1 ' , <nl> } , <nl> ' includes ' : [ <nl> ' filenames . gypi ' , <nl> mmm a / atom / browser / resources / mac / Info . plist <nl> ppp b / atom / browser / resources / mac / Info . plist <nl> <nl> < key > CFBundleIconFile < / key > <nl> < string > atom . icns < / string > <nl> < key > CFBundleVersion < / key > <nl> - < string > 0 . 37 . 0 < / string > <nl> + < string > 0 . 37 . 1 < / string > <nl> < key > CFBundleShortVersionString < / key > <nl> - < string > 0 . 37 . 0 < / string > <nl> + < string > 0 . 37 . 1 < / string > <nl> < key > LSApplicationCategoryType < / key > <nl> < string > public . app - category . developer - tools < / string > <nl> < key > LSMinimumSystemVersion < / key > <nl> mmm a / atom / browser / resources / win / atom . rc <nl> ppp b / atom / browser / resources / win / atom . rc <nl> END <nl> / / <nl> <nl> VS_VERSION_INFO VERSIONINFO <nl> - FILEVERSION 0 , 37 , 0 , 0 <nl> - PRODUCTVERSION 0 , 37 , 0 , 0 <nl> + FILEVERSION 0 , 37 , 1 , 0 <nl> + PRODUCTVERSION 0 , 37 , 1 , 0 <nl> FILEFLAGSMASK 0x3fL <nl> # ifdef _DEBUG <nl> FILEFLAGS 0x1L <nl> BEGIN <nl> BEGIN <nl> VALUE " CompanyName " , " GitHub , Inc . " <nl> VALUE " FileDescription " , " Electron " <nl> - VALUE " FileVersion " , " 0 . 37 . 0 " <nl> + VALUE " FileVersion " , " 0 . 37 . 1 " <nl> VALUE " InternalName " , " electron . exe " <nl> VALUE " LegalCopyright " , " Copyright ( C ) 2015 GitHub , Inc . All rights reserved . " <nl> VALUE " OriginalFilename " , " electron . exe " <nl> VALUE " ProductName " , " Electron " <nl> - VALUE " ProductVersion " , " 0 . 37 . 0 " <nl> + VALUE " ProductVersion " , " 0 . 37 . 1 " <nl> VALUE " SquirrelAwareVersion " , " 1 " <nl> END <nl> END <nl> mmm a / atom / common / atom_version . h <nl> ppp b / atom / common / atom_version . h <nl> <nl> <nl> # define ATOM_MAJOR_VERSION 0 <nl> # define ATOM_MINOR_VERSION 37 <nl> - # define ATOM_PATCH_VERSION 0 <nl> + # define ATOM_PATCH_VERSION 1 <nl> <nl> # define ATOM_VERSION_IS_RELEASE 1 <nl> <nl> mmm a / package . json <nl> ppp b / package . json <nl> <nl> { <nl> " name " : " electron " , <nl> - " version " : " 0 . 37 . 0 " , <nl> + " version " : " 0 . 37 . 1 " , <nl> " devDependencies " : { <nl> " asar " : " ^ 0 . 10 . 0 " , <nl> " eslint " : " ^ 2 . 1 . 0 " , <nl> mmm a / vendor / brightray <nl> ppp b / vendor / brightray <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit c1f3bb4ecf4cacb33bf56b9ebd3656a4defbeb64 <nl> + Subproject commit d6bafa3ac1b56b24e8944ebbc87ed51ad417a21d <nl> | Merge pull request from atom / delay - load - powrprof | electron/electron | bc9c48261a03b4a68ff54efe268a8780a0599ffd | 2016-03-13T02:38:39Z |
new file mode 100644 <nl> index 0000000000 . . 654f998203 <nl> mmm / dev / null <nl> ppp b / math / miller_rabin . cpp <nl> <nl> + / * * <nl> + * Copyright 2020 @ author tjgurwara99 <nl> + * @ file <nl> + * <nl> + * A basic implementation of Miller - Rabin primality test . <nl> + * / <nl> + <nl> + # include < cassert > <nl> + # include < iostream > <nl> + # include < random > <nl> + # include < vector > <nl> + <nl> + / * * <nl> + * Function to give a binary representation of a number in reverse order <nl> + * @ param num integer number that we want to convert <nl> + * @ return result vector of the number input in reverse binary <nl> + * / <nl> + template < typename T > std : : vector < T > reverse_binary ( T num ) { <nl> + std : : vector < T > result ; <nl> + T temp = num ; <nl> + while ( temp > 0 ) { <nl> + result . push_back ( temp % 2 ) ; <nl> + temp = temp / 2 ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + / * * <nl> + * Function for modular exponentiation . <nl> + * This function is an efficient modular exponentiation function . <nl> + * It can be used with any big integer library such as Boost multiprecision <nl> + * to give result any modular exponentiation problem relatively quickly . <nl> + * @ param base number being raised to a power as integer <nl> + * @ param rev_binary_exponent reverse binary of the power the base is being <nl> + * raised to <nl> + * @ param mod modulo <nl> + * @ return r the modular exponentiation of \ f $ a ^ { n } \ equiv r \ mod { m } \ f $ where <nl> + * \ f $ n \ f $ is the base 10 representation of rev_binary_exponent and \ f $ m = mod \ f $ <nl> + * parameter . <nl> + * / <nl> + template < typename T > <nl> + T modular_exponentiation ( T base , const std : : vector < T > & rev_binary_exponent , <nl> + T mod ) { <nl> + if ( mod = = 1 ) <nl> + return 0 ; <nl> + T b = 1 ; <nl> + if ( rev_binary_exponent . size ( ) = = 0 ) <nl> + return b ; <nl> + T A = base ; <nl> + if ( rev_binary_exponent [ 0 ] = = 1 ) <nl> + b = base ; <nl> + <nl> + for ( typename std : : vector < T > : : const_iterator it = <nl> + rev_binary_exponent . cbegin ( ) + 1 ; <nl> + it ! = rev_binary_exponent . cend ( ) ; + + it ) { <nl> + A = A * A % mod ; <nl> + if ( * it = = 1 ) <nl> + b = A * b % mod ; <nl> + } <nl> + return b ; <nl> + } <nl> + <nl> + / * * Function for testing the conditions that are satisfied when a number is <nl> + * prime . <nl> + * @ param d number such that \ f $ d \ cdot 2 ^ r = n - 1 \ f $ where \ f $ n = num \ f $ <nl> + * parameter and \ f $ r \ geq 1 \ f $ <nl> + * @ param num number being tested for primality . <nl> + * @ return ' false ' if n is composite <nl> + * @ return ' true ' if n is ( probably ) prime . <nl> + * / <nl> + template < typename T > bool miller_test ( T d , T num ) { <nl> + / / random number seed <nl> + std : : random_device rd_seed ; <nl> + / / random number generator <nl> + std : : mt19937 gen ( rd_seed ( ) ) ; <nl> + / / Uniformly distributed range [ 2 , num - 2 ] for random numbers <nl> + std : : uniform_int_distribution < > distribution ( 2 , num - 2 ) ; <nl> + / / Random number generated in the range [ 2 , num - 2 ] . <nl> + T random = distribution ( gen ) ; <nl> + / / vector for reverse binary of the power <nl> + std : : vector < T > power = reverse_binary ( d ) ; <nl> + / / x = random ^ d % num <nl> + T x = modular_exponentiation ( random , power , num ) ; <nl> + / / miller conditions <nl> + if ( x = = 1 | | x = = num - 1 ) { <nl> + return true ; <nl> + } <nl> + <nl> + while ( d ! 
= num - 1 ) { <nl> + x = ( x * x ) % num ; <nl> + d * = 2 ; <nl> + if ( x = = 1 ) { <nl> + return false ; <nl> + } <nl> + if ( x = = num - 1 ) { <nl> + return true ; <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + / * * <nl> + * Function that test ( probabilistically ) whether a given number is a prime <nl> + * based on the Miller - Rabin Primality Test . <nl> + * @ param num number to be tested for primality . <nl> + * @ param repeats number of repetitions for the test to increase probability of <nl> + * correct result . <nl> + * @ return ' false ' if num is composite <nl> + * @ return ' true ' if num is ( probably ) prime <nl> + * <nl> + * \ detail <nl> + * First we check whether the num input is less than 4 , if so we can determine <nl> + * whether this is a prime or composite by checking for 2 and 3 . <nl> + * Next we check whether this num is odd ( as all primes greater than 2 are odd ) . <nl> + * Next we write our num in the following format \ f $ num = 2 ^ r \ cdot d + 1 \ f $ . After <nl> + * finding r and d for our input num , we use for loop repeat number of times <nl> + * inside which we check the miller conditions using the function miller_test . <nl> + * If miller_test returns false then the number is composite <nl> + * After the loop finishes completely without issuing a false return call , <nl> + * we can conclude that this number is probably prime . <nl> + * / <nl> + template < typename T > bool miller_rabin_primality_test ( T num , T repeats ) { <nl> + if ( num < = 4 ) { <nl> + / / If num = = 2 or num = = 3 then prime <nl> + if ( num = = 2 | | num = = 3 ) { <nl> + return true ; <nl> + } else { <nl> + return false ; <nl> + } <nl> + } <nl> + / / If num is even then not prime <nl> + if ( num % 2 = = 0 ) { <nl> + return false ; <nl> + } <nl> + / / Finding d and r in num = 2 ^ r * d + 1 <nl> + T d = num - 1 , r = 0 ; <nl> + while ( d % 2 = = 0 ) { <nl> + d = d / 2 ; <nl> + r + + ; <nl> + } <nl> + <nl> + for ( T i = 0 ; i < repeats ; + + i ) { <nl> + if ( ! miller_test ( d , num ) ) { <nl> + return false ; <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + / * * <nl> + * Functions for testing the miller_rabin_primality_test ( ) function with some <nl> + * assert statements . <nl> + * / <nl> + void tests ( ) { <nl> + / / First test on 2 <nl> + assert ( ( ( void ) " 2 is prime but function says otherwise . \ n " , <nl> + miller_rabin_primality_test ( 2 , 1 ) = = true ) ) ; <nl> + std : : cout < < " First test passes . " < < std : : endl ; <nl> + / / Second test on 5 <nl> + assert ( ( ( void ) " 5 should be prime but the function says otherwise . \ n " , <nl> + miller_rabin_primality_test ( 5 , 3 ) = = true ) ) ; <nl> + std : : cout < < " Second test passes . " < < std : : endl ; <nl> + / / Third test on 23 <nl> + assert ( ( ( void ) " 23 should be prime but the function says otherwise . \ n " , <nl> + miller_rabin_primality_test ( 23 , 3 ) = = true ) ) ; <nl> + std : : cout < < " Third test passes . " < < std : : endl ; <nl> + / / Fourth test on 16 <nl> + assert ( ( ( void ) " 16 is not a prime but the function says otherwise . \ n " , <nl> + miller_rabin_primality_test ( 16 , 3 ) = = false ) ) ; <nl> + std : : cout < < " Fourth test passes . " < < std : : endl ; <nl> + / / Fifth test on 27 <nl> + assert ( ( ( void ) " 27 is not a prime but the function says otherwise . \ n " , <nl> + miller_rabin_primality_test ( 27 , 3 ) = = false ) ) ; <nl> + std : : cout < < " Fifth test passes . 
" < < std : : endl ; <nl> + } <nl> + <nl> + / * * <nl> + * Main function <nl> + * / <nl> + int main ( ) { <nl> + tests ( ) ; <nl> + return 0 ; <nl> + } <nl> | feat : Added a probabilistic Miller - Rabin Primality Test ( ) | TheAlgorithms/C-Plus-Plus | a48d05fb6223cedde57fa487fb79ee51651d5985 | 2020-06-21T17:40:57Z |
mmm a / src / library_sdl . js <nl> ppp b / src / library_sdl . js <nl> var LibrarySDL = { <nl> Mix_LoadWAV_RW : function ( rwopsID , freesrc ) { <nl> var rwops = SDL . rwops [ rwopsID ] ; <nl> <nl> + # if USE_SDL = = 2 <nl> + if ( rwops = = = undefined ) { <nl> + var type = { { { makeGetValue ( ' rwopsID + ' + 20 / * type * / , ' 0 ' , ' i32 ' ) } } } ; <nl> + <nl> + if ( type = = = 2 / * SDL_RWOPS_STDFILE * / ) { <nl> + var fp = { { { makeGetValue ( ' rwopsID + ' + 28 / * hidden . stdio . fp * / , ' 0 ' , ' i32 ' ) } } } ; <nl> + var stream = FS . getStreamFromPtr ( fp ) ; <nl> + if ( stream ) { <nl> + rwops = { filename : stream . path } ; <nl> + } <nl> + } <nl> + else if ( type = = = 4 / * SDL_RWOPS_MEMORY * / | | type = = = 5 / * SDL_RWOPS_MEMORY_RO * / ) { <nl> + var base = { { { makeGetValue ( ' rwopsID + ' + 24 / * hidden . mem . base * / , ' 0 ' , ' i32 ' ) } } } ; <nl> + var stop = { { { makeGetValue ( ' rwopsID + ' + 32 / * hidden . mem . stop * / , ' 0 ' , ' i32 ' ) } } } ; <nl> + <nl> + rwops = { bytes : base , count : stop - base } ; <nl> + } <nl> + } <nl> + # endif <nl> + <nl> if ( rwops = = = undefined ) <nl> return 0 ; <nl> <nl> mmm a / tests / sdl_audio . c <nl> ppp b / tests / sdl_audio . c <nl> int play2 ( ) { <nl> <nl> int channel2 = Mix_PlayChannel ( - 1 , sound2 , 0 ) ; <nl> assert ( channel2 = = 1 ) ; <nl> - # ifndef USE_SDL2 / / rw stuff fails in SDL2 <nl> int channel3 = Mix_PlayChannel ( - 1 , sound3 , 0 ) ; <nl> assert ( channel3 = = 2 ) ; <nl> - # endif <nl> assert ( Mix_PlayMusic ( music , 1 ) = = 0 ) ; <nl> return channel2 ; <nl> } <nl> int main ( int argc , char * * argv ) { <nl> fread ( bytes , 1 , info . st_size , f ) ; <nl> fclose ( f ) ; <nl> <nl> - # ifndef USE_SDL2 / / rw stuff fails in SDL2 <nl> SDL_RWops * ops = SDL_RWFromConstMem ( bytes , info . st_size ) ; <nl> sound3 = Mix_LoadWAV_RW ( ops , 0 ) ; <nl> SDL_FreeRW ( ops ) ; <nl> free ( bytes ) ; <nl> - # endif <nl> } <nl> <nl> { <nl> | Merge pull request from Daft - Freak / patch - 6 | emscripten-core/emscripten | 6bae614789da6203a1472403cd91ef90dba197ed | 2015-05-29T03:03:43Z |
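The hard-coded byte offsets read by the JavaScript above (type at +20, hidden.stdio.fp at +28, hidden.mem.base at +24, hidden.mem.stop at +32) only make sense against SDL2's SDL_RWops layout under Emscripten's 32-bit pointer model. The sketch below shows the assumed layout for orientation; it is not the authoritative definition (see SDL_rwops.h in the SDL2 sources), and the offsets hold only with 4-byte pointers and no padding.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct SDL_RWops_sketch {
        // Five function pointers occupy offsets 0..16 when pointers are 4 bytes.
        std::int64_t (*size)(SDL_RWops_sketch*);
        std::int64_t (*seek)(SDL_RWops_sketch*, std::int64_t, int);
        std::size_t (*read)(SDL_RWops_sketch*, void*, std::size_t, std::size_t);
        std::size_t (*write)(SDL_RWops_sketch*, const void*, std::size_t,
                             std::size_t);
        int (*close)(SDL_RWops_sketch*);

        std::uint32_t type;  // offset 20; 2 = SDL_RWOPS_STDFILE,
                             // 4/5 = SDL_RWOPS_MEMORY(_RO), per the JS above

        union {              // union begins at offset 24
            struct {
                std::uint32_t autoclose;  // offset 24
                std::FILE* fp;            // offset 28, read for SDL_RWOPS_STDFILE
            } stdio;
            struct {
                std::uint8_t* base;       // offset 24, read for SDL_RWOPS_MEMORY
                std::uint8_t* here;       // offset 28
                std::uint8_t* stop;       // offset 32
            } mem;
        } hidden;
    };

This is why the patch can recover either a filename (via the FILE* for stdio-backed rwops) or a (base, count) byte range (for memory-backed rwops) from the same opaque pointer.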
mmm a / include / rocksdb / status . h <nl> ppp b / include / rocksdb / status . h <nl> class Status { <nl> public : <nl> / / Create a success status . <nl> Status ( ) : code_ ( kOk ) , subcode_ ( kNone ) , state_ ( nullptr ) { } <nl> - ~ Status ( ) { delete [ ] state_ ; } <nl> + ~ Status ( ) { free ( ( void * ) state_ ) ; } <nl> <nl> / / Copy the specified status . <nl> Status ( const Status & s ) ; <nl> inline Status & Status : : operator = ( const Status & s ) { <nl> if ( this ! = & s ) { <nl> code_ = s . code_ ; <nl> subcode_ = s . subcode_ ; <nl> - delete [ ] state_ ; <nl> + free ( ( void * ) state_ ) ; <nl> state_ = ( s . state_ = = nullptr ) ? nullptr : CopyState ( s . state_ ) ; <nl> } <nl> return * this ; <nl> inline Status & Status : : operator = ( Status & & s ) <nl> s . code_ = kOk ; <nl> subcode_ = std : : move ( s . subcode_ ) ; <nl> s . subcode_ = kNone ; <nl> - delete [ ] state_ ; <nl> + free ( ( void * ) state_ ) ; <nl> state_ = nullptr ; <nl> std : : swap ( state_ , s . state_ ) ; <nl> } <nl> mmm a / util / status . cc <nl> ppp b / util / status . cc <nl> <nl> # include " rocksdb / status . h " <nl> # include < stdio . h > <nl> # include < cstring > <nl> + # include < string . h > <nl> # include " port / port . h " <nl> <nl> namespace rocksdb { <nl> <nl> const char * Status : : CopyState ( const char * state ) { <nl> - const size_t cch = <nl> - std : : strlen ( state ) + 1 ; / / + 1 for the null terminator <nl> - char * const result = <nl> - new char [ cch ] ; <nl> - result [ cch - 1 ] = ' \ 0 ' ; <nl> # ifdef OS_WIN <nl> - errno_t ret ; <nl> - ret = strncpy_s ( result , cch , state , cch - 1 ) ; <nl> - assert ( ret = = 0 ) ; <nl> + return _strdup ( state ) ; <nl> # else <nl> - std : : strncpy ( result , state , cch - 1 ) ; <nl> + return strdup ( state ) ; <nl> # endif <nl> - return result ; <nl> } <nl> <nl> Status : : Status ( Code _code , SubCode _subcode , const Slice & msg , const Slice & msg2 ) <nl> Status : : Status ( Code _code , SubCode _subcode , const Slice & msg , const Slice & msg2 <nl> const size_t len1 = msg . size ( ) ; <nl> const size_t len2 = msg2 . size ( ) ; <nl> const size_t size = len1 + ( len2 ? ( 2 + len2 ) : 0 ) ; <nl> - char * const result = new char [ size + 1 ] ; / / + 1 for null terminator <nl> + char * const result = ( char * ) malloc ( size + 1 ) ; / / + 1 for null terminator <nl> memcpy ( result , msg . data ( ) , len1 ) ; <nl> if ( len2 ) { <nl> result [ len1 ] = ' : ' ; <nl> | Remove bogus gcc - 8 . 1 warning ( ) | facebook/rocksdb | e5ae1bb46564689e56a38f3509daffa4aca3b29c | 2018-06-27T19:23:07Z |
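The substance of the change above is an allocator pairing: state_ is now produced by strdup()/_strdup(), which allocate with malloc, so every release site must use free() and never delete[]; mixing the two families is undefined behavior. This also sidesteps the manual strncpy bookkeeping that presumably triggered the gcc 8.1 diagnostic the commit message refers to. A minimal sketch of the invariant, assuming a POSIX strdup() (the diff shows Windows spelling it _strdup):

    #include <cstdlib>
    #include <cstring>

    // Copy a NUL-terminated state string; the result is malloc-allocated.
    const char* copy_state(const char* state) {
        return strdup(state);  // POSIX; on Windows use _strdup
    }

    int main() {
        const char* s = copy_state("IOError: something went wrong");
        // Release must match the allocation: free(), not delete[].
        std::free(const_cast<char*>(s));
        return 0;
    }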
mmm a / system / lib / libc / emscripten_memcpy . c <nl> ppp b / system / lib / libc / emscripten_memcpy . c <nl> void * memcpy ( void * restrict dest , const void * restrict src , size_t n ) <nl> * d + + = * s + + ; <nl> } <nl> return dest ; <nl> - <nl> - <nl> - for ( ; n ; n - - ) * d + + = * s + + ; <nl> - return dest ; <nl> } <nl> | Remove some dead code in memcpy ( ) | emscripten-core/emscripten | e8cd588cdca3ac0dce91f5142bbc6f96511767db | 2020-02-14T22:06:50Z |
mmm a / xbmc / FileItem . cpp <nl> ppp b / xbmc / FileItem . cpp <nl> bool CFileItemList : : Contains ( const std : : string & fileName , bool ignoreURLOptions <nl> { <nl> CSingleLock lock ( m_lock ) ; <nl> <nl> - if ( m_fastLookup & & ! ignoreUrlOptions ) <nl> + if ( m_fastLookup & & ! ignoreURLOptions ) <nl> return m_map . find ( fileName ) ! = m_map . end ( ) ; <nl> <nl> / / slow method . . . <nl> | fixed : Compile error . Somehow force push wasn ' t noticed by jenkins | xbmc/xbmc | df226e63784ff019de9c9022bdfac96ab8d57f61 | 2016-01-08T13:00:04Z |
mmm a / include / caffe / layer . hpp <nl> ppp b / include / caffe / layer . hpp <nl> <nl> # include " caffe / common . hpp " <nl> # include " caffe / proto / caffe . pb . h " <nl> <nl> + namespace caffe { <nl> + <nl> using std : : string ; <nl> using std : : vector ; <nl> <nl> - namespace caffe { <nl> - <nl> template < typename Dtype > <nl> class Layer { <nl> public : <nl> mmm a / include / caffe / net . hpp <nl> ppp b / include / caffe / net . hpp <nl> <nl> # include " caffe / layer . hpp " <nl> # include " caffe / proto / caffe . pb . h " <nl> <nl> + namespace caffe { <nl> + <nl> using std : : map ; <nl> using std : : pair ; <nl> using std : : set ; <nl> using std : : string ; <nl> using std : : vector ; <nl> <nl> - namespace caffe { <nl> - <nl> - <nl> template < typename Dtype > <nl> class Net { <nl> public : <nl> mmm a / include / caffe / util / insert_splits . hpp <nl> ppp b / include / caffe / util / insert_splits . hpp <nl> <nl> <nl> # include " caffe / proto / caffe . pb . h " <nl> <nl> + namespace caffe { <nl> + <nl> using std : : pair ; <nl> using std : : string ; <nl> <nl> - namespace caffe { <nl> - <nl> / / Copy NetParameters with SplitLayers added to replace any shared bottom <nl> / / blobs with unique bottom blobs provided by the SplitLayer . <nl> void InsertSplits ( const NetParameter & param , NetParameter * param_split ) ; <nl> mmm a / include / caffe / util / io . hpp <nl> ppp b / include / caffe / util / io . hpp <nl> <nl> <nl> # include " caffe / blob . hpp " <nl> <nl> - using std : : string ; <nl> - using : : google : : protobuf : : Message ; <nl> - <nl> # define HDF5_NUM_DIMS 4 <nl> <nl> namespace caffe { <nl> <nl> + using std : : string ; <nl> + using : : google : : protobuf : : Message ; <nl> + <nl> bool ReadProtoFromTextFile ( const char * filename , Message * proto ) ; <nl> <nl> inline bool ReadProtoFromTextFile ( const string & filename , Message * proto ) { <nl> mmm a / include / caffe / util / upgrade_proto . hpp <nl> ppp b / include / caffe / util / upgrade_proto . hpp <nl> <nl> # include " caffe / proto / caffe . pb . h " <nl> # include " caffe / proto / caffe_pretty_print . pb . h " <nl> <nl> - using std : : string ; <nl> - <nl> namespace caffe { <nl> <nl> + using std : : string ; <nl> + <nl> / / Return true iff any layer contains parameters specified using <nl> / / deprecated V0LayerParameter . <nl> bool NetNeedsUpgrade ( const NetParameter & net_param ) ; <nl> mmm a / src / caffe / layer_factory . cpp <nl> ppp b / src / caffe / layer_factory . cpp <nl> <nl> # include " caffe / vision_layers . hpp " <nl> # include " caffe / proto / caffe . pb . h " <nl> <nl> - using std : : string ; <nl> - <nl> namespace caffe { <nl> <nl> + using std : : string ; <nl> <nl> / / A function to get a specific layer from the specification given in <nl> / / LayerParameter . Ideally this would be replaced by a factory pattern , <nl> mmm a / src / caffe / layers / accuracy_layer . cpp <nl> ppp b / src / caffe / layers / accuracy_layer . cpp <nl> <nl> # include " caffe / util / math_functions . hpp " <nl> # include " caffe / util / io . hpp " <nl> <nl> - using std : : max ; <nl> <nl> namespace caffe { <nl> <nl> + using std : : max ; <nl> + <nl> template < typename Dtype > <nl> void AccuracyLayer < Dtype > : : SetUp ( <nl> const vector < Blob < Dtype > * > & bottom , vector < Blob < Dtype > * > * top ) { <nl> mmm a / src / caffe / layers / bnll_layer . cpp <nl> ppp b / src / caffe / layers / bnll_layer . cpp <nl> <nl> # include " caffe / layer . 
hpp " <nl> # include " caffe / vision_layers . hpp " <nl> <nl> - using std : : min ; <nl> - <nl> namespace caffe { <nl> <nl> + using std : : min ; <nl> + <nl> const float kBNLL_THRESHOLD = 50 . ; <nl> <nl> template < typename Dtype > <nl> mmm a / src / caffe / layers / bnll_layer . cu <nl> ppp b / src / caffe / layers / bnll_layer . cu <nl> <nl> # include " caffe / layer . hpp " <nl> # include " caffe / vision_layers . hpp " <nl> <nl> - using std : : max ; <nl> - <nl> namespace caffe { <nl> <nl> + using std : : max ; <nl> + <nl> const float kBNLL_THRESHOLD = 50 . ; <nl> <nl> template < typename Dtype > <nl> mmm a / src / caffe / layers / data_layer . cpp <nl> ppp b / src / caffe / layers / data_layer . cpp <nl> <nl> # include " caffe / vision_layers . hpp " <nl> # include " caffe / proto / caffe . pb . h " <nl> <nl> - using std : : string ; <nl> - <nl> namespace caffe { <nl> <nl> template < typename Dtype > <nl> mmm a / src / caffe / layers / data_layer . cu <nl> ppp b / src / caffe / layers / data_layer . cu <nl> <nl> # include " caffe / util / io . hpp " <nl> # include " caffe / vision_layers . hpp " <nl> <nl> - using std : : string ; <nl> - <nl> namespace caffe { <nl> <nl> template < typename Dtype > <nl> mmm a / src / caffe / layers / hdf5_data_layer . cu <nl> ppp b / src / caffe / layers / hdf5_data_layer . cu <nl> TODO : <nl> # include " caffe / util / io . hpp " <nl> # include " caffe / vision_layers . hpp " <nl> <nl> - using std : : string ; <nl> - <nl> namespace caffe { <nl> <nl> template < typename Dtype > <nl> mmm a / src / caffe / layers / image_data_layer . cpp <nl> ppp b / src / caffe / layers / image_data_layer . cpp <nl> <nl> # include " caffe / util / rng . hpp " <nl> # include " caffe / vision_layers . hpp " <nl> <nl> + namespace caffe { <nl> + <nl> using std : : iterator ; <nl> - using std : : string ; <nl> using std : : pair ; <nl> <nl> - namespace caffe { <nl> - <nl> template < typename Dtype > <nl> void * ImageDataLayerPrefetch ( void * layer_pointer ) { <nl> CHECK ( layer_pointer ) ; <nl> mmm a / src / caffe / layers / image_data_layer . cu <nl> ppp b / src / caffe / layers / image_data_layer . cu <nl> <nl> # include " caffe / util / io . hpp " <nl> # include " caffe / vision_layers . hpp " <nl> <nl> - using std : : string ; <nl> - using std : : pair ; <nl> - <nl> namespace caffe { <nl> <nl> + using std : : pair ; <nl> + <nl> template < typename Dtype > <nl> Dtype ImageDataLayer < Dtype > : : Forward_gpu ( const vector < Blob < Dtype > * > & bottom , <nl> vector < Blob < Dtype > * > * top ) { <nl> mmm a / src / caffe / layers / window_data_layer . cpp <nl> ppp b / src / caffe / layers / window_data_layer . cpp <nl> <nl> # include " caffe / util / rng . hpp " <nl> # include " caffe / vision_layers . hpp " <nl> <nl> - using std : : string ; <nl> - using std : : map ; <nl> - using std : : pair ; <nl> - <nl> / / caffe . proto > LayerParameter > WindowDataParameter <nl> / / ' source ' field specifies the window_file <nl> / / ' crop_size ' indicates the desired warped size <nl> <nl> namespace caffe { <nl> <nl> + using std : : map ; <nl> + using std : : pair ; <nl> + <nl> template < typename Dtype > <nl> void * WindowDataLayerPrefetch ( void * layer_pointer ) { <nl> WindowDataLayer < Dtype > * layer = <nl> mmm a / src / caffe / layers / window_data_layer . cu <nl> ppp b / src / caffe / layers / window_data_layer . cu <nl> <nl> # include " caffe / util / io . hpp " <nl> # include " caffe / vision_layers . 
hpp " <nl> <nl> - using std : : string ; <nl> - using std : : map ; <nl> - using std : : pair ; <nl> - <nl> / / caffe . proto > LayerParameter > WindowDataParameter <nl> / / ' source ' field specifies the window_file <nl> / / ' crop_size ' indicates the desired warped size <nl> <nl> namespace caffe { <nl> <nl> + using std : : map ; <nl> + using std : : pair ; <nl> + <nl> template < typename Dtype > <nl> Dtype WindowDataLayer < Dtype > : : Forward_gpu ( const vector < Blob < Dtype > * > & bottom , <nl> vector < Blob < Dtype > * > * top ) { <nl> | move using statements inside namespace caffe to avoid polluting the whole name space . | BVLC/caffe | 7dccb008c1422acf4a51036bea49c274788dc939 | 2014-07-16T23:15:09Z |
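The caffe commit above is purely name-leak hygiene: a using-declaration at global scope in a header injects the name into the global namespace of every file that includes it, while the same declaration inside namespace caffe is visible only to caffe code. A small self-contained sketch of the difference (the function name here is illustrative, not from the caffe sources):

    #include <string>

    namespace caffe {
    using std::string;  // alias visible only inside caffe::

    inline string greet() { return string("hello"); }
    }  // namespace caffe

    // Outside the namespace the unqualified name is not declared:
    // string s;  // error: 'string' was not declared in this scope
    std::string s = caffe::greet();  // includers keep writing std::string

    int main() { return s.empty() ? 1 : 0; }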
mmm a / jstests / repl / dbcase . js <nl> ppp b / jstests / repl / dbcase . js <nl> function check ( n ) { <nl> names = s . getDBNames ( ) ; <nl> n1Idx = names . indexOf ( n1 ) ; <nl> n2Idx = names . indexOf ( n2 ) ; <nl> - / / Check n1 and n2 are not both present . <nl> - assert ( n1Idx = = - 1 | | n2Idx = = - 1 ) ; <nl> + if ( n1Idx ! = - 1 & & n2Idx ! = - 1 ) { <nl> + / / n1 and n2 may both be reported as present transiently . <nl> + return false ; <nl> + } <nl> / / Return true if we matched expected n . <nl> return - 1 ! = names . indexOf ( n ) ; <nl> } ) ; <nl> | handle transient reported name conflict in dbcase test | mongodb/mongo | 9958fc16cad0df53ddd3218c6fd9f0e88cdaa87d | 2011-07-12T03:49:18Z |
mmm a / js / apps / system / _admin / aardvark / APP / frontend / js / collections / arangoDocument . js <nl> ppp b / js / apps / system / _admin / aardvark / APP / frontend / js / collections / arangoDocument . js <nl> window . ArangoDocument = Backbone . Collection . extend ( { <nl> } ) ; <nl> } , <nl> <nl> - addDocument : function ( collectionID , key ) { <nl> + addDocument : function ( collectionID , key , body ) { <nl> var self = this ; <nl> - self . createTypeDocument ( collectionID , key ) ; <nl> + self . createTypeDocument ( collectionID , key , body ) ; <nl> } , <nl> <nl> - createTypeEdge : function ( collectionID , from , to , key , callback ) { <nl> + createTypeEdge : function ( collectionID , from , to , key , body , callback ) { <nl> var newEdge ; <nl> <nl> - if ( key ) { <nl> - newEdge = JSON . stringify ( { <nl> - _key : key , <nl> - _from : from , <nl> - _to : to <nl> - } ) ; <nl> - } else { <nl> - newEdge = JSON . stringify ( { <nl> - _from : from , <nl> - _to : to <nl> - } ) ; <nl> + try { <nl> + if ( key ) { <nl> + body . _key = key ; <nl> + } <nl> + body . _from = from ; <nl> + body . _to = to ; <nl> + newEdge = JSON . stringify ( body ) ; <nl> + } <nl> + catch ( x ) { <nl> + body = { } ; <nl> + return this . createTypeEdge ( collectionID , from , to , key , body , callback ) ; <nl> } <nl> <nl> $ . ajax ( { <nl> window . ArangoDocument = Backbone . Collection . extend ( { <nl> } ) ; <nl> } , <nl> <nl> - createTypeDocument : function ( collectionID , key , callback , returnNew , <nl> + createTypeDocument : function ( collectionID , key , body , callback , returnNew , <nl> smartJoinAttribute , smartJoinAttributeValue , <nl> smartGraphAttribute , smartGraphAttributeValue ) { <nl> - var newDocument = { } ; <nl> - <nl> - if ( smartJoinAttribute & & smartJoinAttributeValue & & key ) { <nl> - / / case : smartJoin , bot value are needed and NOT optional <nl> - newDocument . _key = smartJoinAttributeValue + ' : ' + key ; <nl> - newDocument [ smartJoinAttribute ] = smartJoinAttributeValue ; <nl> - } else if ( smartGraphAttribute & & smartGraphAttributeValue ) { <nl> - / / case : smartGraph with value <nl> - / / other to smartJoin , we can : <nl> - / / 1 . ) Create without smartGraphAttribute and without smartGraphAttributeValue <nl> - / / 2 . ) Create only with smartGraphAttributeValue <nl> - if ( key ) { <nl> - newDocument . _key = smartGraphAttributeValue + ' : ' + key ; <nl> + var newDocument = body ; <nl> + <nl> + try { <nl> + if ( smartJoinAttribute & & smartJoinAttributeValue & & key ) { <nl> + / / case : smartJoin , bot value are needed and NOT optional <nl> + newDocument . _key = smartJoinAttributeValue + ' : ' + key ; <nl> + newDocument [ smartJoinAttribute ] = smartJoinAttributeValue ; <nl> + } else if ( smartGraphAttribute & & smartGraphAttributeValue ) { <nl> + / / case : smartGraph with value <nl> + / / other to smartJoin , we can : <nl> + / / 1 . ) Create without smartGraphAttribute and without smartGraphAttributeValue <nl> + / / 2 . ) Create only with smartGraphAttributeValue <nl> + if ( key ) { <nl> + newDocument . _key = smartGraphAttributeValue + ' : ' + key ; <nl> + } <nl> + newDocument [ smartGraphAttribute ] = smartGraphAttributeValue ; <nl> + } else if ( key ) { <nl> + newDocument . _key = key ; <nl> } <nl> - newDocument [ smartGraphAttribute ] = smartGraphAttributeValue ; <nl> - } else if ( key ) { <nl> - newDocument . _key = key ; <nl> + } <nl> + catch ( x ) { <nl> + body = { } ; <nl> + return this . 
createTypeDocument ( collectionID , key , body , callback , returnNew , <nl> + smartJoinAttribute , smartJoinAttributeValue , <nl> + smartGraphAttribute , smartGraphAttributeValue ) ; <nl> } <nl> newDocument = JSON . stringify ( newDocument ) ; <nl> <nl> mmm a / js / apps / system / _admin / aardvark / APP / frontend / js / templates / modalTable . html <nl> ppp b / js / apps / system / _admin / aardvark / APP / frontend / js / templates / modalTable . html <nl> <nl> < % } % > <nl> < % <nl> break ; <nl> + case " jsoneditor " : <nl> + % > <nl> + < div id = " jsoneditor " / > % > <nl> + < % <nl> + break ; <nl> } <nl> % > <nl> < % if ( row . info ) { % > <nl> mmm a / js / apps / system / _admin / aardvark / APP / frontend / js / views / documentsView . js <nl> ppp b / js / apps / system / _admin / aardvark / APP / frontend / js / views / documentsView . js <nl> <nl> / * jshint browser : true * / <nl> / * jshint unused : false * / <nl> - / * global document , frontendConfig , arangoHelper , _ , $ , window , arangoHelper , templateEngine , Joi , btoa * / <nl> + / * global document , frontendConfig , arangoHelper , _ , $ , window , arangoHelper , templateEngine , Joi , btoa , JSONEditor * / <nl> / * global numeral * / <nl> <nl> ( function ( ) { <nl> <nl> var callback = function ( error , type ) { <nl> if ( error ) { <nl> arangoHelper . arangoError ( ' Error ' , ' Could not fetch collection type ' ) ; <nl> - } else { <nl> + } else { <nl> if ( this . collection . getSmartJoinAttribute ( ) ) { <nl> tableContent . push ( this . createDocumentKeyInput ( true ) ) ; <nl> <nl> <nl> ] <nl> ) <nl> ) ; <nl> - <nl> + tableContent . push ( window . modalView . createJsonEditor ( ) ) ; <nl> + <nl> buttons . push ( <nl> window . modalView . createSuccessButton ( ' Create ' , this . addSmartAttributeDocument . bind ( this ) ) <nl> ) ; <nl> <nl> ) <nl> ) ; <nl> <nl> + tableContent . push ( window . modalView . createJsonEditor ( ) ) ; <nl> + <nl> buttons . push ( <nl> window . modalView . createSuccessButton ( ' Create ' , this . addSmartGraphDocument . bind ( this ) ) <nl> ) ; <nl> <nl> ] <nl> ) <nl> ) ; <nl> + <nl> + tableContent . push ( window . modalView . createJsonEditor ( ) ) ; <nl> + <nl> buttons . push ( <nl> window . modalView . createSuccessButton ( ' Create ' , this . addEdge . bind ( this ) ) <nl> ) ; <nl> <nl> ) ; <nl> } else { <nl> tableContent . push ( this . createDocumentKeyInput ( false ) ) ; <nl> + <nl> + tableContent . push ( window . modalView . createJsonEditor ( ) ) ; <nl> + <nl> buttons . push ( <nl> window . modalView . createSuccessButton ( ' Create ' , this . addDocument . bind ( this ) ) <nl> ) ; <nl> <nl> tableContent <nl> ) ; <nl> } <nl> + <nl> + var container = document . getElementById ( ' jsoneditor ' ) ; <nl> + this . resize ( ) ; <nl> + var options = { <nl> + onChange : function ( ) { <nl> + } , <nl> + onModeChange : function ( newMode ) { <nl> + void ( newMode ) ; <nl> + } , <nl> + search : true , <nl> + mode : ' code ' , <nl> + modes : [ ' tree ' , ' code ' ] , <nl> + ace : window . ace <nl> + } ; <nl> + this . editor = new JSONEditor ( container , options ) ; <nl> } <nl> } . bind ( this ) ; <nl> arangoHelper . collectionApiType ( collid , true , callback ) ; <nl> <nl> var from = $ ( ' . modal - body # new - edge - from - attr ' ) . last ( ) . val ( ) ; <nl> var to = $ ( ' . modal - body # new - edge - to ' ) . last ( ) . val ( ) ; <nl> var key = $ ( ' . modal - body # new - edge - key - attr ' ) . last ( ) . 
val ( ) ; <nl> - <nl> + var body ; <nl> + try { / / TODO : refactor this . <nl> + body = this . editor . get ( ) ; <nl> + } catch ( x ) { <nl> + arangoHelper . arangoError ( " failed to parse JSON document " , x . message ) ; <nl> + return ; <nl> + } <nl> if ( key ! = = ' ' | | key ! = = undefined ) { <nl> - this . documentStore . createTypeEdge ( collid , from , to , key , this . goToDocument ) ; <nl> + this . documentStore . createTypeEdge ( collid , from , to , key , body , this . goToDocument ) ; <nl> } else { <nl> - this . documentStore . createTypeEdge ( collid , from , to , null , this . goToDocument ) ; <nl> + this . documentStore . createTypeEdge ( collid , from , to , null , body , this . goToDocument ) ; <nl> } <nl> } , <nl> <nl> addDocument : function ( ) { <nl> var collid = window . location . hash . split ( ' / ' ) [ 1 ] ; <nl> var key = $ ( ' . modal - body # new - document - key - attr ' ) . last ( ) . val ( ) ; <nl> + var body ; <nl> + try { <nl> + body = this . editor . get ( ) ; <nl> + } catch ( x ) { <nl> + arangoHelper . arangoError ( " failed to parse JSON document " , x . message ) ; <nl> + return ; <nl> + } <nl> <nl> if ( key ! = = ' ' | | key ! = = undefined ) { <nl> - this . documentStore . createTypeDocument ( collid , key , this . goToDocument ) ; <nl> + this . documentStore . createTypeDocument ( collid , key , body , this . goToDocument ) ; <nl> } else { <nl> - this . documentStore . createTypeDocument ( collid , null , this . goToDocument ) ; <nl> + this . documentStore . createTypeDocument ( collid , null , body , this . goToDocument ) ; <nl> } <nl> } , <nl> <nl> <nl> var collid = window . location . hash . split ( ' / ' ) [ 1 ] ; <nl> var key = $ ( ' . modal - body # new - document - key - attr ' ) . last ( ) . val ( ) ; <nl> var smartJoinAttributeValue = $ ( ' . modal - body # new - smart - val - attr ' ) . last ( ) . val ( ) ; <nl> + var body ; <nl> + try { <nl> + body = this . editor . get ( ) ; <nl> + } catch ( x ) { <nl> + arangoHelper . arangoError ( " failed to parse JSON document " , x . message ) ; <nl> + return ; <nl> + } <nl> <nl> if ( key ! = = ' ' | | key ! = = undefined ) { <nl> - this . documentStore . createTypeDocument ( collid , key , this . goToDocument , false , <nl> + this . documentStore . createTypeDocument ( collid , key , body , this . goToDocument , false , <nl> this . collection . getSmartJoinAttribute ( ) , smartJoinAttributeValue , null , null ) ; <nl> } else { <nl> - this . documentStore . createTypeDocument ( collid , null , this . goToDocument , false , <nl> + this . documentStore . createTypeDocument ( collid , null , body , this . goToDocument , false , <nl> this . collection . getSmartJoinAttribute ( ) , smartJoinAttributeValue , null , null ) ; <nl> } <nl> } , <nl> <nl> var collid = window . location . hash . split ( ' / ' ) [ 1 ] ; <nl> var key = $ ( ' . modal - body # new - document - key - attr ' ) . last ( ) . val ( ) ; <nl> var smartGraphAttributeValue = $ ( ' . modal - body # new - smartGraph - val - attr ' ) . last ( ) . val ( ) ; <nl> + var body ; <nl> + try { <nl> + body = this . editor . get ( ) ; <nl> + } catch ( x ) { <nl> + arangoHelper . arangoError ( " failed to parse JSON document " , x . message ) ; <nl> + return ; <nl> + } <nl> <nl> if ( smartGraphAttributeValue = = = ' ' ) { <nl> smartGraphAttributeValue = null ; <nl> <nl> key = null ; <nl> } <nl> <nl> - this . documentStore . createTypeDocument ( collid , key , this . goToDocument , false , null , null , <nl> + this . documentStore . 
createTypeDocument ( collid , key , body , this . goToDocument , false , null , null , <nl> smartGraphAttribute , smartGraphAttributeValue ) ; <nl> } , <nl> <nl> mmm a / js / apps / system / _admin / aardvark / APP / frontend / js / views / graphViewer . js <nl> ppp b / js / apps / system / _admin / aardvark / APP / frontend / js / views / graphViewer . js <nl> <nl> / * jshint browser : true * / <nl> / * jshint unused : false * / <nl> - / * global arangoHelper , _ , frontendConfig , slicePath , icon , Joi , wheelnav , document , sigma , Backbone , templateEngine , $ , window * / <nl> + / * global arangoHelper , _ , frontendConfig , slicePath , icon , Joi , wheelnav , document , sigma , Backbone , templateEngine , $ , window , JSONEditor * / <nl> ( function ( ) { <nl> ' use strict ' ; <nl> <nl> <nl> <nl> var callback = function ( error , id , msg ) { <nl> if ( ! error ) { <nl> - var edge = { <nl> - source : from , <nl> - target : to , <nl> - id : id , <nl> - color : self . graphConfig . edgeColor | | self . ecolor <nl> - } ; <nl> + var edge ; <nl> + try { <nl> + edge = this . editor . get ( ) ; <nl> + } catch ( x ) { <nl> + arangoHelper . arangoError ( " failed to parse JSON document " , x . message ) ; <nl> + return ; <nl> + } <nl> + try { <nl> + edge . source = from ; <nl> + } catch ( x ) { <nl> + edge = { } ; <nl> + edge . source = from ; <nl> + } <nl> + edge . target = to ; <nl> + edge . id = id ; <nl> + edge . color = self . graphConfig . edgeColor | | self . ecolor ; <nl> <nl> if ( self . graphConfig . edgeEditable = = = ' true ' ) { <nl> edge . size = 1 ; <nl> <nl> / / then clear states <nl> self . clearOldContextMenu ( true ) ; <nl> window . modalView . hide ( ) ; <nl> - } ; <nl> + } . bind ( this ) ; <nl> <nl> - var data = { <nl> - _from : from , <nl> - _to : to <nl> - } ; <nl> + var body ; <nl> + try { <nl> + body = this . editor . get ( ) ; <nl> + } catch ( x ) { <nl> + arangoHelper . arangoError ( " failed to parse JSON document " , x . message ) ; <nl> + return ; <nl> + } <nl> + <nl> + try { <nl> + body . _from = from ; <nl> + } catch ( x ) { <nl> + body = { } ; <nl> + body . _from = from ; <nl> + } <nl> + <nl> + body . _to = to ; <nl> if ( key ! = = ' ' & & key ! = = undefined ) { <nl> - data . _key = key ; <nl> + body . _key = key ; <nl> } <nl> - this . collection . createEdge ( self . name , collection , data , callback ) ; <nl> + this . collection . createEdge ( self . name , collection , body , callback ) ; <nl> } , <nl> <nl> addEdgeModal : function ( edgeDefinitions ) { <nl> <nl> ) ; <nl> } <nl> <nl> + tableContent . push ( window . modalView . createJsonEditor ( ) ) ; <nl> + <nl> buttons . push ( <nl> window . modalView . createSuccessButton ( ' Create ' , this . addEdge . bind ( this ) ) <nl> ) ; <nl> <nl> buttons , <nl> tableContent <nl> ) ; <nl> + var container = document . getElementById ( ' jsoneditor ' ) ; <nl> + this . resize ( ) ; <nl> + var options = { <nl> + onChange : function ( ) { <nl> + } , <nl> + onModeChange : function ( newMode ) { <nl> + void ( newMode ) ; <nl> + } , <nl> + search : true , <nl> + mode : ' code ' , <nl> + modes : [ ' tree ' , ' code ' ] , <nl> + ace : window . ace <nl> + } ; <nl> + this . editor = new JSONEditor ( container , options ) ; <nl> } else { <nl> arangoHelper . arangoError ( ' Graph ' , ' No valid edge definitions found . ' ) ; <nl> } <nl> mmm a / js / apps / system / _admin / aardvark / APP / frontend / js / views / modalView . js <nl> ppp b / js / apps / system / _admin / aardvark / APP / frontend / js / views / modalView . 
js <nl> <nl> PASSWORD : ' password ' , <nl> SELECT : ' select ' , <nl> SELECT2 : ' select2 ' , <nl> + JSONEDITOR : ' jsoneditor ' , <nl> CHECKBOX : ' checkbox ' <nl> } , <nl> <nl> <nl> return obj ; <nl> } , <nl> <nl> + createJsonEditor : function ( <nl> + id , label , value , info , placeholder , mandatory , addDelete , addAdd , maxEntrySize , tags ) { <nl> + var obj = createTextStub ( this . tables . JSONEDITOR , ' Document body ' , value , ' ' , placeholder , <nl> + mandatory , undefined , addDelete , addAdd , maxEntrySize , tags ) ; <nl> + obj . id = id ; <nl> + return obj ; <nl> + } , <nl> + <nl> createPasswordEntry : function ( id , label , value , info , placeholder , mandatory , regexp ) { <nl> var obj = createTextStub ( this . tables . PASSWORD , label , value , info , placeholder , mandatory , regexp ) ; <nl> obj . id = id ; <nl> | add ability to create documents with a specified body ( ) | arangodb/arangodb | 6ca87a09b3c82a7b1dd6b1976d0cf977d90804dd | 2020-05-05T08:16:56Z |
deleted file mode 100644 <nl> index 97614c8addf4 . . 000000000000 <nl> mmm a / jstests / sharding / error1 . js <nl> ppp / dev / null <nl> <nl> - s = new ShardingTest ( " error1 " , 2 , 1 , 1 ) ; <nl> - s . stopBalancer ( ) <nl> - s . adminCommand ( { enablesharding : " test " } ) ; <nl> - <nl> - a = s . _connections [ 0 ] . getDB ( " test " ) ; <nl> - b = s . _connections [ 1 ] . getDB ( " test " ) ; <nl> - <nl> - / / mmm - simple getLastError mmm - <nl> - <nl> - db = s . getDB ( " test " ) ; <nl> - db . foo . insert ( { _id : 1 } ) ; <nl> - assert . isnull ( db . getLastError ( ) , " gle 1 " ) ; <nl> - db . foo . insert ( { _id : 1 } ) ; <nl> - assert ( db . getLastError ( ) , " gle21 " ) ; <nl> - assert ( db . getLastError ( ) , " gle22 " ) ; <nl> - <nl> - / / mmm sharded getlasterror <nl> - <nl> - s . adminCommand ( { shardcollection : " test . foo2 " , key : { num : 1 } } ) ; <nl> - <nl> - db . foo2 . save ( { _id : 1 , num : 5 } ) ; <nl> - db . foo2 . save ( { _id : 2 , num : 10 } ) ; <nl> - db . foo2 . save ( { _id : 3 , num : 15 } ) ; <nl> - db . foo2 . save ( { _id : 4 , num : 20 } ) ; <nl> - <nl> - s . adminCommand ( { split : " test . foo2 " , middle : { num : 10 } } ) ; <nl> - s . adminCommand ( { movechunk : " test . foo2 " , find : { num : 20 } , to : s . getOther ( s . getServer ( " test " ) ) . name , _waitForDelete : true } ) ; <nl> - <nl> - print ( " a : " + a . foo2 . count ( ) ) ; <nl> - print ( " b : " + b . foo2 . count ( ) ) ; <nl> - assert ( a . foo2 . count ( ) > 0 & & a . foo2 . count ( ) < 4 , " se1 " ) ; <nl> - assert ( b . foo2 . count ( ) > 0 & & b . foo2 . count ( ) < 4 , " se2 " ) ; <nl> - assert . eq ( 4 , db . foo2 . count ( ) , " se3 " ) ; <nl> - <nl> - db . foo2 . save ( { _id : 5 , num : 25 } ) ; <nl> - assert ( ! db . getLastError ( ) , " se3 . 5 " ) ; <nl> - s . sync ( ) ; <nl> - assert . eq ( 5 , db . foo2 . count ( ) , " se4 " ) ; <nl> - <nl> - <nl> - <nl> - db . foo2 . insert ( { _id : 5 , num : 30 } ) ; <nl> - assert ( db . getLastError ( ) , " se5 " ) ; <nl> - assert ( db . getLastError ( ) , " se6 " ) ; <nl> - <nl> - assert . eq ( 5 , db . foo2 . count ( ) , " se5 " ) ; <nl> - <nl> - <nl> - / / assert in mongos <nl> - s . adminCommand ( { shardcollection : " test . foo3 " , key : { num : 1 } } ) ; <nl> - assert . isnull ( db . getLastError ( ) , " gle C1 " ) ; <nl> - <nl> - db . foo3 . insert ( { } ) ; / / this fails with no shard key error <nl> - assert ( db . getLastError ( ) , " gle C2a " ) ; <nl> - assert ( db . getLastError ( ) , " gle C2b " ) ; <nl> - <nl> - db . foo3 . insert ( { num : 1 } ) ; <nl> - assert . isnull ( db . getLastError ( ) , " gle C3a " ) ; <nl> - <nl> - / / mmm - <nl> - s . stop ( ) ; <nl> mmm a / jstests / sharding / gle_sharded_wc . js <nl> ppp b / jstests / sharding / gle_sharded_wc . js <nl> assert . commandWorked ( admin . runCommand ( { moveChunk : coll . toString ( ) , <nl> <nl> st . printShardingStatus ( ) ; <nl> <nl> + var gle = null ; <nl> + <nl> / / <nl> / / No journal insert , GLE fails <nl> coll . remove ( { } ) ; <nl> coll . remove ( { } ) ; <nl> coll . insert ( [ { _id : 1 } , { _id : - 1 } ] ) ; <nl> / / Wait for write to be written to shards before shutting it down . <nl> printjson ( gle = coll . getDB ( ) . runCommand ( { getLastError : 1 } ) ) ; <nl> - <nl> st . rs0 . stop ( st . rs0 . getPrimary ( ) , true ) ; / / wait for stop <nl> printjson ( gle = coll . getDB ( ) . runCommand ( { getLastError : 1 } ) ) ; <nl> / / Should get an error about contacting dead host . <nl> assert . eq ( coll . 
count ( { _id : 1 } ) , 1 ) ; <nl> / / NOTE : This is DIFFERENT from 2 . 4 , since we don ' t need to contact a host we didn ' t get <nl> / / successful writes from . <nl> coll . remove ( { _id : 1 } ) ; <nl> - coll . insert ( [ { _id : 1 } , { _id : - 1 } ] ) ; <nl> + / / The insert throws if write commands are enabled , since we get a response <nl> + if ( coll . getMongo ( ) . useWriteCommands ( ) ) { <nl> + assert . throws ( function ( ) { <nl> + coll . insert ( [ { _id : 1 } , { _id : - 1 } ] ) ; <nl> + } ) ; <nl> + } <nl> + else { <nl> + coll . insert ( [ { _id : 1 } , { _id : - 1 } ] ) ; <nl> + } <nl> printjson ( gle = coll . getDB ( ) . runCommand ( { getLastError : 1 } ) ) ; <nl> assert ( gle . ok ) ; <nl> assert ( gle . err ) ; <nl> mmm a / jstests / sharding / gle_sharded_write . js <nl> ppp b / jstests / sharding / gle_sharded_write . js <nl> <nl> / / <nl> / / Ensures GLE correctly reports basic write stats and failures <nl> + / / Note that test should work correctly with and without write commands . <nl> / / <nl> <nl> var options = { separateConfig : true } ; <nl> assert . commandWorked ( admin . runCommand ( { moveChunk : coll . toString ( ) , <nl> <nl> st . printShardingStatus ( ) ; <nl> <nl> - / / Don ' t use write commands <nl> - coll . getMongo ( ) . useWriteCommands = function ( ) { return false ; } ; <nl> - <nl> var gle = null ; <nl> <nl> / / <nl> assert ( ! gle . errmsg ) ; <nl> assert ( gle . shards ) ; <nl> assert . eq ( coll . count ( ) , 0 ) ; <nl> <nl> + / / <nl> + / / Repeated calls to GLE should work <nl> + coll . remove ( { } ) ; <nl> + coll . update ( { _id : 1 } , { $ invalid : " xxx " } , true ) ; <nl> + printjson ( gle = coll . getDB ( ) . runCommand ( { getLastError : 1 } ) ) ; <nl> + assert ( gle . ok ) ; <nl> + assert ( gle . err ) ; <nl> + assert ( gle . code ) ; <nl> + assert ( ! gle . errmsg ) ; <nl> + assert ( gle . singleShard ) ; <nl> + printjson ( gle = coll . getDB ( ) . runCommand ( { getLastError : 1 } ) ) ; <nl> + assert ( gle . ok ) ; <nl> + assert ( gle . err ) ; <nl> + assert ( gle . code ) ; <nl> + assert ( ! gle . errmsg ) ; <nl> + assert ( gle . singleShard ) ; <nl> + assert . eq ( coll . count ( ) , 0 ) ; <nl> + <nl> / / <nl> / / First shard down <nl> / / <nl> assert ( gle . errmsg ) ; <nl> / / NOTE : This is DIFFERENT from 2 . 4 , since we don ' t need to contact a host we didn ' t get <nl> / / successful writes from . <nl> coll . remove ( { _id : 1 } ) ; <nl> - coll . insert ( [ { _id : 1 } , { _id : - 1 } ] ) ; <nl> + / / The insert throws if write commands are enabled , since we get a response <nl> + if ( coll . getMongo ( ) . useWriteCommands ( ) ) { <nl> + assert . throws ( function ( ) { <nl> + coll . insert ( [ { _id : 1 } , { _id : - 1 } ] ) ; <nl> + } ) ; <nl> + } <nl> + else { <nl> + coll . insert ( [ { _id : 1 } , { _id : - 1 } ] ) ; <nl> + } <nl> printjson ( gle = coll . getDB ( ) . runCommand ( { getLastError : 1 } ) ) ; <nl> assert ( gle . ok ) ; <nl> assert ( gle . err ) ; <nl> mmm a / src / mongo / db / commands / write_commands / write_commands . cpp <nl> ppp b / src / mongo / db / commands / write_commands / write_commands . cpp <nl> namespace mongo { <nl> NamespaceString ( parseNs ( dbname , cmdObj ) ) , <nl> cmdObj ) ) ; <nl> <nl> + / / TODO : Remove this when we standardize GLE reporting from commands <nl> if ( ! status . isOK ( ) ) { <nl> setLastError ( status . code ( ) , status . reason ( ) . c_str ( ) ) ; <nl> } <nl> mmm a / src / mongo / s / commands / cluster_write_cmd . 
cpp <nl> ppp b / src / mongo / s / commands / cluster_write_cmd . cpp <nl> <nl> # include " mongo / db / commands / write_commands / write_commands_common . h " <nl> # include " mongo / s / cluster_write . h " <nl> # include " mongo / db / lasterror . h " <nl> + # include " mongo / db / stats / counters . h " <nl> # include " mongo / s / client_info . h " <nl> # include " mongo / s / write_ops / batched_command_request . h " <nl> # include " mongo / s / write_ops / batched_command_response . h " <nl> namespace mongo { <nl> const std : : string & dbname , <nl> const BSONObj & cmdObj ) { <nl> <nl> - return auth : : checkAuthForWriteCommand ( client - > getAuthorizationSession ( ) , <nl> - _writeType , <nl> - NamespaceString ( parseNs ( dbname , cmdObj ) ) , <nl> - cmdObj ) ; <nl> + Status status = auth : : checkAuthForWriteCommand ( client - > getAuthorizationSession ( ) , <nl> + _writeType , <nl> + NamespaceString ( parseNs ( dbname , <nl> + cmdObj ) ) , <nl> + cmdObj ) ; <nl> + <nl> + / / TODO : Remove this when we standardize GLE reporting from commands <nl> + if ( ! status . isOK ( ) ) { <nl> + setLastError ( status . code ( ) , status . reason ( ) . c_str ( ) ) ; <nl> + } <nl> + <nl> + return status ; <nl> } <nl> <nl> / / Cluster write command entry point . <nl> namespace mongo { <nl> BatchedCommandResponse response ; <nl> ClusterWriter writer ( true / * autosplit * / , 0 / * timeout * / ) ; <nl> <nl> - / / TODO : if we do namespace parsing , push this to the type <nl> - if ( ! request . parseBSON ( cmdObj , & errMsg ) | | ! request . isValid ( & errMsg ) ) { <nl> + / / NOTE : Sometimes this command is invoked with LE disabled for legacy writes <nl> + LastError * cmdLastError = lastError . get ( false ) ; <nl> + <nl> + { <nl> + / / Disable the last error object for the duration of the write <nl> + LastError : : Disabled disableLastError ( cmdLastError ) ; <nl> + <nl> + / / TODO : if we do namespace parsing , push this to the type <nl> + if ( ! request . parseBSON ( cmdObj , & errMsg ) | | ! request . isValid ( & errMsg ) ) { <nl> + <nl> + / / Batch parse failure <nl> + response . setOk ( false ) ; <nl> + response . setErrCode ( ErrorCodes : : FailedToParse ) ; <nl> + response . setErrMessage ( errMsg ) ; <nl> + } <nl> + else { <nl> + <nl> + / / Fixup the namespace to be a full ns internally <nl> + NamespaceString nss ( dbName , request . getNS ( ) ) ; <nl> + request . setNS ( nss . ns ( ) ) ; <nl> + <nl> + writer . write ( request , & response ) ; <nl> + } <nl> <nl> - / / Batch parse failure <nl> - response . setOk ( false ) ; <nl> - response . setErrCode ( ErrorCodes : : FailedToParse ) ; <nl> - response . setErrMessage ( errMsg ) ; <nl> + dassert ( response . isValid ( NULL ) ) ; <nl> } <nl> - else { <nl> <nl> - / / Fixup the namespace to be a full ns internally <nl> - NamespaceString nss ( dbName , request . getNS ( ) ) ; <nl> - request . setNS ( nss . ns ( ) ) ; <nl> - writer . write ( request , & response ) ; <nl> + if ( cmdLastError ) { <nl> + / / Populate the lastError object based on the write response <nl> + cmdLastError - > reset ( ) ; <nl> + batchErrorToLastError ( request , response , cmdLastError ) ; <nl> } <nl> <nl> - dassert ( response . isValid ( NULL ) ) ; <nl> <nl> / / Save the last opTimes written on each shard for this client , to allow GLE to work <nl> if ( ClientInfo : : exists ( ) & & writer . getStats ( ) . hasShardStats ( ) ) { <nl> mmm a / src / mongo / s / server . cpp <nl> ppp b / src / mongo / s / server . cpp <nl> namespace mongo { <nl> m . 
header ( ) - > id = r . id ( ) ; <nl> replyToQuery ( ResultFlag_ErrSet , p , m , buildErrReply ( ex ) ) ; <nl> } <nl> - else { <nl> - le - > raiseError ( ex . getCode ( ) , ex . what ( ) ) ; <nl> - } <nl> + <nl> + / / We * always * populate the last error for now <nl> + le - > raiseError ( ex . getCode ( ) , ex . what ( ) ) ; <nl> } <nl> catch ( const DBException & ex ) { <nl> <nl> log ( ) < < " Exception thrown " <nl> - < < " while processing " < < opToString ( m . operation ( ) ) < < " op " <nl> - < < " for " < < r . getns ( ) < < causedBy ( ex ) < < endl ; <nl> + < < " while processing " < < opToString ( m . operation ( ) ) < < " op " <nl> + < < " for " < < r . getns ( ) < < causedBy ( ex ) < < endl ; <nl> <nl> if ( r . expectResponse ( ) ) { <nl> m . header ( ) - > id = r . id ( ) ; <nl> replyToQuery ( ResultFlag_ErrSet , p , m , buildErrReply ( ex ) ) ; <nl> } <nl> - else { <nl> - le - > raiseError ( ex . getCode ( ) , ex . what ( ) ) ; <nl> - } <nl> - } <nl> <nl> - / / Clear out the last error for GLE unless it ' s been explicitly disabled <nl> - if ( r . expectResponse ( ) & & ! le - > disabled ) <nl> - le - > reset ( ) ; <nl> + / / We * always * populate the last error for now <nl> + le - > raiseError ( ex . getCode ( ) , ex . what ( ) ) ; <nl> + } <nl> } <nl> <nl> virtual void disconnected ( AbstractMessagingPort * p ) { <nl> mmm a / src / mongo / s / strategy . cpp <nl> ppp b / src / mongo / s / strategy . cpp <nl> namespace mongo { <nl> ( void ) parsed ; / / for compile <nl> dassert ( parsed & & response . isValid ( NULL ) ) ; <nl> <nl> - / / Populate the lastError object based on the write <nl> + / / Populate the lastError object based on the write response <nl> lastError . get ( false ) - > reset ( ) ; <nl> - bool hadError = batchErrorToLastError ( * request , <nl> - response , <nl> - lastError . get ( false ) ) ; <nl> + bool hadError = batchErrorToLastError ( * request , response , lastError . get ( false ) ) ; <nl> <nl> / / Need to specially count inserts <nl> if ( op = = dbInsert ) { <nl> for ( int i = 0 ; i < response . getN ( ) ; + + i ) <nl> r . gotInsert ( ) ; <nl> } <nl> - <nl> - / / If this is an ordered batch and we had a non - write - concern error , we should <nl> - / / stop sending . <nl> + / / Check if this is an ordered batch and we had an error which should stop processing <nl> if ( request - > getOrdered ( ) & & hadError ) <nl> break ; <nl> } <nl> | SERVER - 12419 make mongos write commands report last error | mongodb/mongo | a1c9bdb119919d53de74c906888a4125ed0a8ab0 | 2014-02-05T16:14:15Z |
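The key mechanism in the mongos change above is LastError::Disabled, used as a scope guard so that nested write machinery cannot clobber the error state while the batch executes; the command then rebuilds the last-error object exactly once from the batch response via batchErrorToLastError(). A hedged, self-contained sketch of that guard pattern follows; the types are illustrative stand-ins, not MongoDB's actual definitions.

    struct LastErrorLike {
        bool disabled = false;
        int code = 0;

        // RAII guard: disable error recording for the enclosing scope.
        struct Disabled {
            explicit Disabled(LastErrorLike* le)
                : le_(le), was_(le ? le->disabled : false) {
                if (le_) le_->disabled = true;
            }
            ~Disabled() {
                if (le_) le_->disabled = was_;  // restore on scope exit
            }
            LastErrorLike* le_;
            bool was_;
        };

        void raiseError(int c) {
            if (!disabled) code = c;  // suppressed while a guard is active
        }
    };

    int main() {
        LastErrorLike le;
        {
            LastErrorLike::Disabled guard(&le);
            le.raiseError(11000);  // ignored: recording is disabled here
        }
        le.raiseError(121);  // recorded normally after the guard unwinds
        return le.code == 121 ? 0 : 1;
    }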
mmm a / test / mjsunit / mjsunit . status <nl> ppp b / test / mjsunit / mjsunit . status <nl> <nl> ' regress / regress - 752764 ' : [ SKIP ] , <nl> ' regress / regress - 779407 ' : [ SKIP ] , <nl> ' harmony / bigint / regressions ' : [ SKIP ] , <nl> + <nl> + # Pre - r6 MIPS32 doesn ' t have instructions needed to properly handle 64 - bit <nl> + # atomic instructions . <nl> + ' wasm / atomics64 - stress ' : [ PASS , [ ' mips_arch_variant ! = r6 ' , SKIP ] ] , <nl> } ] , # ' arch = = mipsel or arch = = mips ' <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> ' regress / regress - 779407 ' : [ SKIP ] , <nl> } ] , # ' arch = = mips64el or arch = = mips64 ' <nl> <nl> + [ ' ( arch = = mips64el or arch = = mips64 ) and simulator_run ' , { <nl> + # Slow tests which have flaky timeout on simulator . <nl> + ' wasm / atomics64 - stress ' : [ SKIP ] , <nl> + } ] , # ' ( arch = = mips64el or arch = = mips64 ) and simulator_run ' <nl> + <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> [ ' system = = windows ' , { <nl> # TODO ( mstarzinger ) : Too slow with turbo fan . <nl> | [ tests ] Skip wasm / atomics64 - stress test on pre - r6 MIPS32 | v8/v8 | 5bd58639dc646b0b76f56ad2445aaeeebbc7e1f8 | 2018-12-10T14:00:36Z |
mmm a / . github / workflows / persubmit . yml <nl> ppp b / . github / workflows / persubmit . yml <nl> on : <nl> types : [ opened , synchronize , reopened ] <nl> <nl> jobs : <nl> - build_and_test : <nl> - name : Build and Test <nl> + build_and_test_cpu : <nl> + name : Build and Test ( CPU ) <nl> if : $ { { ! contains ( github . event . pull_request . labels . * . name , ' skip ci ' ) & & github . event . sender . login ! = ' taichi - gardener ' } } <nl> strategy : <nl> matrix : <nl> jobs : <nl> env : <nl> CI_PLATFORM : $ { { matrix . os } } <nl> <nl> - - name : Build Project <nl> + - name : Build <nl> run : | <nl> export TAICHI_REPO_DIR = ` pwd ` <nl> export PATH = $ TAICHI_REPO_DIR / taichi - llvm / bin / : $ PATH <nl> jobs : <nl> env : <nl> CI_SETUP_CMAKE_ARGS : - DTI_WITH_OPENGL : BOOL = OFF - DTI_WITH_CC : BOOL = $ { { matrix . with_cc } } <nl> <nl> - - name : Functionallity Test <nl> + - name : Test <nl> run : | <nl> export TAICHI_REPO_DIR = ` pwd ` <nl> export PATH = $ TAICHI_REPO_DIR / bin : $ PATH <nl> jobs : <nl> ti diagnose <nl> ti test - vr2 - t2 <nl> <nl> + build_and_test_cuda : <nl> + name : Build and Test ( CUDA ) <nl> + if : $ { { ! contains ( github . event . pull_request . labels . * . name , ' skip ci ' ) & & github . event . sender . login ! = ' taichi - gardener ' } } <nl> + runs - on : [ zhen ] <nl> + steps : <nl> + - uses : actions / checkout @ v2 <nl> + <nl> + - name : Build <nl> + run : | <nl> + git - - version <nl> + export TAICHI_REPO_DIR = ` pwd ` <nl> + export PATH = / home / github / taichi - llvm / bin / : $ PATH <nl> + export CXX = clang + + - 8 <nl> + export PYTHON = / usr / bin / python3 . 7 <nl> + $ PYTHON misc / ci_setup . py ci <nl> + env : <nl> + CI_SETUP_CMAKE_ARGS : - DTI_WITH_OPENGL : BOOL = OFF - DTI_WITH_CC : BOOL = $ { { matrix . with_cc } } <nl> + <nl> + - name : Test <nl> + run : | <nl> + export PYTHON = / usr / bin / python3 . 7 <nl> + export TAICHI_REPO_DIR = ` pwd ` <nl> + export PATH = $ TAICHI_REPO_DIR / bin : $ PATH <nl> + export PATH = / home / github / taichi - llvm / bin / : $ PATH <nl> + export PYTHONPATH = $ TAICHI_REPO_DIR / python <nl> + $ PYTHON examples / laplace . py <nl> + ti diagnose <nl> + ti test - vr2 - t2 <nl> + <nl> check_previous_run : <nl> name : Checks the Workflow Run of the Previous Commit <nl> runs - on : ubuntu - latest <nl> mmm a / misc / ci_setup . py <nl> ppp b / misc / ci_setup . py <nl> def run ( self ) : <nl> # compile . . <nl> os . makedirs ( ' build ' , exist_ok = True ) <nl> arg = environ . get ( ' CI_SETUP_CMAKE_ARGS ' , ' ' ) <nl> - execute_command ( ' cd build & & cmake . . - DTI_WITH_CUDA : BOOL = OFF ' + <nl> - arg ) <nl> + execute_command ( <nl> + f ' cd build & & cmake . . - DPYTHON_EXECUTABLE = { sys . executable } { arg } ' <nl> + ) <nl> execute_command ( ' cd build & & make - j 10 ' ) <nl> return <nl> if test_installation ( ) : <nl> | [ Workflow ] Build and test the CUDA backend using GitHub actions ( ) | taichi-dev/taichi | f12043fd96f350640f08b8938aa14a6d906d8bea | 2020-09-12T22:31:47Z |
new file mode 100644 <nl> index 00000000000 . . 89bc3d54388 <nl> mmm / dev / null <nl> ppp b / folly / EvictingCacheMap . h <nl> <nl> + / * <nl> + * Copyright 2014 Facebook , Inc . <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # ifndef FOLLY_EVICTINGHASHMAP_H_ <nl> + # define FOLLY_EVICTINGHASHMAP_H_ <nl> + <nl> + # include < algorithm > <nl> + # include < exception > <nl> + # include < functional > <nl> + <nl> + # include < boost / utility . hpp > <nl> + # include < boost / intrusive / list . hpp > <nl> + # include < boost / intrusive / unordered_set . hpp > <nl> + # include < boost / iterator / iterator_adaptor . hpp > <nl> + <nl> + namespace folly { <nl> + <nl> + / * * <nl> + * A general purpose LRU evicting cache . Designed to support constant time <nl> + * set / get operations . It maintains a doubly linked list of items that are <nl> + * threaded through an index ( a hash map ) . The access order is maintained <nl> + * on the list by moving an element to the front of the list on a get . New elements <nl> + * are added to the front of the list . The index size is set to half the <nl> + * capacity ( setting capacity to 0 is a special case ; see notes at the end of <nl> + * this section ) . So assuming uniform distribution of keys , set / get are both <nl> + * constant time operations . <nl> + * <nl> + * On reaching the capacity limit , clearSize_ LRU items are evicted at a time . If <nl> + * a callback is specified with setPruneHook , it is invoked for each eviction . <nl> + * <nl> + * This is NOT a thread - safe implementation . <nl> + * <nl> + * Configurability : capacity of the cache , number of items to evict , eviction <nl> + * callback and the hasher to hash the keys can all be supplied by the caller . <nl> + * <nl> + * If at a given state , N1 - N6 are the nodes in MRU to LRU order and hashing <nl> + * to index keys as { ( N1 , N5 ) - > H1 , ( N4 , N2 , N6 ) - > H2 , N3 - > Hi } , the data structure <nl> + * layout is as below . N1 . . N6 is a list threaded through the hash . <nl> + * Assuming the number of nodes hashed to each index key is bounded , the <nl> + * following operations run in constant time . <nl> + * i ) get computes the index key , walks the list of elements hashed to <nl> + * the key and moves it to the front of the list , if found . <nl> + * ii ) set inserts a new node into the list and places the same node onto the <nl> + * list of elements hashing to the corresponding index key . <nl> + * iii ) prune deletes nodes from the end of the list as well as from the index . 
<nl> + * <nl> + * + mmm - + + mmm - + + mmm - + <nl> + * | H1 | < - > | N1 | < - > | N5 | <nl> + * + mmm - + + mmm - + + mmm - + <nl> + * ^ ^ ^ <nl> + * | ___ / \ <nl> + * | / \ <nl> + * | _ / ________ \ ___ <nl> + * / | \ <nl> + * / | \ <nl> + * v v v <nl> + * + mmm - + + mmm - + + mmm - + + mmm - + <nl> + * | H2 | < - > | N4 | < - > | N2 | < - > | N6 | <nl> + * + mmm - + + mmm - + + mmm - + + mmm - + <nl> + * . ^ ^ <nl> + * . | | <nl> + * . | | <nl> + * . | _____ | <nl> + * . | / <nl> + * v v <nl> + * + mmm - + + mmm - + <nl> + * | Hi | < - > | N3 | <nl> + * + mmm - + + mmm - + <nl> + * <nl> + * N . B 1 : Changing the capacity with setMaxSize does not change the index size <nl> + * and it could end up in too many elements indexed to the same slot in index . <nl> + * The set / get performance will get worse in this case . So it is best to avoid <nl> + * resizing . <nl> + * <nl> + * N . B 2 : Setting capacity to 0 , using setMaxSize or initialization , turns off <nl> + * evictions based on sizeof the cache making it an INFINITE size cache <nl> + * unless evictions of LRU items are triggered by calling prune ( ) by clients <nl> + * ( using their own eviction criteria ) . <nl> + * / <nl> + template < class TKey , class TValue , class THash = std : : hash < TKey > > <nl> + class EvictingCacheMap : private boost : : noncopyable { <nl> + <nl> + private : <nl> + / / typedefs for brevity <nl> + struct Node ; <nl> + typedef boost : : intrusive : : link_mode < boost : : intrusive : : safe_link > link_mode ; <nl> + typedef boost : : intrusive : : unordered_set < Node > NodeMap ; <nl> + typedef boost : : intrusive : : list < Node > NodeList ; <nl> + typedef std : : pair < const TKey , TValue > TPair ; <nl> + <nl> + public : <nl> + typedef std : : function < void ( TKey , TValue & & ) > PruneHookCall ; <nl> + <nl> + / / iterator base : returns TPair on dereference <nl> + template < typename Value , typename TIterator > <nl> + class iterator_base <nl> + : public boost : : iterator_adaptor < iterator_base < Value , TIterator > , <nl> + TIterator , <nl> + Value , <nl> + boost : : bidirectional_traversal_tag > { <nl> + public : <nl> + iterator_base ( ) { <nl> + } <nl> + explicit iterator_base ( TIterator it ) <nl> + : iterator_base : : iterator_adaptor_ ( it ) { <nl> + } <nl> + Value & dereference ( ) const { <nl> + return this - > base_reference ( ) - > pr ; <nl> + } <nl> + } ; <nl> + <nl> + / / iterators <nl> + typedef iterator_base < <nl> + TPair , typename NodeList : : iterator > iterator ; <nl> + typedef iterator_base < <nl> + const TPair , typename NodeList : : const_iterator > const_iterator ; <nl> + typedef iterator_base < <nl> + TPair , typename NodeList : : reverse_iterator > reverse_iterator ; <nl> + typedef iterator_base < <nl> + const TPair , <nl> + typename NodeList : : const_reverse_iterator > const_reverse_iterator ; <nl> + <nl> + / * * <nl> + * Construct a EvictingCacheMap <nl> + * @ param maxSize maximum size of the cache map . Once the map size exceeds <nl> + * maxSize , the map will begin to evict . <nl> + * @ param clearSize the number of elements to clear at a time when the <nl> + * eviction size is reached . <nl> + * / <nl> + explicit EvictingCacheMap ( std : : size_t maxSize , std : : size_t clearSize = 1 ) <nl> + : nIndexBuckets_ ( std : : max ( maxSize / 2 , std : : size_t ( kMinNumIndexBuckets ) ) ) , <nl> + indexBuckets_ ( new typename NodeMap : : bucket_type [ nIndexBuckets_ ] ) , <nl> + indexTraits_ ( indexBuckets_ . 
get ( ) , nIndexBuckets_ ) , <nl> + index_ ( indexTraits_ ) , <nl> + maxSize_ ( maxSize ) , <nl> + clearSize_ ( clearSize ) { } <nl> + <nl> + <nl> + ~ EvictingCacheMap ( ) { <nl> + setPruneHook ( nullptr ) ; <nl> + / / ignore any potential exceptions from pruneHook_ <nl> + pruneWithFailSafeOption ( size ( ) , nullptr , true ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Adjust the max size of EvictingCacheMap . Note that this does not update <nl> + * nIndexBuckets_ accordingly . This API can cause performance to get very <nl> + * bad , e . g . , the nIndexBuckets_ is still 100 after maxSize is updated to 1M . <nl> + * <nl> + * Calling this function with an argument of 0 removes the limit on the cache <nl> + * size and elements are not evicted unless clients explicitly call prune . <nl> + * <nl> + * If you intend to resize dynamically using this , then picking an index size <nl> + * that works well and initializing with corresponding maxSize is the only <nl> + * reasonable option . <nl> + * / <nl> + void setMaxSize ( size_t maxSize ) { <nl> + if ( maxSize ! = 0 & & maxSize < size ( ) ) { <nl> + / / Prune the excess elements with our new constraints . <nl> + prune ( std : : max ( size ( ) - maxSize , clearSize_ ) ) ; <nl> + } <nl> + maxSize_ = maxSize ; <nl> + } <nl> + <nl> + size_t getMaxSize ( ) const { <nl> + return maxSize_ ; <nl> + } <nl> + <nl> + void setClearSize ( size_t clearSize ) { <nl> + clearSize_ = clearSize ; <nl> + } <nl> + <nl> + / * * <nl> + * Check for existence of a specific key in the map . This operation has <nl> + * no effect on LRU order . <nl> + * @ param key key to search for <nl> + * @ return true if exists , false otherwise <nl> + * / <nl> + bool exists ( const TKey & key ) const { <nl> + return findInIndex ( key ) ! = index_ . end ( ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Get the value associated with a specific key . This function always <nl> + * promotes a found value to the head of the LRU . <nl> + * @ param key key associated with the value <nl> + * @ return the value if it exists <nl> + * @ throw std : : out_of_range exception if the key does not exist <nl> + * / <nl> + TValue & get ( const TKey & key ) { <nl> + auto it = find ( key ) ; <nl> + if ( it = = end ( ) ) { <nl> + throw std : : out_of_range ( " Key does not exist " ) ; <nl> + } <nl> + return it - > second ; <nl> + } <nl> + <nl> + / * * <nl> + * Get the iterator associated with a specific key . This function always <nl> + * promotes a found value to the head of the LRU . <nl> + * @ param key key to associate with value <nl> + * @ return the iterator of the object ( a std : : pair of const TKey , TValue ) or <nl> + * end ( ) if it does not exist <nl> + * / <nl> + iterator find ( const TKey & key ) { <nl> + auto it = findInIndex ( key ) ; <nl> + if ( it = = index_ . end ( ) ) { <nl> + return end ( ) ; <nl> + } <nl> + lru_ . erase ( lru_ . iterator_to ( * it ) ) ; <nl> + lru_ . push_front ( * it ) ; <nl> + return iterator ( lru_ . iterator_to ( * it ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Get the value associated with a specific key . This function never <nl> + * promotes a found value to the head of the LRU . 
<nl> + * @ param key key associated with the value <nl> + * @ return the value if it exists <nl> + * @ throw std : : out_of_range exception if the key does not exist <nl> + * / <nl> + const TValue & getWithoutPromotion ( const TKey & key ) const { <nl> + auto it = findWithoutPromotion ( key ) ; <nl> + if ( it = = end ( ) ) { <nl> + throw std : : out_of_range ( " Key does not exist " ) ; <nl> + } <nl> + return it - > second ; <nl> + } <nl> + <nl> + TValue & getWithoutPromotion ( const TKey & key ) { <nl> + auto const & cThis = * this ; <nl> + return const_cast < TValue & > ( cThis . getWithoutPromotion ( key ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Get the iterator associated with a specific key . This function never <nl> + * promotes a found value to the head of the LRU . <nl> + * @ param key key to associate with value <nl> + * @ return the iterator of the object ( a std : : pair of const TKey , TValue ) or <nl> + * end ( ) if it does not exist <nl> + * / <nl> + const_iterator findWithoutPromotion ( const TKey & key ) const { <nl> + auto it = findInIndex ( key ) ; <nl> + return ( it = = index_ . end ( ) ) ? end ( ) : const_iterator ( lru_ . iterator_to ( * it ) ) ; <nl> + } <nl> + <nl> + iterator findWithoutPromotion ( const TKey & key ) { <nl> + auto it = findInIndex ( key ) ; <nl> + return ( it = = index_ . end ( ) ) ? end ( ) : iterator ( lru_ . iterator_to ( * it ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Erase the key - value pair associated with key if it exists . <nl> + * @ param key key associated with the value <nl> + * @ return true if the key existed and was erased , else false <nl> + * / <nl> + bool erase ( const TKey & key ) { <nl> + auto it = findInIndex ( key ) ; <nl> + if ( it = = index_ . end ( ) ) { <nl> + return false ; <nl> + } <nl> + auto node = & ( * it ) ; <nl> + std : : unique_ptr < Node > nptr ( node ) ; <nl> + lru_ . erase ( lru_ . iterator_to ( * node ) ) ; <nl> + index_ . erase ( it ) ; <nl> + return true ; <nl> + } <nl> + <nl> + / * * <nl> + * Set a key - value pair in the dictionary <nl> + * @ param key key to associate with value <nl> + * @ param value value to associate with the key <nl> + * @ param promote boolean flag indicating whether or not to move something <nl> + * to the front of an LRU . This only really matters if you ' re setting <nl> + * a value that already exists . <nl> + * / <nl> + void set ( const TKey & key , TValue value , bool promote = true ) { <nl> + auto it = findInIndex ( key ) ; <nl> + if ( it ! = index_ . end ( ) ) { <nl> + it - > pr . second = std : : move ( value ) ; <nl> + if ( promote ) { <nl> + lru_ . erase ( lru_ . iterator_to ( * it ) ) ; <nl> + lru_ . push_front ( * it ) ; <nl> + } <nl> + } else { <nl> + auto node = new Node ( key , std : : move ( value ) ) ; <nl> + index_ . insert ( * node ) ; <nl> + lru_ . push_front ( * node ) ; <nl> + <nl> + / / no evictions if maxSize_ is 0 i . e . unlimited capacity <nl> + if ( maxSize_ > 0 & & size ( ) > maxSize_ ) { <nl> + prune ( clearSize_ ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Get the number of elements in the dictionary <nl> + * @ return the size of the dictionary <nl> + * / <nl> + std : : size_t size ( ) const { <nl> + return index_ . size ( ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Typical empty function <nl> + * @ return true if empty , false otherwise <nl> + * / <nl> + bool empty ( ) const { <nl> + return index_ . 
empty ( ) ; <nl> + } <nl> + <nl> + void clear ( ) { <nl> + prune ( size ( ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Set the prune hook , which is the function invoked on the key and value <nl> + * on each eviction . Will throw if the pruneHook throws , unless the <nl> + * EvictingCacheMap object is being destroyed in which case it will <nl> + * be ignored . <nl> + * @ param pruneHook new callback to use on eviction . <nl> + * / <nl> + void setPruneHook ( PruneHookCall pruneHook ) { <nl> + pruneHook_ = pruneHook ; <nl> + } <nl> + <nl> + <nl> + / * * <nl> + * Prune the minimum of pruneSize and size ( ) from the back of the LRU . <nl> + * Will throw if pruneHook throws . <nl> + * @ param pruneSize minimum number of elements to prune <nl> + * @ param pruneHook a custom pruneHook function <nl> + * / <nl> + void prune ( std : : size_t pruneSize , PruneHookCall pruneHook = nullptr ) { <nl> + / / do not swallow exceptions for prunes not triggered from destructor <nl> + pruneWithFailSafeOption ( pruneSize , pruneHook , false ) ; <nl> + } <nl> + <nl> + / / Iterators and such <nl> + iterator begin ( ) { <nl> + return iterator ( lru_ . begin ( ) ) ; <nl> + } <nl> + iterator end ( ) { <nl> + return iterator ( lru_ . end ( ) ) ; <nl> + } <nl> + const_iterator begin ( ) const { <nl> + return const_iterator ( lru_ . begin ( ) ) ; <nl> + } <nl> + const_iterator end ( ) const { <nl> + return const_iterator ( lru_ . end ( ) ) ; <nl> + } <nl> + <nl> + const_iterator cbegin ( ) const { <nl> + return const_iterator ( lru_ . cbegin ( ) ) ; <nl> + } <nl> + const_iterator cend ( ) const { <nl> + return const_iterator ( lru_ . cend ( ) ) ; <nl> + } <nl> + <nl> + reverse_iterator rbegin ( ) { <nl> + return reverse_iterator ( lru_ . rbegin ( ) ) ; <nl> + } <nl> + reverse_iterator rend ( ) { <nl> + return reverse_iterator ( lru_ . rend ( ) ) ; <nl> + } <nl> + <nl> + const_reverse_iterator rbegin ( ) const { <nl> + return const_reverse_iterator ( lru_ . rbegin ( ) ) ; <nl> + } <nl> + const_reverse_iterator rend ( ) const { <nl> + return const_reverse_iterator ( lru_ . rend ( ) ) ; <nl> + } <nl> + <nl> + const_reverse_iterator crbegin ( ) const { <nl> + return const_reverse_iterator ( lru_ . crbegin ( ) ) ; <nl> + } <nl> + const_reverse_iterator crend ( ) const { <nl> + return const_reverse_iterator ( lru_ . crend ( ) ) ; <nl> + } <nl> + <nl> + private : <nl> + struct Node <nl> + : public boost : : intrusive : : unordered_set_base_hook < link_mode > , <nl> + public boost : : intrusive : : list_base_hook < link_mode > { <nl> + Node ( const TKey & key , TValue & & value ) <nl> + : pr ( std : : make_pair ( key , std : : move ( value ) ) ) { <nl> + } <nl> + TPair pr ; <nl> + friend bool operator = = ( const Node & lhs , const Node & rhs ) { <nl> + return lhs . pr . first = = rhs . pr . first ; <nl> + } <nl> + friend std : : size_t hash_value ( const Node & node ) { <nl> + return THash ( ) ( node . pr . first ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct KeyHasher { <nl> + std : : size_t operator ( ) ( const Node & node ) { <nl> + return THash ( ) ( node . pr . 
first ) ; <nl> + } <nl> + std : : size_t operator ( ) ( const TKey & key ) { <nl> + return THash ( ) ( key ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct KeyValueEqual { <nl> + bool operator ( ) ( const TKey & lhs , const Node & rhs ) { <nl> + return lhs = = rhs . pr . first ; <nl> + } <nl> + bool operator ( ) ( const Node & lhs , const TKey & rhs ) { <nl> + return lhs . pr . first = = rhs ; <nl> + } <nl> + } ; <nl> + <nl> + / * * <nl> + * Get the iterator in the index associated with a specific key . This is <nl> + * merely a search in the index and does not promote the object . <nl> + * @ param key key to associate with value <nl> + * @ return the NodeMap : : iterator to the Node containing the object <nl> + * ( a std : : pair of const TKey , TValue ) or index_ . end ( ) if it does not exist <nl> + * / <nl> + typename NodeMap : : iterator findInIndex ( const TKey & key ) { <nl> + return index_ . find ( key , KeyHasher ( ) , KeyValueEqual ( ) ) ; <nl> + } <nl> + <nl> + typename NodeMap : : const_iterator findInIndex ( const TKey & key ) const { <nl> + return index_ . find ( key , KeyHasher ( ) , KeyValueEqual ( ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Prune the minimum of pruneSize and size ( ) from the back of the LRU . <nl> + * @ param pruneSize minimum number of elements to prune <nl> + * @ param pruneHook a custom pruneHook function <nl> + * @ param failSafe true if exceptions are to be ignored , false by default <nl> + * / <nl> + void pruneWithFailSafeOption ( std : : size_t pruneSize , <nl> + PruneHookCall pruneHook , bool failSafe ) { <nl> + auto & ph = ( nullptr = = pruneHook ) ? pruneHook_ : pruneHook ; <nl> + <nl> + for ( std : : size_t i = 0 ; i < pruneSize & & ! lru_ . empty ( ) ; i + + ) { <nl> + auto * node = & ( * lru_ . rbegin ( ) ) ; <nl> + std : : unique_ptr < Node > nptr ( node ) ; <nl> + <nl> + lru_ . erase ( lru_ . iterator_to ( * node ) ) ; <nl> + index_ . erase ( index_ . iterator_to ( * node ) ) ; <nl> + if ( ph ) { <nl> + try { <nl> + ph ( node - > pr . first , std : : move ( node - > pr . second ) ) ; <nl> + } catch ( . . . ) { <nl> + if ( ! failSafe ) { <nl> + throw ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + static const std : : size_t kMinNumIndexBuckets = 100 ; <nl> + PruneHookCall pruneHook_ ; <nl> + std : : size_t nIndexBuckets_ ; <nl> + std : : unique_ptr < typename NodeMap : : bucket_type [ ] > indexBuckets_ ; <nl> + typename NodeMap : : bucket_traits indexTraits_ ; <nl> + NodeMap index_ ; <nl> + NodeList lru_ ; <nl> + std : : size_t maxSize_ ; <nl> + std : : size_t clearSize_ ; <nl> + } ; <nl> + <nl> + } / / folly <nl> + <nl> + # endif / * FOLLY_EVICTINGHASHMAP_H_ * / <nl> new file mode 100644 <nl> index 00000000000 . . cf326c92383 <nl> mmm / dev / null <nl> ppp b / folly / test / EvictingCacheMapTest . cpp <nl> <nl> + / * <nl> + * Copyright 2014 Facebook , Inc . <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . 
<nl> + * / <nl> + <nl> + # include < gtest / gtest . h > <nl> + <nl> + # include < set > <nl> + <nl> + # include < folly / EvictingCacheMap . h > <nl> + <nl> + using namespace folly ; <nl> + <nl> + TEST ( EvictingCacheMap , SanityTest ) { <nl> + EvictingCacheMap < int , int > map ( 0 ) ; <nl> + <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + EXPECT_FALSE ( map . exists ( 1 ) ) ; <nl> + map . set ( 1 , 1 ) ; <nl> + EXPECT_EQ ( 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_EQ ( 1 , map . get ( 1 ) ) ; <nl> + EXPECT_TRUE ( map . exists ( 1 ) ) ; <nl> + map . set ( 1 , 2 ) ; <nl> + EXPECT_EQ ( 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_EQ ( 2 , map . get ( 1 ) ) ; <nl> + EXPECT_TRUE ( map . exists ( 1 ) ) ; <nl> + map . erase ( 1 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + EXPECT_FALSE ( map . exists ( 1 ) ) ; <nl> + <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + EXPECT_FALSE ( map . exists ( 1 ) ) ; <nl> + map . set ( 1 , 1 ) ; <nl> + EXPECT_EQ ( 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_EQ ( 1 , map . get ( 1 ) ) ; <nl> + EXPECT_TRUE ( map . exists ( 1 ) ) ; <nl> + map . set ( 1 , 2 ) ; <nl> + EXPECT_EQ ( 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_EQ ( 2 , map . get ( 1 ) ) ; <nl> + EXPECT_TRUE ( map . exists ( 1 ) ) ; <nl> + <nl> + EXPECT_FALSE ( map . exists ( 2 ) ) ; <nl> + map . set ( 2 , 1 ) ; <nl> + EXPECT_TRUE ( map . exists ( 2 ) ) ; <nl> + EXPECT_EQ ( 2 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_EQ ( 1 , map . get ( 2 ) ) ; <nl> + map . set ( 2 , 2 ) ; <nl> + EXPECT_EQ ( 2 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_EQ ( 2 , map . get ( 2 ) ) ; <nl> + EXPECT_TRUE ( map . exists ( 2 ) ) ; <nl> + map . erase ( 2 ) ; <nl> + EXPECT_EQ ( 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_FALSE ( map . exists ( 2 ) ) ; <nl> + map . erase ( 1 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + EXPECT_FALSE ( map . exists ( 1 ) ) ; <nl> + } <nl> + <nl> + <nl> + TEST ( EvictingCacheMap , PruneTest ) { <nl> + EvictingCacheMap < int , int > map ( 0 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + <nl> + map . prune ( 1000000 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + <nl> + map . prune ( 100 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . 
exists ( i ) ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + <nl> + map . prune ( 99 ) ; <nl> + EXPECT_EQ ( 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 99 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + EXPECT_TRUE ( map . exists ( 99 ) ) ; <nl> + EXPECT_EQ ( 99 , map . get ( 99 ) ) ; <nl> + <nl> + map . prune ( 100 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + <nl> + map . prune ( 90 ) ; <nl> + EXPECT_EQ ( 10 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 90 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + for ( int i = 90 ; i < 100 ; i + + ) { <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , PruneHookTest ) { <nl> + EvictingCacheMap < int , int > map ( 0 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + int sum = 0 ; <nl> + auto pruneCb = [ & ] ( int & & k , int & & v ) { <nl> + EXPECT_EQ ( k , v ) ; <nl> + sum + = k ; <nl> + } ; <nl> + <nl> + map . setPruneHook ( pruneCb ) ; <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + <nl> + map . prune ( 1000000 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + EXPECT_EQ ( ( 99 * 100 ) / 2 , sum ) ; <nl> + sum = 0 ; <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + <nl> + map . prune ( 100 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + EXPECT_EQ ( ( 99 * 100 ) / 2 , sum ) ; <nl> + sum = 0 ; <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + <nl> + map . prune ( 99 ) ; <nl> + EXPECT_EQ ( 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 99 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + EXPECT_TRUE ( map . 
exists ( 99 ) ) ; <nl> + EXPECT_EQ ( 99 , map . get ( 99 ) ) ; <nl> + <nl> + EXPECT_EQ ( ( 98 * 99 ) / 2 , sum ) ; <nl> + sum = 0 ; <nl> + <nl> + map . prune ( 100 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + EXPECT_EQ ( 99 , sum ) ; <nl> + sum = 0 ; <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + <nl> + map . prune ( 90 ) ; <nl> + EXPECT_EQ ( 10 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 90 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + for ( int i = 90 ; i < 100 ; i + + ) { <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + EXPECT_EQ ( ( 89 * 90 ) / 2 , sum ) ; <nl> + sum = 0 ; <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , SetMaxSize ) { <nl> + EvictingCacheMap < int , int > map ( 100 , 20 ) ; <nl> + for ( int i = 0 ; i < 90 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + EXPECT_EQ ( 90 , map . size ( ) ) ; <nl> + map . setMaxSize ( 50 ) ; <nl> + EXPECT_EQ ( map . size ( ) , 50 ) ; <nl> + <nl> + for ( int i = 0 ; i < 90 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + } <nl> + EXPECT_EQ ( 40 , map . size ( ) ) ; <nl> + map . setMaxSize ( 0 ) ; <nl> + EXPECT_EQ ( 40 , map . size ( ) ) ; <nl> + map . setMaxSize ( 10 ) ; <nl> + EXPECT_EQ ( 10 , map . size ( ) ) ; <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , SetClearSize ) { <nl> + EvictingCacheMap < int , int > map ( 100 , 20 ) ; <nl> + for ( int i = 0 ; i < 90 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + EXPECT_EQ ( 90 , map . size ( ) ) ; <nl> + map . setClearSize ( 40 ) ; <nl> + map . setMaxSize ( 50 ) ; <nl> + EXPECT_EQ ( map . size ( ) , 50 ) ; <nl> + <nl> + for ( int i = 0 ; i < 90 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + } <nl> + EXPECT_EQ ( 20 , map . size ( ) ) ; <nl> + map . setMaxSize ( 0 ) ; <nl> + EXPECT_EQ ( 20 , map . size ( ) ) ; <nl> + map . setMaxSize ( 10 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , DestructorInvocationTest ) { <nl> + struct SumInt { <nl> + SumInt ( int val , int * ref ) : val ( val ) , ref ( ref ) { } <nl> + ~ SumInt ( ) { <nl> + * ref + = val ; <nl> + } <nl> + int val ; <nl> + int * ref ; <nl> + } ; <nl> + <nl> + EvictingCacheMap < int , SumInt > map ( 0 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + int sum ; <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , SumInt ( i , & sum ) ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) . val ) ; <nl> + } <nl> + <nl> + sum = 0 ; <nl> + map . prune ( 1000000 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . 
empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + EXPECT_EQ ( ( 99 * 100 ) / 2 , sum ) ; <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , SumInt ( i , & sum ) ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) . val ) ; <nl> + } <nl> + <nl> + sum = 0 ; <nl> + map . prune ( 100 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + EXPECT_EQ ( ( 99 * 100 ) / 2 , sum ) ; <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , SumInt ( i , & sum ) ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) . val ) ; <nl> + } <nl> + <nl> + sum = 0 ; <nl> + map . prune ( 99 ) ; <nl> + EXPECT_EQ ( 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 99 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + EXPECT_TRUE ( map . exists ( 99 ) ) ; <nl> + EXPECT_EQ ( 99 , map . get ( 99 ) . val ) ; <nl> + <nl> + EXPECT_EQ ( ( 98 * 99 ) / 2 , sum ) ; <nl> + <nl> + sum = 0 ; <nl> + map . prune ( 100 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + EXPECT_EQ ( 99 , sum ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , SumInt ( i , & sum ) ) ; <nl> + EXPECT_EQ ( i + 1 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) . val ) ; <nl> + } <nl> + <nl> + sum = 0 ; <nl> + map . prune ( 90 ) ; <nl> + EXPECT_EQ ( 10 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 90 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + for ( int i = 90 ; i < 100 ; i + + ) { <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) . val ) ; <nl> + } <nl> + EXPECT_EQ ( ( 89 * 90 ) / 2 , sum ) ; <nl> + sum = 0 ; <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , LruSanityTest ) { <nl> + EvictingCacheMap < int , int > map ( 10 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_GE ( 10 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + <nl> + EXPECT_EQ ( 10 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 90 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + for ( int i = 90 ; i < 100 ; i + + ) { <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , LruPromotionTest ) { <nl> + EvictingCacheMap < int , int > map ( 10 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . 
exists ( i ) ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_GE ( 10 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + for ( int j = 0 ; j < std : : min ( i + 1 , 9 ) ; j + + ) { <nl> + EXPECT_TRUE ( map . exists ( j ) ) ; <nl> + EXPECT_EQ ( j , map . get ( j ) ) ; <nl> + } <nl> + } <nl> + <nl> + EXPECT_EQ ( 10 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 9 ; i + + ) { <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + } <nl> + EXPECT_TRUE ( map . exists ( 99 ) ) ; <nl> + for ( int i = 10 ; i < 99 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , LruNoPromotionTest ) { <nl> + EvictingCacheMap < int , int > map ( 10 ) ; <nl> + EXPECT_EQ ( 0 , map . size ( ) ) ; <nl> + EXPECT_TRUE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + <nl> + for ( int i = 0 ; i < 100 ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_GE ( 10 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + for ( int j = 0 ; j < std : : min ( i + 1 , 9 ) ; j + + ) { <nl> + if ( map . exists ( j ) ) { <nl> + EXPECT_EQ ( j , map . getWithoutPromotion ( j ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + EXPECT_EQ ( 10 , map . size ( ) ) ; <nl> + EXPECT_FALSE ( map . empty ( ) ) ; <nl> + for ( int i = 0 ; i < 90 ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + } <nl> + for ( int i = 90 ; i < 100 ; i + + ) { <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , IteratorSanityTest ) { <nl> + const int nItems = 1000 ; <nl> + EvictingCacheMap < int , int > map ( nItems ) ; <nl> + EXPECT_TRUE ( map . begin ( ) = = map . end ( ) ) ; <nl> + for ( int i = 0 ; i < nItems ; i + + ) { <nl> + EXPECT_FALSE ( map . exists ( i ) ) ; <nl> + map . set ( i , i * 2 ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i * 2 , map . get ( i ) ) ; <nl> + } <nl> + <nl> + std : : set < int > seen ; <nl> + for ( auto & it : map ) { <nl> + EXPECT_EQ ( 0 , seen . count ( it . first ) ) ; <nl> + seen . insert ( it . first ) ; <nl> + EXPECT_EQ ( it . first * 2 , it . second ) ; <nl> + } <nl> + EXPECT_EQ ( nItems , seen . size ( ) ) ; <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , FindTest ) { <nl> + const int nItems = 1000 ; <nl> + EvictingCacheMap < int , int > map ( nItems ) ; <nl> + for ( int i = 0 ; i < nItems ; i + + ) { <nl> + map . set ( i * 2 , i * 2 ) ; <nl> + EXPECT_TRUE ( map . exists ( i * 2 ) ) ; <nl> + EXPECT_EQ ( i * 2 , map . get ( i * 2 ) ) ; <nl> + } <nl> + for ( int i = 0 ; i < nItems * 2 ; i + + ) { <nl> + if ( i % 2 = = 0 ) { <nl> + auto it = map . find ( i ) ; <nl> + EXPECT_FALSE ( it = = map . end ( ) ) ; <nl> + EXPECT_EQ ( i , it - > first ) ; <nl> + EXPECT_EQ ( i , it - > second ) ; <nl> + } else { <nl> + EXPECT_TRUE ( map . find ( i ) = = map . end ( ) ) ; <nl> + } <nl> + } <nl> + for ( int i = nItems * 2 - 1 ; i > = 0 ; i - - ) { <nl> + if ( i % 2 = = 0 ) { <nl> + auto it = map . find ( i ) ; <nl> + EXPECT_FALSE ( it = = map . end ( ) ) ; <nl> + EXPECT_EQ ( i , it - > first ) ; <nl> + EXPECT_EQ ( i , it - > second ) ; <nl> + } else { <nl> + EXPECT_TRUE ( map . find ( i ) = = map . 
end ( ) ) ; <nl> + } <nl> + } <nl> + EXPECT_EQ ( 0 , map . begin ( ) - > first ) ; <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , FindWithoutPromotionTest ) { <nl> + const int nItems = 1000 ; <nl> + EvictingCacheMap < int , int > map ( nItems ) ; <nl> + for ( int i = 0 ; i < nItems ; i + + ) { <nl> + map . set ( i * 2 , i * 2 ) ; <nl> + EXPECT_TRUE ( map . exists ( i * 2 ) ) ; <nl> + EXPECT_EQ ( i * 2 , map . get ( i * 2 ) ) ; <nl> + } <nl> + for ( int i = nItems * 2 - 1 ; i > = 0 ; i - - ) { <nl> + if ( i % 2 = = 0 ) { <nl> + auto it = map . findWithoutPromotion ( i ) ; <nl> + EXPECT_FALSE ( it = = map . end ( ) ) ; <nl> + EXPECT_EQ ( i , it - > first ) ; <nl> + EXPECT_EQ ( i , it - > second ) ; <nl> + } else { <nl> + EXPECT_TRUE ( map . findWithoutPromotion ( i ) = = map . end ( ) ) ; <nl> + } <nl> + } <nl> + EXPECT_EQ ( ( nItems - 1 ) * 2 , map . begin ( ) - > first ) ; <nl> + } <nl> + <nl> + TEST ( EvictingCacheMap , IteratorOrderingTest ) { <nl> + const int nItems = 1000 ; <nl> + EvictingCacheMap < int , int > map ( nItems ) ; <nl> + for ( int i = 0 ; i < nItems ; i + + ) { <nl> + map . set ( i , i ) ; <nl> + EXPECT_TRUE ( map . exists ( i ) ) ; <nl> + EXPECT_EQ ( i , map . get ( i ) ) ; <nl> + } <nl> + <nl> + int expected = nItems - 1 ; <nl> + for ( auto it = map . begin ( ) ; it ! = map . end ( ) ; + + it ) { <nl> + EXPECT_EQ ( expected , it - > first ) ; <nl> + expected - - ; <nl> + } <nl> + <nl> + expected = 0 ; <nl> + for ( auto it = map . rbegin ( ) ; it ! = map . rend ( ) ; + + it ) { <nl> + EXPECT_EQ ( expected , it - > first ) ; <nl> + expected + + ; <nl> + } <nl> + <nl> + { <nl> + auto it = map . end ( ) ; <nl> + expected = 0 ; <nl> + EXPECT_TRUE ( it ! = map . begin ( ) ) ; <nl> + do { <nl> + - - it ; <nl> + EXPECT_EQ ( expected , it - > first ) ; <nl> + expected + + ; <nl> + } while ( it ! = map . begin ( ) ) ; <nl> + EXPECT_EQ ( nItems , expected ) ; <nl> + } <nl> + <nl> + { <nl> + auto it = map . rend ( ) ; <nl> + expected = nItems - 1 ; <nl> + do { <nl> + - - it ; <nl> + EXPECT_EQ ( expected , it - > first ) ; <nl> + expected - - ; <nl> + } while ( it ! = map . rbegin ( ) ) ; <nl> + EXPECT_EQ ( - 1 , expected ) ; <nl> + } <nl> + } <nl> | move EvictingCacheMap to folly | facebook/folly | 3ff4b4f4ae7c4bc45c8b6890c4d3c6f09b729527 | 2014-08-14T18:49:04Z |
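For orientation, here is a minimal usage sketch of the EvictingCacheMap API introduced by the commit above (constructor, set/get, setPruneHook, prune-on-overflow, MRU-to-LRU iteration). It is written against the header as shown in this diff; the chosen key/value types, sizes, and printed output are illustrative assumptions, not part of the commit.

```cpp
#include <folly/EvictingCacheMap.h>

#include <iostream>
#include <string>

int main() {
    // Cache of up to 3 entries; evict 1 LRU entry at a time on overflow.
    folly::EvictingCacheMap<int, std::string> cache(3, 1);

    // The prune hook is invoked once per evicted entry.
    cache.setPruneHook([](int key, std::string&& value) {
        std::cout << "evicted " << key << " -> " << value << "\n";
    });

    cache.set(1, "one");
    cache.set(2, "two");
    cache.set(3, "three");
    cache.get(1);          // promotes key 1 to the MRU end of the list
    cache.set(4, "four");  // size exceeds maxSize; key 2 (the LRU entry) is evicted

    std::cout << cache.exists(2) << "\n";  // prints 0: key 2 was pruned

    // Iteration runs in MRU-to-LRU order: 4, 1, 3.
    for (const auto& kv : cache) {
        std::cout << kv.first << " -> " << kv.second << "\n";
    }
    return 0;
}
```

Note that get() and find() promote the entry they touch, so even read-only scans reorder the LRU list; the commit provides getWithoutPromotion()/findWithoutPromotion() for lookups that must not disturb eviction order.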
mmm a / html / admin / js / views / graphView . js <nl> ppp b / html / admin / js / views / graphView . js <nl> window . graphView = Backbone . View . extend ( { <nl> } ; <nl> <nl> $ ( " # creationDialog " ) . remove ( ) ; <nl> - ui = new GraphViewerUI ( document . getElementById ( " content " ) , aaconfig , 940 , 770 ) ; <nl> + ui = new GraphViewerUI ( document . getElementById ( " content " ) , aaconfig , 940 , 680 ) ; <nl> } , <nl> <nl> <nl> | GraphViewer : Further UI refinement | arangodb/arangodb | 08f1fe857ae79780966ea636f14b688d15d05162 | 2013-04-30T12:58:42Z |
mmm a / CHANGELOG . md <nl> ppp b / CHANGELOG . md <nl> <nl> * Fix ClickHouse determining the default time zone as UCT instead of UTC . [ # 5828 ] ( https : / / github . com / yandex / ClickHouse / pull / 5828 ) ( [ alexey - milovidov ] ( https : / / github . com / alexey - milovidov ) ) <nl> * Fix bug about executing distributed DROP / ALTER / TRUNCATE / OPTIMIZE ON CLUSTER queries on follower replica before leader replica . Now they will be executed directly on leader replica . [ # 5757 ] ( https : / / github . com / yandex / ClickHouse / pull / 5757 ) ( [ alesapin ] ( https : / / github . com / alesapin ) ) <nl> * Fix segfault in Delta codec which affects columns with values less than 32 bits size . The bug led to random memory corruption . [ # 5786 ] ( https : / / github . com / yandex / ClickHouse / pull / 5786 ) ( [ alesapin ] ( https : / / github . com / alesapin ) ) <nl> + * Fix race condition which could cause some queries not to appear in query_log after a SYSTEM FLUSH LOGS query . [ # 5685 ] ( https : / / github . com / yandex / ClickHouse / pull / 5685 ) ( [ Anton Popov ] ( https : / / github . com / CurtizJ ) ) <nl> * Fix segfault in TTL merge with non - physical columns in block . [ # 5819 ] ( https : / / github . com / yandex / ClickHouse / pull / 5819 ) ( [ Anton Popov ] ( https : / / github . com / CurtizJ ) ) <nl> * Added missing support for constant arguments to evalMLModel function . [ # 5820 ] ( https : / / github . com / yandex / ClickHouse / pull / 5820 ) ( [ alexey - milovidov ] ( https : / / github . com / alexey - milovidov ) ) <nl> <nl> mmm a / CHANGELOG_RU . md <nl> ppp b / CHANGELOG_RU . md <nl> <nl> * Fixed a bug in the parts check for LowCardinality columns . [ # 5832 ] ( https : / / github . com / yandex / ClickHouse / pull / 5832 ) ( [ alesapin ] ( https : / / github . com / alesapin ) ) <nl> * Fixed detection of the default time zone ( UCT instead of UTC ) . [ # 5828 ] ( https : / / github . com / yandex / ClickHouse / pull / 5828 ) ( [ alexey - milovidov ] ( https : / / github . com / alexey - milovidov ) ) <nl> * Fixed a bug in distributed queries of the form DROP / ALTER / TRUNCATE / OPTIMIZE ON CLUSTER . [ # 5757 ] ( https : / / github . com / yandex / ClickHouse / pull / 5757 ) ( [ alesapin ] ( https : / / github . com / alesapin ) ) <nl> - * Fixed a segmentation fault in columns with values smaller than 32 bits . The bug could lead to memory corruption . [ # 5786 ] ( https : / / github . com / yandex / ClickHouse / pull / 5786 ) ( [ alesapin ] ( https : / / github . com / alesapin ) ) <nl> + * Fixed a segmentation fault in the Delta compression codec in columns with values smaller than 32 bits . The bug could lead to memory corruption . [ # 5786 ] ( https : / / github . com / yandex / ClickHouse / pull / 5786 ) ( [ alesapin ] ( https : / / github . com / alesapin ) ) <nl> + * Fixed a bug which , for distributed queries , could cause some queries not to appear in query_log after a SYSTEM FLUSH LOGS query . [ # 5685 ] ( https : / / github . com / yandex / ClickHouse / pull / 5685 ) ( [ Anton Popov ] ( https : / / github . com / CurtizJ ) ) <nl> * Fixed a segmentation fault when merging parts with expired TTL in the case when the block contains columns that are not part of the table structure . [ # 5819 ] ( https : / / github . com / yandex / ClickHouse / pull / 5819 ) ( [ Anton Popov ] ( https : / / github .
com / CurtizJ ) ) <nl> * Added missing support for constant arguments to the evalMLModel function . [ # 5820 ] ( https : / / github . com / yandex / ClickHouse / pull / 5820 ) ( [ alexey - milovidov ] ( https : / / github . com / alexey - milovidov ) ) <nl> | Fix | ClickHouse/ClickHouse | 50c06ad41838bb76721b2404f8a6325c8fe8a188 | 2019-07-08T15:24:05Z |
mmm a / src / core / core . cpp <nl> ppp b / src / core / core . cpp <nl> std : : size_t System : : CurrentCoreIndex ( ) { <nl> } <nl> <nl> Kernel : : Scheduler & System : : CurrentScheduler ( ) { <nl> - return * CurrentCpuCore ( ) . Scheduler ( ) ; <nl> + return CurrentCpuCore ( ) . Scheduler ( ) ; <nl> } <nl> <nl> - const std : : shared_ptr < Kernel : : Scheduler > & System : : Scheduler ( std : : size_t core_index ) { <nl> - ASSERT ( core_index < NUM_CPU_CORES ) ; <nl> - return impl - > cpu_cores [ core_index ] - > Scheduler ( ) ; <nl> + Kernel : : Scheduler & System : : Scheduler ( std : : size_t core_index ) { <nl> + return CpuCore ( core_index ) . Scheduler ( ) ; <nl> + } <nl> + <nl> + const Kernel : : Scheduler & System : : Scheduler ( std : : size_t core_index ) const { <nl> + return CpuCore ( core_index ) . Scheduler ( ) ; <nl> } <nl> <nl> Kernel : : Process * System : : CurrentProcess ( ) { <nl> Cpu & System : : CpuCore ( std : : size_t core_index ) { <nl> return * impl - > cpu_cores [ core_index ] ; <nl> } <nl> <nl> + const Cpu & System : : CpuCore ( std : : size_t core_index ) const { <nl> + ASSERT ( core_index < NUM_CPU_CORES ) ; <nl> + return * impl - > cpu_cores [ core_index ] ; <nl> + } <nl> + <nl> ExclusiveMonitor & System : : Monitor ( ) { <nl> return * impl - > cpu_exclusive_monitor ; <nl> } <nl> mmm a / src / core / core . h <nl> ppp b / src / core / core . h <nl> class System { <nl> / / / Gets a CPU interface to the CPU core with the specified index <nl> Cpu & CpuCore ( std : : size_t core_index ) ; <nl> <nl> + / / / Gets a CPU interface to the CPU core with the specified index <nl> + const Cpu & CpuCore ( std : : size_t core_index ) const ; <nl> + <nl> / / / Gets the exclusive monitor <nl> ExclusiveMonitor & Monitor ( ) ; <nl> <nl> class System { <nl> const VideoCore : : RendererBase & Renderer ( ) const ; <nl> <nl> / / / Gets the scheduler for the CPU core with the specified index <nl> - const std : : shared_ptr < Kernel : : Scheduler > & Scheduler ( std : : size_t core_index ) ; <nl> + Kernel : : Scheduler & Scheduler ( std : : size_t core_index ) ; <nl> + <nl> + / / / Gets the scheduler for the CPU core with the specified index <nl> + const Kernel : : Scheduler & Scheduler ( std : : size_t core_index ) const ; <nl> <nl> / / / Provides a pointer to the current process <nl> Kernel : : Process * CurrentProcess ( ) ; <nl> mmm a / src / core / core_cpu . cpp <nl> ppp b / src / core / core_cpu . cpp <nl> Cpu : : Cpu ( ExclusiveMonitor & exclusive_monitor , CpuBarrier & cpu_barrier , std : : size <nl> arm_interface = std : : make_unique < ARM_Unicorn > ( ) ; <nl> } <nl> <nl> - scheduler = std : : make_shared < Kernel : : Scheduler > ( * arm_interface ) ; <nl> + scheduler = std : : make_unique < Kernel : : Scheduler > ( * arm_interface ) ; <nl> } <nl> <nl> Cpu : : ~ Cpu ( ) = default ; <nl> mmm a / src / core / core_cpu . h <nl> ppp b / src / core / core_cpu . 
h <nl> class Cpu { <nl> return * arm_interface ; <nl> } <nl> <nl> - const std : : shared_ptr < Kernel : : Scheduler > & Scheduler ( ) const { <nl> - return scheduler ; <nl> + Kernel : : Scheduler & Scheduler ( ) { <nl> + return * scheduler ; <nl> + } <nl> + <nl> + const Kernel : : Scheduler & Scheduler ( ) const { <nl> + return * scheduler ; <nl> } <nl> <nl> bool IsMainCore ( ) const { <nl> class Cpu { <nl> <nl> std : : unique_ptr < ARM_Interface > arm_interface ; <nl> CpuBarrier & cpu_barrier ; <nl> - std : : shared_ptr < Kernel : : Scheduler > scheduler ; <nl> + std : : unique_ptr < Kernel : : Scheduler > scheduler ; <nl> <nl> std : : atomic < bool > reschedule_pending = false ; <nl> std : : size_t core_index ; <nl> mmm a / src / core / gdbstub / gdbstub . cpp <nl> ppp b / src / core / gdbstub / gdbstub . cpp <nl> void RegisterModule ( std : : string name , VAddr beg , VAddr end , bool add_elf_ext ) { <nl> <nl> static Kernel : : Thread * FindThreadById ( int id ) { <nl> for ( u32 core = 0 ; core < Core : : NUM_CPU_CORES ; core + + ) { <nl> - const auto & threads = Core : : System : : GetInstance ( ) . Scheduler ( core ) - > GetThreadList ( ) ; <nl> + const auto & threads = Core : : System : : GetInstance ( ) . Scheduler ( core ) . GetThreadList ( ) ; <nl> for ( auto & thread : threads ) { <nl> if ( thread - > GetThreadID ( ) = = static_cast < u32 > ( id ) ) { <nl> current_core = core ; <nl> static void HandleQuery ( ) { <nl> } else if ( strncmp ( query , " fThreadInfo " , strlen ( " fThreadInfo " ) ) = = 0 ) { <nl> std : : string val = " m " ; <nl> for ( u32 core = 0 ; core < Core : : NUM_CPU_CORES ; core + + ) { <nl> - const auto & threads = Core : : System : : GetInstance ( ) . Scheduler ( core ) - > GetThreadList ( ) ; <nl> + const auto & threads = Core : : System : : GetInstance ( ) . Scheduler ( core ) . GetThreadList ( ) ; <nl> for ( const auto & thread : threads ) { <nl> val + = fmt : : format ( " { : x } " , thread - > GetThreadID ( ) ) ; <nl> val + = " , " ; <nl> static void HandleQuery ( ) { <nl> buffer + = " l < ? xml version = \ " 1 . 0 \ " ? > " ; <nl> buffer + = " < threads > " ; <nl> for ( u32 core = 0 ; core < Core : : NUM_CPU_CORES ; core + + ) { <nl> - const auto & threads = Core : : System : : GetInstance ( ) . Scheduler ( core ) - > GetThreadList ( ) ; <nl> + const auto & threads = Core : : System : : GetInstance ( ) . Scheduler ( core ) . GetThreadList ( ) ; <nl> for ( const auto & thread : threads ) { <nl> buffer + = <nl> fmt : : format ( R " * ( < thread id = " { : x } " core = " { : d } " name = " Thread { : x } " > < / thread > ) * " , <nl> mmm a / src / core / hle / kernel / address_arbiter . cpp <nl> ppp b / src / core / hle / kernel / address_arbiter . cpp <nl> static std : : vector < SharedPtr < Thread > > GetThreadsWaitingOnAddress ( VAddr address ) <nl> std : : vector < SharedPtr < Thread > > & waiting_threads , <nl> VAddr arb_addr ) { <nl> const auto & scheduler = Core : : System : : GetInstance ( ) . Scheduler ( core_index ) ; <nl> - const auto & thread_list = scheduler - > GetThreadList ( ) ; <nl> + const auto & thread_list = scheduler . GetThreadList ( ) ; <nl> <nl> for ( const auto & thread : thread_list ) { <nl> if ( thread - > GetArbiterWaitAddress ( ) = = arb_addr ) <nl> mmm a / src / core / hle / kernel / process . cpp <nl> ppp b / src / core / hle / kernel / process . cpp <nl> void Process : : PrepareForTermination ( ) { <nl> } <nl> } ; <nl> <nl> - auto & system = Core : : System : : GetInstance ( ) ; <nl> - stop_threads ( system . 
Scheduler ( 0 ) - > GetThreadList ( ) ) ; <nl> - stop_threads ( system . Scheduler ( 1 ) - > GetThreadList ( ) ) ; <nl> - stop_threads ( system . Scheduler ( 2 ) - > GetThreadList ( ) ) ; <nl> - stop_threads ( system . Scheduler ( 3 ) - > GetThreadList ( ) ) ; <nl> + const auto & system = Core : : System : : GetInstance ( ) ; <nl> + stop_threads ( system . Scheduler ( 0 ) . GetThreadList ( ) ) ; <nl> + stop_threads ( system . Scheduler ( 1 ) . GetThreadList ( ) ) ; <nl> + stop_threads ( system . Scheduler ( 2 ) . GetThreadList ( ) ) ; <nl> + stop_threads ( system . Scheduler ( 3 ) . GetThreadList ( ) ) ; <nl> } <nl> <nl> / * * <nl> mmm a / src / core / hle / kernel / svc . cpp <nl> ppp b / src / core / hle / kernel / svc . cpp <nl> static ResultCode SignalProcessWideKey ( VAddr condition_variable_addr , s32 target <nl> std : : vector < SharedPtr < Thread > > & waiting_threads , <nl> VAddr condvar_addr ) { <nl> const auto & scheduler = Core : : System : : GetInstance ( ) . Scheduler ( core_index ) ; <nl> - const auto & thread_list = scheduler - > GetThreadList ( ) ; <nl> + const auto & thread_list = scheduler . GetThreadList ( ) ; <nl> <nl> for ( const auto & thread : thread_list ) { <nl> if ( thread - > GetCondVarWaitAddress ( ) = = condvar_addr ) <nl> mmm a / src / core / hle / kernel / thread . cpp <nl> ppp b / src / core / hle / kernel / thread . cpp <nl> void Thread : : CancelWakeupTimer ( ) { <nl> static boost : : optional < s32 > GetNextProcessorId ( u64 mask ) { <nl> for ( s32 index = 0 ; index < Core : : NUM_CPU_CORES ; + + index ) { <nl> if ( mask & ( 1ULL < < index ) ) { <nl> - if ( ! Core : : System : : GetInstance ( ) . Scheduler ( index ) - > GetCurrentThread ( ) ) { <nl> + if ( ! Core : : System : : GetInstance ( ) . Scheduler ( index ) . GetCurrentThread ( ) ) { <nl> / / Core is enabled and not running any threads , use this one <nl> return index ; <nl> } <nl> void Thread : : ResumeFromWait ( ) { <nl> new_processor_id = processor_id ; <nl> } <nl> if ( ideal_core ! = - 1 & & <nl> - Core : : System : : GetInstance ( ) . Scheduler ( ideal_core ) - > GetCurrentThread ( ) = = nullptr ) { <nl> + Core : : System : : GetInstance ( ) . Scheduler ( ideal_core ) . GetCurrentThread ( ) = = nullptr ) { <nl> new_processor_id = ideal_core ; <nl> } <nl> <nl> ASSERT ( * new_processor_id < 4 ) ; <nl> <nl> / / Add thread to new core ' s scheduler <nl> - auto & next_scheduler = Core : : System : : GetInstance ( ) . Scheduler ( * new_processor_id ) ; <nl> + auto * next_scheduler = & Core : : System : : GetInstance ( ) . Scheduler ( * new_processor_id ) ; <nl> <nl> if ( * new_processor_id ! = processor_id ) { <nl> / / Remove thread from previous core ' s scheduler <nl> void Thread : : ResumeFromWait ( ) { <nl> next_scheduler - > ScheduleThread ( this , current_priority ) ; <nl> <nl> / / Change thread ' s scheduler <nl> - scheduler = next_scheduler . get ( ) ; <nl> + scheduler = next_scheduler ; <nl> <nl> Core : : System : : GetInstance ( ) . CpuCore ( processor_id ) . PrepareReschedule ( ) ; <nl> } <nl> ResultVal < SharedPtr < Thread > > Thread : : Create ( KernelCore & kernel , std : : string name <nl> thread - > name = std : : move ( name ) ; <nl> thread - > callback_handle = kernel . ThreadWakeupCallbackHandleTable ( ) . Create ( thread ) . Unwrap ( ) ; <nl> thread - > owner_process = & owner_process ; <nl> - thread - > scheduler = Core : : System : : GetInstance ( ) . Scheduler ( processor_id ) . get ( ) ; <nl> + thread - > scheduler = & Core : : System : : GetInstance ( ) . 
Scheduler ( processor_id ) ; <nl> thread - > scheduler - > AddThread ( thread , priority ) ; <nl> thread - > tls_address = thread - > owner_process - > MarkNextAvailableTLSSlotAsUsed ( * thread ) ; <nl> <nl> void Thread : : ChangeCore ( u32 core , u64 mask ) { <nl> new_processor_id = processor_id ; <nl> } <nl> if ( ideal_core ! = - 1 & & <nl> - Core : : System : : GetInstance ( ) . Scheduler ( ideal_core ) - > GetCurrentThread ( ) = = nullptr ) { <nl> + Core : : System : : GetInstance ( ) . Scheduler ( ideal_core ) . GetCurrentThread ( ) = = nullptr ) { <nl> new_processor_id = ideal_core ; <nl> } <nl> <nl> ASSERT ( * new_processor_id < 4 ) ; <nl> <nl> / / Add thread to new core ' s scheduler <nl> - auto & next_scheduler = Core : : System : : GetInstance ( ) . Scheduler ( * new_processor_id ) ; <nl> + auto * next_scheduler = & Core : : System : : GetInstance ( ) . Scheduler ( * new_processor_id ) ; <nl> <nl> if ( * new_processor_id ! = processor_id ) { <nl> / / Remove thread from previous core ' s scheduler <nl> void Thread : : ChangeCore ( u32 core , u64 mask ) { <nl> next_scheduler - > ScheduleThread ( this , current_priority ) ; <nl> <nl> / / Change thread ' s scheduler <nl> - scheduler = next_scheduler . get ( ) ; <nl> + scheduler = next_scheduler ; <nl> <nl> Core : : System : : GetInstance ( ) . CpuCore ( processor_id ) . PrepareReschedule ( ) ; <nl> } <nl> mmm a / src / yuzu / debugger / wait_tree . cpp <nl> ppp b / src / yuzu / debugger / wait_tree . cpp <nl> std : : vector < std : : unique_ptr < WaitTreeThread > > WaitTreeItem : : MakeThreadItemList ( ) <nl> } <nl> } ; <nl> <nl> - add_threads ( Core : : System : : GetInstance ( ) . Scheduler ( 0 ) - > GetThreadList ( ) ) ; <nl> - add_threads ( Core : : System : : GetInstance ( ) . Scheduler ( 1 ) - > GetThreadList ( ) ) ; <nl> - add_threads ( Core : : System : : GetInstance ( ) . Scheduler ( 2 ) - > GetThreadList ( ) ) ; <nl> - add_threads ( Core : : System : : GetInstance ( ) . Scheduler ( 3 ) - > GetThreadList ( ) ) ; <nl> + const auto & system = Core : : System : : GetInstance ( ) ; <nl> + add_threads ( system . Scheduler ( 0 ) . GetThreadList ( ) ) ; <nl> + add_threads ( system . Scheduler ( 1 ) . GetThreadList ( ) ) ; <nl> + add_threads ( system . Scheduler ( 2 ) . GetThreadList ( ) ) ; <nl> + add_threads ( system . Scheduler ( 3 ) . GetThreadList ( ) ) ; <nl> <nl> return item_list ; <nl> } <nl> | core_cpu : Make Cpu scheduler instances unique_ptrs instead of shared_ptrs | yuzu-emu/yuzu | 5484742fdaf036db03ac7b8c746df5004f74efad | 2018-10-15T18:15:56Z |
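The refactor above is a pure ownership change: each Cpu is the sole owner of its Kernel::Scheduler, so the shared_ptr (and its reference counting) is replaced by a unique_ptr, and accessors hand out references instead of copies of the smart pointer. A stripped-down sketch of the pattern follows, with stand-in type and accessor names rather than the actual yuzu sources:

```cpp
#include <memory>

class Scheduler {};  // stand-in for Kernel::Scheduler

class Cpu {
public:
    Cpu() : scheduler(std::make_unique<Scheduler>()) {}

    // Accessors return references; ownership never leaves the Cpu.
    Scheduler& GetScheduler() { return *scheduler; }
    const Scheduler& GetScheduler() const { return *scheduler; }

private:
    // unique_ptr documents single ownership and drops shared_ptr's
    // atomic reference-counting overhead.
    std::unique_ptr<Scheduler> scheduler;
};

int main() {
    Cpu cpu;
    // Call sites that previously copied a shared_ptr now keep a raw
    // pointer or reference (as `auto* next_scheduler = & ... Scheduler(id);`
    // does in the diff); it must not outlive the owning Cpu.
    Scheduler* current = &cpu.GetScheduler();
    (void)current;
    return 0;
}
```

The const overload added in the diff is what lets const contexts, such as the now-const system reference in Process::PrepareForTermination, read the per-core thread lists without mutable access.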
mmm a / code / graph - algorithms / bridge_tree / bridge_tree . cpp <nl> ppp b / code / graph - algorithms / bridge_tree / bridge_tree . cpp <nl> <nl> # include < bits / stdc + + . h > <nl> using namespace std ; <nl> typedef long long ll ; <nl> - <nl> + / / Part of Cosmos by OpenGenus Foundation <nl> const int MAXN = 1e5 + 5 ; <nl> vector < int > adj [ MAXN ] , tree [ MAXN ] ; / / The bridge edge tree formed from the given graph . <nl> int disc [ MAXN ] , low [ MAXN ] , vis [ MAXN ] ; <nl> int main ( ) { <nl> cout < < i < < " " < < tree [ i ] [ j ] < < endl ; <nl> } <nl> return 0 ; <nl> - } <nl> \ No newline at end of file <nl> + } <nl> | header comment added | OpenGenus/cosmos | 919d5442fdf711ad32dff3f33a0197621e83288e | 2017-10-06T15:38:06Z |
mmm a / src / mongo / s / catalog / catalog_manager . cpp <nl> ppp b / src / mongo / s / catalog / catalog_manager . cpp <nl> <nl> # include " mongo / db / client . h " <nl> # include " mongo / db / operation_context . h " <nl> # include " mongo / db / write_concern_options . h " <nl> - # include " mongo / executor / network_interface . h " <nl> # include " mongo / rpc / get_status_from_command_result . h " <nl> # include " mongo / s / catalog / dist_lock_manager . h " <nl> # include " mongo / s / catalog / type_chunk . h " <nl> <nl> # include " mongo / s / client / shard . h " <nl> # include " mongo / s / client / shard_registry . h " <nl> # include " mongo / s / grid . h " <nl> - # include " mongo / s / set_shard_version_request . h " <nl> # include " mongo / s / shard_util . h " <nl> # include " mongo / s / write_ops / batched_command_request . h " <nl> # include " mongo / s / write_ops / batched_command_response . h " <nl> Status CatalogManager : : enableSharding ( const std : : string & dbName ) { <nl> return updateDatabase ( dbName , db ) ; <nl> } <nl> <nl> - Status CatalogManager : : dropCollection ( OperationContext * txn , const NamespaceString & ns ) { <nl> - logChange ( txn - > getClient ( ) - > clientAddress ( true ) , " dropCollection . start " , ns . ns ( ) , BSONObj ( ) ) ; <nl> - <nl> - vector < ShardType > allShards ; <nl> - Status status = getAllShards ( & allShards ) ; <nl> - if ( ! status . isOK ( ) ) { <nl> - return status ; <nl> - } <nl> - <nl> - LOG ( 1 ) < < " dropCollection " < < ns < < " started " ; <nl> - <nl> - / / Lock the collection globally so that split / migrate cannot run <nl> - auto scopedDistLock = getDistLockManager ( ) - > lock ( ns . ns ( ) , " drop " ) ; <nl> - if ( ! scopedDistLock . isOK ( ) ) { <nl> - return scopedDistLock . getStatus ( ) ; <nl> - } <nl> - <nl> - LOG ( 1 ) < < " dropCollection " < < ns < < " locked " ; <nl> - <nl> - std : : map < string , BSONObj > errors ; <nl> - auto * shardRegistry = grid . shardRegistry ( ) ; <nl> - <nl> - for ( const auto & shardEntry : allShards ) { <nl> - auto dropResult = shardRegistry - > runCommandWithNotMasterRetries ( <nl> - shardEntry . getName ( ) , ns . db ( ) . toString ( ) , BSON ( " drop " < < ns . coll ( ) ) ) ; <nl> - <nl> - if ( ! dropResult . isOK ( ) ) { <nl> - return dropResult . getStatus ( ) ; <nl> - } <nl> - <nl> - auto dropStatus = getStatusFromCommandResult ( dropResult . getValue ( ) ) ; <nl> - if ( ! dropStatus . isOK ( ) ) { <nl> - if ( dropStatus . code ( ) = = ErrorCodes : : NamespaceNotFound ) { <nl> - continue ; <nl> - } <nl> - <nl> - errors . emplace ( shardEntry . getHost ( ) , dropResult . getValue ( ) ) ; <nl> - } <nl> - } <nl> - <nl> - if ( ! errors . empty ( ) ) { <nl> - StringBuilder sb ; <nl> - sb < < " Dropping collection failed on the following hosts : " ; <nl> - <nl> - for ( auto it = errors . cbegin ( ) ; it ! = errors . cend ( ) ; + + it ) { <nl> - if ( it ! = errors . cbegin ( ) ) { <nl> - sb < < " , " ; <nl> - } <nl> - <nl> - sb < < it - > first < < " : " < < it - > second ; <nl> - } <nl> - <nl> - return { ErrorCodes : : OperationFailed , sb . str ( ) } ; <nl> - } <nl> - <nl> - LOG ( 1 ) < < " dropCollection " < < ns < < " shard data deleted " ; <nl> - <nl> - / / Remove chunk data <nl> - Status result = remove ( ChunkType : : ConfigNS , BSON ( ChunkType : : ns ( ns . ns ( ) ) ) , 0 , nullptr ) ; <nl> - if ( ! result . 
isOK ( ) ) { <nl> - return result ; <nl> - } <nl> - <nl> - LOG ( 1 ) < < " dropCollection " < < ns < < " chunk data deleted " ; <nl> - <nl> - / / Mark the collection as dropped <nl> - CollectionType coll ; <nl> - coll . setNs ( ns ) ; <nl> - coll . setDropped ( true ) ; <nl> - coll . setEpoch ( ChunkVersion : : DROPPED ( ) . epoch ( ) ) ; <nl> - coll . setUpdatedAt ( grid . shardRegistry ( ) - > getNetwork ( ) - > now ( ) ) ; <nl> - <nl> - result = updateCollection ( ns . ns ( ) , coll ) ; <nl> - if ( ! result . isOK ( ) ) { <nl> - return result ; <nl> - } <nl> - <nl> - LOG ( 1 ) < < " dropCollection " < < ns < < " collection marked as dropped " ; <nl> - <nl> - for ( const auto & shardEntry : allShards ) { <nl> - SetShardVersionRequest ssv = SetShardVersionRequest : : makeForVersioningNoPersist ( <nl> - connectionString ( ) , <nl> - shardEntry . getName ( ) , <nl> - fassertStatusOK ( 28753 , ConnectionString : : parse ( shardEntry . getHost ( ) ) ) , <nl> - ns , <nl> - ChunkVersion : : DROPPED ( ) , <nl> - true ) ; <nl> - <nl> - auto ssvResult = shardRegistry - > runCommandWithNotMasterRetries ( <nl> - shardEntry . getName ( ) , " admin " , ssv . toBSON ( ) ) ; <nl> - <nl> - if ( ! ssvResult . isOK ( ) ) { <nl> - return ssvResult . getStatus ( ) ; <nl> - } <nl> - <nl> - auto ssvStatus = getStatusFromCommandResult ( ssvResult . getValue ( ) ) ; <nl> - if ( ! ssvStatus . isOK ( ) ) { <nl> - return ssvStatus ; <nl> - } <nl> - <nl> - auto unsetShardingStatus = shardRegistry - > runCommandWithNotMasterRetries ( <nl> - shardEntry . getName ( ) , " admin " , BSON ( " unsetSharding " < < 1 ) ) ; <nl> - <nl> - if ( ! unsetShardingStatus . isOK ( ) ) { <nl> - return unsetShardingStatus . getStatus ( ) ; <nl> - } <nl> - <nl> - auto unsetShardingResult = getStatusFromCommandResult ( unsetShardingStatus . getValue ( ) ) ; <nl> - if ( ! unsetShardingResult . isOK ( ) ) { <nl> - return unsetShardingResult ; <nl> - } <nl> - } <nl> - <nl> - LOG ( 1 ) < < " dropCollection " < < ns < < " completed " ; <nl> - <nl> - logChange ( txn - > getClient ( ) - > clientAddress ( true ) , " dropCollection " , ns . ns ( ) , BSONObj ( ) ) ; <nl> - <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> } / / namespace mongo <nl> mmm a / src / mongo / s / catalog / catalog_manager . h <nl> ppp b / src / mongo / s / catalog / catalog_manager . h <nl> class CatalogManager { <nl> * some of the known failures : <nl> * - NamespaceNotFound - collection does not exist <nl> * / <nl> - Status dropCollection ( OperationContext * txn , const NamespaceString & ns ) ; <nl> + virtual Status dropCollection ( OperationContext * txn , const NamespaceString & ns ) = 0 ; <nl> <nl> / * * <nl> * Retrieves all databases for a shard . <nl> mmm a / src / mongo / s / catalog / catalog_manager_mock . cpp <nl> ppp b / src / mongo / s / catalog / catalog_manager_mock . cpp <nl> Status CatalogManagerMock : : getCollections ( const string * dbName , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status CatalogManagerMock : : dropCollection ( OperationContext * txn , const NamespaceString & ns ) { <nl> + return { ErrorCodes : : InternalError , " Method not implemented " } ; <nl> + } <nl> + <nl> Status CatalogManagerMock : : getDatabasesForShard ( const string & shardName , vector < string > * dbs ) { <nl> return Status : : OK ( ) ; <nl> } <nl> mmm a / src / mongo / s / catalog / catalog_manager_mock . h <nl> ppp b / src / mongo / s / catalog / catalog_manager_mock . 
h <nl> class CatalogManagerMock : public CatalogManager { <nl> Status getCollections ( const std : : string * dbName , <nl> std : : vector < CollectionType > * collections ) override ; <nl> <nl> + Status dropCollection ( OperationContext * txn , const NamespaceString & ns ) override ; <nl> + <nl> Status getDatabasesForShard ( const std : : string & shardName , <nl> std : : vector < std : : string > * dbs ) override ; <nl> <nl> mmm a / src / mongo / s / catalog / legacy / catalog_manager_legacy . cpp <nl> ppp b / src / mongo / s / catalog / legacy / catalog_manager_legacy . cpp <nl> <nl> # include " mongo / db / commands . h " <nl> # include " mongo / db / operation_context . h " <nl> # include " mongo / db / server_options . h " <nl> + # include " mongo / executor / network_interface . h " <nl> + # include " mongo / rpc / get_status_from_command_result . h " <nl> # include " mongo / platform / atomic_word . h " <nl> # include " mongo / s / catalog / config_server_version . h " <nl> # include " mongo / s / catalog / legacy / cluster_client_internal . h " <nl> <nl> # include " mongo / s / catalog / legacy / legacy_dist_lock_manager . h " <nl> # include " mongo / s / catalog / type_config_version . h " <nl> # include " mongo / s / grid . h " <nl> + # include " mongo / s / set_shard_version_request . h " <nl> # include " mongo / s / shard_key_pattern . h " <nl> # include " mongo / s / write_ops / batched_command_request . h " <nl> # include " mongo / s / write_ops / batched_command_response . h " <nl> Status CatalogManagerLegacy : : getCollections ( const std : : string * dbName , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status CatalogManagerLegacy : : dropCollection ( OperationContext * txn , const NamespaceString & ns ) { <nl> + logChange ( txn - > getClient ( ) - > clientAddress ( true ) , " dropCollection . start " , ns . ns ( ) , BSONObj ( ) ) ; <nl> + <nl> + vector < ShardType > allShards ; <nl> + Status status = getAllShards ( & allShards ) ; <nl> + if ( ! status . isOK ( ) ) { <nl> + return status ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " started " ; <nl> + <nl> + / / Lock the collection globally so that split / migrate cannot run <nl> + auto scopedDistLock = getDistLockManager ( ) - > lock ( ns . ns ( ) , " drop " ) ; <nl> + if ( ! scopedDistLock . isOK ( ) ) { <nl> + return scopedDistLock . getStatus ( ) ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " locked " ; <nl> + <nl> + std : : map < string , BSONObj > errors ; <nl> + auto * shardRegistry = grid . shardRegistry ( ) ; <nl> + <nl> + for ( const auto & shardEntry : allShards ) { <nl> + auto dropResult = shardRegistry - > runCommandWithNotMasterRetries ( <nl> + shardEntry . getName ( ) , ns . db ( ) . toString ( ) , BSON ( " drop " < < ns . coll ( ) ) ) ; <nl> + <nl> + if ( ! dropResult . isOK ( ) ) { <nl> + return dropResult . getStatus ( ) ; <nl> + } <nl> + <nl> + auto dropStatus = getStatusFromCommandResult ( dropResult . getValue ( ) ) ; <nl> + if ( ! dropStatus . isOK ( ) ) { <nl> + if ( dropStatus . code ( ) = = ErrorCodes : : NamespaceNotFound ) { <nl> + continue ; <nl> + } <nl> + <nl> + errors . emplace ( shardEntry . getHost ( ) , dropResult . getValue ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + if ( ! errors . empty ( ) ) { <nl> + StringBuilder sb ; <nl> + sb < < " Dropping collection failed on the following hosts : " ; <nl> + <nl> + for ( auto it = errors . cbegin ( ) ; it ! = errors . cend ( ) ; + + it ) { <nl> + if ( it ! = errors . 
cbegin ( ) ) { <nl> + sb < < " , " ; <nl> + } <nl> + <nl> + sb < < it - > first < < " : " < < it - > second ; <nl> + } <nl> + <nl> + return { ErrorCodes : : OperationFailed , sb . str ( ) } ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " shard data deleted " ; <nl> + <nl> + / / Remove chunk data <nl> + Status result = remove ( ChunkType : : ConfigNS , BSON ( ChunkType : : ns ( ns . ns ( ) ) ) , 0 , nullptr ) ; <nl> + if ( ! result . isOK ( ) ) { <nl> + return result ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " chunk data deleted " ; <nl> + <nl> + / / Mark the collection as dropped <nl> + CollectionType coll ; <nl> + coll . setNs ( ns ) ; <nl> + coll . setDropped ( true ) ; <nl> + coll . setEpoch ( ChunkVersion : : DROPPED ( ) . epoch ( ) ) ; <nl> + coll . setUpdatedAt ( grid . shardRegistry ( ) - > getNetwork ( ) - > now ( ) ) ; <nl> + <nl> + result = updateCollection ( ns . ns ( ) , coll ) ; <nl> + if ( ! result . isOK ( ) ) { <nl> + return result ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " collection marked as dropped " ; <nl> + <nl> + for ( const auto & shardEntry : allShards ) { <nl> + SetShardVersionRequest ssv = SetShardVersionRequest : : makeForVersioningNoPersist ( <nl> + connectionString ( ) , <nl> + shardEntry . getName ( ) , <nl> + fassertStatusOK ( 28753 , ConnectionString : : parse ( shardEntry . getHost ( ) ) ) , <nl> + ns , <nl> + ChunkVersion : : DROPPED ( ) , <nl> + true ) ; <nl> + <nl> + auto ssvResult = shardRegistry - > runCommandWithNotMasterRetries ( <nl> + shardEntry . getName ( ) , " admin " , ssv . toBSON ( ) ) ; <nl> + <nl> + if ( ! ssvResult . isOK ( ) ) { <nl> + return ssvResult . getStatus ( ) ; <nl> + } <nl> + <nl> + auto ssvStatus = getStatusFromCommandResult ( ssvResult . getValue ( ) ) ; <nl> + if ( ! ssvStatus . isOK ( ) ) { <nl> + return ssvStatus ; <nl> + } <nl> + <nl> + auto unsetShardingStatus = shardRegistry - > runCommandWithNotMasterRetries ( <nl> + shardEntry . getName ( ) , " admin " , BSON ( " unsetSharding " < < 1 ) ) ; <nl> + <nl> + if ( ! unsetShardingStatus . isOK ( ) ) { <nl> + return unsetShardingStatus . getStatus ( ) ; <nl> + } <nl> + <nl> + auto unsetShardingResult = getStatusFromCommandResult ( unsetShardingStatus . getValue ( ) ) ; <nl> + if ( ! unsetShardingResult . isOK ( ) ) { <nl> + return unsetShardingResult ; <nl> + } <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " completed " ; <nl> + <nl> + logChange ( txn - > getClient ( ) - > clientAddress ( true ) , " dropCollection " , ns . ns ( ) , BSONObj ( ) ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> void CatalogManagerLegacy : : logAction ( const ActionLogType & actionLog ) { <nl> / / Create the action log collection and ensure that it is capped . Wrap in try / catch , <nl> / / because creating an existing collection throws . <nl> mmm a / src / mongo / s / catalog / legacy / catalog_manager_legacy . h <nl> ppp b / src / mongo / s / catalog / legacy / catalog_manager_legacy . 
h <nl> class CatalogManagerLegacy final : public CatalogManager { <nl> <nl> Status getCollections ( const std : : string * dbName , std : : vector < CollectionType > * collections ) ; <nl> <nl> + Status dropCollection ( OperationContext * txn , const NamespaceString & ns ) override ; <nl> + <nl> Status getDatabasesForShard ( const std : : string & shardName , <nl> std : : vector < std : : string > * dbs ) override ; <nl> <nl> mmm a / src / mongo / s / catalog / replset / catalog_manager_replica_set . cpp <nl> ppp b / src / mongo / s / catalog / replset / catalog_manager_replica_set . cpp <nl> Status CatalogManagerReplicaSet : : getCollections ( const std : : string * dbName , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status CatalogManagerReplicaSet : : dropCollection ( OperationContext * txn , const NamespaceString & ns ) { <nl> + logChange ( txn - > getClient ( ) - > clientAddress ( true ) , " dropCollection . start " , ns . ns ( ) , BSONObj ( ) ) ; <nl> + <nl> + vector < ShardType > allShards ; <nl> + Status status = getAllShards ( & allShards ) ; <nl> + if ( ! status . isOK ( ) ) { <nl> + return status ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " started " ; <nl> + <nl> + / / Lock the collection globally so that split / migrate cannot run <nl> + auto scopedDistLock = getDistLockManager ( ) - > lock ( ns . ns ( ) , " drop " ) ; <nl> + if ( ! scopedDistLock . isOK ( ) ) { <nl> + return scopedDistLock . getStatus ( ) ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " locked " ; <nl> + <nl> + std : : map < string , BSONObj > errors ; <nl> + auto * shardRegistry = grid . shardRegistry ( ) ; <nl> + <nl> + for ( const auto & shardEntry : allShards ) { <nl> + auto dropResult = shardRegistry - > runCommandWithNotMasterRetries ( <nl> + shardEntry . getName ( ) , ns . db ( ) . toString ( ) , BSON ( " drop " < < ns . coll ( ) ) ) ; <nl> + <nl> + if ( ! dropResult . isOK ( ) ) { <nl> + return dropResult . getStatus ( ) ; <nl> + } <nl> + <nl> + auto dropStatus = getStatusFromCommandResult ( dropResult . getValue ( ) ) ; <nl> + if ( ! dropStatus . isOK ( ) ) { <nl> + if ( dropStatus . code ( ) = = ErrorCodes : : NamespaceNotFound ) { <nl> + continue ; <nl> + } <nl> + <nl> + errors . emplace ( shardEntry . getHost ( ) , dropResult . getValue ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + if ( ! errors . empty ( ) ) { <nl> + StringBuilder sb ; <nl> + sb < < " Dropping collection failed on the following hosts : " ; <nl> + <nl> + for ( auto it = errors . cbegin ( ) ; it ! = errors . cend ( ) ; + + it ) { <nl> + if ( it ! = errors . cbegin ( ) ) { <nl> + sb < < " , " ; <nl> + } <nl> + <nl> + sb < < it - > first < < " : " < < it - > second ; <nl> + } <nl> + <nl> + return { ErrorCodes : : OperationFailed , sb . str ( ) } ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " shard data deleted " ; <nl> + <nl> + / / Remove chunk data <nl> + Status result = remove ( ChunkType : : ConfigNS , BSON ( ChunkType : : ns ( ns . ns ( ) ) ) , 0 , nullptr ) ; <nl> + if ( ! result . isOK ( ) ) { <nl> + return result ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " chunk data deleted " ; <nl> + <nl> + / / Mark the collection as dropped <nl> + CollectionType coll ; <nl> + coll . setNs ( ns ) ; <nl> + coll . setDropped ( true ) ; <nl> + coll . setEpoch ( ChunkVersion : : DROPPED ( ) . epoch ( ) ) ; <nl> + coll . setUpdatedAt ( grid . shardRegistry ( ) - > getNetwork ( ) - > now ( ) ) ; <nl> + <nl> + result = updateCollection ( ns . 
ns ( ) , coll ) ; <nl> + if ( ! result . isOK ( ) ) { <nl> + return result ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " collection marked as dropped " ; <nl> + <nl> + for ( const auto & shardEntry : allShards ) { <nl> + SetShardVersionRequest ssv = SetShardVersionRequest : : makeForVersioningNoPersist ( <nl> + connectionString ( ) , <nl> + shardEntry . getName ( ) , <nl> + fassertStatusOK ( 28781 , ConnectionString : : parse ( shardEntry . getHost ( ) ) ) , <nl> + ns , <nl> + ChunkVersion : : DROPPED ( ) , <nl> + true ) ; <nl> + <nl> + auto ssvResult = shardRegistry - > runCommandWithNotMasterRetries ( <nl> + shardEntry . getName ( ) , " admin " , ssv . toBSON ( ) ) ; <nl> + <nl> + if ( ! ssvResult . isOK ( ) ) { <nl> + return ssvResult . getStatus ( ) ; <nl> + } <nl> + <nl> + auto ssvStatus = getStatusFromCommandResult ( ssvResult . getValue ( ) ) ; <nl> + if ( ! ssvStatus . isOK ( ) ) { <nl> + return ssvStatus ; <nl> + } <nl> + <nl> + auto unsetShardingStatus = shardRegistry - > runCommandWithNotMasterRetries ( <nl> + shardEntry . getName ( ) , " admin " , BSON ( " unsetSharding " < < 1 ) ) ; <nl> + <nl> + if ( ! unsetShardingStatus . isOK ( ) ) { <nl> + return unsetShardingStatus . getStatus ( ) ; <nl> + } <nl> + <nl> + auto unsetShardingResult = getStatusFromCommandResult ( unsetShardingStatus . getValue ( ) ) ; <nl> + if ( ! unsetShardingResult . isOK ( ) ) { <nl> + return unsetShardingResult ; <nl> + } <nl> + } <nl> + <nl> + LOG ( 1 ) < < " dropCollection " < < ns < < " completed " ; <nl> + <nl> + logChange ( txn - > getClient ( ) - > clientAddress ( true ) , " dropCollection " , ns . ns ( ) , BSONObj ( ) ) ; <nl> + <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> void CatalogManagerReplicaSet : : logAction ( const ActionLogType & actionLog ) { <nl> if ( _actionLogCollectionCreated . load ( ) = = 0 ) { <nl> BSONObj createCmd = BSON ( " create " < < ActionLogType : : ConfigNS < < " capped " < < true < < " size " <nl> mmm a / src / mongo / s / catalog / replset / catalog_manager_replica_set . h <nl> ppp b / src / mongo / s / catalog / replset / catalog_manager_replica_set . h <nl> class CatalogManagerReplicaSet final : public CatalogManager { <nl> Status getCollections ( const std : : string * dbName , <nl> std : : vector < CollectionType > * collections ) override ; <nl> <nl> + Status dropCollection ( OperationContext * txn , const NamespaceString & ns ) override ; <nl> + <nl> Status getDatabasesForShard ( const std : : string & shardName , <nl> std : : vector < std : : string > * dbs ) override ; <nl> <nl> | SERVER - 19855 Move dropCollection implementation out of CatalogManager | mongodb/mongo | 312fe328972341c071516b2e92227325471293ea | 2015-08-13T12:52:46Z |
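Editor's note: the patch above turns a concrete base-class method into a pure virtual, so each config-server backend (legacy SCCC vs. replica set) carries its own copy of `dropCollection`, and the test mock stubs it out with an error status. A minimal sketch of that refactoring pattern, with a hypothetical `Status` struct and trimmed class shapes standing in for the real MongoDB types:

```cpp
#include <iostream>
#include <memory>
#include <string>

// Hypothetical stand-in for mongo::Status: an ok flag plus a reason string.
struct Status {
    bool ok;
    std::string reason;
    static Status OK() { return {true, ""}; }
};

// Before the change, dropCollection lived here as a concrete method.
// After the change it is pure virtual, so every backend must override it.
class CatalogManager {
public:
    virtual ~CatalogManager() = default;
    virtual Status dropCollection(const std::string& ns) = 0;
};

// Legacy (SCCC config server) backend keeps the original implementation.
class CatalogManagerLegacy : public CatalogManager {
public:
    Status dropCollection(const std::string& ns) override {
        // ...run "drop" on every shard, remove chunk metadata, mark the
        // collection dropped, then send setShardVersion / unsetSharding.
        return Status::OK();
    }
};

// The replica-set backend gets its own copy, free to diverge from the
// legacy one (e.g. the distinct fassert codes 28753 vs. 28781 in the patch).
class CatalogManagerReplicaSet : public CatalogManager {
public:
    Status dropCollection(const std::string& ns) override {
        return Status::OK();
    }
};

// The mock used by unit tests simply reports "not implemented".
class CatalogManagerMock : public CatalogManager {
public:
    Status dropCollection(const std::string&) override {
        return {false, "Method not implemented"};
    }
};

int main() {
    std::unique_ptr<CatalogManager> cm = std::make_unique<CatalogManagerMock>();
    std::cout << cm->dropCollection("db.coll").reason << "\n";  // Method not implemented
}
```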
mmm a / src / clustering / immediate_consistency / remote_replicator_client . cc <nl> ppp b / src / clustering / immediate_consistency / remote_replicator_client . cc <nl> <nl> # include " stl_utils . hpp " <nl> # include " store_view . hpp " <nl> <nl> + class remote_replicator_client_t : : timestamp_range_tracker_t { <nl> + public : <nl> + timestamp_range_tracker_t ( <nl> + const region_t & _store_region , <nl> + state_timestamp_t _prev_timestamp ) : <nl> + store_region ( _store_region ) , <nl> + prev_timestamp ( _prev_timestamp ) { } <nl> + <nl> + / * Records that the backfill has copied values up to the given timestamp for the <nl> + given range . * / <nl> + void record_backfill ( const region_t & region , const state_timestamp_t & ts ) { <nl> + rassert ( region . beg = = store_region . beg ) ; <nl> + rassert ( region . end = = store_region . end ) ; <nl> + rassert ( key_range_t : : right_bound_t ( region . inner . left ) = = <nl> + ( entries . empty ( ) <nl> + ? key_range_t : : right_bound_t ( store_region . inner . left ) <nl> + : entries . back ( ) . first ) ) ; <nl> + rassert ( region . inner . right < = store_region . inner . right ) ; <nl> + if ( entries . empty ( ) | | entries . back ( ) . second ! = ts ) { <nl> + entries . push_back ( std : : make_pair ( region . inner . right , ts ) ) ; <nl> + } else { <nl> + / * Rather than making a second entry with the same timestamp , coalesce <nl> + the two entries . * / <nl> + entries . back ( ) . first = region . inner . right ; <nl> + } <nl> + } <nl> + <nl> + / * Records that the write with the given timestamp has been applies in the given <nl> + region . * / <nl> + void record_write ( const region_t & region , state_timestamp_t ts ) { <nl> + rassert ( ts = = prev_timestamp . next ( ) ) ; <nl> + prev_timestamp = ts ; <nl> + if ( region_is_empty ( region ) ) { <nl> + rassert ( entries . empty ( ) | | entries [ 0 ] . second > = ts ) ; <nl> + return ; <nl> + } <nl> + rassert ( region . beg = = store_region . beg ) ; <nl> + rassert ( region . end = = store_region . end ) ; <nl> + rassert ( region . inner . left = = store_region . inner . left ) ; <nl> + rassert ( region . inner . right = = entries [ 0 ] . first ) ; <nl> + rassert ( entries [ 0 ] . second . next ( ) = = ts ) ; <nl> + entries [ 0 ] . second = ts ; <nl> + if ( entries . size ( ) > 1 & & entries [ 1 ] . second = = ts ) { <nl> + entries . pop_front ( ) ; <nl> + } <nl> + } <nl> + <nl> + / * Returns the timestamp of the last streaming write processed . * / <nl> + state_timestamp_t get_prev_timestamp ( ) const { <nl> + return prev_timestamp ; <nl> + } <nl> + <nl> + / * Returns the highest timestamp present in the map * / <nl> + state_timestamp_t get_max_timestamp ( ) const { <nl> + if ( entries . empty ( ) ) { <nl> + return prev_timestamp ; <nl> + } else { <nl> + return entries . back ( ) . second ; <nl> + } <nl> + } <nl> + <nl> + / * Returns the rightmost point we ' ve backfilled to so far * / <nl> + key_range_t : : right_bound_t get_backfill_threshold ( ) const { <nl> + if ( entries . empty ( ) ) { <nl> + return key_range_t : : right_bound_t ( store_region . inner . left ) ; <nl> + } else { <nl> + return entries . back ( ) . first ; <nl> + } <nl> + } <nl> + <nl> + / * Returns ` true ` if the timestamp is consistent throughout the entire region * / <nl> + bool is_homogeneous ( ) const { <nl> + return ! entries . empty ( ) & & entries [ 0 ] . first = = store_region . inner . 
right ; <nl> + } <nl> + <nl> + / * Given the next streaming write , extracts the part of it that should be applied <nl> + such that the streaming write and the backfill together will neither skip nor <nl> + duplicate any change . If the backfill hasn ' t sent us any changes with timestamps <nl> + equal to or greater than the next write , we wouldn ' t be able to determine where <nl> + to clip the write , so this will crash . It also updates the <nl> + ` timestamp_range_tracker_t ` ' s internal record to reflect the fact that the write <nl> + will be applied to the store . * / <nl> + void clip_next_write_backfilling ( <nl> + state_timestamp_t timestamp , region_t * region_out ) { <nl> + guarantee ( can_clip_next_write_backfilling ( ) ) ; <nl> + clip_next_write_paused ( timestamp , region_out ) ; <nl> + } <nl> + <nl> + / * Returns ` true ` if the backfill has sent us some changes with timestamps equal <nl> + to or greater than the next write . * / <nl> + bool can_clip_next_write_backfilling ( ) const { <nl> + return ! entries . empty ( ) & & <nl> + ( entries . size ( ) > 1 | | entries [ 0 ] . first = = store_region . inner . right ) ; <nl> + } <nl> + <nl> + / * Similar to ` clip_next_write_backfilling ( ) ` , except that if the backfill <nl> + hasn ' t sent us any changes with timestamps equal to or greater than the next <nl> + write , it will assume that the timestamps in all yet - to - be - backfilled regions <nl> + will be equal to or greater than the next write . Therefore , this is only safe to <nl> + use in ` PAUSED ` mode ; when we exit ` PAUSED ` mode , we ' ll ensure that the backfill <nl> + resumes from a timestamp equal to or greater than the next write . * / <nl> + void clip_next_write_paused ( <nl> + state_timestamp_t timestamp , region_t * region_out ) { <nl> + guarantee ( timestamp = = prev_timestamp . next ( ) , " sanity check failed " ) ; <nl> + if ( entries . empty ( ) | | entries [ 0 ] . second > prev_timestamp ) { <nl> + * region_out = region_t : : empty ( ) ; <nl> + return ; <nl> + } <nl> + guarantee ( entries [ 0 ] . second = = prev_timestamp ) ; <nl> + * region_out = store_region ; <nl> + region_out - > inner . right = entries . front ( ) . first ; <nl> + } <nl> + <nl> + private : <nl> + / * Same as ` remote_replicator_client_t : : store_ - > get_region ( ) ` . * / <nl> + region_t store_region ; <nl> + <nl> + / * The timestamp of the next streaming write that should be applied . * / <nl> + state_timestamp_t prev_timestamp ; <nl> + <nl> + / * Each entry in ` entries ` represents a possibly - empty range . The right bound of <nl> + the range is the ` right_bound_t ` on the entry ; the left bound of the range is the <nl> + ` right_bound_t ` on the previous entry , or ` store_region . inner . left ` for the very <nl> + first entry . There will always be at least one entry . <nl> + <nl> + The entries ' ` right_bound_t ` s and timestamps will be strictly monotonically <nl> + increasing , but the difference between adjacent entries ' timestamps may be more <nl> + than one . * / <nl> + std : : deque < std : : pair < key_range_t : : right_bound_t , state_timestamp_t > > entries ; <nl> + } ; <nl> + <nl> remote_replicator_client_t : : remote_replicator_client_t ( <nl> backfill_throttler_t * backfill_throttler , <nl> const backfill_config_t & backfill_config , <nl> remote_replicator_client_t : : remote_replicator_client_t ( <nl> backfill . 
* / <nl> store - > wait_until_ok_to_receive_backfill ( interruptor ) ; <nl> <nl> - / * We ' re about to start subscribing to the stream of writes coming from the primary , <nl> - but first we want to grab the mutex so they ' ll queue up until we ' re ready to start <nl> - processing them . * / <nl> - scoped_ptr_t < rwlock_acq_t > rwlock_acq ( <nl> - new rwlock_acq_t ( & rwlock_ , access_t : : write , interruptor ) ) ; <nl> - <nl> / * Subscribe to the stream of writes coming from the primary * / <nl> remote_replicator_client_intro_t intro ; <nl> { <nl> remote_replicator_client_t : : remote_replicator_client_t ( <nl> callback_t ( remote_replicator_client_t * p , signal_t * ps ) : <nl> parent ( p ) , pause_signal ( ps ) { } <nl> bool on_progress ( const region_map_t < version_t > & chunk ) THROWS_NOTHING { <nl> - mutex_assertion_t : : acq_t mutex_assertion_acq ( & mutex_assertion_ ) ; <nl> + mutex_assertion_t : : acq_t mutex_assertion_acq ( & parent - > mutex_assertion_ ) ; <nl> rassert ( parent - > next_write_waiter_ = = nullptr | | <nl> parent - > next_write_waiter_ - > is_pulsed ( ) | | <nl> ! parent - > next_write_can_proceed ( & mutex_assertion_acq ) , <nl> remote_replicator_client_t : : remote_replicator_client_t ( <nl> } ) ; <nl> if ( parent - > next_write_can_proceed ( & mutex_assertion_acq ) ) { <nl> if ( parent - > next_write_waiter_ ! = nullptr ) { <nl> - parent - > next_write_waiter - > pulse_if_not_already_pulsed ( ) ; <nl> + parent - > next_write_waiter_ - > pulse_if_not_already_pulsed ( ) ; <nl> } <nl> } <nl> / * If the backfill throttler is telling us to pause , then interrupt <nl> remote_replicator_client_t : : remote_replicator_client_t ( <nl> guarantee ( mode_ = = backfill_mode_t : : BACKFILLING ) ; <nl> mode_ = backfill_mode_t : : PAUSED ; <nl> if ( next_write_waiter_ ! = nullptr ) { <nl> - next_write_waiter - > pulse_if_not_already_pulsed ( ) ; <nl> + next_write_waiter_ - > pulse_if_not_already_pulsed ( ) ; <nl> } <nl> } <nl> } <nl> remote_replicator_client_t : : remote_replicator_client_t ( <nl> rwlock_acq_t rwlock_acq ( & rwlock_ , access_t : : write , interruptor ) ; <nl> mutex_assertion_t : : acq_t mutex_assertion_acq ( & mutex_assertion_ ) ; <nl> <nl> - guarantee ( tracker_ - > is_finished ( ) ) ; <nl> - guarantee ( tracker_ - > get_prev_timestamp ( ) . next ( ) = = <nl> + guarantee ( tracker_ - > is_homogeneous ( ) ) ; <nl> + guarantee ( tracker_ - > get_prev_timestamp ( ) = = <nl> timestamp_enforcer_ - > get_latest_all_before_completed ( ) ) ; <nl> tracker_ . reset ( ) ; / * we don ' t need it anymore * / <nl> <nl> remote_replicator_client_t : : remote_replicator_client_t ( <nl> mode_ = backfill_mode_t : : STREAMING ; <nl> <nl> if ( next_write_waiter_ ! = nullptr ) { <nl> - next_write_waiter - > pulse_if_not_already_pulsed ( ) ; <nl> + next_write_waiter_ - > pulse_if_not_already_pulsed ( ) ; <nl> } <nl> } <nl> <nl> remote_replicator_client_t : : remote_replicator_client_t ( <nl> send ( mailbox_manager , intro . 
ready_mailbox ) ; <nl> } <nl> <nl> + remote_replicator_client_t : : ~ remote_replicator_client_t ( ) { <nl> + / * The destructor is declared here instead of the header file so that we can see the <nl> + destructor for ` timestamp_range_tracker_t ` * / <nl> + } <nl> + <nl> void remote_replicator_client_t : : on_write_async ( <nl> signal_t * interruptor , <nl> write_t & & write , <nl> void remote_replicator_client_t : : on_write_async ( <nl> rwlock_acq_t rwlock_acq ( & rwlock_ , access_t : : read , interruptor ) ; <nl> <nl> mutex_assertion_t : : acq_t mutex_assertion_acq ( & mutex_assertion_ ) ; <nl> - if ( ! next_write_can_proceed ( & mutex_assertion_acq ) { <nl> + if ( ! next_write_can_proceed ( & mutex_assertion_acq ) ) { <nl> cond_t cond ; <nl> guarantee ( next_write_waiter_ = = nullptr ) ; <nl> assignment_sentry_t < cond_t * > cond_sentry ( & next_write_waiter_ , & cond ) ; <nl> void remote_replicator_client_t : : on_write_async ( <nl> / * Once the constructor is done , all writes will take this branch ; it ' s the <nl> common case . * / <nl> timestamp_enforcer_ - > complete ( timestamp ) ; <nl> + mutex_assertion_acq . reset ( ) ; <nl> rwlock_acq . reset ( ) ; <nl> <nl> write_response_t dummy_response ; <nl> void remote_replicator_client_t : : on_write_async ( <nl> write_token_t token ; <nl> store_ - > new_write_token ( & token ) ; <nl> timestamp_enforcer_ - > complete ( timestamp ) ; <nl> + mutex_assertion_acq . reset ( ) ; <nl> rwlock_acq . reset ( ) ; <nl> <nl> if ( ! region_is_empty ( clip_region ) ) { <nl> region_map_t < binary_blob_t > new_metainfo ( <nl> - clip_region , binary_blob_t ( version_t ( branch_id , timestamp ) ) ) ; <nl> + clip_region , binary_blob_t ( version_t ( branch_id_ , timestamp ) ) ) ; <nl> write_t subwrite ; <nl> - if ( write . shard ( & subwrite , clip_region ) ) { <nl> + if ( write . shard ( clip_region , & subwrite ) ) { <nl> # ifndef NDEBUG <nl> - metainfo_checker_t checker ( region , <nl> + metainfo_checker_t checker ( clip_region , <nl> [ & ] ( const region_t & , const binary_blob_t & bb ) { <nl> rassert ( bb = = binary_blob_t ( <nl> - version_t ( branch_id , timestamp . pred ( ) ) ) ) ; <nl> + version_t ( branch_id_ , timestamp . pred ( ) ) ) ) ; <nl> } ) ; <nl> # endif <nl> write_response_t dummy_response ; <nl> - store - > write ( DEBUG_ONLY ( checker , ) new_metainfo , write , & dummy_response , <nl> - write_durability_t : : SOFT , timestamp , order_token , token , <nl> + store_ - > write ( DEBUG_ONLY ( checker , ) new_metainfo , write , & dummy_response , <nl> + write_durability_t : : SOFT , timestamp , order_token , & token , <nl> interruptor ) ; <nl> } else { <nl> - store - > set_metainfo ( new_metainfo , order_token , token , <nl> + store_ - > set_metainfo ( new_metainfo , order_token , & token , <nl> write_durability_t : : SOFT , interruptor ) ; <nl> } <nl> } <nl> void remote_replicator_client_t : : on_read ( <nl> } <nl> <nl> bool remote_replicator_client_t : : next_write_can_proceed ( <nl> - const mutex_assertion_t : : acq_t * mutex_acq ) { <nl> + mutex_assertion_t : : acq_t * mutex_assertion_acq ) { <nl> + mutex_assertion_acq - > assert_is_holding ( & mutex_assertion_ ) ; <nl> return mode_ ! = backfill_mode_t : : BACKFILLING | | <nl> - tracker_ - > can_process_next_write_backfilling ( ) ; <nl> + tracker_ - > can_clip_next_write_backfilling ( ) ; <nl> } <nl> <nl> mmm a / src / clustering / immediate_consistency / remote_replicator_client . hpp <nl> ppp b / src / clustering / immediate_consistency / remote_replicator_client . 
hpp <nl> class remote_replicator_client_t { <nl> <nl> signal_t * interruptor ) THROWS_ONLY ( interrupted_exc_t ) ; <nl> <nl> + ~ remote_replicator_client_t ( ) ; <nl> + <nl> private : <nl> + class timestamp_range_tracker_t ; <nl> + <nl> / * ` on_write_async ( ) ` , ` on_write_sync ( ) ` , and ` on_read ( ) ` are mailbox callbacks for <nl> ` write_async_mailbox_ ` , ` write_sync_mailbox_ ` , and ` read_mailbox_ ` . * / <nl> void on_write_async ( <nl> class remote_replicator_client_t { <nl> backfill_mode_t mode_ ; <nl> <nl> / * ` timestamp_range_tracker_t ` is essentially a ` region_map_t < state_timestamp_t > ` , <nl> - but in a different format . The domain is the region that has been backfilled thus <nl> - far ; the values are equal to the current timestamps in the B - tree metainfo . * / <nl> - class timestamp_range_tracker_t { <nl> - public : <nl> - timestamp_range_tracker_t ( <nl> - const region_t & _store_region , <nl> - state_timestamp_t _prev_timestamp ) : <nl> - store_region ( _store_region ) , <nl> - prev_timestamp ( _prev_timestamp ) { } <nl> - <nl> - / * Records that the backfill has copied values up to the given timestamp for the <nl> - given range . * / <nl> - void record_backfill ( const region_t & region , const timestamp_t & ts ) { <nl> - rassert ( region . beg = = store_region . beg ) ; <nl> - rassert ( region . end = = store_region . end ) ; <nl> - rassert ( key_range_t : : right_bound_t ( region . left ) = = <nl> - ( entries . empty ( ) <nl> - ? key_range_t : : right_bound_t ( store_region . left ) <nl> - : entries . back ( ) . first ) ) ; <nl> - rassert ( region . right < = store_region . right ) ; <nl> - if ( entries . empty ( ) | | entries . back ( ) . second ! = ts ) { <nl> - entries . push_back ( std : : make_pair ( region . right , ts ) ) ; <nl> - } else { <nl> - / * Rather than making a second entry with the same timestamp , coalesce <nl> - the two entries . * / <nl> - entries . back ( ) . first = region . right ; <nl> - } <nl> - } <nl> - <nl> - / * Records that the write with the given timestamp has been applies in the given <nl> - region . * / <nl> - void record_write ( const region_t & region , state_timestamp_t ts ) { <nl> - rassert ( ts = = prev_timestamp . next ( ) ) ; <nl> - prev_timestamp = ts ; <nl> - if ( region_is_empty ( region ) ) { <nl> - rassert ( entries . empty ( ) | | entries [ 0 ] . second > = ts ) ; <nl> - return ; <nl> - } <nl> - rassert ( region . beg = = store_region . beg ) ; <nl> - rassert ( region . end = = store_region . end ) ; <nl> - rassert ( region . inner . left = = store_region . inner . left ) ; <nl> - rassert ( region . inner . right = = entries [ 0 ] . first ) ; <nl> - entries [ 0 ] . second = timestamp ; <nl> - if ( entries . size ( ) > 1 & & entries [ 1 ] . second = = timestamp ) { <nl> - entries . pop_front ( ) ; <nl> - } <nl> - } <nl> - <nl> - / * Returns the timestamp of the last streaming write processed . * / <nl> - state_timestamp_t get_prev_timestamp ( ) const { <nl> - return prev_timestamp ; <nl> - } <nl> - <nl> - / * Returns the highest timestamp present in the map * / <nl> - state_timestamp_t get_max_timestamp ( ) const { <nl> - if ( entries . empty ( ) ) { <nl> - return prev_timestamp ; <nl> - } else { <nl> - return entries . back ( ) . second ; <nl> - } <nl> - } <nl> - <nl> - / * Returns the rightmost point we ' ve backfilled to so far * / <nl> - key_range_t : : right_bound_t get_backfill_threshold ( ) const { <nl> - if ( entries . empty ( ) ) { <nl> - return key_range_t : : right_bound_t ( store_region . 
left ) ; <nl> - } else { <nl> - return entries . back ( ) . first ; <nl> - } <nl> - } <nl> - <nl> - / * Returns ` true ` if the timestamp is consistent throughout the entire region * / <nl> - bool is_homogeneous ( ) const { <nl> - return ! entries . empty ( ) & & entries [ 0 ] . first = = store_region . inner . right ; <nl> - } <nl> - <nl> - / * Given the next streaming write , extracts the part of it that should be applied <nl> - such that the streaming write and the backfill together will neither skip nor <nl> - duplicate any change . If the backfill hasn ' t sent us any changes with timestamps <nl> - equal to or greater than the next write , we wouldn ' t be able to determine where <nl> - to clip the write , so this will crash . It also updates the <nl> - ` timestamp_range_tracker_t ` ' s internal record to reflect the fact that the write <nl> - will be applied to the store . * / <nl> - void clip_next_write_backfilling ( <nl> - state_timestamp_t timestamp , region_t * region_out ) { <nl> - guarantee ( can_process_next_write_backfilling ( ) ) ; <nl> - clip_next_write_paused ( timestamp , region_out ) ; <nl> - } <nl> - <nl> - / * Returns ` true ` if the backfill has sent us some changes with timestamps equal <nl> - to or greater than the next write . * / <nl> - bool can_clip_next_write_backfilling ( ) const { <nl> - return ! entries . empty ( ) & & <nl> - ( entries . size ( ) > 1 | | entries [ 0 ] . first = = store_region . inner . right ) ; <nl> - } <nl> - <nl> - / * Similar to ` clip_next_write_backfilling ( ) ` , except that if the backfill <nl> - hasn ' t sent us any changes with timestamps equal to or greater than the next <nl> - write , it will assume that the timestamps in all yet - to - be - backfilled regions <nl> - will be equal to or greater than the next write . Therefore , this is only safe to <nl> - use in ` PAUSED ` mode ; when we exit ` PAUSED ` mode , we ' ll ensure that the backfill <nl> - resumes from a timestamp equal to or greater than the next write . * / <nl> - void clip_next_write_paused ( <nl> - state_timestamp_t timestamp , region_t * region_out ) { <nl> - guarantee ( timestamp = = prev_timestamp . next ( ) , " sanity check failed " ) ; <nl> - if ( entries . empty ( ) | | entries [ 0 ] . second > prev_timestamp ) { <nl> - * region_out = region_t : : empty ( ) ; <nl> - return ; <nl> - } <nl> - guarantee ( entries [ 0 ] . second = = prev_timestamp ) ; <nl> - * region_out = store_region ; <nl> - region_out - > inner . right = entries . front ( ) . first ; <nl> - } <nl> - <nl> - private : <nl> - / * Same as ` remote_replicator_client_t : : store_ - > get_region ( ) ` . * / <nl> - region_t store_region ; <nl> - <nl> - / * The timestamp of the next streaming write that should be applied . * / <nl> - state_timestamp_t prev_timestamp ; <nl> - <nl> - / * Each entry in ` entries ` represents a possibly - empty range . The right bound of <nl> - the range is the ` right_bound_t ` on the entry ; the left bound of the range is the <nl> - ` right_bound_t ` on the previous entry , or ` store_region . inner . left ` for the very <nl> - first entry . There will always be at least one entry . <nl> - <nl> - The entries ' ` right_bound_t ` s and timestamps will be strictly monotonically <nl> - increasing , but the difference between adjacent entries ' timestamps may be more <nl> - than one . 
* / <nl> - std : : deque < std : : pair < key_range_t : : right_bound_t , state_timestamp_t > > entries ; <nl> - <nl> - } ; <nl> + but in a different format and optimized for this specific use case . The domain of <nl> + ` tracker_ ` is the region that has been backfilled thus far ; the values are equal to <nl> + the current timestamps in the B - tree metainfo . ` tracker_ ` exists only during the <nl> + backfill ; it gets destroyed after the backfill is over . * / <nl> scoped_ptr_t < timestamp_range_tracker_t > tracker_ ; <nl> <nl> / * Returns ` true ` if the next write can be applied now , instead of having to wait for <nl> the backfill to make more progress . * / <nl> - bool next_write_can_proceed ( const mutex_assertion_t : : acq_t * mutex_acq ) ; <nl> + bool next_write_can_proceed ( mutex_assertion_t : : acq_t * mutex_acq ) ; <nl> <nl> / * If the next write cannot proceed , it will set ` next_write_waiter_ ` and wait for it <nl> to be pulsed . * / <nl> | Make continuous backfilling pass tests . | rethinkdb/rethinkdb | 4b74ca0f10b1d4c3c69d0a22739f31cf7f84ed11 | 2015-06-29T21:48:11Z |
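Editor's note: alongside the compile fixes (missing `_` suffixes on member names, the corrected `write.shard(clip_region, &subwrite)` argument order, passing `&token`), this patch moves the nested `timestamp_range_tracker_t` definition from the header into the .cc file and adds an out-of-line `~remote_replicator_client_t()`. The explicit destructor is what makes holding a `scoped_ptr_t` to a forward-declared type legal: the deleter must run where the class is complete, exactly as the added comment in the patch says. A single-file sketch of the idiom under hypothetical names, with `std::unique_ptr` in place of `scoped_ptr_t`:

```cpp
#include <memory>

// ---- header (client.hpp): only a forward declaration is visible ----
class client_t {
public:
    client_t();
    ~client_t();  // declared here, defined below in the ".cc" portion
private:
    class tracker_t;                 // nested class, defined elsewhere
    std::unique_ptr<tracker_t> tracker_;
};

// ---- source (client.cc): the full definition lives here ----
class client_t::tracker_t {
public:
    int value = 0;
};

client_t::client_t() : tracker_(new tracker_t) {}

// The destructor must be defined *after* tracker_t is complete, because
// unique_ptr's deleter instantiates `delete` on the pointee here. If the
// destructor were implicitly generated from the header, the compiler
// would have to delete an incomplete type, which is ill-formed.
client_t::~client_t() = default;

int main() {
    client_t c;  // constructing and destroying compiles cleanly
}
```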
Binary files a / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_AN and b / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_AN differ <nl> Binary files a / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_ET and b / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_ET differ <nl> Binary files a / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_KW and b / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_KW differ <nl> Binary files a / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_MN and b / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_MN differ <nl> Binary files a / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_SX and b / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_SX differ <nl> Binary files a / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_TC and b / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_TC differ <nl> Binary files a / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_TL and b / java / libphonenumber / src / com / google / i18n / phonenumbers / data / PhoneNumberMetadataProto_TL differ <nl> mmm a / java / release_notes . txt <nl> ppp b / java / release_notes . txt <nl> <nl> + May 30th , 2012 : libphonenumber - 4 . 8 . 3 <nl> + * Metadata update only : <nl> + - AN , ET , KW , MN , SX , TC , TL <nl> + <nl> May 16th , 2012 : libphonenumber - 4 . 8 . 2 <nl> * Metadata update only : <nl> - BH , CR , EE , JO , KW , LA , QA , SI , TH <nl> mmm a / javascript / i18n / phonenumbers / metadata . js <nl> ppp b / javascript / i18n / phonenumbers / metadata . js <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> , [ , , " NA " , " NA " ] <nl> ] <nl> , " AN " : [ , [ , , " 5 \ \ d { 6 } " , " \ \ d { 7 } " ] <nl> - , [ , , " 5 ( ? : 4 \ \ d | 8 [ 239 ] ) \ \ d { 4 } " , " \ \ d { 7 } " , , , " 5451234 " ] <nl> - , [ , , " 5 ( ? : 1 [ 01 ] | 2 [ 0 - 7 ] | 5 \ \ d | 8 [ 016 - 8 ] ) \ \ d { 4 } " , " \ \ d { 7 } " , , , " 5101234 " ] <nl> + , [ , , " 5 ( ? : 4 [ 2 - 8 ] | 8 [ 239 ] ) \ \ d { 4 } " , " \ \ d { 7 } " , , , " 5451234 " ] <nl> + , [ , , " 5 ( ? : 1 [ 02 ] | 2 \ \ d | 5 [ 0 - 79 ] | 8 [ 016 - 8 ] ) \ \ d { 4 } " , " \ \ d { 7 } " , , , " 5101234 " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> ] <nl> , " ET " : [ , [ , , " [ 1 - 59 ] \ \ d { 8 } " , " \ \ d { 7 , 9 } " ] <nl> , [ , , " ( ? : 11 ( ? : 1 ( ? : 1 [ 124 ] | 2 [ 2 - 57 ] | 3 [ 1 - 5 ] | 5 [ 5 - 8 ] | 8 [ 6 - 8 ] ) | 2 ( ? : 13 | 3 [ 6 - 8 ] | 5 [ 89 ] | 7 [ 05 - 9 ] | 8 [ 2 - 6 ] ) | 3 ( ? : 2 [ 01 ] | 3 [ 0 - 289 ] | 4 [ 1289 ] | 7 [ 1 - 4 ] | 87 ) | 4 ( ? : 1 [ 69 ] | 3 [ 2 - 49 ] | 4 [ 0 - 3 ] | 6 [ 5 - 8 ] ) | 5 ( ? : 1 [ 57 ] | 44 | 5 [ 0 - 4 ] ) | 6 ( ? : 18 | 2 [ 69 ] | 4 [ 5 - 7 ] | 5 [ 1 - 5 ] | 6 [ 0 - 59 ] | 8 [ 015 - 8 ] ) ) | 2 ( ? : 2 ( ? : 11 [ 1 - 9 ] | 22 [ 0 - 7 ] | 33 \ \ d | 44 [ 1467 ] | 66 [ 1 - 68 ] ) | 5 ( ? 
: 11 [ 124 - 6 ] | 33 [ 2 - 8 ] | 44 [ 1467 ] | 55 [ 14 ] | 66 [ 1 - 3679 ] | 77 [ 124 - 79 ] | 880 ) ) | 3 ( ? : 3 ( ? : 11 [ 0 - 46 - 8 ] | 22 [ 0 - 6 ] | 33 [ 0134689 ] | 44 [ 04 ] | 55 [ 0 - 6 ] | 66 [ 01467 ] ) | 4 ( ? : 44 [ 0 - 8 ] | 55 [ 0 - 69 ] | 66 [ 0 - 3 ] | 77 [ 1 - 5 ] ) ) | 4 ( ? : 6 ( ? : 22 [ 0 - 24 - 7 ] | 33 [ 1 - 5 ] | 44 [ 13 - 69 ] | 55 [ 14 - 689 ] | 660 | 88 [ 1 - 4 ] ) | 7 ( ? : 11 [ 1 - 9 ] | 22 [ 1 - 9 ] | 33 [ 13 - 7 ] | 44 [ 13 - 6 ] | 55 [ 1 - 689 ] ) ) | 5 ( ? : 7 ( ? : 227 | 55 [ 05 ] | ( ? : 66 | 77 ) [ 14 - 8 ] ) | 8 ( ? : 11 [ 149 ] | 22 [ 013 - 79 ] | 33 [ 0 - 68 ] | 44 [ 013 - 8 ] | 550 | 66 [ 1 - 5 ] | 77 \ \ d ) ) ) \ \ d { 4 } " , " \ \ d { 7 , 9 } " , , , " 111112345 " ] <nl> - , [ , , " 9 [ 12 ] \ \ d { 7 } " , " \ \ d { 9 } " , , , " 911234567 " ] <nl> + , [ , , " 9 ( ? : [ 1 - 3 ] \ \ d | 5 [ 89 ] ) \ \ d { 6 } " , " \ \ d { 9 } " , , , " 911234567 " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> ] <nl> , " KW " : [ , [ , , " [ 12569 ] \ \ d { 6 , 7 } | 65816 \ \ d { 6 } " , " \ \ d { 7 , 8 } | \ \ d { 11 } " ] <nl> , [ , , " ( ? : 18 \ \ d | 2 ( ? : [ 23 ] \ \ d { 2 } | 4 [ 1 - 35 - 9 ] \ \ d | 5 ( ? : 0 [ 034 ] | [ 2 - 46 ] \ \ d | 5 [ 1 - 3 ] | 7 [ 1 - 7 ] ) ) ) \ \ d { 4 } " , " \ \ d { 7 , 8 } " , , , " 22345678 " ] <nl> - , [ , , " ( ? : 5 ( ? : 0 [ 0 - 2568 ] | 5 \ \ d ) | 6 ( ? : 0 [ 034679 ] | 5 ( ? : [ 015 - 79 ] | 8 ( ? : [ 02 - 9 ] | 1 [ 0 - 57 - 9 ] ) ) | 6 \ \ d | 7 [ 067 ] | 9 [ 69 ] ) | 9 ( ? : 0 [ 09 ] | 4 [ 049 ] | 6 [ 69 ] | [ 79 ] \ \ d ) ) \ \ d { 5 } " , " \ \ d { 8 } " , , , " 50012345 " ] <nl> + , [ , , " ( ? : 5 ( ? : 0 [ 0 - 2568 ] | 5 \ \ d ) | 6 ( ? : 0 [ 034679 ] | 5 ( ? : [ 015 - 79 ] | 8 ( ? : [ 02 - 9 ] | 1 [ 0 - 57 - 9 ] ) ) | 6 \ \ d | 7 [ 067 ] | 9 [ 069 ] ) | 9 ( ? : 0 [ 09 ] | 4 [ 049 ] | 6 [ 69 ] | [ 79 ] \ \ d ) ) \ \ d { 5 } " , " \ \ d { 8 } " , , , " 50012345 " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> , [ , , " NA " , " NA " ] <nl> ] <nl> , " MN " : [ , [ , , " [ 12 ] \ \ d { 7 , 9 } | [ 57 - 9 ] \ \ d { 7 } " , " \ \ d { 6 , 10 } " ] <nl> - , [ , , " [ 12 ] ( ? : 1 \ \ d | 2 ( ? : [ 1 - 3 ] \ \ d ? | 7 \ \ d ) | 3 [ 2 - 8 ] \ \ d { 1 , 2 } | 4 [ 2 - 68 ] \ \ d { 1 , 2 } | 5 [ 1 - 4689 ] \ \ d { 1 , 2 } ) \ \ d { 5 } | ( ? : 5 [ 0568 ] | 70 ) \ \ d { 6 } " , " \ \ d { 6 , 10 } " , , , " 70123456 " ] <nl> - , [ , , " ( ? : 8 [ 89 ] | 9 [ 15689 ] ) \ \ d { 6 } " , " \ \ d { 8 } " , , , " 88123456 " ] <nl> + , [ , , " [ 12 ] ( ? : 1 \ \ d | 2 ( ? : [ 1 - 3 ] \ \ d ? | 7 \ \ d ) | 3 [ 2 - 8 ] \ \ d { 1 , 2 } | 4 [ 2 - 68 ] \ \ d { 1 , 2 } | 5 [ 1 - 4689 ] \ \ d { 1 , 2 } ) \ \ d { 5 } | 5 [ 0568 ] \ \ d { 6 } " , " \ \ d { 6 , 10 } " , , , " 50123456 " ] <nl> + , [ , , " ( ? 
: 8 [ 89 ] | 9 [ 013 - 9 ] ) \ \ d { 6 } " , " \ \ d { 8 } " , , , " 88123456 " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> - , [ , , " 7 [ 569 ] \ \ d { 6 } " , " \ \ d { 8 } " , , , " 75123456 " ] <nl> + , [ , , " 7 [ 05 - 8 ] \ \ d { 6 } " , " \ \ d { 8 } " , , , " 75123456 " ] <nl> , " MN " , 976 , " 001 " , " 0 " , , , " 0 " , , , , [ [ , " ( [ 12 ] \ \ d ) ( \ \ d { 2 } ) ( \ \ d { 4 } ) " , " $ 1 $ 2 $ 3 " , [ " [ 12 ] 1 " ] <nl> , " 0 $ 1 " , " " , 0 ] <nl> , [ , " ( [ 12 ] 2 \ \ d ) ( \ \ d { 5 , 6 } ) " , " $ 1 $ 2 " , [ " [ 12 ] 2 [ 1 - 3 ] " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> , [ , , " NA " , " NA " ] <nl> ] <nl> , " SX " : [ , [ , , " [ 5789 ] \ \ d { 9 } " , " \ \ d { 7 } ( ? : \ \ d { 3 } ) ? " ] <nl> - , [ , , " 7215 ( ? : 4 [ 2 - 8 ] | 8 [ 239 ] ) \ \ d { 4 } " , " \ \ d { 7 } ( ? : \ \ d { 3 } ) ? " , , , " 7215425678 " ] <nl> - , [ , , " 7215 ( ? : 1 [ 02 ] | 2 \ \ d | 5 [ 03469 ] | 8 [ 01678 ] ) \ \ d { 4 } " , " \ \ d { 10 } " , , , " 7215205678 " ] <nl> + , [ , , " 7215 ( ? : 4 [ 2 - 8 ] | 8 [ 239 ] | 9 [ 056 ] ) \ \ d { 4 } " , " \ \ d { 7 } ( ? : \ \ d { 3 } ) ? " , , , " 7215425678 " ] <nl> + , [ , , " 7215 ( ? : 1 [ 02 ] | 2 \ \ d | 5 [ 034679 ] | 8 [ 014 - 8 ] ) \ \ d { 4 } " , " \ \ d { 10 } " , , , " 7215205678 " ] <nl> , [ , , " 8 ( ? : 00 | 55 | 66 | 77 | 88 ) [ 2 - 9 ] \ \ d { 6 } " , " \ \ d { 10 } " , , , " 8002123456 " ] <nl> , [ , , " 900 [ 2 - 9 ] \ \ d { 6 } " , " \ \ d { 10 } " , , , " 9002123456 " ] <nl> , [ , , " NA " , " NA " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> ] <nl> , " TC " : [ , [ , , " [ 5689 ] \ \ d { 9 } " , " \ \ d { 7 } ( ? : \ \ d { 3 } ) ? " ] <nl> , [ , , " 649 ( ? : 712 | 9 ( ? : 4 \ \ d | 50 ) ) \ \ d { 4 } " , " \ \ d { 7 } ( ? : \ \ d { 3 } ) ? " , , , " 6497121234 " ] <nl> - , [ , , " 649 ( ? : 2 ( ? : 3 [ 12 ] | 4 [ 1 - 7 ] ) | 3 ( ? : 3 [ 1 - 39 ] | 4 [ 1 - 7 ] ) | 4 [ 34 ] [ 12 ] ) \ \ d { 4 } " , " \ \ d { 10 } " , , , " 6492311234 " ] <nl> + , [ , , " 649 ( ? : 2 ( ? : 3 [ 129 ] | 4 [ 1 - 7 ] ) | 3 ( ? : 3 [ 1 - 39 ] | 4 [ 1 - 7 ] ) | 4 [ 34 ] [ 12 ] ) \ \ d { 4 } " , " \ \ d { 10 } " , , , " 6492311234 " ] <nl> , [ , , " 8 ( ? : 00 | 55 | 66 | 77 | 88 ) [ 2 - 9 ] \ \ d { 6 } " , " \ \ d { 10 } " , , , " 8002345678 " ] <nl> , [ , , " 900 [ 2 - 9 ] \ \ d { 6 } " , " \ \ d { 10 } " , , , " 9002345678 " ] <nl> , [ , , " NA " , " NA " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> ] <nl> , " TL " : [ , [ , , " [ 2 - 47 - 9 ] \ \ d { 6 } " , " \ \ d { 7 } " ] <nl> , [ , , " ( ? : 2 [ 1 - 5 ] | 3 [ 1 - 9 ] | 4 [ 1 - 4 ] ) \ \ d { 5 } " , " \ \ d { 7 } " , , , " 2112345 " ] <nl> - , [ , , " 7 [ 2 - 4 ] \ \ d { 5 } " , " \ \ d { 7 } " , , , " 7212345 " ] <nl> + , [ , , " 7 [ 2 - 49 ] \ \ d { 5 } " , " \ \ d { 7 } " , , , " 7212345 " ] <nl> , [ , , " 80 \ \ d { 5 } " , " \ \ d { 7 } " , , , " 8012345 " ] <nl> , [ , , " 90 \ \ d { 5 } " , " \ \ d { 7 } " , , , " 9012345 " ] <nl> , [ , , " NA " , " NA " ] <nl> mmm a / javascript / i18n / phonenumbers / metadatalite . js <nl> ppp b / javascript / i18n / phonenumbers / metadatalite . js <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> , [ , , " NA " , " NA " ] <nl> ] <nl> , " AN " : [ , [ , , " 5 \ \ d { 6 } " , " \ \ d { 7 } " ] <nl> - , [ , , " 5 ( ? : 4 \ \ d | 8 [ 239 ] ) \ \ d { 4 } " , " \ \ d { 7 } " ] <nl> - , [ , , " 5 ( ? 
: 1 [ 01 ] | 2 [ 0 - 7 ] | 5 \ \ d | 8 [ 016 - 8 ] ) \ \ d { 4 } " , " \ \ d { 7 } " ] <nl> + , [ , , " 5 ( ? : 4 [ 2 - 8 ] | 8 [ 239 ] ) \ \ d { 4 } " , " \ \ d { 7 } " ] <nl> + , [ , , " 5 ( ? : 1 [ 02 ] | 2 \ \ d | 5 [ 0 - 79 ] | 8 [ 016 - 8 ] ) \ \ d { 4 } " , " \ \ d { 7 } " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> ] <nl> , " ET " : [ , [ , , " [ 1 - 59 ] \ \ d { 8 } " , " \ \ d { 7 , 9 } " ] <nl> , [ , , " ( ? : 11 ( ? : 1 ( ? : 1 [ 124 ] | 2 [ 2 - 57 ] | 3 [ 1 - 5 ] | 5 [ 5 - 8 ] | 8 [ 6 - 8 ] ) | 2 ( ? : 13 | 3 [ 6 - 8 ] | 5 [ 89 ] | 7 [ 05 - 9 ] | 8 [ 2 - 6 ] ) | 3 ( ? : 2 [ 01 ] | 3 [ 0 - 289 ] | 4 [ 1289 ] | 7 [ 1 - 4 ] | 87 ) | 4 ( ? : 1 [ 69 ] | 3 [ 2 - 49 ] | 4 [ 0 - 3 ] | 6 [ 5 - 8 ] ) | 5 ( ? : 1 [ 57 ] | 44 | 5 [ 0 - 4 ] ) | 6 ( ? : 18 | 2 [ 69 ] | 4 [ 5 - 7 ] | 5 [ 1 - 5 ] | 6 [ 0 - 59 ] | 8 [ 015 - 8 ] ) ) | 2 ( ? : 2 ( ? : 11 [ 1 - 9 ] | 22 [ 0 - 7 ] | 33 \ \ d | 44 [ 1467 ] | 66 [ 1 - 68 ] ) | 5 ( ? : 11 [ 124 - 6 ] | 33 [ 2 - 8 ] | 44 [ 1467 ] | 55 [ 14 ] | 66 [ 1 - 3679 ] | 77 [ 124 - 79 ] | 880 ) ) | 3 ( ? : 3 ( ? : 11 [ 0 - 46 - 8 ] | 22 [ 0 - 6 ] | 33 [ 0134689 ] | 44 [ 04 ] | 55 [ 0 - 6 ] | 66 [ 01467 ] ) | 4 ( ? : 44 [ 0 - 8 ] | 55 [ 0 - 69 ] | 66 [ 0 - 3 ] | 77 [ 1 - 5 ] ) ) | 4 ( ? : 6 ( ? : 22 [ 0 - 24 - 7 ] | 33 [ 1 - 5 ] | 44 [ 13 - 69 ] | 55 [ 14 - 689 ] | 660 | 88 [ 1 - 4 ] ) | 7 ( ? : 11 [ 1 - 9 ] | 22 [ 1 - 9 ] | 33 [ 13 - 7 ] | 44 [ 13 - 6 ] | 55 [ 1 - 689 ] ) ) | 5 ( ? : 7 ( ? : 227 | 55 [ 05 ] | ( ? : 66 | 77 ) [ 14 - 8 ] ) | 8 ( ? : 11 [ 149 ] | 22 [ 013 - 79 ] | 33 [ 0 - 68 ] | 44 [ 013 - 8 ] | 550 | 66 [ 1 - 5 ] | 77 \ \ d ) ) ) \ \ d { 4 } " , " \ \ d { 7 , 9 } " ] <nl> - , [ , , " 9 [ 12 ] \ \ d { 7 } " , " \ \ d { 9 } " ] <nl> + , [ , , " 9 ( ? : [ 1 - 3 ] \ \ d | 5 [ 89 ] ) \ \ d { 6 } " , " \ \ d { 9 } " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> ] <nl> , " KW " : [ , [ , , " [ 12569 ] \ \ d { 6 , 7 } | 65816 \ \ d { 6 } " , " \ \ d { 7 , 8 } | \ \ d { 11 } " ] <nl> , [ , , " ( ? : 18 \ \ d | 2 ( ? : [ 23 ] \ \ d { 2 } | 4 [ 1 - 35 - 9 ] \ \ d | 5 ( ? : 0 [ 034 ] | [ 2 - 46 ] \ \ d | 5 [ 1 - 3 ] | 7 [ 1 - 7 ] ) ) ) \ \ d { 4 } " , " \ \ d { 7 , 8 } " ] <nl> - , [ , , " ( ? : 5 ( ? : 0 [ 0 - 2568 ] | 5 \ \ d ) | 6 ( ? : 0 [ 034679 ] | 5 ( ? : [ 015 - 79 ] | 8 ( ? : [ 02 - 9 ] | 1 [ 0 - 57 - 9 ] ) ) | 6 \ \ d | 7 [ 067 ] | 9 [ 69 ] ) | 9 ( ? : 0 [ 09 ] | 4 [ 049 ] | 6 [ 69 ] | [ 79 ] \ \ d ) ) \ \ d { 5 } " , " \ \ d { 8 } " ] <nl> + , [ , , " ( ? : 5 ( ? : 0 [ 0 - 2568 ] | 5 \ \ d ) | 6 ( ? : 0 [ 034679 ] | 5 ( ? : [ 015 - 79 ] | 8 ( ? : [ 02 - 9 ] | 1 [ 0 - 57 - 9 ] ) ) | 6 \ \ d | 7 [ 067 ] | 9 [ 069 ] ) | 9 ( ? : 0 [ 09 ] | 4 [ 049 ] | 6 [ 69 ] | [ 79 ] \ \ d ) ) \ \ d { 5 } " , " \ \ d { 8 } " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> , [ , , " NA " , " NA " ] <nl> ] <nl> , " MN " : [ , [ , , " [ 12 ] \ \ d { 7 , 9 } | [ 57 - 9 ] \ \ d { 7 } " , " \ \ d { 6 , 10 } " ] <nl> - , [ , , " [ 12 ] ( ? : 1 \ \ d | 2 ( ? : [ 1 - 3 ] \ \ d ? | 7 \ \ d ) | 3 [ 2 - 8 ] \ \ d { 1 , 2 } | 4 [ 2 - 68 ] \ \ d { 1 , 2 } | 5 [ 1 - 4689 ] \ \ d { 1 , 2 } ) \ \ d { 5 } | ( ? : 5 [ 0568 ] | 70 ) \ \ d { 6 } " , " \ \ d { 6 , 10 } " ] <nl> - , [ , , " ( ? 
: 8 [ 89 ] | 9 [ 15689 ] ) \ \ d { 6 } " , " \ \ d { 8 } " ] <nl> + , [ , , " [ 12 ] ( ? : 1 \ \ d | 2 ( ? : [ 1 - 3 ] \ \ d ? | 7 \ \ d ) | 3 [ 2 - 8 ] \ \ d { 1 , 2 } | 4 [ 2 - 68 ] \ \ d { 1 , 2 } | 5 [ 1 - 4689 ] \ \ d { 1 , 2 } ) \ \ d { 5 } | 5 [ 0568 ] \ \ d { 6 } " , " \ \ d { 6 , 10 } " ] <nl> + , [ , , " ( ? : 8 [ 89 ] | 9 [ 013 - 9 ] ) \ \ d { 6 } " , " \ \ d { 8 } " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> , [ , , " NA " , " NA " ] <nl> - , [ , , " 7 [ 569 ] \ \ d { 6 } " , " \ \ d { 8 } " ] <nl> + , [ , , " 7 [ 05 - 8 ] \ \ d { 6 } " , " \ \ d { 8 } " ] <nl> , " MN " , 976 , " 001 " , " 0 " , , , " 0 " , , , , [ [ , " ( [ 12 ] \ \ d ) ( \ \ d { 2 } ) ( \ \ d { 4 } ) " , " $ 1 $ 2 $ 3 " , [ " [ 12 ] 1 " ] <nl> , " 0 $ 1 " , " " , 0 ] <nl> , [ , " ( [ 12 ] 2 \ \ d ) ( \ \ d { 5 , 6 } ) " , " $ 1 $ 2 " , [ " [ 12 ] 2 [ 1 - 3 ] " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> , [ , , " NA " , " NA " ] <nl> ] <nl> , " SX " : [ , [ , , " [ 5789 ] \ \ d { 9 } " , " \ \ d { 7 } ( ? : \ \ d { 3 } ) ? " ] <nl> - , [ , , " 7215 ( ? : 4 [ 2 - 8 ] | 8 [ 239 ] ) \ \ d { 4 } " , " \ \ d { 7 } ( ? : \ \ d { 3 } ) ? " ] <nl> - , [ , , " 7215 ( ? : 1 [ 02 ] | 2 \ \ d | 5 [ 03469 ] | 8 [ 01678 ] ) \ \ d { 4 } " , " \ \ d { 10 } " ] <nl> + , [ , , " 7215 ( ? : 4 [ 2 - 8 ] | 8 [ 239 ] | 9 [ 056 ] ) \ \ d { 4 } " , " \ \ d { 7 } ( ? : \ \ d { 3 } ) ? " ] <nl> + , [ , , " 7215 ( ? : 1 [ 02 ] | 2 \ \ d | 5 [ 034679 ] | 8 [ 014 - 8 ] ) \ \ d { 4 } " , " \ \ d { 10 } " ] <nl> , [ , , " 8 ( ? : 00 | 55 | 66 | 77 | 88 ) [ 2 - 9 ] \ \ d { 6 } " , " \ \ d { 10 } " ] <nl> , [ , , " 900 [ 2 - 9 ] \ \ d { 6 } " , " \ \ d { 10 } " ] <nl> , [ , , " NA " , " NA " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> ] <nl> , " TC " : [ , [ , , " [ 5689 ] \ \ d { 9 } " , " \ \ d { 7 } ( ? : \ \ d { 3 } ) ? " ] <nl> , [ , , " 649 ( ? : 712 | 9 ( ? : 4 \ \ d | 50 ) ) \ \ d { 4 } " , " \ \ d { 7 } ( ? : \ \ d { 3 } ) ? " ] <nl> - , [ , , " 649 ( ? : 2 ( ? : 3 [ 12 ] | 4 [ 1 - 7 ] ) | 3 ( ? : 3 [ 1 - 39 ] | 4 [ 1 - 7 ] ) | 4 [ 34 ] [ 12 ] ) \ \ d { 4 } " , " \ \ d { 10 } " ] <nl> + , [ , , " 649 ( ? : 2 ( ? : 3 [ 129 ] | 4 [ 1 - 7 ] ) | 3 ( ? : 3 [ 1 - 39 ] | 4 [ 1 - 7 ] ) | 4 [ 34 ] [ 12 ] ) \ \ d { 4 } " , " \ \ d { 10 } " ] <nl> , [ , , " 8 ( ? : 00 | 55 | 66 | 77 | 88 ) [ 2 - 9 ] \ \ d { 6 } " , " \ \ d { 10 } " ] <nl> , [ , , " 900 [ 2 - 9 ] \ \ d { 6 } " , " \ \ d { 10 } " ] <nl> , [ , , " NA " , " NA " ] <nl> i18n . phonenumbers . metadata . countryToMetadata = { <nl> ] <nl> , " TL " : [ , [ , , " [ 2 - 47 - 9 ] \ \ d { 6 } " , " \ \ d { 7 } " ] <nl> , [ , , " ( ? : 2 [ 1 - 5 ] | 3 [ 1 - 9 ] | 4 [ 1 - 4 ] ) \ \ d { 5 } " , " \ \ d { 7 } " ] <nl> - , [ , , " 7 [ 2 - 4 ] \ \ d { 5 } " , " \ \ d { 7 } " ] <nl> + , [ , , " 7 [ 2 - 49 ] \ \ d { 5 } " , " \ \ d { 7 } " ] <nl> , [ , , " 80 \ \ d { 5 } " , " \ \ d { 7 } " ] <nl> , [ , , " 90 \ \ d { 5 } " , " \ \ d { 7 } " ] <nl> , [ , , " NA " , " NA " ] <nl> mmm a / resources / PhoneNumberMetaData . xml <nl> ppp b / resources / PhoneNumberMetaData . xml <nl> <nl> <nl> < ! - - Netherlands Antilles - - > <nl> < ! - - Note this country no longer exists . This metadata exists only to validate old Sint Maarten <nl> - numbers through their parallel running period ( which ends Sep 2012 ) . - - > <nl> + numbers through their parallel running period ( which ends Sep 2012 ) . 
New number ranges <nl> + added to Sint Maarten during this time will be added to the new territory only and not <nl> + here . - - > <nl> < ! - - The link below no longer works , since ITU deleted this document since this country no <nl> longer exists . We keep it here as a record of the last place we found information on this <nl> country . - - > <nl> <nl> < fixedLine > <nl> < nationalNumberPattern > <nl> 5 ( ? : <nl> - 4 \ d | <nl> + 4 [ 2 - 8 ] | <nl> 8 [ 239 ] <nl> ) \ d { 4 } <nl> < / nationalNumberPattern > <nl> < exampleNumber > 5451234 < / exampleNumber > <nl> < / fixedLine > <nl> < mobile > <nl> + < ! - - The code 555 was NOT migrated when the new country calling code ( + 1 721 ) was assigned <nl> + for Sint Maarten , so it is supported here but not there . The codes 55 [ 12 ] also seem <nl> + to not have been migrated : although they were not explicitly mentioned in the migration <nl> + instructions , they are not in the new plan for SX . - - > <nl> < nationalNumberPattern > <nl> 5 ( ? : <nl> - 1 [ 01 ] | <nl> - 2 [ 0 - 7 ] | <nl> - 5 \ d | <nl> + 1 [ 02 ] | <nl> + 2 \ d | <nl> + 5 [ 0 - 79 ] | <nl> 8 [ 016 - 8 ] <nl> ) \ d { 4 } <nl> < / nationalNumberPattern > <nl> <nl> < mobile > <nl> < ! - - The data here is not regularly updated by the Ethiopian authorities , and many more <nl> numbers are visible online than are reported in the ITU document . This pattern is <nl> - therefore somewhat more relaxed than in the ITU document . - - > <nl> - < nationalNumberPattern > 9 [ 12 ] \ d { 7 } < / nationalNumberPattern > <nl> + therefore somewhat more relaxed than in the ITU document . According to the ETC , the <nl> + prefix 93 is assigned to Addis Ababa Mobile and 95 [ 89 ] to some CDMA providers . - - > <nl> + < nationalNumberPattern > <nl> + 9 ( ? : <nl> + [ 1 - 3 ] \ d | <nl> + 5 [ 89 ] <nl> + ) \ d { 6 } <nl> + < / nationalNumberPattern > <nl> < possibleNumberPattern > \ d { 9 } < / possibleNumberPattern > <nl> < exampleNumber > 911234567 < / exampleNumber > <nl> < / mobile > <nl> <nl> < exampleNumber > 22345678 < / exampleNumber > <nl> < / fixedLine > <nl> < mobile > <nl> - < ! - - Added 969 from an opensource report that this is now in use by Zain . - - > <nl> < nationalNumberPattern > <nl> ( ? : <nl> 5 ( ? : <nl> <nl> ) | <nl> 6 \ d | <nl> 7 [ 067 ] | <nl> - 9 [ 69 ] <nl> + 9 [ 069 ] <nl> ) | <nl> 9 ( ? : <nl> 0 [ 09 ] | <nl> <nl> 4 [ 2 - 68 ] \ d { 1 , 2 } | <nl> 5 [ 1 - 4689 ] \ d { 1 , 2 } <nl> ) \ d { 5 } | <nl> - ( ? : <nl> - 5 [ 0568 ] | <nl> - 70 <nl> - ) \ d { 6 } <nl> + 5 [ 0568 ] \ d { 6 } <nl> < / nationalNumberPattern > <nl> - < exampleNumber > 70123456 < / exampleNumber > <nl> + < exampleNumber > 50123456 < / exampleNumber > <nl> < / fixedLine > <nl> < mobile > <nl> - < ! - - Adding prefix 98 from numbers found on the internet . - - > <nl> < nationalNumberPattern > <nl> ( ? : <nl> 8 [ 89 ] | <nl> - 9 [ 15689 ] <nl> + 9 [ 013 - 9 ] <nl> ) \ d { 6 } <nl> < / nationalNumberPattern > <nl> < possibleNumberPattern > \ d { 8 } < / possibleNumberPattern > <nl> <nl> < / mobile > <nl> < ! - - No tollFree or premiumRate information can be found . - - > <nl> < voip > <nl> - < nationalNumberPattern > 7 [ 569 ] \ d { 6 } < / nationalNumberPattern > <nl> + < ! - - According to the document this could be stricter , but there are counter examples <nl> + online . 
- - > <nl> + < nationalNumberPattern > 7 [ 05 - 8 ] \ d { 6 } < / nationalNumberPattern > <nl> < possibleNumberPattern > \ d { 8 } < / possibleNumberPattern > <nl> < exampleNumber > 75123456 < / exampleNumber > <nl> < / voip > <nl> <nl> < nationalNumberPattern > <nl> 7215 ( ? : <nl> 4 [ 2 - 8 ] | <nl> - 8 [ 239 ] <nl> + 8 [ 239 ] | <nl> + 9 [ 056 ] <nl> ) \ d { 4 } <nl> < / nationalNumberPattern > <nl> < exampleNumber > 7215425678 < / exampleNumber > <nl> <nl> 7215 ( ? : <nl> 1 [ 02 ] | <nl> 2 \ d | <nl> - 5 [ 03469 ] | <nl> - 8 [ 01678 ] <nl> + 5 [ 034679 ] | <nl> + 8 [ 014 - 8 ] <nl> ) \ d { 4 } <nl> < / nationalNumberPattern > <nl> < possibleNumberPattern > \ d { 10 } < / possibleNumberPattern > <nl> <nl> < nationalNumberPattern > <nl> 649 ( ? : <nl> 2 ( ? : <nl> - 3 [ 12 ] | <nl> + 3 [ 129 ] | <nl> 4 [ 1 - 7 ] <nl> ) | <nl> 3 ( ? : <nl> <nl> < exampleNumber > 2112345 < / exampleNumber > <nl> < / fixedLine > <nl> < mobile > <nl> - < nationalNumberPattern > 7 [ 2 - 4 ] \ d { 5 } < / nationalNumberPattern > <nl> + < ! - - The prefix 79 is assigned to " pagers " in the numbering plan , but it seems from numbers <nl> + found online that it is in fact assigned to mobile users . - - > <nl> + < nationalNumberPattern > 7 [ 2 - 49 ] \ d { 5 } < / nationalNumberPattern > <nl> < exampleNumber > 7212345 < / exampleNumber > <nl> < / mobile > <nl> < tollFree > <nl> | JAVA / JS : Metadata changes only ( v4 . 8 . 3 ) . | google/libphonenumber | 6424d7a700f83d026c8fb7d3d79b9468172cb479 | 2014-12-03T12:22:01Z |
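Editor's note: each row in this metadata update is a bundle of validation regexes per number type, and one way to read such a diff is to run example numbers against the old and new patterns. Below, the Sint Maarten (SX) mobile pattern from the XML above, checked with C++ `<regex>`. This only illustrates the patterns themselves: the real libphonenumber builds matchers from the compiled metadata rather than hard-coded strings, and `7215571234` is a hypothetical number chosen to land in the newly added `7` digit of the `5[...]` class.

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
    // SX mobile pattern before and after this metadata update, taken
    // verbatim from the diff ("\\d" in the source is the regex \d).
    std::regex old_sx_mobile("7215(?:1[02]|2\\d|5[03469]|8[01678])\\d{4}");
    std::regex new_sx_mobile("7215(?:1[02]|2\\d|5[034679]|8[014-8])\\d{4}");

    // Hypothetical number in the range the update adds: the second digit
    // 7 was missing from 5[03469] and is present in 5[034679].
    std::string n = "7215571234";
    std::cout << "old: " << std::regex_match(n, old_sx_mobile) << "\n";  // 0
    std::cout << "new: " << std::regex_match(n, new_sx_mobile) << "\n";  // 1

    // The documented exampleNumber matches both versions.
    std::string example = "7215205678";
    std::cout << std::regex_match(example, new_sx_mobile) << "\n";       // 1
}
```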
mmm a / src / compiler / objective_c_plugin . cc <nl> ppp b / src / compiler / objective_c_plugin . cc <nl> class ObjectiveCGrpcGenerator : public grpc : : protobuf : : compiler : : CodeGenerator { <nl> return true ; <nl> } <nl> <nl> + bool grpc_local_import ; <nl> : : std : : string framework ; <nl> : : std : : string pb_runtime_import_prefix ; <nl> + : : std : : string grpc_local_import_prefix ; <nl> std : : vector < : : std : : string > params_list = <nl> grpc_generator : : tokenize ( parameter , " , " ) ; <nl> for ( auto param_str = params_list . begin ( ) ; param_str ! = params_list . end ( ) ; <nl> class ObjectiveCGrpcGenerator : public grpc : : protobuf : : compiler : : CodeGenerator { <nl> } <nl> pb_runtime_import_prefix = param [ 1 ] ; <nl> grpc_generator : : StripSuffix ( & pb_runtime_import_prefix , " / " ) ; <nl> + } else if ( param [ 0 ] = = " grpc_local_import_prefix " ) { <nl> + grpc_local_import = true ; <nl> + if ( param . size ( ) ! = 2 ) { <nl> + * error = grpc : : string ( " Format : grpc_local_import_prefix = dir / " ) ; <nl> + return false ; <nl> + } <nl> + grpc_local_import_prefix = param [ 1 ] ; <nl> } <nl> } <nl> <nl> class ObjectiveCGrpcGenerator : public grpc : : protobuf : : compiler : : CodeGenerator { <nl> imports = FrameworkImport ( file_name + " . pbobjc . h " , framework ) ; <nl> } <nl> <nl> - : : std : : string system_imports = <nl> - SystemImport ( " ProtoRPC / ProtoService . h " ) + <nl> - ( generator_params . no_v1_compatibility <nl> - ? SystemImport ( " ProtoRPC / ProtoRPC . h " ) <nl> - : SystemImport ( " ProtoRPC / ProtoRPCLegacy . h " ) ) ; <nl> - if ( ! generator_params . no_v1_compatibility ) { <nl> - system_imports + = SystemImport ( " RxLibrary / GRXWriteable . h " ) + <nl> - SystemImport ( " RxLibrary / GRXWriter . h " ) ; <nl> + : : std : : string system_imports ; <nl> + if ( grpc_local_import ) { <nl> + system_imports = <nl> + LocalImport ( grpc_local_import_prefix + " ProtoRPC / ProtoService . h " ) ; <nl> + if ( generator_params . no_v1_compatibility ) { <nl> + system_imports + = <nl> + LocalImport ( grpc_local_import_prefix + " ProtoRPC / ProtoRPC . h " ) ; <nl> + } else { <nl> + system_imports + = LocalImport ( grpc_local_import_prefix + <nl> + " ProtoRPC / ProtoRPCLegacy . h " ) ; <nl> + system_imports + = LocalImport ( grpc_local_import_prefix + <nl> + " RxLibrary / GRXWriteable . h " ) ; <nl> + system_imports + = <nl> + LocalImport ( grpc_local_import_prefix + " RxLibrary / GRXWriter . h " ) ; <nl> + } <nl> + } else { <nl> + system_imports = SystemImport ( " ProtoRPC / ProtoService . h " ) ; <nl> + if ( generator_params . no_v1_compatibility ) { <nl> + system_imports + = SystemImport ( " ProtoRPC / ProtoRPC . h " ) ; <nl> + } else { <nl> + system_imports + = SystemImport ( " ProtoRPC / ProtoRPCLegacy . h " ) ; <nl> + system_imports + = SystemImport ( " RxLibrary / GRXWriteable . h " ) ; <nl> + system_imports + = SystemImport ( " RxLibrary / GRXWriter . h " ) ; <nl> + } <nl> } <nl> <nl> : : std : : string forward_declarations = <nl> class ObjectiveCGrpcGenerator : public grpc : : protobuf : : compiler : : CodeGenerator { <nl> imports = FrameworkImport ( file_name + " . pbrpc . h " , framework ) + <nl> FrameworkImport ( file_name + " . pbobjc . h " , framework ) ; <nl> } <nl> - imports + = ( generator_params . no_v1_compatibility <nl> - ? SystemImport ( " ProtoRPC / ProtoRPC . h " ) <nl> - : SystemImport ( " ProtoRPC / ProtoRPCLegacy . h " ) ) ; <nl> - if ( ! generator_params . 
no_v1_compatibility ) { <nl> - imports + = SystemImport ( " RxLibrary / GRXWriter + Immediate . h " ) ; <nl> + <nl> + if ( grpc_local_import ) { <nl> + if ( generator_params . no_v1_compatibility ) { <nl> + imports + = <nl> + LocalImport ( grpc_local_import_prefix + " ProtoRPC / ProtoRPC . h " ) ; <nl> + } else { <nl> + imports + = LocalImport ( grpc_local_import_prefix + <nl> + " ProtoRPC / ProtoRPCLegacy . h " ) ; <nl> + imports + = LocalImport ( grpc_local_import_prefix + <nl> + " RxLibrary / GRXWriter + Immediate . h " ) ; <nl> + } <nl> + } else { <nl> + if ( generator_params . no_v1_compatibility ) { <nl> + imports + = SystemImport ( " ProtoRPC / ProtoRPC . h " ) ; <nl> + } else { <nl> + imports + = SystemImport ( " ProtoRPC / ProtoRPCLegacy . h " ) ; <nl> + imports + = SystemImport ( " RxLibrary / GRXWriter + Immediate . h " ) ; <nl> + } <nl> } <nl> <nl> : : std : : string class_imports ; <nl> mmm a / src / objective - c / tests / run_plugin_option_tests . sh <nl> ppp b / src / objective - c / tests / run_plugin_option_tests . sh <nl> rm - rf RemoteTestClient / * pb * <nl> $ PROTOC \ <nl> - - plugin = protoc - gen - grpc = $ PLUGIN \ <nl> - - objc_out = RemoteTestClient \ <nl> - - - grpc_out = runtime_import_prefix = $ RUNTIME_IMPORT_PREFIX : RemoteTestClient \ <nl> + - - grpc_out = grpc_local_import_prefix = $ RUNTIME_IMPORT_PREFIX , runtime_import_prefix = $ RUNTIME_IMPORT_PREFIX : RemoteTestClient \ <nl> - I $ ROOT_DIR \ <nl> - I . . / . . / . . / third_party / protobuf / src \ <nl> $ ROOT_DIR / src / objective - c / examples / RemoteTestClient / * . proto <nl> <nl> + # Verify the " runtime_import_prefix " option <nl> # Verify the output proto filename <nl> [ - e . / RemoteTestClient / src / objective - c / examples / RemoteTestClient / Test . pbrpc . m ] | | { <nl> echo > & 2 " protoc outputs wrong filename . " <nl> $ PROTOC \ <nl> exit 1 <nl> } <nl> <nl> + # Verify the " grpc_local_import_directory " option <nl> + # Verify system files are imported in a " local " way in header files . <nl> + [ " ` cat RemoteTestClient / src / objective - c / examples / RemoteTestClient / Test . pbrpc . h | <nl> + egrep ' # import " ' " $ { RUNTIME_IMPORT_PREFIX } " ' / ProtoRPC / . * \ . h ' ` " ] | | { <nl> + echo > & 2 " grpc system files should be imported with full paths . " <nl> + } <nl> + <nl> + # Verify system files are imported in a " local " way in source files . <nl> + [ " ` cat RemoteTestClient / src / objective - c / examples / RemoteTestClient / Test . pbrpc . m | <nl> + egrep ' # import " ' " $ { RUNTIME_IMPORT_PREFIX } " ' / ProtoRPC / . * \ . h ' ` " ] | | { <nl> + echo > & 2 " grpc system files should be imported with full paths . " <nl> + } <nl> + <nl> # Run one extra command to clear $ ? before exiting the script to prevent <nl> # failing even when tests pass . <nl> echo " Plugin option tests passed . " <nl> | Merge pull request from yulin - liang / local_import_prefix | grpc/grpc | f87e635d2d39f2bcd0cc94c8372cb5093fec0c54 | 2020-07-20T22:33:49Z |
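The plugin change reads a `grpc_local_import_prefix=dir/` sub-option out of the `--grpc_out` parameter string and, when it is present, switches the generated system imports such as `#import <ProtoRPC/ProtoService.h>` to quoted local imports rooted at that prefix. A sketch of the invocation shape the test script exercises, driven from Python; the plugin path, prefix, and proto name are placeholders:

```python
import subprocess

prefix = "third_party/grpc/"  # assumed checkout location; note the trailing slash
subprocess.run([
    "protoc",
    "--plugin=protoc-gen-grpc=bins/opt/grpc_objective_c_plugin",
    "--objc_out=gen",
    # comma-separated sub-options, then a colon, then the output directory
    f"--grpc_out=grpc_local_import_prefix={prefix},"
    f"runtime_import_prefix={prefix}:gen",
    "-I", ".",
    "test.proto",
], check=True)
# The generated .pbrpc.h then contains e.g.
#   #import "third_party/grpc/ProtoRPC/ProtoService.h"
# instead of
#   #import <ProtoRPC/ProtoService.h>
```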
mmm a / js / common / modules / org / arangodb / general - graph . js <nl> ppp b / js / common / modules / org / arangodb / general - graph . js <nl> Graph . prototype . _vertices = function ( example ) { <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ startDocuBlock JSF_general_graph_getFromVertex <nl> + / / / @ startDocuBlock JSF_general_graph_fromVertex <nl> / / / Get the vertex of an edge defined as * _from * <nl> / / / <nl> - / / / ` general - graph . _getFromVertex ( edgeId ) ` <nl> + / / / ` general - graph . _fromVertex ( edgeId ) ` <nl> / / / <nl> / / / Returns the vertex defined with the attribute * _from * of the edge with * edgeId * as its * _id * . <nl> / / / <nl> Graph . prototype . _vertices = function ( example ) { <nl> / / / @ EXAMPLE_ARANGOSH_OUTPUT { generalGraphGetFromVertex } <nl> / / / var examples = require ( " org / arangodb / graph - examples / example - graph . js " ) ; <nl> / / / var g = examples . loadGraph ( " social " ) ; <nl> - / / / g . _getFromVertex ( " relation / aliceAndBob " ) <nl> + / / / g . _fromVertex ( " relation / aliceAndBob " ) <nl> / / / @ END_EXAMPLE_ARANGOSH_OUTPUT <nl> / / / <nl> / / / @ endDocuBlock <nl> / / / <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - Graph . prototype . _getFromVertex = function ( edgeId ) { <nl> + Graph . prototype . _fromVertex = function ( edgeId ) { <nl> var edgeCollection = this . _getEdgeCollectionByName ( edgeId . split ( " / " ) [ 0 ] ) ; <nl> var document = edgeCollection . document ( edgeId ) ; <nl> if ( document ) { <nl> mmm a / js / common / tests / shell - general - graph . js <nl> ppp b / js / common / tests / shell - general - graph . js <nl> function ChainedFluentAQLResultsSuite ( ) { <nl> assertEqual ( result [ 0 ] . name , ubName ) ; <nl> } , <nl> <nl> - test_getFromVertexForSelectedEdgeResultingAQL : function ( ) { <nl> + test_fromVertexForSelectedEdgeResultingAQL : function ( ) { <nl> var query = g . _edges ( { since : ud1 } ) <nl> . fromVertices ( ) ; <nl> var stmt = query . printQuery ( ) ; <nl> function ChainedFluentAQLResultsSuite ( ) { <nl> assertEqual ( query . bindVars . options_1 , { } ) ; <nl> } , <nl> <nl> - test_getFromVertexForSelectedEdge : function ( ) { <nl> + test_fromVertexForSelectedEdge : function ( ) { <nl> var result = g . _edges ( { since : ud1 } ) <nl> . fromVertices ( ) <nl> . toArray ( ) ; <nl> function EdgesAndVerticesSuite ( ) { <nl> <nl> test_getInVertex : function ( ) { <nl> var ids = fillCollections ( ) ; <nl> - var result = g . _getFromVertex ( ids . eId11 ) ; <nl> + var result = g . _fromVertex ( ids . eId11 ) ; <nl> assertEqual ( result . _id , ids . vId11 ) ; <nl> } , <nl> <nl> | renamed _getFromVertex ( . . . ) - > _fromVertex ( . . ) | arangodb/arangodb | 1f02d365705c384c3fa21b42761e2f8576af8952 | 2014-06-23T07:11:07Z |
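This record is a straight rename (`_getFromVertex` to `_fromVertex`) across implementation, docs, and tests, with no compatibility alias left behind, which is defensible for an underscore-prefixed API. Purely as an illustration of the alternative design choice (not what this commit does), a deprecation shim for such a rename might look like this Python sketch:

```python
import warnings

class Graph:
    def _fromVertex(self, edge_id):
        """Return the vertex referenced by the edge's _from attribute."""
        ...  # resolve the edge document and look up its _from vertex

    def _getFromVertex(self, edge_id):  # old name, kept for one release
        warnings.warn("_getFromVertex is deprecated; use _fromVertex",
                      DeprecationWarning, stacklevel=2)
        return self._fromVertex(edge_id)
```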
mmm a / . pre - commit - config . yaml <nl> ppp b / . pre - commit - config . yaml <nl> repos : <nl> - id : flake8 <nl> exclude : ' ^ ( pyextra ) | ( external ) | ( cereal ) | ( rednose ) | ( panda ) | ( laika ) | ( laika_repo ) | ( rednose_repo ) / ' <nl> args : <nl> - - - - select = F <nl> + - - - ignore = E111 , E114 , E121 , E122 , E123 , E124 , E126 , E127 , E128 , E201 , E202 , E203 , E221 , E225 , E226 , E231 , E241 , E251 , E261 , E265 , E266 , E302 , E303 , E305 , E402 , E501 , E502 , E722 , E741 , W504 <nl> + - - - statistics <nl> - repo : local <nl> hooks : <nl> - id : pylint <nl> mmm a / common / android . py <nl> ppp b / common / android . py <nl> def get_imei ( slot ) : <nl> ret = parse_service_call_string ( service_call ( [ " iphonesubinfo " , " 3 " , " i32 " , str ( slot ) ] ) ) <nl> if not ret : <nl> # allow non android to be identified differently <nl> - ret = " % 015d " % random . randint ( 0 , 1 < < 32 ) <nl> + ret = " % 015d " % random . randint ( 0 , 1 < < 32 ) <nl> return ret <nl> <nl> def get_serial ( ) : <nl> def get_network_type ( ) : <nl> <nl> def get_network_strength ( network_type ) : <nl> network_strength = NetworkStrength . unknown <nl> + <nl> # from SignalStrength . java <nl> def get_lte_level ( rsrp , rssnr ) : <nl> INT_MAX = 2147483647 <nl> mmm a / common / api / __init__ . py <nl> ppp b / common / api / __init__ . py <nl> def api_get ( endpoint , method = ' GET ' , timeout = None , access_token = None , * * params ) : <nl> headers [ ' User - Agent ' ] = " openpilot - " + version <nl> <nl> return requests . request ( method , backend + endpoint , timeout = timeout , headers = headers , params = params ) <nl> - <nl> mmm a / common / basedir . py <nl> ppp b / common / basedir . py <nl> <nl> else : <nl> PERSIST = os . path . join ( BASEDIR , " persist " ) <nl> PARAMS = os . path . join ( BASEDIR , " persist " , " params " ) <nl> - <nl> mmm a / common / file_helpers . py <nl> ppp b / common / file_helpers . py <nl> def name ( self ) : <nl> def close ( self ) : <nl> os . rename ( self . _path , self . _target_path ) <nl> <nl> - def __enter__ ( self ) : return self <nl> + def __enter__ ( self ) : <nl> + return self <nl> <nl> def __exit__ ( self , type , value , traceback ) : <nl> if type is None : <nl> def name ( self ) : <nl> def close ( self ) : <nl> shutil . rmtree ( self . _path ) <nl> <nl> - def __enter__ ( self ) : return self <nl> + def __enter__ ( self ) : <nl> + return self <nl> <nl> def __exit__ ( self , type , value , traceback ) : <nl> self . close ( ) <nl> mmm a / common / logging_extra . py <nl> ppp b / common / logging_extra . py <nl> class SwagErrorFilter ( logging . Filter ) : <nl> def filter ( self , record ) : <nl> return record . levelno < logging . ERROR <nl> <nl> - _tmpfunc = lambda : 0 <nl> - _srcfile = os . path . normcase ( _tmpfunc . __code__ . co_filename ) <nl> + def _tmpfunc ( ) : <nl> + return 0 <nl> + <nl> + def _srcfile ( ) : <nl> + return os . path . normcase ( _tmpfunc . __code__ . co_filename ) <nl> <nl> class SwagLogger ( logging . Logger ) : <nl> def __init__ ( self ) : <nl> mmm a / common / numpy_fast . py <nl> ppp b / common / numpy_fast . 
py <nl> def clip ( x , lo , hi ) : <nl> <nl> def interp ( x , xp , fp ) : <nl> N = len ( xp ) <nl> + <nl> def get_interp ( xv ) : <nl> hi = 0 <nl> while hi < N and xv > xp [ hi ] : <nl> def get_interp ( xv ) : <nl> return fp [ - 1 ] if hi = = N and xv > xp [ low ] else ( <nl> fp [ 0 ] if hi = = 0 else <nl> ( xv - xp [ low ] ) * ( fp [ hi ] - fp [ low ] ) / ( xp [ hi ] - xp [ low ] ) + fp [ low ] ) <nl> - return [ get_interp ( v ) for v in x ] if hasattr ( <nl> - x , ' __iter__ ' ) else get_interp ( x ) <nl> + <nl> + return [ get_interp ( v ) for v in x ] if hasattr ( x , ' __iter__ ' ) else get_interp ( x ) <nl> <nl> def mean ( x ) : <nl> return sum ( x ) / len ( x ) <nl> mmm a / common / params . py <nl> ppp b / common / params . py <nl> def __enter__ ( self ) : <nl> finally : <nl> lock . release ( ) <nl> <nl> - def __exit__ ( self , type , value , traceback ) : pass <nl> + def __exit__ ( self , type , value , traceback ) : <nl> + pass <nl> <nl> <nl> class DBWriter ( DBAccessor ) : <nl> mmm a / common / profiler . py <nl> ppp b / common / profiler . py <nl> def display ( self ) : <nl> else : <nl> print ( " % 30s : % 9 . 2f percent : % 3 . 0f " % ( n , ms * 1000 . 0 , ms / self . tot * 100 ) ) <nl> print ( " Iter clock : % 2 . 6f TOTAL : % 2 . 2f " % ( self . tot / self . iter , self . tot ) ) <nl> - <nl> mmm a / common / stat_live . py <nl> ppp b / common / stat_live . py <nl> def push_data ( self , new_data ) : <nl> self . S_last = 0 . <nl> else : <nl> self . M = self . M_last + ( new_data - self . M_last ) / self . n <nl> - self . S = self . S_last + ( new_data - self . M_last ) * ( new_data - self . M ) ; <nl> + self . S = self . S_last + ( new_data - self . M_last ) * ( new_data - self . M ) <nl> self . M_last = self . M <nl> self . S_last = self . S <nl> <nl> mmm a / common / string_helpers . py <nl> ppp b / common / string_helpers . py <nl> def replace_right ( s , old , new , occurrence ) : <nl> # replace_right ( ' 1232425 ' , ' 2 ' , ' ' , 2 ) - > ' 123 4 5 ' <nl> <nl> split = s . rsplit ( old , occurrence ) <nl> - return new . join ( split ) <nl> \ No newline at end of file <nl> + return new . join ( split ) <nl> mmm a / common / testing . py <nl> ppp b / common / testing . py <nl> def phone_only ( x ) : <nl> return x <nl> else : <nl> return nottest ( x ) <nl> - <nl> mmm a / common / timeout . py <nl> ppp b / common / timeout . py <nl> def __enter__ ( self ) : <nl> <nl> def __exit__ ( self , exc_type , exc_val , exc_tb ) : <nl> signal . alarm ( 0 ) <nl> - <nl> mmm a / common / transformations / camera . py <nl> ppp b / common / transformations / camera . py <nl> def pretransform_from_calib ( calib ) : <nl> camera_frame_from_road_frame = np . dot ( eon_intrinsics , view_frame_from_road_frame ) <nl> camera_frame_from_calib_frame = get_camera_frame_from_calib_frame ( camera_frame_from_road_frame ) <nl> return np . linalg . inv ( camera_frame_from_calib_frame ) <nl> - <nl> mmm a / common / transformations / coordinates . py <nl> ppp b / common / transformations / coordinates . py <nl> def ecef2geodetic ( ecef , radians = False ) : <nl> S = np . cbrt ( 1 + C + np . sqrt ( C * C + 2 * C ) ) <nl> P = F / ( 3 * pow ( ( S + 1 / S + 1 ) , 2 ) * G * G ) <nl> Q = np . sqrt ( 1 + 2 * esq * esq * P ) <nl> - r_0 = - ( P * esq * r ) / ( 1 + Q ) + np . sqrt ( 0 . 5 * a * a * ( 1 + 1 . 0 / Q ) - \ <nl> + r_0 = - ( P * esq * r ) / ( 1 + Q ) + np . sqrt ( 0 . 5 * a * a * ( 1 + 1 . 0 / Q ) - \ <nl> P * ( 1 - esq ) * z * z / ( Q * ( 1 + Q ) ) - 0 . 5 * P * r * r ) <nl> U = np . 
sqrt ( pow ( ( r - esq * r_0 ) , 2 ) + z * z ) <nl> V = np . sqrt ( pow ( ( r - esq * r_0 ) , 2 ) + ( 1 - esq ) * z * z ) <nl> mmm a / common / url_file . py <nl> ppp b / common / url_file . py <nl> def read ( self , ll = None ) : <nl> <nl> if self . _debug : <nl> print ( " downloading " , self . _url ) <nl> + <nl> def header ( x ) : <nl> if b ' MISS ' in x : <nl> print ( x . strip ( ) ) <nl> + <nl> c . setopt ( pycurl . HEADERFUNCTION , header ) <nl> + <nl> def test ( debug_type , debug_msg ) : <nl> print ( " debug ( % d ) : % s " % ( debug_type , debug_msg . strip ( ) ) ) <nl> + <nl> c . setopt ( pycurl . VERBOSE , 1 ) <nl> c . setopt ( pycurl . DEBUGFUNCTION , test ) <nl> t1 = time . time ( ) <nl> def test ( debug_type , debug_msg ) : <nl> print ( " get % s % r % . f slow " % ( self . _url , trange , t2 - t1 ) ) <nl> <nl> response_code = c . getinfo ( pycurl . RESPONSE_CODE ) <nl> - if response_code = = 416 : # Requested Range Not Satisfiable <nl> + if response_code = = 416 : # Requested Range Not Satisfiable <nl> return " " <nl> if response_code ! = 206 and response_code ! = 200 : <nl> raise Exception ( " Error { } ( { } ) : { } " . format ( response_code , self . _url , repr ( dats . getvalue ( ) ) [ : 500 ] ) ) <nl> mmm a / opendbc <nl> ppp b / opendbc <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 0430bfa5c2b08f9cc6ab32470fe8ac9465e7a876 <nl> + Subproject commit b15edbc1b5a68fd725ea45ba9442a6c9be875971 <nl> mmm a / scripts / code_stats . py <nl> ppp b / scripts / code_stats . py <nl> def visit_Import ( self , node ) : <nl> for alias in node . names : <nl> imps . add ( alias . name ) <nl> self . generic_visit ( node ) <nl> + <nl> def visit_ImportFrom ( self , node ) : <nl> imps . add ( node . module ) <nl> self . generic_visit ( node ) <nl> def visit_ImportFrom ( self , node ) : <nl> <nl> print ( " % d lines of parsed openpilot python " % tlns ) <nl> # print ( sorted ( list ( imps ) ) ) <nl> - <nl> mmm a / selfdrive / boardd / tests / boardd_old . py <nl> ppp b / selfdrive / boardd / tests / boardd_old . py <nl> <nl> # USB is optional <nl> try : <nl> import usb1 <nl> - from usb1 import USBErrorIO , USBErrorOverflow # pylint : disable = no - name - in - module <nl> + from usb1 import USBErrorIO , USBErrorOverflow # pylint : disable = no - name - in - module <nl> except Exception : <nl> pass <nl> <nl> def __parse_can_buffer ( dat ) : <nl> for j in range ( 0 , len ( dat ) , 0x10 ) : <nl> ddat = dat [ j : j + 0x10 ] <nl> f1 , f2 = struct . unpack ( " II " , ddat [ 0 : 8 ] ) <nl> - ret . append ( ( f1 > > 21 , f2 > > 16 , ddat [ 8 : 8 + ( f2 & 0xF ) ] , ( f2 > > 4 ) & 0xFF ) ) <nl> + ret . append ( ( f1 > > 21 , f2 > > 16 , ddat [ 8 : 8 + ( f2 & 0xF ) ] , ( f2 > > 4 ) & 0xFF ) ) <nl> return ret <nl> <nl> def can_send_many ( arr ) : <nl> mmm a / selfdrive / camerad / test / frame_test . py <nl> ppp b / selfdrive / camerad / test / frame_test . py <nl> <nl> font = ImageFont . truetype ( " arial " , size = 72 ) <nl> def get_frame ( idx ) : <nl> img = np . zeros ( ( 874 , 1164 , 3 ) , np . uint8 ) <nl> - img [ 100 : 400 , 100 : 100 + ( idx % 10 ) * 100 ] = 255 <nl> + img [ 100 : 400 , 100 : 100 + ( idx % 10 ) * 100 ] = 255 <nl> <nl> # big number <nl> im2 = Image . new ( " RGB " , ( 200 , 200 ) ) <nl> def get_frame ( idx ) : <nl> dat . valid = True <nl> dat . frame = { <nl> " frameId " : idx , <nl> - " image " : frm [ idx % len ( frm ) ] , <nl> + " image " : frm [ idx % len ( frm ) ] , <nl> } <nl> pm . send ( ' frame ' , dat ) <nl> <nl> mmm a / selfdrive / car / __init__ . 
py <nl> ppp b / selfdrive / car / __init__ . py <nl> def is_ecu_disconnected ( fingerprint , fingerprint_list , ecu_fingerprint , car , ecu <nl> <nl> def make_can_msg ( addr , dat , bus ) : <nl> return [ addr , 0 , dat , bus ] <nl> - <nl> mmm a / selfdrive / car / ford / carcontroller . py <nl> ppp b / selfdrive / car / ford / carcontroller . py <nl> def update ( self , enabled , CS , frame , actuators , visual_alert , pcm_cancel ) : <nl> static_msgs = range ( 1653 , 1658 ) <nl> for addr in static_msgs : <nl> cnt = ( frame % 10 ) + 1 <nl> - can_sends . append ( make_can_msg ( addr , ( cnt < < 4 ) . to_bytes ( 1 , ' little ' ) + b ' \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 ' , 1 ) ) <nl> + can_sends . append ( make_can_msg ( addr , ( cnt < < 4 ) . to_bytes ( 1 , ' little ' ) + b ' \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 ' , 1 ) ) <nl> <nl> self . enabled_last = enabled <nl> self . main_on_last = CS . out . cruiseState . available <nl> mmm a / selfdrive / car / ford / fordcan . py <nl> ppp b / selfdrive / car / ford / fordcan . py <nl> def create_steer_command ( packer , angle_cmd , enabled , lkas_state , angle_steers , c <nl> " " " Creates a CAN message for the Ford Steer Command . " " " <nl> <nl> # if enabled and lkas available : <nl> - if enabled and lkas_state in [ 2 , 3 ] : # and ( frame % 500 ) > = 3 : <nl> + if enabled and lkas_state in [ 2 , 3 ] : # and ( frame % 500 ) > = 3 : <nl> action = lkas_action <nl> else : <nl> action = 0xf <nl> mmm a / selfdrive / car / gm / carcontroller . py <nl> ppp b / selfdrive / car / gm / carcontroller . py <nl> def update ( self , enabled , CS , frame , actuators , \ <nl> lka_active = CS . lkas_status = = 1 <nl> lka_critical = lka_active and abs ( actuators . steer ) > 0 . 9 <nl> lka_icon_status = ( lka_active , lka_critical ) <nl> - if frame % P . CAMERA_KEEPALIVE_STEP = = 0 \ <nl> - or lka_icon_status ! = self . lka_icon_status_last : <nl> + if frame % P . CAMERA_KEEPALIVE_STEP = = 0 or lka_icon_status ! = self . lka_icon_status_last : <nl> steer_alert = hud_alert = = VisualAlert . steerRequired <nl> can_sends . append ( gmcan . create_lka_icon_command ( CanBus . SW_GMLAN , lka_active , lka_critical , steer_alert ) ) <nl> self . lka_icon_status_last = lka_icon_status <nl> mmm a / selfdrive / car / gm / carstate . py <nl> ppp b / selfdrive / car / gm / carstate . py <nl> def update ( self , pt_cp ) : <nl> <nl> # 1 - open , 0 - closed <nl> ret . doorOpen = ( pt_cp . vl [ " BCMDoorBeltStatus " ] [ ' FrontLeftDoor ' ] = = 1 or <nl> - pt_cp . vl [ " BCMDoorBeltStatus " ] [ ' FrontRightDoor ' ] = = 1 or <nl> - pt_cp . vl [ " BCMDoorBeltStatus " ] [ ' RearLeftDoor ' ] = = 1 or <nl> - pt_cp . vl [ " BCMDoorBeltStatus " ] [ ' RearRightDoor ' ] = = 1 ) <nl> + pt_cp . vl [ " BCMDoorBeltStatus " ] [ ' FrontRightDoor ' ] = = 1 or <nl> + pt_cp . vl [ " BCMDoorBeltStatus " ] [ ' RearLeftDoor ' ] = = 1 or <nl> + pt_cp . vl [ " BCMDoorBeltStatus " ] [ ' RearRightDoor ' ] = = 1 ) <nl> <nl> # 1 - latched <nl> ret . seatbeltUnlatched = pt_cp . vl [ " BCMDoorBeltStatus " ] [ ' LeftSeatBelt ' ] = = 0 <nl> mmm a / selfdrive / car / gm / gmcan . py <nl> ppp b / selfdrive / car / gm / gmcan . py <nl> def create_lka_icon_command ( bus , active , critical , steer ) : <nl> else : <nl> dat = b " \ x00 \ x00 \ x00 " <nl> return make_can_msg ( 0x104c006c , dat , bus ) <nl> - <nl> mmm a / selfdrive / car / gm / interface . py <nl> ppp b / selfdrive / car / gm / interface . 
py <nl> def get_params ( candidate , fingerprint = gen_empty_fingerprint ( ) , has_relay = False , <nl> ret . minEnableSpeed = - 1 . # engage speed is decided by pcm <nl> ret . mass = 4353 . * CV . LB_TO_KG + STD_CARGO_KG <nl> ret . wheelbase = 2 . 86 <nl> - ret . steerRatio = 14 . 4 # end to end is 13 . 46 <nl> + ret . steerRatio = 14 . 4 # end to end is 13 . 46 <nl> ret . steerRatioRear = 0 . <nl> ret . centerToFront = ret . wheelbase * 0 . 4 <nl> <nl> elif candidate = = CAR . BUICK_REGAL : <nl> ret . minEnableSpeed = 18 * CV . MPH_TO_MS <nl> ret . mass = 3779 . * CV . LB_TO_KG + STD_CARGO_KG # ( 3849 + 3708 ) / 2 <nl> - ret . wheelbase = 2 . 83 # 111 . 4 inches in meters <nl> - ret . steerRatio = 14 . 4 # guess for tourx <nl> + ret . wheelbase = 2 . 83 # 111 . 4 inches in meters <nl> + ret . steerRatio = 14 . 4 # guess for tourx <nl> ret . steerRatioRear = 0 . <nl> ret . centerToFront = ret . wheelbase * 0 . 4 # guess for tourx <nl> <nl> mmm a / selfdrive / car / gm / radar_interface . py <nl> ppp b / selfdrive / car / gm / radar_interface . py <nl> def update ( self , can_strings ) : <nl> self . pts [ targetId ] . yvRel = float ( ' nan ' ) <nl> <nl> for oldTarget in list ( self . pts . keys ( ) ) : <nl> - if not oldTarget in currentTargets : <nl> + if oldTarget not in currentTargets : <nl> del self . pts [ oldTarget ] <nl> <nl> ret . points = list ( self . pts . values ( ) ) <nl> mmm a / selfdrive / car / honda / carstate . py <nl> ppp b / selfdrive / car / honda / carstate . py <nl> def calc_cruise_offset ( offset , speed ) : <nl> <nl> <nl> def get_can_signals ( CP ) : <nl> - # this function generates lists for signal , messages and initial values <nl> + # this function generates lists for signal , messages and initial values <nl> signals = [ <nl> ( " XMISSION_SPEED " , " ENGINE_DATA " , 0 ) , <nl> ( " WHEEL_SPEED_FL " , " WHEEL_SPEEDS " , 0 ) , <nl> def get_cam_can_parser ( CP ) : <nl> if CP . carFingerprint in [ CAR . CRV , CAR . CRV_EU , CAR . ACURA_RDX , CAR . ODYSSEY_CHN ] : <nl> checks = [ ( 0x194 , 100 ) ] <nl> <nl> - bus_cam = 1 if CP . carFingerprint in HONDA_BOSCH and not CP . isPandaBlack else 2 <nl> + bus_cam = 1 if CP . carFingerprint in HONDA_BOSCH and not CP . isPandaBlack else 2 <nl> return CANParser ( DBC [ CP . carFingerprint ] [ ' pt ' ] , signals , checks , bus_cam ) <nl> mmm a / selfdrive / car / hyundai / carcontroller . py <nl> ppp b / selfdrive / car / hyundai / carcontroller . py <nl> def process_hud_alert ( enabled , fingerprint , visual_alert , left_lane , <nl> <nl> # initialize to no line visible <nl> sys_state = 1 <nl> - if left_lane and right_lane or sys_warning : # HUD alert only display when LKAS status is active <nl> + if left_lane and right_lane or sys_warning : # HUD alert only display when LKAS status is active <nl> if enabled or sys_warning : <nl> sys_state = 3 <nl> else : <nl> mmm a / selfdrive / car / hyundai / carstate . py <nl> ppp b / selfdrive / car / hyundai / carstate . py <nl> def update ( self , cp , cp_cam ) : <nl> self . lkas11 = cp_cam . vl [ " LKAS11 " ] <nl> self . clu11 = cp . vl [ " CLU11 " ] <nl> self . park_brake = cp . vl [ " CGW1 " ] [ ' CF_Gway_ParkBrakeSw ' ] <nl> - self . steer_state = cp . vl [ " MDPS12 " ] [ ' CF_Mdps_ToiActive ' ] # 0 NOT ACTIVE , 1 ACTIVE <nl> + self . steer_state = cp . vl [ " MDPS12 " ] [ ' CF_Mdps_ToiActive ' ] # 0 NOT ACTIVE , 1 ACTIVE <nl> self . lead_distance = cp . 
vl [ " SCC11 " ] [ ' ACC_ObjDist ' ] <nl> <nl> return ret <nl> def get_can_parser ( CP ) : <nl> <nl> ( " ESC_Off_Step " , " TCS15 " , 0 ) , <nl> <nl> - ( " CF_Lvr_GearInf " , " LVR11 " , 0 ) , # Transmission Gear ( 0 = N or P , 1 - 8 = Fwd , 14 = Rev ) <nl> + ( " CF_Lvr_GearInf " , " LVR11 " , 0 ) , # Transmission Gear ( 0 = N or P , 1 - 8 = Fwd , 14 = Rev ) <nl> <nl> ( " CR_Mdps_StrColTq " , " MDPS12 " , 0 ) , <nl> ( " CF_Mdps_ToiActive " , " MDPS12 " , 0 ) , <nl> mmm a / selfdrive / car / hyundai / interface . py <nl> ppp b / selfdrive / car / hyundai / interface . py <nl> def get_params ( candidate , fingerprint = gen_empty_fingerprint ( ) , has_relay = False , <nl> ret . lateralTuning . pid . kf = 0 . 00006 <nl> ret . mass = 1275 . + STD_CARGO_KG <nl> ret . wheelbase = 2 . 7 <nl> - ret . steerRatio = 13 . 73 # Spec <nl> + ret . steerRatio = 13 . 73 # Spec <nl> tire_stiffness_factor = 0 . 385 <nl> ret . lateralTuning . pid . kiBP , ret . lateralTuning . pid . kpBP = [ [ 0 . ] , [ 0 . ] ] <nl> ret . lateralTuning . pid . kpV , ret . lateralTuning . pid . kiV = [ [ 0 . 25 ] , [ 0 . 05 ] ] <nl> def get_params ( candidate , fingerprint = gen_empty_fingerprint ( ) , has_relay = False , <nl> ret . lateralTuning . pid . kf = 0 . 00006 <nl> ret . mass = 1685 . + STD_CARGO_KG <nl> ret . wheelbase = 2 . 7 <nl> - ret . steerRatio = 13 . 73 # Spec <nl> + ret . steerRatio = 13 . 73 # Spec <nl> tire_stiffness_factor = 0 . 385 <nl> ret . lateralTuning . pid . kiBP , ret . lateralTuning . pid . kpBP = [ [ 0 . ] , [ 0 . ] ] <nl> ret . lateralTuning . pid . kpV , ret . lateralTuning . pid . kiV = [ [ 0 . 25 ] , [ 0 . 05 ] ] <nl> mmm a / selfdrive / car / mazda / __init__ . py <nl> ppp b / selfdrive / car / mazda / __init__ . py <nl> @ @ - 1 + 0 , 0 @ @ <nl> - <nl> mmm a / selfdrive / car / mazda / carcontroller . py <nl> ppp b / selfdrive / car / mazda / carcontroller . py <nl> def update ( self , enabled , CS , frame , actuators ) : <nl> <nl> <nl> self . apply_steer_last = apply_steer <nl> - <nl> + <nl> can_sends . append ( mazdacan . create_steering_control ( self . packer , CS . CP . carFingerprint , <nl> frame , apply_steer , CS . cam_lkas ) ) <nl> return can_sends <nl> mmm a / selfdrive / car / mazda / carstate . py <nl> ppp b / selfdrive / car / mazda / carstate . py <nl> def update ( self , cp , cp_cam ) : <nl> ret . leftBlinker = cp . vl [ " BLINK_INFO " ] [ ' LEFT_BLINK ' ] = = 1 <nl> ret . rightBlinker = cp . vl [ " BLINK_INFO " ] [ ' RIGHT_BLINK ' ] = = 1 <nl> <nl> - ret . steeringAngle = cp . vl [ " STEER " ] [ ' STEER_ANGLE ' ] <nl> + ret . steeringAngle = cp . vl [ " STEER " ] [ ' STEER_ANGLE ' ] <nl> ret . steeringTorque = cp . vl [ " STEER_TORQUE " ] [ ' STEER_TORQUE_SENSOR ' ] <nl> ret . steeringPressed = abs ( ret . steeringTorque ) > LKAS_LIMITS . STEER_THRESHOLD <nl> <nl> def update ( self , cp , cp_cam ) : <nl> self . cruise_speed = ret . vEgoRaw <nl> <nl> ret . cruiseState . available = True <nl> - ret . cruiseState . enabled = cp . vl [ " CRZ_CTRL " ] [ ' CRZ_ACTIVE ' ] = = 1 <nl> + ret . cruiseState . enabled = cp . vl [ " CRZ_CTRL " ] [ ' CRZ_ACTIVE ' ] = = 1 <nl> ret . cruiseState . speed = self . cruise_speed <nl> <nl> if ret . cruiseState . enabled : <nl> def get_cam_can_parser ( CP ) : <nl> ] <nl> <nl> return CANParser ( DBC [ CP . carFingerprint ] [ ' pt ' ] , signals , checks , 2 ) <nl> - <nl> mmm a / selfdrive / car / mazda / interface . py <nl> ppp b / selfdrive / car / mazda / interface . 
py <nl> def get_params ( candidate , fingerprint = gen_empty_fingerprint ( ) , has_relay = False , <nl> tire_stiffness_factor = 0 . 70 # not optimized yet <nl> <nl> if candidate in [ CAR . CX5 ] : <nl> - ret . mass = 3655 * CV . LB_TO_KG + STD_CARGO_KG <nl> + ret . mass = 3655 * CV . LB_TO_KG + STD_CARGO_KG <nl> ret . wheelbase = 2 . 7 <nl> ret . steerRatio = 15 . 5 <nl> <nl> mmm a / selfdrive / car / mazda / mazdacan . py <nl> ppp b / selfdrive / car / mazda / mazdacan . py <nl> def create_steering_control ( packer , car_fingerprint , frame , apply_steer , lkas ) : <nl> b2 = int ( lkas [ " ANGLE_ENABLED " ] ) <nl> <nl> tmp = steering_angle + 2048 <nl> - ahi = tmp > > 10 <nl> - amd = ( tmp & 0x3FF ) > > 2 <nl> - amd = ( amd > > 4 ) | ( ( amd & 0xF ) < < 4 ) <nl> - alo = ( tmp & 0x3 ) < < 2 <nl> + ahi = tmp > > 10 <nl> + amd = ( tmp & 0x3FF ) > > 2 <nl> + amd = ( amd > > 4 ) | ( ( amd & 0xF ) < < 4 ) <nl> + alo = ( tmp & 0x3 ) < < 2 <nl> <nl> ctr = frame % 16 <nl> # bytes : [ 1 ] [ 2 ] [ 3 ] [ 4 ] <nl> mmm a / selfdrive / car / mazda / radar_interface . py <nl> ppp b / selfdrive / car / mazda / radar_interface . py <nl> <nl> <nl> class RadarInterface ( RadarInterfaceBase ) : <nl> pass <nl> - <nl> mmm a / selfdrive / car / mazda / values . py <nl> ppp b / selfdrive / car / mazda / values . py <nl> class CAR : <nl> <nl> class LKAS_LIMITS : <nl> STEER_THRESHOLD = 15 <nl> - DISABLE_SPEED = 45 # kph <nl> - ENABLE_SPEED = 52 # kph <nl> + DISABLE_SPEED = 45 # kph <nl> + ENABLE_SPEED = 52 # kph <nl> <nl> class Buttons : <nl> NONE = 0 <nl> mmm a / selfdrive / car / subaru / __init__ . py <nl> ppp b / selfdrive / car / subaru / __init__ . py <nl> @ @ - 1 + 0 , 0 @ @ <nl> - <nl> mmm a / selfdrive / car / toyota / carcontroller . py <nl> ppp b / selfdrive / car / toyota / carcontroller . py <nl> def __init__ ( self , dbc_name , CP , VM ) : <nl> self . steer_rate_limited = False <nl> <nl> self . fake_ecus = set ( ) <nl> - if CP . enableCamera : self . fake_ecus . add ( Ecu . fwdCamera ) <nl> - if CP . enableDsu : self . fake_ecus . add ( Ecu . dsu ) <nl> + if CP . enableCamera : <nl> + self . fake_ecus . add ( Ecu . fwdCamera ) <nl> + if CP . enableDsu : <nl> + self . fake_ecus . add ( Ecu . dsu ) <nl> <nl> self . packer = CANPacker ( dbc_name ) <nl> <nl> mmm a / selfdrive / car / toyota / interface . py <nl> ppp b / selfdrive / car / toyota / interface . py <nl> def get_params ( candidate , fingerprint = gen_empty_fingerprint ( ) , has_relay = False , <nl> ret . wheelbase = 2 . 82448 <nl> ret . steerRatio = 13 . 7 <nl> tire_stiffness_factor = 0 . 7933 <nl> - ret . mass = 3400 . * CV . LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid <nl> + ret . mass = 3400 . * CV . LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid <nl> ret . lateralTuning . pid . kpV , ret . lateralTuning . pid . kiV = [ [ 0 . 6 ] , [ 0 . 1 ] ] <nl> ret . lateralTuning . pid . kf = 0 . 00006 <nl> <nl> def get_params ( candidate , fingerprint = gen_empty_fingerprint ( ) , has_relay = False , <nl> ret . wheelbase = 2 . 78 <nl> ret . steerRatio = 16 . 0 <nl> tire_stiffness_factor = 0 . 8 <nl> - ret . mass = 4607 . * CV . LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid limited <nl> + ret . mass = 4607 . * CV . LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid limited <nl> ret . lateralTuning . pid . kpV , ret . lateralTuning . pid . kiV = [ [ 0 . 18 ] , [ 0 . 015 ] ] # community tuning <nl> ret . lateralTuning . pid . kf = 0 . 
00012 # community tuning <nl> <nl> def get_params ( candidate , fingerprint = gen_empty_fingerprint ( ) , has_relay = False , <nl> stop_and_go = False <nl> ret . safetyParam = 73 <nl> ret . wheelbase = 2 . 82 <nl> - ret . steerRatio = 14 . 8 # Found at https : / / pressroom . toyota . com / releases / 2016 + avalon + product + specs . download <nl> + ret . steerRatio = 14 . 8 # Found at https : / / pressroom . toyota . com / releases / 2016 + avalon + product + specs . download <nl> tire_stiffness_factor = 0 . 7983 <nl> ret . mass = 3505 . * CV . LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid <nl> ret . lateralTuning . pid . kpV , ret . lateralTuning . pid . kiV = [ [ 0 . 17 ] , [ 0 . 03 ] ] <nl> mmm a / selfdrive / car / toyota / radar_interface . py <nl> ppp b / selfdrive / car / toyota / radar_interface . py <nl> def update ( self , can_strings ) : <nl> if self . trigger_msg not in self . updated_messages : <nl> return None <nl> <nl> - rr = self . _update ( self . updated_messages ) <nl> + rr = self . _update ( self . updated_messages ) <nl> self . updated_messages . clear ( ) <nl> <nl> return rr <nl> mmm a / selfdrive / car / toyota / values . py <nl> ppp b / selfdrive / car / toyota / values . py <nl> class CAR : <nl> b ' \ x01896630852100 \ x00 \ x00 \ x00 \ x00 ' , <nl> b ' \ x01896630859000 \ x00 \ x00 \ x00 \ x00 ' , <nl> ] , <nl> - ( Ecu . eps , 0x7a1 , None ) : [ <nl> + ( Ecu . eps , 0x7a1 , None ) : [ <nl> b ' 8965B45070 \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 ' , <nl> ] , <nl> ( Ecu . esp , 0x7b0 , None ) : [ <nl> class CAR : <nl> } , <nl> CAR . LEXUS_RX : { <nl> ( Ecu . engine , 0x700 , None ) : [ <nl> - b ' \ x01896630E41200 \ x00 \ x00 \ x00 \ x00 ' , <nl> - ] , <nl> + b ' \ x01896630E41200 \ x00 \ x00 \ x00 \ x00 ' , <nl> + ] , <nl> ( Ecu . esp , 0x7b0 , None ) : [ <nl> - b ' F152648473 \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 ' , <nl> - ] , <nl> + b ' F152648473 \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 ' , <nl> + ] , <nl> ( Ecu . dsu , 0x791 , None ) : [ <nl> - b ' 881514810500 \ x00 \ x00 \ x00 \ x00 ' , <nl> - ] , <nl> + b ' 881514810500 \ x00 \ x00 \ x00 \ x00 ' , <nl> + ] , <nl> ( Ecu . eps , 0x7a1 , None ) : [ <nl> - b ' 8965B0E012 \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 ' , <nl> - ] , <nl> + b ' 8965B0E012 \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 ' , <nl> + ] , <nl> ( Ecu . fwdRadar , 0x750 , 0xf ) : [ <nl> - b ' 8821F4701100 \ x00 \ x00 \ x00 \ x00 ' , <nl> - ] , <nl> + b ' 8821F4701100 \ x00 \ x00 \ x00 \ x00 ' , <nl> + ] , <nl> ( Ecu . fwdCamera , 0x750 , 0x6d ) : [ <nl> - b ' 8646F4802001 \ x00 \ x00 \ x00 \ x00 ' , <nl> - b ' 8646F4802100 \ x00 \ x00 \ x00 \ x00 ' , <nl> - ] , <nl> + b ' 8646F4802001 \ x00 \ x00 \ x00 \ x00 ' , <nl> + b ' 8646F4802100 \ x00 \ x00 \ x00 \ x00 ' , <nl> + ] , <nl> } , <nl> CAR . LEXUS_RXH : { <nl> ( Ecu . engine , 0x7e0 , None ) : [ <nl> mmm a / selfdrive / car / volkswagen / __init__ . py <nl> ppp b / selfdrive / car / volkswagen / __init__ . py <nl> @ @ - 1 + 0 , 0 @ @ <nl> - <nl> mmm a / selfdrive / car / volkswagen / carcontroller . py <nl> ppp b / selfdrive / car / volkswagen / carcontroller . py <nl> def update ( self , enabled , CS , frame , actuators , visual_alert , audible_alert , lef <nl> self . hcaEnabledFrameCount = 0 <nl> else : <nl> self . hcaEnabledFrameCount + = 1 <nl> - if self . hcaEnabledFrameCount > = 118 * ( 100 / P . HCA_STEP ) : # 118s <nl> + if self . hcaEnabledFrameCount > = 118 * ( 100 / P . 
HCA_STEP ) : # 118s <nl> # The Kansas I - 70 Crosswind Problem : if we truly do need to steer <nl> # in one direction for > 360 seconds , we have to disable HCA for a <nl> # frame while actively steering . Testing shows we can just set the <nl> mmm a / selfdrive / config . py <nl> ppp b / selfdrive / config . py <nl> class UIParams : <nl> car_front = 2 . 6924 * lidar_zoom <nl> car_back = 1 . 8796 * lidar_zoom <nl> car_color = 110 <nl> - <nl> mmm a / selfdrive / controls / controlsd . py <nl> ppp b / selfdrive / controls / controlsd . py <nl> def update_events ( self , CS ) : <nl> <nl> # Only allow engagement with brake pressed when stopped behind another stopped car <nl> if CS . brakePressed and self . sm [ ' plan ' ] . vTargetFuture > = STARTING_TARGET_SPEED \ <nl> - and not self . CP . radarOffCan and CS . vEgo < 0 . 3 : <nl> + and not self . CP . radarOffCan and CS . vEgo < 0 . 3 : <nl> self . events . add ( EventName . noTarget ) <nl> <nl> <nl> def state_control ( self , CS ) : <nl> <nl> # Send a " steering required alert " if saturation count has reached the limit <nl> if ( lac_log . saturated and not CS . steeringPressed ) or \ <nl> - ( self . saturated_count > STEER_ANGLE_SATURATION_TIMEOUT ) : <nl> + ( self . saturated_count > STEER_ANGLE_SATURATION_TIMEOUT ) : <nl> # Check if we deviated from the path <nl> left_deviation = actuators . steer > 0 and path_plan . dPoly [ 3 ] > 0 . 1 <nl> right_deviation = actuators . steer < 0 and path_plan . dPoly [ 3 ] < - 0 . 1 <nl> mmm a / selfdrive / controls / lib / driverview . py <nl> ppp b / selfdrive / controls / lib / driverview . py <nl> def terminate ( signalNumber , frame ) : <nl> is_rhd_checked = True <nl> <nl> if __name__ = = ' __main__ ' : <nl> - main ( ) <nl> \ No newline at end of file <nl> + main ( ) <nl> mmm a / selfdrive / controls / lib / latcontrol_indi . py <nl> ppp b / selfdrive / controls / lib / latcontrol_indi . py <nl> def update ( self , active , CS , CP , path_plan ) : <nl> indi_log . delta = float ( delta_u ) <nl> indi_log . output = float ( self . output_steer ) <nl> <nl> - check_saturation = ( CS . vEgo > 10 . ) and not CS . steeringRateLimited and not CS . steeringPressed <nl> + check_saturation = ( CS . vEgo > 10 . ) and not CS . steeringRateLimited and not CS . steeringPressed <nl> indi_log . saturated = self . _check_saturation ( self . output_steer , check_saturation , steers_max ) <nl> <nl> return float ( self . output_steer ) , float ( self . angle_steers_des ) , indi_log <nl> mmm a / selfdrive / controls / lib / latcontrol_lqr . py <nl> ppp b / selfdrive / controls / lib / latcontrol_lqr . py <nl> def update ( self , active , CS , CP , path_plan ) : <nl> i = self . i_lqr + self . ki * self . i_rate * error <nl> control = lqr_output + i <nl> <nl> - if ( ( error > = 0 and ( control < = steers_max or i < 0 . 0 ) ) or \ <nl> - ( error < = 0 and ( control > = - steers_max or i > 0 . 0 ) ) ) : <nl> + if ( error > = 0 and ( control < = steers_max or i < 0 . 0 ) ) or \ <nl> + ( error < = 0 and ( control > = - steers_max or i > 0 . 0 ) ) : <nl> self . i_lqr = i <nl> <nl> self . output_steer = lqr_output + self . i_lqr <nl> mmm a / selfdrive / controls / lib / vehicle_model . py <nl> ppp b / selfdrive / controls / lib / vehicle_model . py <nl> def yaw_rate ( self , sa , u ) : <nl> Yaw rate [ rad / s ] <nl> " " " <nl> return self . calc_curvature ( sa , u ) * u <nl> - <nl> mmm a / selfdrive / controls / tests / test_events . py <nl> ppp b / selfdrive / controls / tests / test_events . 
py <nl> def test_alert_text_length ( self ) : <nl> continue <nl> <nl> for i , txt in enumerate ( [ alert . alert_text_1 , alert . alert_text_2 ] ) : <nl> - if i > = len ( fonts [ alert . alert_size ] ) : break <nl> + if i > = len ( fonts [ alert . alert_size ] ) : <nl> + break <nl> <nl> font = fonts [ alert . alert_size ] [ i ] <nl> w , h = draw . textsize ( txt , font ) <nl> mmm a / selfdrive / controls / tests / test_monitoring . py <nl> ppp b / selfdrive / controls / tests / test_monitoring . py <nl> def run_DState_seq ( driver_state_msgs , driver_car_interaction , openpilot_status , <nl> for idx in range ( len ( driver_state_msgs ) ) : <nl> e = Events ( ) <nl> DS . get_pose ( driver_state_msgs [ idx ] , [ 0 , 0 , 0 ] , 0 , openpilot_status [ idx ] ) <nl> - # cal_rpy and car_speed don ' t matter here <nl> + # cal_rpy and car_speed don ' t matter here <nl> <nl> # evaluate events at 10Hz for tests <nl> DS . update ( e , driver_car_interaction [ idx ] , openpilot_status [ idx ] , car_standstill_status [ idx ] ) <nl> mmm a / selfdrive / crash . py <nl> ppp b / selfdrive / crash . py <nl> <nl> if os . getenv ( " NOLOG " ) or os . getenv ( " NOCRASH " ) or not ANDROID : <nl> def capture_exception ( * args , * * kwargs ) : <nl> pass <nl> + <nl> def bind_user ( * * kwargs ) : <nl> pass <nl> + <nl> def bind_extra ( * * kwargs ) : <nl> pass <nl> + <nl> def install ( ) : <nl> pass <nl> else : <nl> def bind_extra ( * * kwargs ) : <nl> def install ( ) : <nl> # installs a sys . excepthook <nl> __excepthook__ = sys . excepthook <nl> + <nl> def handle_exception ( * exc_info ) : <nl> if exc_info [ 0 ] not in ( KeyboardInterrupt , SystemExit ) : <nl> capture_exception ( ) <nl> mmm a / selfdrive / launcher . py <nl> ppp b / selfdrive / launcher . py <nl> <nl> import importlib <nl> - from setproctitle import setproctitle # pylint : disable = no - name - in - module <nl> + from setproctitle import setproctitle # pylint : disable = no - name - in - module <nl> <nl> import cereal . messaging as messaging <nl> import selfdrive . crash as crash <nl> mmm a / selfdrive / locationd / calibration_helpers . py <nl> ppp b / selfdrive / locationd / calibration_helpers . py <nl> class Calibration : <nl> UNCALIBRATED = 0 <nl> CALIBRATED = 1 <nl> INVALID = 2 <nl> - <nl> mmm a / selfdrive / locationd / test / ephemeris . py <nl> ppp b / selfdrive / locationd / test / ephemeris . py <nl> def __init__ ( self , svId , subframes ) : <nl> self . ionoAlpha = [ ] <nl> self . ionoBeta = [ ] <nl> self . ionoCoeffsValid = False <nl> - <nl> - <nl> mmm a / selfdrive / locationd / test / ublox . py <nl> ppp b / selfdrive / locationd / test / ublox . py <nl> <nl> <nl> <nl> import struct <nl> - import time , os <nl> + import os <nl> + import time <nl> <nl> # protocol constants <nl> PREAMBLE1 = 0xb5 <nl> def pack ( self , msg , msg_class = None , msg_id = None ) : <nl> fields = self . fields [ : ] <nl> for f in fields : <nl> ( fieldname , alen ) = ArrayParse ( f ) <nl> - if not fieldname in msg . _fields : <nl> + if fieldname not in msg . _fields : <nl> break <nl> if alen = = - 1 : <nl> f1 . append ( msg . _fields [ fieldname ] ) <nl> def format ( self , msg ) : <nl> ret = self . name + ' : ' <nl> for f in self . fields : <nl> ( fieldname , alen ) = ArrayParse ( f ) <nl> - if not fieldname in msg . _fields : <nl> + if fieldname not in msg . _fields : <nl> continue <nl> v = msg . _fields [ fieldname ] <nl> if isinstance ( v , list ) : <nl> def unpack ( self ) : <nl> if not self . 
valid ( ) : <nl> raise UBloxError ( ' INVALID MESSAGE ' ) <nl> type = self . msg_type ( ) <nl> - if not type in msg_types : <nl> + if type not in msg_types : <nl> raise UBloxError ( ' Unknown message % s length = % u ' % ( str ( type ) , len ( self . _buf ) ) ) <nl> msg_types [ type ] . unpack ( self ) <nl> return self . _fields , self . _recs <nl> def pack ( self ) : <nl> if not self . valid ( ) : <nl> raise UBloxError ( ' INVALID MESSAGE ' ) <nl> type = self . msg_type ( ) <nl> - if not type in msg_types : <nl> + if type not in msg_types : <nl> raise UBloxError ( ' Unknown message % s ' % str ( type ) ) <nl> msg_types [ type ] . pack ( self ) <nl> <nl> def name ( self ) : <nl> if not self . valid ( ) : <nl> raise UBloxError ( ' INVALID MESSAGE ' ) <nl> type = self . msg_type ( ) <nl> - if not type in msg_types : <nl> + if type not in msg_types : <nl> raise UBloxError ( ' Unknown message % s length = % u ' % ( str ( type ) , len ( self . _buf ) ) ) <nl> return msg_types [ type ] . name <nl> <nl> mmm a / selfdrive / locationd / test / ubloxd . py <nl> ppp b / selfdrive / locationd / test / ubloxd . py <nl> def configure_ublox ( dev ) : <nl> <nl> def int_to_bool_list ( num ) : <nl> # for parsing bool bytes <nl> - return [ bool ( num & ( 1 < < n ) ) for n in range ( 8 ) ] <nl> + return [ bool ( num & ( 1 < < n ) ) for n in range ( 8 ) ] <nl> <nl> <nl> def gen_ephemeris ( ephem_data ) : <nl> def gen_solution ( msg ) : <nl> msg_data [ ' day ' ] , <nl> msg_data [ ' hour ' ] , <nl> msg_data [ ' min ' ] , <nl> - msg_data [ ' sec ' ] ) <nl> - - datetime . datetime ( 1970 , 1 , 1 ) ) . total_seconds ( ) ) * 1e + 03 <nl> - + msg_data [ ' nano ' ] * 1e - 06 ) <nl> + msg_data [ ' sec ' ] ) - <nl> + datetime . datetime ( 1970 , 1 , 1 ) ) . total_seconds ( ) ) * 1e + 03 + <nl> + msg_data [ ' nano ' ] * 1e - 06 ) <nl> gps_fix = { ' bearing ' : msg_data [ ' headMot ' ] * 1e - 05 , # heading of motion in degrees <nl> ' altitude ' : msg_data [ ' height ' ] * 1e - 03 , # altitude above ellipsoid <nl> ' latitude ' : msg_data [ ' lat ' ] * 1e - 07 , # latitude in degrees <nl> def gen_nav_data ( msg , nav_frame_buffer ) : <nl> # parse GPS ephem <nl> gnssId = msg_meta_data [ ' gnssId ' ] <nl> if gnssId = = 0 : <nl> - svId = msg_meta_data [ ' svid ' ] <nl> - subframeId = GET_FIELD_U ( measurements [ 1 ] [ ' dwrd ' ] , 3 , 8 ) <nl> + svId = msg_meta_data [ ' svid ' ] <nl> + subframeId = GET_FIELD_U ( measurements [ 1 ] [ ' dwrd ' ] , 3 , 8 ) <nl> words = [ ] <nl> for m in measurements : <nl> words . append ( m [ ' dwrd ' ] ) <nl> def init_reader ( ) : <nl> return dev <nl> except serial . serialutil . SerialException as e : <nl> print ( e ) <nl> - port_counter = ( port_counter + 1 ) % len ( ports ) <nl> + port_counter = ( port_counter + 1 ) % len ( ports ) <nl> time . sleep ( 2 ) <nl> <nl> def handle_msg ( dev , msg , nav_frame_buffer ) : <nl> mmm a / selfdrive / loggerd / tools / mark_unuploaded . py <nl> ppp b / selfdrive / loggerd / tools / mark_unuploaded . py <nl> <nl> for fn in sys . argv [ 1 : ] : <nl> print ( " unmarking % s " % fn ) <nl> removexattr ( fn , UPLOAD_ATTR_NAME ) <nl> - <nl> mmm a / selfdrive / loggerd / uploader . py <nl> ppp b / selfdrive / loggerd / uploader . py <nl> def do_upload ( self , key , fn ) : <nl> if url_resp . status_code = = 412 : <nl> self . last_resp = url_resp <nl> return <nl> - <nl> + <nl> url_resp_json = json . loads ( url_resp . 
text ) <nl> url = url_resp_json [ ' url ' ] <nl> headers = url_resp_json [ ' headers ' ] <nl> def do_upload ( self , key , fn ) : <nl> <nl> if fake_upload : <nl> cloudlog . info ( " * * * WARNING , THIS IS A FAKE UPLOAD TO % s * * * " % url ) <nl> + <nl> class FakeResponse ( ) : <nl> def __init__ ( self ) : <nl> self . status_code = 200 <nl> + <nl> self . last_resp = FakeResponse ( ) <nl> else : <nl> with open ( fn , " rb " ) as f : <nl> mmm a / selfdrive / test / longitudinal_maneuvers / maneuverplots . py <nl> ppp b / selfdrive / test / longitudinal_maneuvers / maneuverplots . py <nl> class ManeuverPlot ( ) : <nl> def __init__ ( self , title = None ) : <nl> self . time_array = [ ] <nl> <nl> - self . gas_array = [ ] <nl> + self . gas_array = [ ] <nl> self . brake_array = [ ] <nl> self . steer_torque_array = [ ] <nl> <nl> def __init__ ( self , title = None ) : <nl> self . fcw_array = [ ] <nl> <nl> self . title = title <nl> - <nl> - def add_data ( self , time , gas , brake , steer_torque , distance , speed , <nl> - acceleration , up_accel_cmd , ui_accel_cmd , uf_accel_cmd , d_rel , v_rel , <nl> + <nl> + def add_data ( self , time , gas , brake , steer_torque , distance , speed , <nl> + acceleration , up_accel_cmd , ui_accel_cmd , uf_accel_cmd , d_rel , v_rel , <nl> v_lead , v_target_lead , pid_speed , cruise_speed , jerk_factor , a_target , fcw ) : <nl> self . time_array . append ( time ) <nl> self . gas_array . append ( gas ) <nl> def write_plot ( self , path , maneuver_name ) : <nl> if not os . path . exists ( path + " / " + maneuver_name ) : <nl> os . makedirs ( path + " / " + maneuver_name ) <nl> plt_num = 0 <nl> - <nl> + <nl> # speed chart = = = = = = = = = = = = = = = = = = = <nl> plt_num + = 1 <nl> plt . figure ( plt_num ) <nl> def write_plot ( self , path , maneuver_name ) : <nl> pylab . savefig ( " / " . join ( [ path , maneuver_name , ' distance . svg ' ] ) , dpi = 1000 ) <nl> <nl> plt . close ( " all " ) <nl> - <nl> mmm a / selfdrive / test / longitudinal_maneuvers / plant . py <nl> ppp b / selfdrive / test / longitudinal_maneuvers / plant . py <nl> <nl> def can_cksum ( mm ) : <nl> s = 0 <nl> for c in mm : <nl> - s + = ( c > > 4 ) <nl> + s + = ( c > > 4 ) <nl> s + = c & 0xF <nl> s = 8 - s <nl> s % = 0x10 <nl> def car_plant ( pos , speed , grade , gas , brake ) : <nl> mass = 1700 <nl> aero_cd = 0 . 3 <nl> force_peak = mass * 3 . <nl> - force_brake_peak = - mass * 10 . # 1g <nl> + force_brake_peak = - mass * 10 . # 1g <nl> power_peak = 100000 # 100kW <nl> speed_base = power_peak / force_peak <nl> rolling_res = 0 . 01 <nl> def step ( self , v_lead = 0 . 0 , cruise_buttons = None , grade = 0 . 0 , publish_model = True ) <nl> vls = vls_tuple ( <nl> self . speed_sensor ( speed ) , <nl> self . speed_sensor ( speed ) , self . speed_sensor ( speed ) , self . speed_sensor ( speed ) , self . speed_sensor ( speed ) , <nl> - self . angle_steer , self . angle_steer_rate , 0 , 0 , # Steer torque sensor <nl> + self . angle_steer , self . angle_steer_rate , 0 , 0 , # Steer torque sensor <nl> 0 , 0 , # Blinkers <nl> self . gear_choice , <nl> speed ! = 0 , <nl> self . brake_error , self . brake_error , <nl> not self . seatbelt , self . seatbelt , # Seatbelt <nl> - self . brake_pressed , 0 . , # Brake pressed , Brake switch <nl> + self . brake_pressed , 0 . , # Brake pressed , Brake switch <nl> cruise_buttons , <nl> self . esp_disabled , <nl> 0 , # HUD lead <nl> def step ( self , v_lead = 0 . 0 , cruise_buttons = None , grade = 0 . 0 , publish_model = True ) <nl> # TODO : use the DBC <nl> if self . 
frame % 5 = = 0 : <nl> radar_state_msg = b ' \ x79 \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 \ x00 ' <nl> - radar_msg = to_3_byte ( d_rel * 16 . 0 ) + \ <nl> - to_3_byte ( int ( lateral_pos_rel * 16 . 0 ) & 0x3ff ) + \ <nl> - to_3s_byte ( int ( v_rel * 32 . 0 ) ) + \ <nl> + radar_msg = to_3_byte ( d_rel * 16 . 0 ) + \ <nl> + to_3_byte ( int ( lateral_pos_rel * 16 . 0 ) & 0x3ff ) + \ <nl> + to_3s_byte ( int ( v_rel * 32 . 0 ) ) + \ <nl> b " 0f00000 " <nl> <nl> radar_msg = binascii . unhexlify ( radar_msg ) <nl> mmm a / selfdrive / test / process_replay / compare_logs . py <nl> ppp b / selfdrive / test / process_replay / compare_logs . py <nl> <nl> <nl> import dictdiffer <nl> if " CI " in os . environ : <nl> - tqdm = lambda x : x <nl> + def tqdm ( x ) : <nl> + return x <nl> else : <nl> from tqdm import tqdm # type : ignore <nl> <nl> def remove_ignored_fields ( msg , ignore ) : <nl> for k in keys [ : - 1 ] : <nl> try : <nl> attr = getattr ( msg , k ) <nl> - except : <nl> + except AttributeError : <nl> break <nl> else : <nl> v = getattr ( attr , keys [ - 1 ] ) <nl> def remove_ignored_fields ( msg , ignore ) : <nl> return msg . as_reader ( ) <nl> <nl> def compare_logs ( log1 , log2 , ignore_fields = [ ] , ignore_msgs = [ ] ) : <nl> - filter_msgs = lambda m : m . which ( ) not in ignore_msgs <nl> - log1 , log2 = [ list ( filter ( filter_msgs , log ) ) for log in ( log1 , log2 ) ] <nl> + log1 , log2 = [ list ( filter ( lambda m : m . which ( ) not in ignore_msgs , log ) ) for log in ( log1 , log2 ) ] <nl> assert len ( log1 ) = = len ( log2 ) , " logs are not same length : " + str ( len ( log1 ) ) + " VS " + str ( len ( log2 ) ) <nl> <nl> diff = [ ] <nl> mmm a / selfdrive / test / process_replay / process_replay . py <nl> ppp b / selfdrive / test / process_replay / process_replay . py <nl> <nl> # ! / usr / bin / env python3 <nl> + import capnp <nl> import os <nl> import sys <nl> import threading <nl> import importlib <nl> <nl> if " CI " in os . environ : <nl> - tqdm = lambda x : x <nl> + def tqdm ( x ) : <nl> + return x <nl> else : <nl> from tqdm import tqdm # type : ignore <nl> <nl> def __init__ ( self , services ) : <nl> for s in services : <nl> try : <nl> data = messaging . new_message ( s ) <nl> - except : <nl> + except capnp . lib . capnp . KjException : <nl> data = messaging . new_message ( s , 0 ) <nl> self . data [ s ] = data . as_reader ( ) <nl> self . sock [ s ] = DumbSocket ( ) <nl> mmm a / selfdrive / test / process_replay / test_processes . py <nl> ppp b / selfdrive / test / process_replay / test_processes . py <nl> def format_diff ( results , ref_commit ) : <nl> process_replay_dir = os . path . dirname ( os . path . abspath ( __file__ ) ) <nl> try : <nl> ref_commit = open ( os . path . join ( process_replay_dir , " ref_commit " ) ) . read ( ) . strip ( ) <nl> - except : <nl> + except FileNotFoundError : <nl> print ( " couldn ' t find reference commit " ) <nl> sys . exit ( 1 ) <nl> <nl> def format_diff ( results , ref_commit ) : <nl> results : Any = { } <nl> for car_brand , segment in segments : <nl> if ( cars_whitelisted and car_brand . upper ( ) not in args . whitelist_cars ) or \ <nl> - ( not cars_whitelisted and car_brand . upper ( ) in args . blacklist_cars ) : <nl> + ( not cars_whitelisted and car_brand . upper ( ) in args . blacklist_cars ) : <nl> continue <nl> <nl> print ( " * * * * * testing route segment % s * * * * * \ n " % segment ) <nl> def format_diff ( results , ref_commit ) : <nl> <nl> for cfg in CONFIGS : <nl> if ( procs_whitelisted and cfg . proc_name not in args . 
whitelist_procs ) or \ <nl> - ( not procs_whitelisted and cfg . proc_name in args . blacklist_procs ) : <nl> + ( not procs_whitelisted and cfg . proc_name in args . blacklist_procs ) : <nl> continue <nl> <nl> cmp_log_fn = os . path . join ( process_replay_dir , " % s_ % s_ % s . bz2 " % ( segment , cfg . proc_name , ref_commit ) ) <nl> mmm a / selfdrive / test / test_car_models . py <nl> ppp b / selfdrive / test / test_car_models . py <nl> def get_route_log ( route_name ) : <nl> ' enableCamera ' : True , <nl> ' enableDsu ' : False , <nl> } , <nl> - " 7e34a988419b5307 | 2019 - 12 - 18 - - 19 - 13 - 30 " : { <nl> + " 7e34a988419b5307 | 2019 - 12 - 18 - - 19 - 13 - 30 " : { <nl> ' carFingerprint ' : TOYOTA . RAV4H_TSS2 , <nl> ' enableCamera ' : True , <nl> ' fingerprintSource ' : ' fixed ' <nl> def get_route_log ( route_name ) : <nl> ' enableCamera ' : True , <nl> ' enableDsu ' : False , <nl> } , <nl> - " 886fcd8408d570e9 | 2020 - 01 - 29 - - 05 - 11 - 22 " : { <nl> + " 886fcd8408d570e9 | 2020 - 01 - 29 - - 05 - 11 - 22 " : { <nl> ' carFingerprint ' : TOYOTA . LEXUS_RX , <nl> ' enableCamera ' : True , <nl> ' enableDsu ' : True , <nl> } , <nl> - " 886fcd8408d570e9 | 2020 - 01 - 29 - - 02 - 18 - 55 " : { <nl> + " 886fcd8408d570e9 | 2020 - 01 - 29 - - 02 - 18 - 55 " : { <nl> ' carFingerprint ' : TOYOTA . LEXUS_RX , <nl> ' enableCamera ' : True , <nl> ' enableDsu ' : False , <nl> def get_route_log ( route_name ) : <nl> ' enableCamera ' : True , <nl> ' enableDsu ' : True , <nl> } , <nl> - " 01b22eb2ed121565 | 2020 - 02 - 02 - - 11 - 25 - 51 " : { <nl> + " 01b22eb2ed121565 | 2020 - 02 - 02 - - 11 - 25 - 51 " : { <nl> ' carFingerprint ' : TOYOTA . LEXUS_RX_TSS2 , <nl> ' enableCamera ' : True , <nl> ' fingerprintSource ' : ' fixed ' , <nl> } , <nl> - " b74758c690a49668 | 2020 - 05 - 20 - - 15 - 58 - 57 " : { <nl> + " b74758c690a49668 | 2020 - 05 - 20 - - 15 - 58 - 57 " : { <nl> ' carFingerprint ' : TOYOTA . LEXUS_RXH_TSS2 , <nl> ' enableCamera ' : True , <nl> ' fingerprintSource ' : ' fixed ' , <nl> mmm a / selfdrive / test / test_leeco_alt_fan . py <nl> ppp b / selfdrive / test / test_leeco_alt_fan . py <nl> def setup_leon_fan ( ) : <nl> bus . write_i2c_block_data ( 0x67 , 0xa , [ 0 ] ) <nl> else : <nl> bus . write_i2c_block_data ( 0x67 , 0xa , [ 0x20 ] ) <nl> - bus . write_i2c_block_data ( 0x67 , 0x8 , [ ( i - 1 ) < < 6 ] ) <nl> + bus . write_i2c_block_data ( 0x67 , 0x8 , [ ( i - 1 ) < < 6 ] ) <nl> time . sleep ( 1 ) <nl> <nl> bus . close ( ) <nl> mmm a / selfdrive / thermald / thermald . py <nl> ppp b / selfdrive / thermald / thermald . py <nl> def set_eon_fan ( val ) : <nl> else : <nl> # bus . write_i2c_block_data ( 0x67 , 0x45 , [ 0 ] ) <nl> bus . write_i2c_block_data ( 0x67 , 0xa , [ 0x20 ] ) <nl> - bus . write_i2c_block_data ( 0x67 , 0x8 , [ ( val - 1 ) < < 6 ] ) <nl> + bus . write_i2c_block_data ( 0x67 , 0x8 , [ ( val - 1 ) < < 6 ] ) <nl> else : <nl> bus . write_byte_data ( 0x21 , 0x04 , 0x2 ) <nl> bus . write_byte_data ( 0x21 , 0x03 , ( val * 2 ) + 1 ) <nl> mmm a / tools / carcontrols / joystick_test . py <nl> ppp b / tools / carcontrols / joystick_test . py <nl> def unindent ( self ) : <nl> textPrint = TextPrint ( ) <nl> <nl> # mmmmmm - - Main Program Loop mmmmmmmmm - - <nl> - while done = = False : <nl> + while not done : <nl> # EVENT PROCESSING STEP <nl> for event in pygame . event . get ( ) : # User did something <nl> if event . type = = pygame . QUIT : # If user clicked close <nl> def unindent ( self ) : <nl> # Close the window and quit . 
<nl> # If you forget this line , the program will ' hang ' <nl> # on exit if running from IDLE . <nl> - pygame . quit ( ) <nl> + pygame . quit ( ) <nl> mmm a / tools / lib / route . py <nl> ppp b / tools / lib / route . py <nl> def __init__ ( self , name , log_path , camera_path ) : <nl> self . camera_path = camera_path <nl> <nl> @ property <nl> - def name ( self ) : return str ( self . _name ) <nl> + def name ( self ) : <nl> + return str ( self . _name ) <nl> <nl> @ property <nl> - def canonical_name ( self ) : return self . _name <nl> + def canonical_name ( self ) : <nl> + return self . _name <nl> <nl> class RouteSegmentName ( object ) : <nl> def __init__ ( self , name_str ) : <nl> def __init__ ( self , name_str ) : <nl> self . _num = int ( num_str ) <nl> <nl> @ property <nl> - def segment_num ( self ) : return self . _num <nl> + def segment_num ( self ) : <nl> + return self . _num <nl> <nl> - def __str__ ( self ) : return self . _segment_name_str <nl> + def __str__ ( self ) : <nl> + return self . _segment_name_str <nl> mmm a / tools / lib / route_framereader . py <nl> ppp b / tools / lib / route_framereader . py <nl> def close ( self ) : <nl> for fr in frs : <nl> fr . close ( ) <nl> <nl> - def __enter__ ( self ) : return self <nl> - def __exit__ ( self , type , value , traceback ) : self . close ( ) <nl> + def __enter__ ( self ) : <nl> + return self <nl> + <nl> + def __exit__ ( self , type , value , traceback ) : <nl> + self . close ( ) <nl> mmm a / tools / replay / lib / ui_helpers . py <nl> ppp b / tools / replay / lib / ui_helpers . py <nl> def draw_path ( y , x , color , img , calibration , top_down , lid_color = None ) : <nl> uv_model > 0 , axis = 1 ) , uv_model [ : , 0 ] < img . shape [ 1 ] - 1 , uv_model [ : , 1 ] < <nl> img . shape [ 0 ] - 1 ) ) ] <nl> <nl> - for i , j in ( ( - 1 , 0 ) , ( 0 , - 1 ) , ( 0 , 0 ) , ( 0 , 1 ) , ( 1 , 0 ) ) : <nl> + for i , j in ( ( - 1 , 0 ) , ( 0 , - 1 ) , ( 0 , 0 ) , ( 0 , 1 ) , ( 1 , 0 ) ) : <nl> img [ uv_model_dots [ : , 1 ] + i , uv_model_dots [ : , 0 ] + j ] = color <nl> <nl> # draw lidar path point on lidar <nl> def draw_path ( y , x , color , img , calibration , top_down , lid_color = None ) : <nl> def draw_steer_path ( speed_ms , curvature , color , img , <nl> calibration , top_down , VM , lid_color = None ) : <nl> path_x = np . arange ( 101 . ) <nl> - path_y = np . multiply ( path_x , np . tan ( np . arcsin ( np . clip ( path_x * curvature , - 0 . 999 , 0 . 999 ) ) / 2 . ) ) <nl> + path_y = np . multiply ( path_x , np . tan ( np . arcsin ( np . clip ( path_x * curvature , - 0 . 999 , 0 . 999 ) ) / 2 . ) ) <nl> <nl> draw_path ( path_y , path_x , color , img , calibration , top_down , lid_color ) <nl> <nl> def draw_lead_car ( closest , top_down ) : <nl> - if closest ! = None : <nl> + if closest is not None : <nl> closest_y = int ( round ( UP . lidar_car_y - closest * UP . lidar_zoom ) ) <nl> if closest_y > 0 : <nl> top_down [ 1 ] [ int ( round ( UP . lidar_car_x - METER_WIDTH * 2 ) ) : int ( <nl> def init_plots ( arr , name_to_arr_idx , plot_xlims , plot_ylims , plot_names , plot_co <nl> " p " : ( 0 , 1 , 1 ) , <nl> " m " : ( 1 , 0 , 1 ) } <nl> <nl> - if bigplots = = True : <nl> + if bigplots : <nl> fig = plt . figure ( figsize = ( 6 . 4 , 7 . 0 ) ) <nl> - elif bigplots = = False : <nl> - fig = plt . figure ( ) <nl> else : <nl> - fig = plt . figure ( figsize = bigplots ) <nl> + fig = plt . figure ( ) <nl> <nl> fig . set_facecolor ( ( 0 . 2 , 0 . 2 , 0 . 
2 ) ) <nl> <nl> def init_plots ( arr , name_to_arr_idx , plot_xlims , plot_ylims , plot_names , plot_co <nl> ax . patch . set_facecolor ( ( 0 . 4 , 0 . 4 , 0 . 4 ) ) <nl> axs . append ( ax ) <nl> <nl> - plots = [ ] ; idxs = [ ] ; plot_select = [ ] <nl> + plots , idxs , plot_select = [ ] , [ ] , [ ] <nl> for i , pl_list in enumerate ( plot_names ) : <nl> for j , item in enumerate ( pl_list ) : <nl> plot , = axs [ i ] . plot ( arr [ : , name_to_arr_idx [ item ] ] , <nl> mmm a / tools / replay / unlogger . py <nl> ppp b / tools / replay / unlogger . py <nl> def get_arg_parser ( ) : <nl> parser . add_argument ( " route_name " , type = ( lambda x : x . replace ( " # " , " | " ) ) , nargs = " ? " , <nl> help = " The route whose messages will be published . " ) <nl> parser . add_argument ( " data_dir " , nargs = ' ? ' , default = os . getenv ( ' UNLOGGER_DATA_DIR ' ) , <nl> - help = " Path to directory in which log and camera files are located . " ) <nl> + help = " Path to directory in which log and camera files are located . " ) <nl> <nl> parser . add_argument ( " - - no - loop " , action = " store_true " , help = " Stop at the end of the replay . " ) <nl> <nl> - key_value_pair = lambda x : x . split ( " = " ) <nl> + def key_value_pair ( x ) : <nl> + return x . split ( " = " ) <nl> + <nl> parser . add_argument ( " address_mapping " , nargs = " * " , type = key_value_pair , <nl> help = " Pairs < service > = < zmq_addr > to publish < service > on < zmq_addr > . " ) <nl> <nl> - comma_list = lambda x : x . split ( " , " ) <nl> + def comma_list ( x ) : <nl> + return x . split ( " , " ) <nl> + <nl> to_mock_group = parser . add_mutually_exclusive_group ( ) <nl> to_mock_group . add_argument ( " - - min " , action = " store_true " , default = os . getenv ( " MIN " ) ) <nl> to_mock_group . add_argument ( " - - enabled " , default = os . getenv ( " ENABLED " ) , type = comma_list ) <nl> mmm a / tools / sim / bridge . py <nl> ppp b / tools / sim / bridge . py <nl> def destroy ( ) : <nl> speed = math . sqrt ( vel . x * * 2 + vel . y * * 2 + vel . z * * 2 ) * 3 . 6 <nl> can_function ( pm , speed , fake_wheel . angle , rk . frame , cruise_button = cruise_button , is_engaged = is_openpilot_engaged ) <nl> <nl> - if rk . frame % 1 = = 0 : # 20Hz ? <nl> + if rk . frame % 1 = = 0 : # 20Hz ? <nl> throttle_op , brake_op , steer_torque_op = sendcan_function ( sendcan ) <nl> # print ( " = = = torq , " , steer_torque_op , " = = = " ) <nl> if is_openpilot_engaged : <nl> mmm a / tools / sim / lib / can . py <nl> ppp b / tools / sim / lib / can . py <nl> def can_function ( pm , speed , angle , idx , cruise_button = 0 , is_engaged = False ) : <nl> <nl> msg . append ( packer . make_can_msg ( " SCM_BUTTONS " , 0 , { " CRUISE_BUTTONS " : cruise_button } , idx ) ) <nl> <nl> - values = { " COUNTER_PEDAL " : idx & 0xF } <nl> - checksum = crc8_pedal ( packer . make_can_msg ( " GAS_SENSOR " , 0 , { " COUNTER_PEDAL " : idx & 0xF } , - 1 ) [ 2 ] [ : - 1 ] ) <nl> + values = { " COUNTER_PEDAL " : idx & 0xF } <nl> + checksum = crc8_pedal ( packer . make_can_msg ( " GAS_SENSOR " , 0 , { " COUNTER_PEDAL " : idx & 0xF } , - 1 ) [ 2 ] [ : - 1 ] ) <nl> values [ " CHECKSUM_PEDAL " ] = checksum <nl> msg . append ( packer . make_can_msg ( " GAS_SENSOR " , 0 , values , - 1 ) ) <nl> <nl> def can_function ( pm , speed , angle , idx , cruise_button = 0 , is_engaged = False ) : <nl> msg . append ( packer . make_can_msg ( " BRAKE_COMMAND " , 2 , { } , idx ) ) <nl> <nl> # radar <nl> - if idx % 5 = = 0 : <nl> + if idx % 5 = = 0 : <nl> msg . 
append ( rpacker . make_can_msg ( " RADAR_DIAGNOSTIC " , 1 , { " RADAR_STATE " : 0x79 } , - 1 ) ) <nl> for i in range ( 16 ) : <nl> msg . append ( rpacker . make_can_msg ( " TRACK_ % d " % i , 1 , { " LONG_DIST " : 255 . 5 } , - 1 ) ) <nl> mmm a / tools / sim / lib / manual_ctrl . py <nl> ppp b / tools / sim / lib / manual_ctrl . py <nl> <nl> # ! / usr / bin / env python3 <nl> # set up wheel <nl> - import os , struct , array <nl> + import array <nl> + import os <nl> + import struct <nl> from fcntl import ioctl <nl> <nl> # Iterate over the joystick devices . <nl> mmm a / tools / webcam / accept_terms . py <nl> ppp b / tools / webcam / accept_terms . py <nl> <nl> params = Params ( ) <nl> params . put ( " HasAcceptedTerms " , str ( terms_version , ' utf - 8 ' ) ) <nl> params . put ( " CompletedTrainingVersion " , str ( training_version , ' utf - 8 ' ) ) <nl> - print ( " Terms Accepted ! " ) <nl> \ No newline at end of file <nl> + print ( " Terms Accepted ! " ) <nl> | Enable more flake8 checks ( ) | commaai/openpilot | d9bf9f0a4036f55411f6dfbb438990a5eb7f4930 | 2020-05-31T03:14:58Z |
mmm a / tensorflow / compiler / xla / BUILD <nl> ppp b / tensorflow / compiler / xla / BUILD <nl> xla_proto_library ( <nl> ] , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " execution_options_util " , <nl> + srcs = [ <nl> + " execution_options_util . cc " , <nl> + ] , <nl> + hdrs = [ <nl> + " execution_options_util . h " , <nl> + ] , <nl> + visibility = [ " : friends " ] , <nl> + deps = [ <nl> + " : xla_proto " , <nl> + " / / tensorflow / compiler / xla / legacy_flags : debug_options_flags " , <nl> + ] , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " test " , <nl> testonly = 1 , <nl> mmm a / tensorflow / compiler / xla / client / lib / BUILD <nl> ppp b / tensorflow / compiler / xla / client / lib / BUILD <nl> cc_library ( <nl> srcs = [ " testing . cc " ] , <nl> hdrs = [ " testing . h " ] , <nl> deps = [ <nl> + " / / tensorflow / compiler / xla : execution_options_util " , <nl> " / / tensorflow / compiler / xla : literal_util " , <nl> " / / tensorflow / compiler / xla : shape_util " , <nl> " / / tensorflow / compiler / xla : statusor " , <nl> mmm a / tensorflow / compiler / xla / client / lib / testing . cc <nl> ppp b / tensorflow / compiler / xla / client / lib / testing . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / compiler / xla / client / computation . h " <nl> # include " tensorflow / compiler / xla / client / computation_builder . h " <nl> + # include " tensorflow / compiler / xla / execution_options_util . h " <nl> # include " tensorflow / compiler / xla / literal_util . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> # include " tensorflow / compiler / xla / statusor . h " <nl> std : : unique_ptr < GlobalData > MakeFakeDataOrDie ( const Shape & shape , <nl> AsInt64Slice ( shape . dimensions ( ) ) ) ; <nl> Computation computation = b . Build ( ) . ConsumeValueOrDie ( ) ; <nl> <nl> - ExecutionOptions execution_options ; <nl> + auto execution_options = CreateDefaultExecutionOptions ( ) ; <nl> * execution_options . mutable_shape_with_output_layout ( ) = shape ; <nl> return client - > Execute ( computation , / * arguments = * / { } , & execution_options ) <nl> . ConsumeValueOrDie ( ) ; <nl> new file mode 100644 <nl> index 0000000000000 . . e83ff7cddd675 <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / xla / execution_options_util . cc <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + # include " tensorflow / compiler / xla / execution_options_util . h " <nl> + # include " tensorflow / compiler / xla / legacy_flags / debug_options_flags . 
h " <nl> + <nl> + namespace xla { <nl> + <nl> + ExecutionOptions CreateDefaultExecutionOptions ( ) { <nl> + ExecutionOptions execution_options ; <nl> + * ( execution_options . mutable_debug_options ( ) ) = <nl> + legacy_flags : : GetDebugOptionsFromFlags ( ) ; <nl> + return execution_options ; <nl> + } <nl> + <nl> + } / / namespace xla <nl> new file mode 100644 <nl> index 0000000000000 . . 562da78e837ea <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / xla / execution_options_util . h <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef THIRD_PARTY_TENSORFLOW_COMPILER_XLA_EXECUTION_OPTIONS_UTIL_H_ <nl> + # define THIRD_PARTY_TENSORFLOW_COMPILER_XLA_EXECUTION_OPTIONS_UTIL_H_ <nl> + <nl> + # include " tensorflow / compiler / xla / xla . pb . h " <nl> + <nl> + namespace xla { <nl> + <nl> + / / Create a default ExecutionOptions proto ; this proto has its debug options <nl> + / / popupated to the default values taken from flags . <nl> + ExecutionOptions CreateDefaultExecutionOptions ( ) ; <nl> + <nl> + } / / namespace xla <nl> + <nl> + # endif / / THIRD_PARTY_TENSORFLOW_COMPILER_XLA_EXECUTION_OPTIONS_UTIL_H_ <nl> mmm a / tensorflow / compiler / xla / service / gpu / llvm_gpu_backend / gpu_backend_lib . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / llvm_gpu_backend / gpu_backend_lib . cc <nl> StatusOr < string > CompileModuleToPtx ( llvm : : Module * module , <nl> int32 opt_level = <nl> hlo_module_config . debug_options ( ) . xla_backend_optimization_level ( ) ; <nl> <nl> + CHECK_GE ( opt_level , 2 ) <nl> + < < " The XLA GPU backend doesn ' t support unoptimized code generation " ; <nl> + <nl> AddOptimizationPasses ( opt_level , <nl> / * size_level = * / 0 , target_machine . 
get ( ) , & module_passes , <nl> & function_passes ) ; <nl> mmm a / tensorflow / compiler / xla / tests / BUILD <nl> ppp b / tensorflow / compiler / xla / tests / BUILD <nl> cc_library ( <nl> " / / tensorflow / compiler / xla : array2d " , <nl> " / / tensorflow / compiler / xla : array3d " , <nl> " / / tensorflow / compiler / xla : array4d " , <nl> + " / / tensorflow / compiler / xla : execution_options_util " , <nl> " / / tensorflow / compiler / xla : literal_util " , <nl> " / / tensorflow / compiler / xla : shape_util " , <nl> " / / tensorflow / compiler / xla : status_macros " , <nl> cc_library ( <nl> " / / tensorflow / compiler / xla / client : computation_builder " , <nl> " / / tensorflow / compiler / xla / client : global_data " , <nl> " / / tensorflow / compiler / xla / client : local_client " , <nl> - " / / tensorflow / compiler / xla / legacy_flags : debug_options_flags " , <nl> " / / tensorflow / compiler / xla / tests : literal_test_util " , <nl> " / / tensorflow / compiler / xla / tests : test_utils " , <nl> " / / tensorflow / core : lib " , <nl> mmm a / tensorflow / compiler / xla / tests / client_library_test_base . cc <nl> ppp b / tensorflow / compiler / xla / tests / client_library_test_base . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / client / client_library . h " <nl> # include " tensorflow / compiler / xla / client / computation . h " <nl> # include " tensorflow / compiler / xla / client / local_client . h " <nl> - # include " tensorflow / compiler / xla / legacy_flags / debug_options_flags . h " <nl> + # include " tensorflow / compiler / xla / execution_options_util . h " <nl> # include " tensorflow / compiler / xla / literal_util . h " <nl> # include " tensorflow / compiler / xla / ptr_util . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> Client * GetOrCreateLocalClientOrDie ( se : : Platform * platform ) { <nl> } / / namespace <nl> <nl> ClientLibraryTestBase : : ClientLibraryTestBase ( se : : Platform * platform ) <nl> - : client_ ( GetOrCreateLocalClientOrDie ( platform ) ) { <nl> - * ( execution_options_ . mutable_debug_options ( ) ) = <nl> - legacy_flags : : GetDebugOptionsFromFlags ( ) ; <nl> - <nl> + : client_ ( GetOrCreateLocalClientOrDie ( platform ) ) , <nl> + execution_options_ ( CreateDefaultExecutionOptions ( ) ) { <nl> / / Disabling constant_folding so that tests ( usually written using Constants ) <nl> / / will exercise the intended code paths , instead of being constant folded . <nl> / / <nl> StatusOr < std : : unique_ptr < GlobalData > > ClientLibraryTestBase : : Execute ( <nl> } <nl> <nl> StatusOr < std : : unique_ptr < Literal > > ClientLibraryTestBase : : ExecuteAndTransfer ( <nl> - ComputationBuilder * builder , <nl> + const Computation & computation , <nl> tensorflow : : gtl : : ArraySlice < GlobalData * > arguments , <nl> const Shape * shape_with_output_layout ) { <nl> - / / Build the computation , as a convenience . <nl> - TF_ASSIGN_OR_RETURN ( auto computation , builder - > Build ( ) ) ; <nl> - <nl> ExecutionOptions execution_options = execution_options_ ; <nl> if ( shape_with_output_layout ! = nullptr ) { <nl> * execution_options . 
mutable_shape_with_output_layout ( ) = <nl> StatusOr < std : : unique_ptr < Literal > > ClientLibraryTestBase : : ExecuteAndTransfer ( <nl> & execution_options ) ; <nl> } <nl> <nl> + StatusOr < std : : unique_ptr < Literal > > ClientLibraryTestBase : : ExecuteAndTransfer ( <nl> + ComputationBuilder * builder , <nl> + tensorflow : : gtl : : ArraySlice < GlobalData * > arguments , <nl> + const Shape * shape_with_output_layout ) { <nl> + / / Build the computation , as a convenience . <nl> + TF_ASSIGN_OR_RETURN ( auto computation , builder - > Build ( ) ) ; <nl> + return ExecuteAndTransfer ( computation , arguments , shape_with_output_layout ) ; <nl> + } <nl> + <nl> std : : unique_ptr < GlobalData > ClientLibraryTestBase : : ExecuteOrDie ( <nl> ComputationBuilder * builder , <nl> tensorflow : : gtl : : ArraySlice < GlobalData * > arguments ) { <nl> mmm a / tensorflow / compiler / xla / tests / client_library_test_base . h <nl> ppp b / tensorflow / compiler / xla / tests / client_library_test_base . h <nl> class ClientLibraryTestBase : public : : testing : : Test { <nl> <nl> / / TODO ( b / 25566808 ) : Add helper that populates a literal from a testdata file . <nl> <nl> - / / Convenience methods for building and running a computation from a builder . <nl> + / / Convenience methods for building and running a computation with the member <nl> + / / execution options . Modify execution_options_ in your test if you want to <nl> + / / customize the options . <nl> StatusOr < std : : unique_ptr < GlobalData > > Execute ( <nl> ComputationBuilder * builder , <nl> tensorflow : : gtl : : ArraySlice < GlobalData * > arguments ) ; <nl> class ClientLibraryTestBase : public : : testing : : Test { <nl> ComputationBuilder * builder , <nl> tensorflow : : gtl : : ArraySlice < GlobalData * > arguments , <nl> const Shape * shape_with_output_layout = nullptr ) ; <nl> + StatusOr < std : : unique_ptr < Literal > > ExecuteAndTransfer ( <nl> + const Computation & computation , <nl> + tensorflow : : gtl : : ArraySlice < GlobalData * > arguments , <nl> + const Shape * shape_with_output_layout = nullptr ) ; <nl> <nl> / / Convenience OrDie variants of above methods . <nl> std : : unique_ptr < GlobalData > ExecuteOrDie ( <nl> mmm a / tensorflow / compiler / xla / tests / deallocation_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / deallocation_test . cc <nl> class DeallocationTest : public ClientLibraryTestBase { <nl> tensorflow : : gtl : : ArraySlice < GlobalData * > arguments ) { <nl> Computation computation = builder - > Build ( ) . ConsumeValueOrDie ( ) ; <nl> auto global_data = <nl> - client_ - > Execute ( computation , arguments ) . ConsumeValueOrDie ( ) ; <nl> + client_ - > Execute ( computation , arguments , & execution_options_ ) <nl> + . ConsumeValueOrDie ( ) ; <nl> TF_CHECK_OK ( client_ - > Transfer ( * global_data ) . status ( ) ) ; <nl> return global_data ; <nl> } <nl> mmm a / tensorflow / compiler / xla / tests / prng_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / prng_test . 
cc <nl> XLA_TEST_F ( PrngTest , PassInGlobalRngSeed ) { <nl> client_ - > ExecuteAndTransfer ( computation , / * arguments = * / { } , <nl> & execution_options2 ) ) ; <nl> TF_ASSIGN_OR_ASSERT_OK ( <nl> - result5 , client_ - > ExecuteAndTransfer ( computation , / * arguments = * / { } ) ) ; <nl> + result5 , client_ - > ExecuteAndTransfer ( computation , / * arguments = * / { } , <nl> + & execution_options_ ) ) ; <nl> TF_ASSIGN_OR_ASSERT_OK ( <nl> - result6 , client_ - > ExecuteAndTransfer ( computation , / * arguments = * / { } ) ) ; <nl> + result6 , client_ - > ExecuteAndTransfer ( computation , / * arguments = * / { } , <nl> + & execution_options_ ) ) ; <nl> } <nl> <nl> LiteralTestUtil : : ExpectEqual ( * result1 , * result2 ) ; <nl> | [ XLA ] Properly propagate backend optimization options to XLA in more places . | tensorflow/tensorflow | ac47dc166f290d631c156846039ac78f30f362af | 2017-06-19T23:36:16Z |
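The helper introduced in this commit exists so call sites stop default-constructing ExecutionOptions with an empty DebugOptions field. Below is a minimal self-contained sketch of that pattern; the struct definitions and the flag-reading function are simplified stand-ins for the real protobuf-generated XLA types and the legacy_flags API, not the actual TensorFlow code.

    #include <iostream>

    // Simplified stand-ins for the xla.pb.h protos; the real types are
    // protobuf classes with mutable_debug_options() accessors.
    struct DebugOptions {
      int xla_backend_optimization_level = 0;
    };

    struct ExecutionOptions {
      DebugOptions debug_options;
    };

    // Stand-in for legacy_flags::GetDebugOptionsFromFlags(), which in XLA
    // derives the defaults from command-line flags.
    DebugOptions GetDebugOptionsFromFlags() {
      DebugOptions opts;
      opts.xla_backend_optimization_level = 2;  // the GPU backend CHECKs for >= 2
      return opts;
    }

    // The pattern the commit introduces: one factory that every call site uses
    // instead of writing `ExecutionOptions execution_options;` and leaving the
    // debug options at their zero values.
    ExecutionOptions CreateDefaultExecutionOptions() {
      ExecutionOptions execution_options;
      execution_options.debug_options = GetDebugOptionsFromFlags();
      return execution_options;
    }

    int main() {
      ExecutionOptions opts = CreateDefaultExecutionOptions();
      std::cout << opts.debug_options.xla_backend_optimization_level << "\n";  // prints 2
    }

The CHECK_GE(opt_level, 2) added to the GPU backend is what makes this propagation matter: an ExecutionOptions built without the flag defaults now fails compilation loudly instead of silently generating unoptimized code.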
mmm a / src / frontend / mosh - client . cc <nl> ppp b / src / frontend / mosh - client . cc <nl> int main ( int argc , char * argv [ ] ) <nl> } catch ( const Crypto : : CryptoException & e ) { <nl> fprintf ( stderr , " Crypto exception : % s \ r \ n " , <nl> e . what ( ) ) ; <nl> - } catch ( const std : : string & s ) { <nl> - fprintf ( stderr , " Error : % s \ r \ n " , s . c_str ( ) ) ; <nl> + } catch ( const std : : exception & e ) { <nl> + fprintf ( stderr , " Error : % s \ r \ n " , e . what ( ) ) ; <nl> } <nl> <nl> printf ( " \ n [ mosh is exiting . ] \ n " ) ; <nl> mmm a / src / terminal / terminaldisplayinit . cc <nl> ppp b / src / terminal / terminaldisplayinit . cc <nl> <nl> # include " terminaldisplay . h " <nl> <nl> # include < string > <nl> + # include < stdexcept > <nl> <nl> # if defined HAVE_NCURSESW_CURSES_H <nl> # include < ncursesw / curses . h > <nl> bool Display : : ti_flag ( const char * capname ) const <nl> { <nl> int val = tigetflag ( const_cast < char * > ( capname ) ) ; <nl> if ( val = = - 1 ) { <nl> - throw std : : string ( " Invalid terminfo boolean capability " ) + capname ; <nl> + throw std : : invalid_argument ( std : : string ( " Invalid terminfo boolean capability " ) + capname ) ; <nl> } <nl> return val ; <nl> } <nl> int Display : : ti_num ( const char * capname ) const <nl> { <nl> int val = tigetnum ( const_cast < char * > ( capname ) ) ; <nl> if ( val = = - 2 ) { <nl> - throw std : : string ( " Invalid terminfo numeric capability " ) + capname ; <nl> + throw std : : invalid_argument ( std : : string ( " Invalid terminfo numeric capability " ) + capname ) ; <nl> } <nl> return val ; <nl> } <nl> const char * Display : : ti_str ( const char * capname ) const <nl> { <nl> const char * val = tigetstr ( const_cast < char * > ( capname ) ) ; <nl> if ( val = = ( const char * ) - 1 ) { <nl> - throw std : : string ( " Invalid terminfo string capability " ) + capname ; <nl> + throw std : : invalid_argument ( std : : string ( " Invalid terminfo string capability " ) + capname ) ; <nl> } <nl> return val ; <nl> } <nl> Display : : Display ( bool use_environment ) <nl> if ( ret ! = OK ) { <nl> switch ( errret ) { <nl> case 1 : <nl> - throw std : : string ( " Terminal is hardcopy and cannot be used by curses applications . " ) ; <nl> + throw std : : runtime_error ( " Terminal is hardcopy and cannot be used by curses applications . " ) ; <nl> break ; <nl> case 0 : <nl> - throw std : : string ( " Unknown terminal type . " ) ; <nl> + throw std : : runtime_error ( " Unknown terminal type . " ) ; <nl> break ; <nl> case - 1 : <nl> - throw std : : string ( " Terminfo database could not be found . " ) ; <nl> + throw std : : runtime_error ( " Terminfo database could not be found . " ) ; <nl> break ; <nl> default : <nl> - throw std : : string ( " Unknown terminfo error . " ) ; <nl> + throw std : : runtime_error ( " Unknown terminfo error . " ) ; <nl> break ; <nl> } <nl> } <nl> | Throw std : : exception subclasses instead of std : : strings | mobile-shell/mosh | b5ac92491c1b40bb40f71c6ca629b2347fbdd9ef | 2015-06-05T03:47:32Z |
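The mosh change above is a pure exception-hierarchy cleanup: once every throw site uses a std::exception subclass, the client needs only one catch clause. A small self-contained sketch of the resulting behavior, with a made-up capability check standing in for the terminfo calls:

    #include <cstdio>
    #include <stdexcept>
    #include <string>

    // Stand-in for the terminfo lookups; throwing std::invalid_argument
    // instead of a bare std::string is the whole point of the commit.
    void ti_check(bool ok, const std::string& capname) {
      if (!ok) {
        throw std::invalid_argument("Invalid terminfo capability " + capname);
      }
    }

    int main() {
      try {
        ti_check(false, "smcup");
      } catch (const std::exception& e) {
        // A thrown std::string would bypass this handler entirely; an
        // std::exception subclass is caught and carries its message in what().
        fprintf(stderr, "Error: %s\r\n", e.what());
      }
      return 0;
    }

This is also why mosh-client.cc can drop its dedicated catch (const std::string&) clause: the std::exception handler now covers every error path in terminaldisplayinit.cc.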
mmm a / lib / IRGen / IRGenSIL . cpp <nl> ppp b / lib / IRGen / IRGenSIL . cpp <nl> class IRGenSILFunction : <nl> / / / Keeps track of the mapping of source variables to - O0 shadow copy allocas . <nl> llvm : : SmallDenseMap < StackSlotKey , Address , 8 > ShadowStackSlots ; <nl> llvm : : SmallDenseMap < Decl * , SmallString < 4 > , 8 > AnonymousVariables ; <nl> - llvm : : SmallVector < std : : pair < DominancePoint , llvm : : Instruction * > , 8 > <nl> - ValueVariables ; <nl> + llvm : : SmallDenseMap < llvm : : Instruction * , DominancePoint , 8 > ValueVariables ; <nl> unsigned NumAnonVars = 0 ; <nl> unsigned NumCondFails = 0 ; <nl> <nl> class IRGenSILFunction : <nl> void emitDebugVariableRangeExtension ( const SILBasicBlock * CurBB ) { <nl> if ( IGM . IRGen . Opts . Optimize ) <nl> return ; <nl> - for ( auto & Variable : reversed ( ValueVariables ) ) { <nl> - auto VarDominancePoint = Variable . first ; <nl> - llvm : : Value * Storage = Variable . second ; <nl> + for ( auto & Variable : ValueVariables ) { <nl> + auto VarDominancePoint = Variable . second ; <nl> + llvm : : Value * Storage = Variable . first ; <nl> if ( getActiveDominancePoint ( ) = = VarDominancePoint | | <nl> isActiveDominancePointDominatedBy ( VarDominancePoint ) ) { <nl> llvm : : Type * ArgTys ; <nl> class IRGenSILFunction : <nl> / / that this shouldn ' t be necessary . LiveDebugValues should be doing <nl> / / this but can ' t in general because it currently only tracks register <nl> / / locations . <nl> - auto It = llvm : : BasicBlock : : iterator ( Variable . second ) ; <nl> - auto * BB = Variable . second - > getParent ( ) ; <nl> + llvm : : Instruction * Value = Variable . first ; <nl> + auto It = llvm : : BasicBlock : : iterator ( Value ) ; <nl> + auto * BB = Value - > getParent ( ) ; <nl> auto * CurBB = Builder . GetInsertBlock ( ) ; <nl> if ( BB ! = CurBB ) <nl> for ( auto I = std : : next ( It ) , E = BB - > end ( ) ; I ! = E ; + + I ) { <nl> auto * DVI = dyn_cast < llvm : : DbgValueInst > ( I ) ; <nl> - if ( DVI & & DVI - > getValue ( ) = = Variable . second ) <nl> + if ( DVI & & DVI - > getValue ( ) = = Value ) <nl> IGM . DebugInfo - > getBuilder ( ) . insertDbgValueIntrinsic ( <nl> DVI - > getValue ( ) , 0 , DVI - > getVariable ( ) , DVI - > getExpression ( ) , <nl> DVI - > getDebugLoc ( ) , & * CurBB - > getFirstInsertionPt ( ) ) ; <nl> class IRGenSILFunction : <nl> if ( ! needsShadowCopy ( Storage ) ) { <nl> / / Mark for debug value range extension unless this is a constant . <nl> if ( auto * Value = dyn_cast < llvm : : Instruction > ( Storage ) ) <nl> - ValueVariables . push_back ( { getActiveDominancePoint ( ) , Value } ) ; <nl> + ValueVariables . insert ( { Value , getActiveDominancePoint ( ) } ) ; <nl> return Storage ; <nl> } <nl> <nl> mmm a / test / DebugInfo / liverange - extension . swift <nl> ppp b / test / DebugInfo / liverange - extension . swift <nl> public func rangeExtension ( _ b : Bool ) { <nl> / / CHECK : llvm . dbg . value ( metadata i32 [ [ I ] ] , i64 0 , metadata <nl> / / CHECK : llvm . dbg . value ( metadata i32 [ [ J : . * ] ] , i64 0 , metadata <nl> use ( j ) <nl> - / / CHECK : { { ( asm sideeffect " " , " r " . * ) | ( zext i32 ) } } [ [ J ] ] <nl> - / / CHECK : asm sideeffect " " , " r " <nl> + / / CHECK - DAG : { { ( asm sideeffect " " , " r " . * ) | ( zext i32 ) } } [ [ J ] ] <nl> + / / CHECK - DAG : asm sideeffect " " , " r " <nl> } <nl> let z = getInt32 ( ) <nl> use ( z ) <nl> - / / CHECK : llvm . dbg . 
value ( metadata i32 [ [ I ] ] , i64 0 , metadata <nl> / / CHECK - NOT : llvm . dbg . value ( metadata i32 [ [ J ] ] , i64 0 , metadata <nl> - / / CHECK : llvm . dbg . value ( metadata i32 [ [ Z : . * ] ] , i64 0 , metadata <nl> - / / CHECK : asm sideeffect " " , " r " <nl> - / / CHECK : { { ( asm sideeffect " " , " r " . * ) | ( zext i32 ) } } [ [ I ] ] <nl> + / / CHECK - DAG : llvm . dbg . value ( metadata i32 [ [ I ] ] , i64 0 , metadata <nl> + / / CHECK - DAG : llvm . dbg . value ( metadata i32 [ [ Z : . * ] ] , i64 0 , metadata <nl> + / / CHECK - DAG : { { ( asm sideeffect " " , " r " . * ) | ( zext i32 ) } } [ [ I ] ] <nl> + / / CHECK - DAG : asm sideeffect " " , " r " <nl> } <nl> new file mode 100644 <nl> index 000000000000 . . b694e04a4bad <nl> mmm / dev / null <nl> ppp b / test / DebugInfo / patternvars . swift <nl> <nl> + / / RUN : % target - swift - frontend % s - emit - ir - g - o - | % FileCheck % s <nl> + <nl> + @ _fixed_layout <nl> + public struct UnicodeScalar { <nl> + var _value : UInt32 <nl> + public var value : UInt32 { return _value } <nl> + } <nl> + <nl> + public func mangle ( s : [ UnicodeScalar ] ) - > [ UnicodeScalar ] { <nl> + let replacementUnichar = UnicodeScalar ( _value : 0 ) <nl> + var mangledUnichars : [ UnicodeScalar ] = s . map { <nl> + switch $ 0 . value { <nl> + case <nl> + / / A - Z <nl> + 0x0041 . . . 0x005A , <nl> + / / a - z <nl> + 0x0061 . . . 0x007A , <nl> + / / 0 - 9 <nl> + 0x0030 . . . 0x0039 , <nl> + / / _ <nl> + 0x005F , <nl> + / / Latin ( 1 ) <nl> + 0x00AA . . . 0x00AA : <nl> + return $ 0 <nl> + default : <nl> + return replacementUnichar <nl> + } <nl> + } <nl> + return mangledUnichars <nl> + } <nl> + <nl> + / / The patterns in the first case statement each define an anonymous variable , <nl> + / / which shares the storage with the expression in the switch statement . Make <nl> + / / sure we only emit live range extensions for the storage once per basic block . <nl> + <nl> + / / CHECK : define { { . * } } @ _TFF11patternvars6mangleFT1sGSaVS_13UnicodeScalar__GSaS0__U_FS0_S0_ <nl> + / / CHECK : call void asm sideeffect " " , " r " <nl> + / / CHECK - NOT : call void asm sideeffect " " , " r " <nl> + / / CHECK : br { { . * } } label <nl> + / / CHECK : call void asm sideeffect " " , " r " <nl> + / / CHECK - NOT : call void asm sideeffect " " , " r " <nl> + / / CHECK : br { { . * } } label <nl> + / / CHECK : call void asm sideeffect " " , " r " <nl> + / / CHECK - NOT : call void asm sideeffect " " , " r " <nl> + / / CHECK : br { { . * } } label <nl> + / / CHECK : call void asm sideeffect " " , " r " <nl> + / / CHECK - NOT : call void asm sideeffect " " , " r " <nl> + / / CHECK : br { { . * } } label <nl> + / / CHECK : call void asm sideeffect " " , " r " <nl> + / / CHECK - NOT : call void asm sideeffect " " , " r " <nl> + / / CHECK : br { { . * } } label <nl> + / / CHECK : call void asm sideeffect " " , " r " <nl> + / / CHECK - NOT : call void asm sideeffect " " , " r " <nl> + / / CHECK : br { { . * } } label <nl> + / / CHECK : call void asm sideeffect " " , " r " <nl> + / / CHECK - NOT : call void asm sideeffect " " , " r " <nl> + / / CHECK : br { { . * } } label <nl> | Merge pull request from adrian - prantl / 28467349 | apple/swift | 07b196d2f9a5facc490b35e3649e18937796239b | 2016-09-27T05:33:05Z |
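The data-structure swap in this commit (a SmallVector of pairs replaced by a SmallDenseMap keyed by the instruction) is what deduplicates the live-range extensions: in the patternvars.swift test, the anonymous variables bound by each pattern of the switch share one storage value, and with a map the second and later registrations of that value become no-ops. A simplified C++ analogue, using std::unordered_map as a stand-in for LLVM's SmallDenseMap:

    #include <cstdio>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    struct DominancePoint { int id; };  // stand-in for the real dominance point

    int main() {
      // Three debug variables; the first two share storage value 42, the way
      // pattern variables in one switch case share storage in the test above.
      std::vector<std::pair<int, DominancePoint>> registrations = {
          {42, {0}}, {42, {0}}, {7, {1}}};

      std::unordered_map<int, DominancePoint> value_variables;
      for (const auto& r : registrations)
        value_variables.insert(r);  // a duplicate key is silently dropped

      // Only two range extensions are emitted instead of three, which is what
      // the CHECK-NOT lines on the asm sideeffect calls verify.
      for (const auto& entry : value_variables)
        printf("extend live range of value %d at dominance point %d\n",
               entry.first, entry.second.id);
    }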
mmm a / dbms / include / DB / Core / ErrorCodes . h <nl> ppp b / dbms / include / DB / Core / ErrorCodes . h <nl> namespace ErrorCodes <nl> NO_ATTRIBUTES_LISTED , <nl> INDEX_OF_COLUMN_IN_SORT_CLAUSE_IS_OUT_OF_RANGE , <nl> UNKNOWN_DIRECTION_OF_SORTING , <nl> - DIVISION_BY_ZERO , <nl> + ILLEGAL_DIVISION , <nl> AGGREGATE_FUNCTION_NOT_APPLICABLE , <nl> UNKNOWN_RELATION , <nl> DICTIONARIES_WAS_NOT_LOADED , <nl> mmm a / dbms / include / DB / Functions / FunctionsArithmetic . h <nl> ppp b / dbms / include / DB / Functions / FunctionsArithmetic . h <nl> struct DivideFloatingImpl <nl> } ; <nl> <nl> <nl> - template < typename T > <nl> - inline void throwIfZero ( T x ) <nl> + template < typename A , typename B > <nl> + inline void throwIfDivisionLeadsToFPE ( A a , B b ) <nl> { <nl> - if ( unlikely ( x = = 0 ) ) <nl> - throw Exception ( " Division by zero " , ErrorCodes : : DIVISION_BY_ZERO ) ; <nl> + / / / Perhaps it would be better to use siglongjmp instead of these checks ? <nl> + <nl> + if ( unlikely ( b = = 0 ) ) <nl> + throw Exception ( " Division by zero " , ErrorCodes : : ILLEGAL_DIVISION ) ; <nl> + <nl> + / / / http : / / avva . livejournal . com / 2548306 . html <nl> + if ( unlikely ( std : : tr1 : : is_signed < A > : : value & & std : : tr1 : : is_signed < B > : : value & & a = = std : : numeric_limits < A > : : min ( ) & & b = = - 1 ) ) <nl> + throw Exception ( " Division of minimal signed number by minus one " , ErrorCodes : : ILLEGAL_DIVISION ) ; <nl> } <nl> <nl> template < typename A , typename B > <nl> struct DivideIntegralImpl <nl> <nl> static inline ResultType apply ( A a , B b ) <nl> { <nl> - throwIfZero ( b ) ; <nl> + throwIfDivisionLeadsToFPE ( a , b ) ; <nl> return static_cast < ResultType > ( a ) / b ; <nl> } <nl> } ; <nl> struct ModuloImpl <nl> <nl> static inline ResultType apply ( A a , B b ) <nl> { <nl> - throwIfZero ( typename NumberTraits : : ToInteger < A > : : Type ( b ) ) ; <nl> + throwIfDivisionLeadsToFPE ( typename NumberTraits : : ToInteger < A > : : Type ( a ) , typename NumberTraits : : ToInteger < A > : : Type ( b ) ) ; <nl> return typename NumberTraits : : ToInteger < A > : : Type ( a ) <nl> % typename NumberTraits : : ToInteger < A > : : Type ( b ) ; <nl> } <nl> } ; <nl> | dbms : fixed error with division [ # CONV - 2944 ] . | ClickHouse / ClickHouse | 4b1888ecf3f0e2b86b3bb5c9a046f47836985668 | 2013 - 02 - 16T21 : 23 : 55Z |
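Division by zero is not the only integer division that traps: on x86, INT_MIN / -1 raises the same SIGFPE, because the mathematically correct quotient does not fit in the signed type (the livejournal link in the diff walks through this). A self-contained sketch of the guard, with the template machinery and NumberTraits dropped:

    #include <cstdio>
    #include <limits>
    #include <stdexcept>

    // Both cases below would raise SIGFPE on x86 if they reached the hardware
    // division, so they must be rejected before the `/` executes.
    void throwIfDivisionLeadsToFPE(long long a, long long b) {
      if (b == 0)
        throw std::runtime_error("Division by zero");
      if (a == std::numeric_limits<long long>::min() && b == -1)
        throw std::runtime_error("Division of minimal signed number by minus one");
    }

    long long checked_div(long long a, long long b) {
      throwIfDivisionLeadsToFPE(a, b);
      return a / b;
    }

    int main() {
      try {
        checked_div(std::numeric_limits<long long>::min(), -1);
      } catch (const std::runtime_error& e) {
        printf("caught: %s\n", e.what());  // an exception instead of a crash
      }
    }

This also explains the error-code rename: DIVISION_BY_ZERO no longer described all the divisions being rejected, so the commit generalizes it to ILLEGAL_DIVISION.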
mmm a / xbmc / utils / test / Makefile <nl> ppp b / xbmc / utils / test / Makefile <nl> SRCS = \ <nl> TestMime . cpp \ <nl> TestPerformanceSample . cpp \ <nl> TestPOUtils . cpp \ <nl> + TestRegExp . cpp \ <nl> TestXBMCTinyXML . cpp <nl> <nl> LIB = utilsTest . a <nl> new file mode 100644 <nl> index 000000000000 . . 709f450c6813 <nl> mmm / dev / null <nl> ppp b / xbmc / utils / test / TestRegExp . cpp <nl> <nl> + / * <nl> + * Copyright ( C ) 2005 - 2012 Team XBMC <nl> + * http : / / www . xbmc . org <nl> + * <nl> + * This Program is free software ; you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> + * any later version . <nl> + * <nl> + * This Program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with XBMC ; see the file COPYING . If not , write to <nl> + * the Free Software Foundation , 675 Mass Ave , Cambridge , MA 02139 , USA . <nl> + * http : / / www . gnu . org / copyleft / gpl . html <nl> + * <nl> + * / <nl> + <nl> + / * TODO : gtest / gtest . h needs to come in before utils / RegExp . h . <nl> + * Investigate why . <nl> + * / <nl> + # include " gtest / gtest . h " <nl> + <nl> + # include " utils / RegExp . h " <nl> + # include " utils / log . h " <nl> + # include " filesystem / File . h " <nl> + # include " filesystem / SpecialProtocol . h " <nl> + <nl> + TEST ( TestRegExp , RegFind ) <nl> + { <nl> + CRegExp regex ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ Test . * " ) ) ; <nl> + EXPECT_EQ ( 0 , regex . RegFind ( " Test string . " ) ) ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ string . * " ) ) ; <nl> + EXPECT_EQ ( - 1 , regex . RegFind ( " Test string . " ) ) ; <nl> + } <nl> + <nl> + TEST ( TestRegExp , GetReplaceString ) <nl> + { <nl> + CRegExp regex ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ ( Test ) \ \ s * ( . * ) \ \ . " ) ) ; <nl> + EXPECT_EQ ( 0 , regex . RegFind ( " Test string . " ) ) ; <nl> + EXPECT_STREQ ( " string " , regex . GetReplaceString ( " \ \ 2 " ) ) ; <nl> + } <nl> + <nl> + TEST ( TestRegExp , GetFindLen ) <nl> + { <nl> + CRegExp regex ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ ( Test ) \ \ s * ( . * ) \ \ . " ) ) ; <nl> + EXPECT_EQ ( 0 , regex . RegFind ( " Test string . " ) ) ; <nl> + EXPECT_EQ ( 12 , regex . GetFindLen ( ) ) ; <nl> + } <nl> + <nl> + TEST ( TestRegExp , GetSubCount ) <nl> + { <nl> + CRegExp regex ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ ( Test ) \ \ s * ( . * ) \ \ . " ) ) ; <nl> + EXPECT_EQ ( 0 , regex . RegFind ( " Test string . " ) ) ; <nl> + EXPECT_EQ ( 2 , regex . GetSubCount ( ) ) ; <nl> + } <nl> + <nl> + TEST ( TestRegExp , GetSubStart ) <nl> + { <nl> + CRegExp regex ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ ( Test ) \ \ s * ( . * ) \ \ . " ) ) ; <nl> + EXPECT_EQ ( 0 , regex . RegFind ( " Test string . " ) ) ; <nl> + EXPECT_EQ ( 0 , regex . GetSubStart ( 0 ) ) ; <nl> + EXPECT_EQ ( 0 , regex . GetSubStart ( 1 ) ) ; <nl> + EXPECT_EQ ( 5 , regex . GetSubStart ( 2 ) ) ; <nl> + } <nl> + <nl> + TEST ( TestRegExp , GetCaptureTotal ) <nl> + { <nl> + CRegExp regex ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ ( Test ) \ \ s * ( . 
* ) \ \ . " ) ) ; <nl> + EXPECT_EQ ( 0 , regex . RegFind ( " Test string . " ) ) ; <nl> + EXPECT_EQ ( 2 , regex . GetCaptureTotal ( ) ) ; <nl> + } <nl> + <nl> + TEST ( TestRegExp , GetMatch ) <nl> + { <nl> + CRegExp regex ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ ( Test ) \ \ s * ( . * ) \ \ . " ) ) ; <nl> + EXPECT_EQ ( 0 , regex . RegFind ( " Test string . " ) ) ; <nl> + EXPECT_STREQ ( " Test string . " , regex . GetMatch ( 0 ) . c_str ( ) ) ; <nl> + EXPECT_STREQ ( " Test " , regex . GetMatch ( 1 ) . c_str ( ) ) ; <nl> + EXPECT_STREQ ( " string " , regex . GetMatch ( 2 ) . c_str ( ) ) ; <nl> + } <nl> + <nl> + TEST ( TestRegExp , GetPattern ) <nl> + { <nl> + CRegExp regex ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ ( Test ) \ \ s * ( . * ) \ \ . " ) ) ; <nl> + EXPECT_STREQ ( " ^ ( Test ) \ \ s * ( . * ) \ \ . " , regex . GetPattern ( ) . c_str ( ) ) ; <nl> + } <nl> + <nl> + TEST ( TestRegExp , GetNamedSubPattern ) <nl> + { <nl> + CRegExp regex ; <nl> + std : : string match ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ ( ? < first > Test ) \ \ s * ( ? < second > . * ) \ \ . " ) ) ; <nl> + EXPECT_EQ ( 0 , regex . RegFind ( " Test string . " ) ) ; <nl> + EXPECT_TRUE ( regex . GetNamedSubPattern ( " first " , match ) ) ; <nl> + EXPECT_STREQ ( " Test " , match . c_str ( ) ) ; <nl> + EXPECT_TRUE ( regex . GetNamedSubPattern ( " second " , match ) ) ; <nl> + EXPECT_STREQ ( " string " , match . c_str ( ) ) ; <nl> + } <nl> + <nl> + TEST ( TestRegExp , operatorEqual ) <nl> + { <nl> + CRegExp regex , regexcopy ; <nl> + std : : string match ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ ( ? < first > Test ) \ \ s * ( ? < second > . * ) \ \ . " ) ) ; <nl> + regexcopy = regex ; <nl> + EXPECT_EQ ( 0 , regexcopy . RegFind ( " Test string . " ) ) ; <nl> + EXPECT_TRUE ( regexcopy . GetNamedSubPattern ( " first " , match ) ) ; <nl> + EXPECT_STREQ ( " Test " , match . c_str ( ) ) ; <nl> + EXPECT_TRUE ( regexcopy . GetNamedSubPattern ( " second " , match ) ) ; <nl> + EXPECT_STREQ ( " string " , match . c_str ( ) ) ; <nl> + } <nl> + <nl> + class TestRegExpLog : public testing : : Test <nl> + { <nl> + protected : <nl> + TestRegExpLog ( ) { } <nl> + ~ TestRegExpLog ( ) <nl> + { <nl> + / * Reset globals used by CLog after each test . * / <nl> + g_log_globalsRef - > m_file = NULL ; <nl> + g_log_globalsRef - > m_repeatCount = 0 ; <nl> + g_log_globalsRef - > m_repeatLogLevel = - 1 ; <nl> + g_log_globalsRef - > m_logLevel = LOG_LEVEL_DEBUG ; <nl> + } <nl> + } ; <nl> + <nl> + TEST_F ( TestRegExpLog , DumpOvector ) <nl> + { <nl> + CRegExp regex ; <nl> + CStdString logfile , logstring ; <nl> + char buf [ 100 ] ; <nl> + unsigned int bytesread ; <nl> + XFILE : : CFile file ; <nl> + <nl> + logfile = CSpecialProtocol : : TranslatePath ( " special : / / temp / " ) + " xbmc . log " ; <nl> + EXPECT_TRUE ( CLog : : Init ( CSpecialProtocol : : TranslatePath ( " special : / / temp / " ) ) ) ; <nl> + EXPECT_TRUE ( XFILE : : CFile : : Exists ( logfile ) ) ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " ^ ( ? < first > Test ) \ \ s * ( ? < second > . * ) \ \ . " ) ) ; <nl> + EXPECT_EQ ( 0 , regex . RegFind ( " Test string . " ) ) ; <nl> + regex . DumpOvector ( LOGDEBUG ) ; <nl> + CLog : : Close ( ) ; <nl> + <nl> + EXPECT_TRUE ( file . Open ( logfile ) ) ; <nl> + while ( ( bytesread = file . Read ( buf , sizeof ( buf ) - 1 ) ) > 0 ) <nl> + { <nl> + buf [ bytesread ] = ' \ 0 ' ; <nl> + logstring . append ( buf ) ; <nl> + } <nl> + file . Close ( ) ; <nl> + EXPECT_FALSE ( logstring . 
empty ( ) ) ; <nl> + <nl> + EXPECT_STREQ ( " \ xEF \ xBB \ xBF " , logstring . substr ( 0 , 3 ) . c_str ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( regex . RegComp ( " . * DEBUG : regexp ovector = \ \ { \ \ [ 0 , 12 \ \ ] , \ \ [ 0 , 4 \ \ ] , " <nl> + " \ \ [ 5 , 11 \ \ ] \ \ } . * " ) ) ; <nl> + EXPECT_GE ( regex . RegFind ( logstring ) , 0 ) ; <nl> + <nl> + EXPECT_TRUE ( XFILE : : CFile : : Delete ( logfile ) ) ; <nl> + } <nl> | [ GSOC ] Add test cases for CRegExp class . | xbmc/xbmc | 2bb1825618af26d1886869a14f33d04d8f6e554e | 2012-09-05T19:07:44Z |
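The suite above exercises the CRegExp wrapper end to end. One more case in the same style shows that RegFind reports the offset of an unanchored match, which the 0 and -1 expectations above imply but never test directly. This extra test is illustrative only, not part of the commit, and assumes the same CRegExp API used above:

    #include "gtest/gtest.h"
    #include "utils/RegExp.h"

    // RegFind returns the match position rather than a plain success flag,
    // so a pattern matching mid-string should report where it matched.
    TEST(TestRegExp, RegFindReportsOffset)
    {
      CRegExp regex;
      EXPECT_TRUE(regex.RegComp("string"));
      EXPECT_EQ(5, regex.RegFind("Test string."));
      EXPECT_STREQ("string", regex.GetMatch(0).c_str());
    }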
mmm a / etc / backports_required_for_multiversion_tests . yml <nl> ppp b / etc / backports_required_for_multiversion_tests . yml <nl> replica_sets_multiversion : <nl> test_file : jstests / replsets / reconfig_waits_for_oplog_commitment_condition . js <nl> - ticket : SERVER - 44500 <nl> test_file : jstests / replsets / apply_ops_inserts_do_not_include_fromMigrate_field . js <nl> + - ticket : SERVER - 47190 <nl> + test_file : jstests / replsets / force_shutdown_primary . js <nl> <nl> sharding_multiversion : <nl> <nl> new file mode 100644 <nl> index 000000000000 . . 77e20016de52 <nl> mmm / dev / null <nl> ppp b / jstests / replsets / force_shutdown_primary . js <nl> <nl> + / * * <nl> + * Test that the shutdown command called on a primary node with { force : true } succeeds even if <nl> + * stepDown fails . <nl> + * <nl> + * 1 . Initiate a 3 - node replica set . <nl> + * 2 . Block replication to secondaries . <nl> + * 3 . Write to primary . <nl> + * 4 . Try to shut down primary with { force : true } . <nl> + * 5 . Kill the shutdown command while the shutdown command is waiting to stepDown . <nl> + * 6 . Test that the primary node still shuts down . <nl> + * <nl> + * / <nl> + ( function ( ) { <nl> + " use strict " ; <nl> + <nl> + load ( " jstests / libs / write_concern_util . js " ) ; / / for stopReplicationOnSecondaries . <nl> + const replTest = new ReplSetTest ( { nodes : 3 } ) ; <nl> + replTest . startSet ( ) ; <nl> + replTest . initiateWithHighElectionTimeout ( ) ; <nl> + <nl> + const primary = replTest . getPrimary ( ) ; <nl> + const testDB = primary . getDB ( " test " ) ; <nl> + assert . commandWorked ( testDB . foo . insert ( { x : 1 } , { writeConcern : { w : 3 } } ) ) ; <nl> + <nl> + jsTestLog ( " Blocking replication to secondaries . " ) ; <nl> + stopReplicationOnSecondaries ( replTest ) ; <nl> + <nl> + jsTestLog ( " Executing write to primary . " ) ; <nl> + assert . commandWorked ( testDB . foo . insert ( { x : 2 } ) ) ; <nl> + <nl> + jsTestLog ( " Shutting down primary in a parallel shell " ) ; <nl> + const shutdownShell = startParallelShell ( function ( ) { <nl> + db . adminCommand ( { shutdown : 1 , timeoutSecs : 60 , force : true } ) ; <nl> + } , primary . port ) ; <nl> + <nl> + let shutdownOpID = - 1 ; <nl> + let res = { } ; <nl> + jsTestLog ( " Looking for shutdown in currentOp ( ) output " ) ; <nl> + assert . soon ( function ( ) { <nl> + res = primary . getDB ( ' admin ' ) . currentOp ( true ) ; <nl> + for ( const index in res . inprog ) { <nl> + const entry = res . inprog [ index ] ; <nl> + if ( entry [ " command " ] & & entry [ " command " ] [ " shutdown " ] = = = 1 ) { <nl> + shutdownOpID = entry . opid ; <nl> + return true ; <nl> + } <nl> + } <nl> + return false ; <nl> + } , " No shutdown command found : " + tojson ( res ) ) ; <nl> + <nl> + jsTestLog ( " Killing shutdown command on primary . " ) ; <nl> + primary . getDB ( ' admin ' ) . killOp ( shutdownOpID ) ; <nl> + <nl> + jsTestLog ( " Verifying primary shut down and cannot be connected to . " ) ; <nl> + const exitCode = shutdownShell ( { checkExitSuccess : false } ) ; <nl> + assert . neq ( 0 , exitCode , " expected shutdown to close the shell ' s connection " ) ; <nl> + assert . soonNoExcept ( function ( ) { <nl> + / / The parallel shell exits while shutdown is in progress , and if this happens early enough , <nl> + / / the primary can still accept connections despite successfully starting to shutdown . <nl> + / / So , retry connecting until connections cannot be established and an error is thrown . <nl> + assert . 
throws ( function ( ) { <nl> + new Mongo ( primary . host ) ; <nl> + } ) ; <nl> + return true ; <nl> + } , " expected primary node to shut down and not be connectable " ) ; <nl> + <nl> + replTest . stopSet ( ) ; <nl> + } ) ( ) ; <nl> mmm a / src / mongo / db / commands / shutdown . h <nl> ppp b / src / mongo / db / commands / shutdown . h <nl> <nl> # include " mongo / util / ntservice . h " <nl> <nl> namespace mongo { <nl> + Status stepDownForShutdown ( OperationContext * opCtx , <nl> + const Milliseconds & waitTime , <nl> + bool forceShutdown ) noexcept ; <nl> <nl> namespace shutdown_detail { <nl> void finishShutdown ( bool force , long long timeoutSecs ) ; <nl> mmm a / src / mongo / db / commands / shutdown_d . cpp <nl> ppp b / src / mongo / db / commands / shutdown_d . cpp <nl> <nl> * it in the license file . <nl> * / <nl> <nl> + # define MONGO_LOGV2_DEFAULT_COMPONENT : : mongo : : logv2 : : LogComponent : : kCommand <nl> + <nl> # include " mongo / platform / basic . h " <nl> <nl> # include < string > <nl> <nl> # include " mongo / db / commands / shutdown . h " <nl> # include " mongo / db / index_builds_coordinator . h " <nl> # include " mongo / db / repl / replication_coordinator . h " <nl> + # include " mongo / db / s / transaction_coordinator_service . h " <nl> + # include " mongo / logv2 / log . h " <nl> <nl> namespace mongo { <nl> + <nl> + Status stepDownForShutdown ( OperationContext * opCtx , <nl> + const Milliseconds & waitTime , <nl> + bool forceShutdown ) noexcept { <nl> + auto replCoord = repl : : ReplicationCoordinator : : get ( opCtx ) ; <nl> + / / If this is a single node replica set , then we don ' t have to wait <nl> + / / for any secondaries . Ignore stepdown . <nl> + if ( replCoord - > getConfig ( ) . getNumMembers ( ) ! = 1 ) { <nl> + try { <nl> + replCoord - > stepDown ( opCtx , false / * force * / , waitTime , Seconds ( 120 ) ) ; <nl> + } catch ( const ExceptionFor < ErrorCodes : : NotMaster > & ) { <nl> + / / Ignore not master errors . <nl> + } catch ( const DBException & e ) { <nl> + if ( ! forceShutdown ) { <nl> + return e . toStatus ( ) ; <nl> + } <nl> + / / Ignore stepDown errors on force shutdown . <nl> + LOGV2_WARNING ( 4719000 , " Error stepping down during force shutdown " , " error " _attr = e ) ; <nl> + } <nl> + <nl> + / / Even if the ReplicationCoordinator failed to step down , ensure we still shut down the <nl> + / / TransactionCoordinatorService ( see SERVER - 45009 ) <nl> + TransactionCoordinatorService : : get ( opCtx ) - > onStepDown ( ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> namespace { <nl> <nl> class CmdShutdownMongoD : public CmdShutdown < CmdShutdownMongoD > { <nl> class CmdShutdownMongoD : public CmdShutdown < CmdShutdownMongoD > { <nl> numIndexBuilds = = 0U ) ; <nl> } <nl> <nl> - try { <nl> - repl : : ReplicationCoordinator : : get ( opCtx ) - > stepDown ( <nl> - opCtx , force , Seconds ( timeoutSecs ) , Seconds ( 120 ) ) ; <nl> - } catch ( const DBException & e ) { <nl> - if ( e . code ( ) ! = ErrorCodes : : NotMaster ) { / / ignore not master <nl> - throw ; <nl> - } <nl> - } <nl> + uassertStatusOK ( stepDownForShutdown ( opCtx , Seconds ( timeoutSecs ) , force ) ) ; <nl> } <nl> <nl> } cmdShutdownMongoD ; <nl> mmm a / src / mongo / db / db . cpp <nl> ppp b / src / mongo / db / db . cpp <nl> <nl> # include " mongo / db / clientcursor . h " <nl> # include " mongo / db / commands / feature_compatibility_version . h " <nl> # include " mongo / db / commands / feature_compatibility_version_gen . 
h " <nl> + # include " mongo / db / commands / shutdown . h " <nl> # include " mongo / db / concurrency / d_concurrency . h " <nl> # include " mongo / db / concurrency / flow_control_ticketholder . h " <nl> # include " mongo / db / concurrency / lock_state . h " <nl> void shutdownTask ( const ShutdownTaskArgs & shutdownArgs ) { <nl> opCtx = uniqueOpCtx . get ( ) ; <nl> } <nl> <nl> - / / If this is a single node replica set , then we don ' t have to wait <nl> - / / for any secondaries . Ignore stepdown . <nl> - if ( repl : : ReplicationCoordinator : : get ( serviceContext ) - > getConfig ( ) . getNumMembers ( ) ! = 1 ) { <nl> - try { <nl> - / / For faster tests , we allow a short wait time with setParameter . <nl> - auto waitTime = repl : : waitForStepDownOnNonCommandShutdown . load ( ) <nl> - ? Milliseconds ( Seconds ( 15 ) ) <nl> - : Milliseconds ( 100 ) ; <nl> - replCoord - > stepDown ( opCtx , false / * force * / , waitTime , Seconds ( 120 ) ) ; <nl> - } catch ( const ExceptionFor < ErrorCodes : : NotMaster > & ) { <nl> - / / ignore not master errors <nl> - } catch ( const DBException & e ) { <nl> - LOGV2_WARNING ( 20561 , <nl> - " Error stepping down in non - command initiated shutdown path : { error } " , <nl> - " Error stepping down in non - command initiated shutdown path " , <nl> - " error " _attr = e ) ; <nl> - } <nl> - <nl> - / / Even if the replCoordinator failed to step down , ensure we still shut down the <nl> - / / TransactionCoordinatorService ( see SERVER - 45009 ) <nl> - TransactionCoordinatorService : : get ( serviceContext ) - > onStepDown ( ) ; <nl> - } <nl> + / / For faster tests , we allow a short wait time with setParameter . <nl> + auto waitTime = repl : : waitForStepDownOnNonCommandShutdown . load ( ) ? Milliseconds ( Seconds ( 15 ) ) <nl> + : Milliseconds ( 100 ) ; <nl> + const auto forceShutdown = true ; <nl> + / / stepDown should never return an error during force shutdown . <nl> + invariantStatusOK ( stepDownForShutdown ( opCtx , waitTime , forceShutdown ) ) ; <nl> } <nl> <nl> MirrorMaestro : : shutdown ( serviceContext ) ; <nl> | SERVER - 47190 : Shutdown command with force : true should ignore all stepdown errors | mongodb/mongo | 1563ac389bdcb08aadeb31705b2cc123742b739b | 2020-04-22T17:36:40Z |
mmm a / tests / runner . py <nl> ppp b / tests / runner . py <nl> def skip_requested_tests ( args , modules ) : <nl> which = [ arg . split ( ' skip : ' ) [ 1 ] ] <nl> <nl> print ( ' , ' . join ( which ) , file = sys . stderr ) <nl> + skipped = False <nl> for test in which : <nl> print ( ' will skip " % s " ' % test , file = sys . stderr ) <nl> suite_name , test_name = test . split ( ' . ' ) <nl> for m in modules : <nl> - try : <nl> - suite = getattr ( m , suite_name ) <nl> + suite = getattr ( m , suite_name , None ) <nl> + if suite : <nl> setattr ( suite , test_name , lambda s : s . skipTest ( " requested to be skipped " ) ) <nl> + skipped = True <nl> break <nl> - except AttributeError : <nl> - pass <nl> + assert skipped , " Not able to skip test " + test <nl> args [ i ] = None <nl> return [ a for a in args if a is not None ] <nl> <nl> | [ runner . py ] Do not silently ignore incorrect command line skips ( ) | emscripten-core/emscripten | 18e914b692539d19ab5fc4a5b222f6d85d4ee916 | 2020-10-07T05:13:37Z |
mmm a / lib / SILOptimizer / Utils / CMakeLists . txt <nl> ppp b / lib / SILOptimizer / Utils / CMakeLists . txt <nl> set ( UTILS_SOURCES <nl> Utils / FunctionSignatureOptUtils . cpp <nl> Utils / GenericCloner . cpp <nl> Utils / Generics . cpp <nl> + Utils / LoadStoreOptUtils . cpp <nl> Utils / Local . cpp <nl> Utils / LoopUtils . cpp <nl> - Utils / LSBase . cpp <nl> Utils / SILInliner . cpp <nl> Utils / SILSSAUpdater . cpp <nl> PARENT_SCOPE ) <nl> similarity index 100 % <nl> rename from lib / SILOptimizer / Utils / LSBase . cpp <nl> rename to lib / SILOptimizer / Utils / LoadStoreOptUtils . cpp <nl> mmm a / test / SILOptimizer / dead_store_elim . sil <nl> ppp b / test / SILOptimizer / dead_store_elim . sil <nl> bb4 : <nl> <nl> / / Remove dead store in 1 loop block , as the store in exit block kills it . <nl> / / <nl> - / / CHECK - LABEL : DeadStoreSingleLoopBlockSimpleStruct <nl> + / / CHECK - LABEL : dead_store_single_loop_block_simple_struct <nl> / / CHECK : bb2 : <nl> / / CHECK : { { store } } <nl> / / CHECK : br <nl> - sil hidden @ DeadStoreSingleLoopBlockSimpleStruct : $ @ convention ( thin ) ( Bool , Int ) - > ( ) { <nl> + sil hidden @ dead_store_single_loop_block_simple_struct : $ @ convention ( thin ) ( Bool , Int ) - > ( ) { <nl> bb0 ( % 0 : $ Bool , % 1 : $ Int ) : <nl> - % 2 = alloc_stack $ S1 , var , name " x " / / users : % 8 , % 13 , % 18 , % 23 , % 26 <nl> - % 5 = function_ref @ S1_init : $ @ convention ( thin ) ( @ thin S1 . Type ) - > S1 / / user : % 7 <nl> - % 6 = metatype $ @ thin S1 . Type / / user : % 7 <nl> - % 7 = apply % 5 ( % 6 ) : $ @ convention ( thin ) ( @ thin S1 . Type ) - > S1 / / user : % 8 <nl> - store % 7 to % 2 : $ * S1 / / id : % 8 <nl> + % 2 = alloc_stack $ S1 , var , name " x " <nl> + % 5 = function_ref @ S1_init : $ @ convention ( thin ) ( @ thin S1 . Type ) - > S1 <nl> + % 6 = metatype $ @ thin S1 . Type <nl> + % 7 = apply % 5 ( % 6 ) : $ @ convention ( thin ) ( @ thin S1 . Type ) - > S1 <nl> + store % 7 to % 2 : $ * S1 <nl> br bb1 <nl> <nl> - bb1 : / / Preds : bb0 <nl> - % 11 = integer_literal $ Builtin . Int64 , 0 / / user : % 12 <nl> - % 12 = struct $ Int ( % 11 : $ Builtin . Int64 ) / / user : % 14 <nl> - % 13 = struct_element_addr % 2 : $ * S1 , # S1 . a / / user : % 14 <nl> + bb1 : <nl> + % 11 = integer_literal $ Builtin . Int64 , 0 <nl> + % 12 = struct $ Int ( % 11 : $ Builtin . Int64 ) <nl> + % 13 = struct_element_addr % 2 : $ * S1 , # S1 . a <nl> % 14 = load % 13 : $ * Int <nl> - br bb2 / / id : % 15 <nl> - <nl> - bb2 : / / Preds : bb0 <nl> - % 16 = integer_literal $ Builtin . Int64 , 1 / / user : % 17 <nl> - % 17 = struct $ Int ( % 16 : $ Builtin . Int64 ) / / user : % 19 <nl> - % 18 = struct_element_addr % 2 : $ * S1 , # S1 . a / / user : % 19 <nl> - store % 17 to % 18 : $ * Int / / id : % 19 <nl> - % 9 = struct_extract % 0 : $ Bool , # Bool . value / / user : % 10 <nl> - cond_br % 9 , bb1 , bb3 / / id : % 10 <nl> - <nl> - bb3 : / / Preds : bb1 bb2 <nl> - % 21 = integer_literal $ Builtin . Int64 , 2 / / user : % 22 <nl> - % 22 = struct $ Int ( % 21 : $ Builtin . Int64 ) / / user : % 24 <nl> - % 23 = struct_element_addr % 2 : $ * S1 , # S1 . a / / user : % 24 <nl> - store % 22 to % 23 : $ * Int / / id : % 24 <nl> - % 25 = tuple ( ) / / user : % 27 <nl> - dealloc_stack % 2 : $ * S1 / / id : % 26 <nl> - return % 25 : $ ( ) / / id : % 27 <nl> + br bb2 <nl> + <nl> + bb2 : <nl> + % 16 = integer_literal $ Builtin . Int64 , 1 <nl> + % 17 = struct $ Int ( % 16 : $ Builtin . Int64 ) <nl> + % 18 = struct_element_addr % 2 : $ * S1 , # S1 . 
a <nl> + store % 17 to % 18 : $ * Int <nl> + % 9 = struct_extract % 0 : $ Bool , # Bool . value <nl> + cond_br % 9 , bb1 , bb3 <nl> + <nl> + bb3 : <nl> + % 21 = integer_literal $ Builtin . Int64 , 2 <nl> + % 22 = struct $ Int ( % 21 : $ Builtin . Int64 ) <nl> + % 23 = struct_element_addr % 2 : $ * S1 , # S1 . a <nl> + store % 22 to % 23 : $ * Int <nl> + % 25 = tuple ( ) <nl> + dealloc_stack % 2 : $ * S1 <nl> + return % 25 : $ ( ) <nl> } <nl> <nl> / / Remove dead stores in loop blocks , as the store in exit block kills them . <nl> bb3 : <nl> <nl> / / Remove dead store in the tuple data structure . <nl> / / <nl> - / / CHECK - LABEL : DeadStoreInSimpleTuple <nl> + / / CHECK - LABEL : dead_store_simple_tuple <nl> / / CHECK : bb0 : <nl> / / CHECK : { { store } } <nl> / / CHECK : { { store } } <nl> / / CHECK - NOT : { { store } } <nl> / / CHECK : load <nl> - sil hidden @ DeadStoreInSimpleTuple : $ @ convention ( thin ) ( ) - > Int { <nl> + sil hidden @ dead_store_simple_tuple : $ @ convention ( thin ) ( ) - > Int { <nl> bb0 : <nl> - % 0 = alloc_stack $ ( a : Int , b : Int ) , var , name " x " / / users : % 1 , % 2 , % 11 , % 15 , % 20 <nl> - % 1 = tuple_element_addr % 0 : $ * ( a : Int , b : Int ) , 0 / / user : % 5 <nl> - % 2 = tuple_element_addr % 0 : $ * ( a : Int , b : Int ) , 1 / / user : % 8 <nl> - % 3 = integer_literal $ Builtin . Int64 , 2 / / user : % 4 <nl> - % 4 = struct $ Int ( % 3 : $ Builtin . Int64 ) / / user : % 5 <nl> - store % 4 to % 1 : $ * Int / / id : % 5 <nl> - % 6 = integer_literal $ Builtin . Int64 , 2 / / user : % 7 <nl> - % 7 = struct $ Int ( % 6 : $ Builtin . Int64 ) / / user : % 8 <nl> - store % 7 to % 2 : $ * Int / / id : % 8 <nl> - % 9 = integer_literal $ Builtin . Int64 , 10 / / user : % 10 <nl> - % 10 = struct $ Int ( % 9 : $ Builtin . Int64 ) / / user : % 12 <nl> - % 11 = tuple_element_addr % 0 : $ * ( a : Int , b : Int ) , 0 / / user : % 12 <nl> - store % 10 to % 11 : $ * Int / / id : % 12 <nl> - % 13 = integer_literal $ Builtin . Int64 , 12 / / user : % 14 <nl> - % 14 = struct $ Int ( % 13 : $ Builtin . Int64 ) / / user : % 16 <nl> - % 15 = tuple_element_addr % 0 : $ * ( a : Int , b : Int ) , 1 / / user : % 16 <nl> - store % 14 to % 15 : $ * Int / / id : % 16 <nl> + % 0 = alloc_stack $ ( a : Int , b : Int ) , var , name " x " <nl> + % 1 = tuple_element_addr % 0 : $ * ( a : Int , b : Int ) , 0 <nl> + % 2 = tuple_element_addr % 0 : $ * ( a : Int , b : Int ) , 1 <nl> + % 3 = integer_literal $ Builtin . Int64 , 2 <nl> + % 4 = struct $ Int ( % 3 : $ Builtin . Int64 ) <nl> + store % 4 to % 1 : $ * Int <nl> + % 6 = integer_literal $ Builtin . Int64 , 2 <nl> + % 7 = struct $ Int ( % 6 : $ Builtin . Int64 ) <nl> + store % 7 to % 2 : $ * Int <nl> + % 9 = integer_literal $ Builtin . Int64 , 10 <nl> + % 10 = struct $ Int ( % 9 : $ Builtin . Int64 ) <nl> + % 11 = tuple_element_addr % 0 : $ * ( a : Int , b : Int ) , 0 <nl> + store % 10 to % 11 : $ * Int <nl> + % 13 = integer_literal $ Builtin . Int64 , 12 <nl> + % 14 = struct $ Int ( % 13 : $ Builtin . Int64 ) <nl> + % 15 = tuple_element_addr % 0 : $ * ( a : Int , b : Int ) , 1 <nl> + store % 14 to % 15 : $ * Int <nl> % 22 = load % 15 : $ * Int <nl> % 23 = load % 11 : $ * Int <nl> % 24 = load % 2 : $ * Int <nl> % 25 = load % 1 : $ * Int <nl> - % 17 = integer_literal $ Builtin . Int64 , 22 / / user : % 19 <nl> + % 17 = integer_literal $ Builtin . Int64 , 22 <nl> % 18 = tuple ( ) <nl> - % 19 = struct $ Int ( % 17 : $ Builtin . 
Int64 ) / / user : % 21 <nl> - dealloc_stack % 0 : $ * ( a : Int , b : Int ) / / id : % 20 <nl> - return % 19 : $ Int / / id : % 21 <nl> - } <nl> - <nl> - / / Remove dead stores in if else blocks for simple class . <nl> - / / <nl> - / / CHECK - LABEL : DeadStoreInIfElseSimpleClass <nl> - / / CHECK : bb1 : <nl> - / / CHECK - NOT : { { store } } <nl> - / / CHECK : br <nl> - / / CHECK : bb2 : <nl> - / / CHECK - NOT : { { store } } <nl> - / / CHECK : br <nl> - sil hidden @ DeadStoreInIfElseSimpleClass : $ @ convention ( thin ) ( Bool ) - > ( ) { <nl> - bb0 ( % 0 : $ Bool ) : <nl> - % 1 = alloc_stack $ foo / / users : % 8 , % 36 <nl> - % 3 = alloc_ref $ foo / / users : % 6 , % 8 , % 11 , % 14 , % 17 , % 19 , % 22 , % 25 , % 27 , % 30 , % 33 , % 34 <nl> - % 4 = integer_literal $ Builtin . Int64 , 10 / / user : % 5 <nl> - % 5 = struct $ Int ( % 4 : $ Builtin . Int64 ) / / user : % 7 <nl> - % 6 = ref_element_addr % 3 : $ foo , # foo . a / / user : % 7 <nl> - store % 5 to % 6 : $ * Int / / id : % 7 <nl> - store % 3 to % 1 : $ * foo / / id : % 8 <nl> - % 9 = struct_extract % 0 : $ Bool , # Bool . value / / user : % 10 <nl> - cond_br % 9 , bb1 , bb2 / / id : % 10 <nl> - <nl> - bb1 : / / Preds : bb0 <nl> - % 12 = integer_literal $ Builtin . Int64 , 11 / / user : % 13 <nl> - % 13 = struct $ Int ( % 12 : $ Builtin . Int64 ) / / user : % 15 <nl> - % 14 = ref_element_addr % 3 : $ foo , # foo . a / / user : % 15 <nl> - store % 13 to % 14 : $ * Int / / id : % 15 <nl> - % 16 = tuple ( ) <nl> - br bb3 / / id : % 18 <nl> - <nl> - bb2 : / / Preds : bb0 <nl> - % 20 = integer_literal $ Builtin . Int64 , 12 / / user : % 21 <nl> - % 21 = struct $ Int ( % 20 : $ Builtin . Int64 ) / / user : % 23 <nl> - % 22 = ref_element_addr % 3 : $ foo , # foo . a / / user : % 23 <nl> - store % 21 to % 22 : $ * Int / / id : % 23 <nl> - % 24 = tuple ( ) <nl> - br bb3 / / id : % 26 <nl> - <nl> - bb3 : / / Preds : bb1 bb2 <nl> - strong_retain % 3 : $ foo / / id : % 27 <nl> - % 28 = integer_literal $ Builtin . Int64 , 13 / / user : % 29 <nl> - % 29 = struct $ Int ( % 28 : $ Builtin . Int64 ) / / user : % 31 <nl> - % 30 = ref_element_addr % 3 : $ foo , # foo . a / / user : % 31 <nl> - store % 29 to % 30 : $ * Int / / id : % 31 <nl> - % 32 = tuple ( ) <nl> - strong_release % 3 : $ foo / / id : % 33 <nl> - strong_release % 3 : $ foo / / id : % 34 <nl> - % 35 = tuple ( ) / / user : % 37 <nl> - dealloc_stack % 1 : $ * foo / / id : % 36 <nl> - return % 35 : $ ( ) / / id : % 37 <nl> - } <nl> - <nl> - / / Remove dead store in split block for simple class . <nl> - / / <nl> - / / CHECK - LABEL : DeadStoreInSplitSimpleClass <nl> - / / CHECK : bb1 : <nl> - / / CHECK - NOT : { { store } } <nl> - / / CHECK : cond_br <nl> - sil hidden @ DeadStoreInSplitSimpleClass : $ @ convention ( thin ) ( Bool ) - > ( ) { <nl> - bb0 ( % 0 : $ Bool ) : <nl> - % 1 = alloc_stack $ foo / / users : % 3 , % 25 <nl> - % 2 = alloc_ref $ foo / / users : % 3 , % 7 , % 13 , % 19 , % 23 <nl> - store % 2 to % 1 : $ * foo / / id : % 3 <nl> - br bb1 / / id : % 4 <nl> - <nl> - bb1 : / / Preds : bb0 <nl> - % 5 = integer_literal $ Builtin . Int64 , 10 / / user : % 6 <nl> - % 6 = struct $ Int ( % 5 : $ Builtin . Int64 ) / / user : % 8 <nl> - % 7 = ref_element_addr % 2 : $ foo , # foo . a / / user : % 8 <nl> - store % 6 to % 7 : $ * Int / / id : % 8 <nl> - % 9 = struct_extract % 0 : $ Bool , # Bool . value / / user : % 10 <nl> - cond_br % 9 , bb2 , bb3 / / id : % 10 <nl> - <nl> - bb2 : / / Preds : bb1 <nl> - % 11 = integer_literal $ Builtin . 
Int64 , 11 / / user : % 12 <nl> - % 12 = struct $ Int ( % 11 : $ Builtin . Int64 ) / / user : % 14 <nl> - % 13 = ref_element_addr % 2 : $ foo , # foo . a / / user : % 14 <nl> - store % 12 to % 13 : $ * Int / / id : % 14 <nl> - % 15 = tuple ( ) <nl> - br bb4 / / id : % 16 <nl> - <nl> - bb3 : / / Preds : bb1 <nl> - % 17 = integer_literal $ Builtin . Int64 , 12 / / user : % 18 <nl> - % 18 = struct $ Int ( % 17 : $ Builtin . Int64 ) / / user : % 20 <nl> - % 19 = ref_element_addr % 2 : $ foo , # foo . a / / user : % 20 <nl> - store % 18 to % 19 : $ * Int / / id : % 20 <nl> - % 21 = tuple ( ) <nl> - br bb4 / / id : % 22 <nl> - <nl> - bb4 : / / Preds : bb2 bb3 <nl> - strong_release % 2 : $ foo / / id : % 23 <nl> - % 24 = tuple ( ) / / user : % 26 <nl> - dealloc_stack % 1 : $ * foo / / id : % 25 <nl> - return % 24 : $ ( ) / / id : % 26 <nl> + % 19 = struct $ Int ( % 17 : $ Builtin . Int64 ) <nl> + dealloc_stack % 0 : $ * ( a : Int , b : Int ) <nl> + return % 19 : $ Int <nl> } <nl> <nl> / / Cannot remove partially dead store in split block for simple class . <nl> bb4 : <nl> / / Currently , % 14 = apply % 13 ( % 10 ) is marked as having side effect on the <nl> / / store % 8 to % 9 : $ * Int / / id : % 15 <nl> / / <nl> - / / CHECK - LABEL : DeadStoreSameBlockAcrossFunctionCallSimpleStruct <nl> + / / CHECK - LABEL : dead_store_across_function_call <nl> / / CHECK : bb1 : <nl> / / CHECK - NOT : { { store } } <nl> / / CHECK : function_ref <nl> - sil hidden @ DeadStoreSameBlockAcrossFunctionCallSimpleStruct : $ @ convention ( thin ) ( Bool , Int ) - > ( ) { <nl> + sil hidden @ dead_store_across_function_call : $ @ convention ( thin ) ( Bool , Int ) - > ( ) { <nl> bb0 ( % 0 : $ Bool , % 1 : $ Int ) : <nl> - % 2 = alloc_stack $ S1 / / users : % 6 , % 9 , % 18 <nl> - / / function_ref S1_init <nl> - % 3 = function_ref @ S1_init : $ @ convention ( thin ) ( @ thin S1 . Type ) - > S1 / / user : % 5 <nl> - % 4 = metatype $ @ thin S1 . Type / / user : % 5 <nl> - % 5 = apply % 3 ( % 4 ) : $ @ convention ( thin ) ( @ thin S1 . Type ) - > S1 / / user : % 6 <nl> - store % 5 to % 2 : $ * S1 / / id : % 6 <nl> - % 7 = integer_literal $ Builtin . Int64 , 0 / / user : % 8 <nl> - % 8 = struct $ Int ( % 7 : $ Builtin . Int64 ) / / users : % 12 , % 15 <nl> - % 9 = struct_element_addr % 2 : $ * S1 , # S1 . a / / users : % 12 , % 15 <nl> - % 10 = alloc_ref $ foo / / user : % 14 <nl> - br bb1 / / id : % 11 <nl> - <nl> - bb1 : / / Preds : bb0 <nl> - store % 8 to % 9 : $ * Int / / id : % 12 <nl> - % 13 = function_ref @ foo_user : $ @ convention ( thin ) ( @ guaranteed foo ) - > ( ) / / user : % 14 <nl> + % 2 = alloc_stack $ S1 <nl> + % 3 = function_ref @ S1_init : $ @ convention ( thin ) ( @ thin S1 . Type ) - > S1 <nl> + % 4 = metatype $ @ thin S1 . Type <nl> + % 5 = apply % 3 ( % 4 ) : $ @ convention ( thin ) ( @ thin S1 . Type ) - > S1 <nl> + store % 5 to % 2 : $ * S1 <nl> + % 7 = integer_literal $ Builtin . Int64 , 0 <nl> + % 8 = struct $ Int ( % 7 : $ Builtin . Int64 ) <nl> + % 9 = struct_element_addr % 2 : $ * S1 , # S1 . 
a <nl> + % 10 = alloc_ref $ foo <nl> + br bb1 <nl> + <nl> + bb1 : <nl> + store % 8 to % 9 : $ * Int <nl> + % 13 = function_ref @ foo_user : $ @ convention ( thin ) ( @ guaranteed foo ) - > ( ) <nl> % 14 = apply % 13 ( % 10 ) : $ @ convention ( thin ) ( @ guaranteed foo ) - > ( ) <nl> - store % 8 to % 9 : $ * Int / / id : % 15 <nl> - br bb2 / / id : % 16 <nl> + store % 8 to % 9 : $ * Int <nl> + br bb2 <nl> <nl> - bb2 : / / Preds : bb1 <nl> - % 17 = tuple ( ) / / user : % 19 <nl> - dealloc_stack % 2 : $ * S1 / / id : % 18 <nl> - return % 17 : $ ( ) / / id : % 19 <nl> + bb2 : <nl> + % 17 = tuple ( ) <nl> + dealloc_stack % 2 : $ * S1 <nl> + return % 17 : $ ( ) <nl> } <nl> <nl> <nl> bb0 ( % 0 : $ * Example ) : <nl> return % 17 : $ Int64 <nl> } <nl> <nl> - / / The store in bb0 as the its only partially dead , i . e . <nl> - / / the structure S3 has 2 fields . <nl> - / / <nl> - / / CHECK - LABEL : PartialDeadStoreSimpleStruct <nl> - / / CHECK : bb0 <nl> - / / CHECK - NEXT : [ [ RET0 : % . + ] ] = alloc_stack <nl> - / / CHECK : [ [ RET1 : % . + ] ] = function_ref @ S3_init <nl> - / / CHECK - NEXT : [ [ RET2 : % . + ] ] = metatype $ @ thin S3 . Type <nl> - / / CHECK - NEXT : [ [ RET3 : % . + ] ] = apply [ [ RET1 : % . + ] ] ( [ [ RET2 : % . + ] ] ) <nl> - / / CHECK - NOT : store [ [ RET3 : % . + ] ] to [ [ RET0 : % . + ] ] # 1 : $ * S3 <nl> - sil hidden @ PartialDeadStoreSimpleStruct : $ @ convention ( thin ) ( Bool , Int ) - > ( ) { <nl> - bb0 ( % 0 : $ Bool , % 1 : $ Int ) : <nl> - % 2 = alloc_stack $ S3 / / users : % 6 , % 11 , % 16 , % 20 <nl> - / / function_ref S3_init <nl> - % 3 = function_ref @ S3_init : $ @ convention ( thin ) ( @ thin S3 . Type ) - > S3 / / user : % 5 <nl> - % 4 = metatype $ @ thin S3 . Type / / user : % 5 <nl> - % 5 = apply % 3 ( % 4 ) : $ @ convention ( thin ) ( @ thin S3 . Type ) - > S3 / / user : % 6 <nl> - store % 5 to % 2 : $ * S3 / / id : % 6 <nl> - % 7 = struct_extract % 0 : $ Bool , # Bool . value / / user : % 8 <nl> - cond_br % 7 , bb1 , bb2 / / id : % 8 <nl> - <nl> - bb1 : / / Preds : bb0 <nl> - % 9 = integer_literal $ Builtin . Int64 , 0 / / user : % 10 <nl> - % 10 = struct $ Int ( % 9 : $ Builtin . Int64 ) / / user : % 12 <nl> - % 11 = struct_element_addr % 2 : $ * S3 , # S3 . a / / user : % 12 <nl> - store % 10 to % 11 : $ * Int / / id : % 12 <nl> - br bb3 / / id : % 13 <nl> - <nl> - bb2 : / / Preds : bb0 <nl> - % 14 = integer_literal $ Builtin . Int64 , 1 / / user : % 15 <nl> - % 15 = struct $ Int ( % 14 : $ Builtin . Int64 ) / / user : % 17 <nl> - % 16 = struct_element_addr % 2 : $ * S3 , # S3 . a / / user : % 17 <nl> - store % 15 to % 16 : $ * Int / / id : % 17 <nl> - br bb3 / / id : % 18 <nl> - <nl> - bb3 : / / Preds : bb1 bb2 <nl> - % 19 = tuple ( ) / / user : % 21 <nl> - dealloc_stack % 2 : $ * S3 / / id : % 20 <nl> - return % 19 : $ ( ) / / id : % 21 <nl> - } <nl> - <nl> / / / Make sure we can coalesce the 2 live stores to the 2 fields in S4 . <nl> / / / <nl> - / / / CHECK - LABEL : PartialDeadStoreStructInStruct <nl> + / / / CHECK - LABEL : partial_dead_store_struct_in_struct <nl> / / / CHECK : [ [ RET0 : % . + ] ] = struct_extract <nl> / / / CHECK : [ [ RET1 : % . + ] ] = struct_element_addr <nl> / / / CHECK : { { store } } [ [ RET0 : % . + ] ] to [ [ RET1 : % . 
+ ] ] : $ * S4 <nl> - sil hidden @ PartialDeadStoreStructInStruct : $ @ convention ( thin ) ( @ inout S5 ) - > ( ) { <nl> + sil hidden @ partial_dead_store_struct_in_struct : $ @ convention ( thin ) ( @ inout S5 ) - > ( ) { <nl> bb0 ( % 0 : $ * S5 ) : <nl> - % 1 = function_ref @ S5_init : $ @ convention ( thin ) ( @ thin S5 . Type ) - > S5 / / user : % 3 <nl> - % 2 = metatype $ @ thin S5 . Type / / user : % 3 <nl> - % 3 = apply % 1 ( % 2 ) : $ @ convention ( thin ) ( @ thin S5 . Type ) - > S5 / / user : % 4 <nl> - store % 3 to % 0 : $ * S5 / / id : % 4 <nl> - % 5 = integer_literal $ Builtin . Int64 , 11 / / user : % 6 <nl> - % 6 = struct $ Int ( % 5 : $ Builtin . Int64 ) / / user : % 8 <nl> - % 7 = struct_element_addr % 0 : $ * S5 , # S5 . y / / user : % 8 <nl> - store % 6 to % 7 : $ * Int / / id : % 8 <nl> - % 9 = tuple ( ) / / user : % 11 <nl> - return % 9 : $ ( ) / / id : % 11 <nl> + % 1 = function_ref @ S5_init : $ @ convention ( thin ) ( @ thin S5 . Type ) - > S5 <nl> + % 2 = metatype $ @ thin S5 . Type <nl> + % 3 = apply % 1 ( % 2 ) : $ @ convention ( thin ) ( @ thin S5 . Type ) - > S5 <nl> + store % 3 to % 0 : $ * S5 <nl> + % 5 = integer_literal $ Builtin . Int64 , 11 <nl> + % 6 = struct $ Int ( % 5 : $ Builtin . Int64 ) <nl> + % 7 = struct_element_addr % 0 : $ * S5 , # S5 . y <nl> + store % 6 to % 7 : $ * Int <nl> + % 9 = tuple ( ) <nl> + return % 9 : $ ( ) <nl> } <nl> <nl> / / / Make sure we do not coalesce the 2 live stores to the 2 fields in S6 . <nl> | Rename LSBase . cpp to LoadStoreOptUtils . cpp | apple/swift | 63d04a8713a0b42558d064d5771fc178c4afc7dc | 2016-04-01T03:09:55Z |
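The SIL test functions in the diff above exercise dead store elimination: a store is dead when every later path overwrites the location before anything loads from it. As a minimal cross-language sketch of the same pattern (the diff is Swift SIL, so this C++ analogue is illustrative only; the names S1 and run_example are invented):

#include <cstdio>

struct S1 { int a; int b; };

int run_example(bool flag) {
    S1 s{};
    s.a = 10;          // dead store: every path below overwrites s.a
    if (flag) {
        s.a = 11;      // kills the store above
    } else {
        s.a = 12;      // kills the store above
    }
    return s.a + s.b;  // only the branch stores (and s.b's zero-init) are observable
}

int main() {
    std::printf("%d\n", run_example(true));  // prints 11
    return 0;
}

A dead-store pass deletes the first write to s.a because no load can observe it, which is exactly what the CHECK-NOT { { store } } lines in the renamed tests assert at the SIL level.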
mmm a / api / tesseractmain . cpp <nl> ppp b / api / tesseractmain . cpp <nl> <nl> # include " strngs . h " <nl> # include " tprintf . h " <nl> # include " openclwrapper . h " <nl> + # include " osdetect . h " <nl> <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> * main ( ) <nl> int main ( int argc , char * * argv ) { <nl> <nl> if ( pagesegmode = = tesseract : : PSM_AUTO_ONLY | | <nl> pagesegmode = = tesseract : : PSM_OSD_ONLY ) { <nl> - tesseract : : Orientation orientation ; <nl> - tesseract : : WritingDirection direction ; <nl> - tesseract : : TextlineOrder order ; <nl> - float deskew_angle ; <nl> int ret_val = 0 ; <nl> <nl> if ( ! pixs ) <nl> pixs = pixRead ( image ) ; <nl> + if ( ! pixs ) { <nl> + fprintf ( stderr , " Cannot open input file : % s \ n " , image ) ; <nl> + exit ( 2 ) ; <nl> + } <nl> api . SetImage ( pixs ) ; <nl> - tesseract : : PageIterator * it = api . AnalyseLayout ( ) ; <nl> - if ( it ) { <nl> - it - > Orientation ( & orientation , & direction , & order , & deskew_angle ) ; <nl> - tprintf ( " Orientation : % d \ nWritingDirection : % d \ nTextlineOrder : % d \ n " \ <nl> - " Deskew angle : % . 4f \ n " , <nl> - orientation , direction , order , deskew_angle ) ; <nl> + <nl> + if ( pagesegmode = = tesseract : : PSM_OSD_ONLY ) { <nl> + OSResults osr ; <nl> + if ( api . DetectOS ( & osr ) ) { <nl> + int orient = osr . best_result . orientation_id ; <nl> + int script_id = osr . get_best_script ( orient ) ; <nl> + float orient_oco = osr . best_result . oconfidence ; <nl> + float orient_sco = osr . best_result . sconfidence ; <nl> + tprintf ( " Orientation : % d \ nOrientation in degrees : % d \ n " \ <nl> + " Orientation confidence : % . 2f \ n " \ <nl> + " Script : % d \ nScript confidence : % . 2f \ n " , <nl> + orient , OrientationIdToValue ( orient ) , orient_oco , <nl> + script_id , orient_sco ) ; <nl> + } else { <nl> + ret_val = 1 ; <nl> + } <nl> } else { <nl> - ret_val = 1 ; <nl> + tesseract : : Orientation orientation ; <nl> + tesseract : : WritingDirection direction ; <nl> + tesseract : : TextlineOrder order ; <nl> + float deskew_angle ; <nl> + tesseract : : PageIterator * it = api . AnalyseLayout ( ) ; <nl> + if ( it ) { <nl> + it - > Orientation ( & orientation , & direction , & order , & deskew_angle ) ; <nl> + tprintf ( " Orientation : % d \ nWritingDirection : % d \ nTextlineOrder : % d \ n " \ <nl> + " Deskew angle : % . 4f \ n " , <nl> + orientation , direction , order , deskew_angle ) ; <nl> + } else { <nl> + ret_val = 1 ; <nl> + } <nl> + delete it ; <nl> } <nl> pixDestroy ( & pixs ) ; <nl> - delete it ; <nl> exit ( ret_val ) ; <nl> } <nl> <nl> | provide output for - psm 0 | tesseract-ocr/tesseract | 32789291a8dc4098dcbf7e710c122ea86e9c0e1f | 2014-02-01T12:56:36Z |
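The branch added above makes -psm 0 run orientation-and-script detection (OSD) and print the result instead of only page-layout analysis. Below is a standalone sketch of that OSD-only path; it reuses only calls visible in the diff (DetectOS, OSResults, OrientationIdToValue, pixRead, SetImage), while the Init arguments, the "eng" language pack, and the exact include paths are assumptions that vary by Tesseract/Leptonica version:

#include <cstdio>
#include "baseapi.h"     // TessBaseAPI; may be <tesseract/baseapi.h> on newer installs
#include "osdetect.h"    // OSResults, OrientationIdToValue
#include "allheaders.h"  // Leptonica: pixRead, pixDestroy

int main(int argc, char** argv) {
    if (argc < 2) {
        std::fprintf(stderr, "usage: %s image\n", argv[0]);
        return 2;
    }
    tesseract::TessBaseAPI api;
    if (api.Init(nullptr, "eng") != 0)  // assumes tessdata is discoverable
        return 1;
    Pix* pixs = pixRead(argv[1]);
    if (!pixs) {
        std::fprintf(stderr, "Cannot open input file: %s\n", argv[1]);
        return 2;
    }
    api.SetImage(pixs);
    OSResults osr;
    int ret_val = 0;
    if (api.DetectOS(&osr)) {
        int orient = osr.best_result.orientation_id;
        std::printf("Orientation: %d\nOrientation in degrees: %d\n"
                    "Orientation confidence: %.2f\n"
                    "Script: %d\nScript confidence: %.2f\n",
                    orient, OrientationIdToValue(orient),
                    osr.best_result.oconfidence,
                    osr.get_best_script(orient), osr.best_result.sconfidence);
    } else {
        ret_val = 1;  // detection failed, mirroring the diff's error handling
    }
    pixDestroy(&pixs);
    api.End();
    return ret_val;
}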
mmm a / src / video_core / shader / decode / texture . cpp <nl> ppp b / src / video_core / shader / decode / texture . cpp <nl> u32 ShaderIR : : DecodeTexture ( NodeBlock & bb , u32 pc ) { <nl> case OpCode : : Id : : TXD : { <nl> UNIMPLEMENTED_IF_MSG ( instr . txd . UsesMiscMode ( TextureMiscMode : : AOFFI ) , <nl> " AOFFI is not implemented " ) ; <nl> - UNIMPLEMENTED_IF_MSG ( instr . txd . is_array ! = 0 , " TXD Array is not implemented " ) ; <nl> <nl> + const bool is_array = instr . txd . is_array ! = 0 ; <nl> u64 base_reg = instr . gpr8 . Value ( ) ; <nl> const auto derivate_reg = instr . gpr20 . Value ( ) ; <nl> const auto texture_type = instr . txd . texture_type . Value ( ) ; <nl> const auto coord_count = GetCoordCount ( texture_type ) ; <nl> <nl> - const Sampler * sampler = is_bindless <nl> - ? GetBindlessSampler ( base_reg , { { texture_type , false , false } } ) <nl> - : GetSampler ( instr . sampler , { { texture_type , false , false } } ) ; <nl> + const Sampler * sampler = <nl> + is_bindless ? GetBindlessSampler ( base_reg , { { texture_type , is_array , false } } ) <nl> + : GetSampler ( instr . sampler , { { texture_type , is_array , false } } ) ; <nl> Node4 values ; <nl> if ( sampler = = nullptr ) { <nl> for ( u32 element = 0 ; element < values . size ( ) ; + + element ) { <nl> u32 ShaderIR : : DecodeTexture ( NodeBlock & bb , u32 pc ) { <nl> WriteTexInstructionFloat ( bb , instr , values ) ; <nl> break ; <nl> } <nl> + <nl> if ( is_bindless ) { <nl> base_reg + + ; <nl> } <nl> u32 ShaderIR : : DecodeTexture ( NodeBlock & bb , u32 pc ) { <nl> derivates . push_back ( GetRegister ( derivate_reg + derivate + 1 ) ) ; <nl> } <nl> <nl> + Node array_node = { } ; <nl> + if ( is_array ) { <nl> + const Node info_reg = GetRegister ( base_reg + coord_count ) ; <nl> + array_node = BitfieldExtract ( info_reg , 0 , 16 ) ; <nl> + } <nl> + <nl> for ( u32 element = 0 ; element < values . size ( ) ; + + element ) { <nl> - MetaTexture meta { * sampler , { } , { } , { } , { } , derivates , { } , { } , { } , element } ; <nl> + MetaTexture meta { * sampler , array_node , { } , { } , { } , derivates , { } , { } , { } , element } ; <nl> values [ element ] = Operation ( OperationCode : : TextureGradient , std : : move ( meta ) , coords ) ; <nl> } <nl> <nl> | Shader_IR : Implement TXD Array . | yuzu-emu/yuzu | a1667a7b46cef31a493721b6c193749ee54f730f | 2020-01-04T17:28:02Z |
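The array handling added above reads the texture array layer from the low 16 bits of the register that follows the coordinates, pulling it out with BitfieldExtract(info_reg, 0, 16). A tiny self-contained illustration of that extraction (helper name and sample value are invented, not from the yuzu IR):

#include <cstdint>
#include <cstdio>

// Extract `bits` bits of `value` starting at `offset`, like the shader IR's
// BitfieldExtract(info_reg, 0, 16) used above for the array layer.
constexpr std::uint32_t BitfieldExtract(std::uint32_t value,
                                        std::uint32_t offset,
                                        std::uint32_t bits) {
    return (value >> offset) & ((1u << bits) - 1u);
}

int main() {
    const std::uint32_t info_reg = 0xABCD0007;  // unrelated high bits, layer 7 in the low half
    std::printf("array layer = %u\n", BitfieldExtract(info_reg, 0, 16));  // prints 7
    return 0;
}

Masking only the low 16 bits matters because the upper half of the register can carry unrelated data, so the raw register value cannot be used as the layer index directly.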
mmm a / src / test / util_tests . cpp <nl> ppp b / src / test / util_tests . cpp <nl> BOOST_AUTO_TEST_CASE ( util_HexStr ) <nl> BOOST_CHECK_EQUAL ( <nl> HexStr ( ParseHex_vec , true ) , <nl> " 04 67 8a fd b0 " ) ; <nl> + <nl> + BOOST_CHECK_EQUAL ( <nl> + HexStr ( ParseHex_vec . rbegin ( ) , ParseHex_vec . rend ( ) ) , <nl> + " b0fd8a6704 " <nl> + ) ; <nl> + <nl> + BOOST_CHECK_EQUAL ( <nl> + HexStr ( ParseHex_vec . rbegin ( ) , ParseHex_vec . rend ( ) , true ) , <nl> + " b0 fd 8a 67 04 " <nl> + ) ; <nl> + <nl> + BOOST_CHECK_EQUAL ( <nl> + HexStr ( std : : reverse_iterator < const uint8_t * > ( ParseHex_expected ) , <nl> + std : : reverse_iterator < const uint8_t * > ( ParseHex_expected ) ) , <nl> + " " <nl> + ) ; <nl> + <nl> + BOOST_CHECK_EQUAL ( <nl> + HexStr ( std : : reverse_iterator < const uint8_t * > ( ParseHex_expected ) , <nl> + std : : reverse_iterator < const uint8_t * > ( ParseHex_expected ) , true ) , <nl> + " " <nl> + ) ; <nl> + <nl> + BOOST_CHECK_EQUAL ( <nl> + HexStr ( std : : reverse_iterator < const uint8_t * > ( ParseHex_expected + 1 ) , <nl> + std : : reverse_iterator < const uint8_t * > ( ParseHex_expected ) ) , <nl> + " 04 " <nl> + ) ; <nl> + <nl> + BOOST_CHECK_EQUAL ( <nl> + HexStr ( std : : reverse_iterator < const uint8_t * > ( ParseHex_expected + 1 ) , <nl> + std : : reverse_iterator < const uint8_t * > ( ParseHex_expected ) , true ) , <nl> + " 04 " <nl> + ) ; <nl> + <nl> + BOOST_CHECK_EQUAL ( <nl> + HexStr ( std : : reverse_iterator < const uint8_t * > ( ParseHex_expected + 5 ) , <nl> + std : : reverse_iterator < const uint8_t * > ( ParseHex_expected ) ) , <nl> + " b0fd8a6704 " <nl> + ) ; <nl> + <nl> + BOOST_CHECK_EQUAL ( <nl> + HexStr ( std : : reverse_iterator < const uint8_t * > ( ParseHex_expected + 5 ) , <nl> + std : : reverse_iterator < const uint8_t * > ( ParseHex_expected ) , true ) , <nl> + " b0 fd 8a 67 04 " <nl> + ) ; <nl> + <nl> + BOOST_CHECK_EQUAL ( <nl> + HexStr ( std : : reverse_iterator < const uint8_t * > ( ParseHex_expected + 65 ) , <nl> + std : : reverse_iterator < const uint8_t * > ( ParseHex_expected ) ) , <nl> + " 5f1df16b2b704c8a578d0bbaf74d385cde12c11ee50455f3c438ef4c3fbcf649b6de611feae06279a60939e028a8d65c10b73071a6f16719274855feb0fd8a6704 " <nl> + ) ; <nl> } <nl> <nl> <nl> | Add tests for HexStr std : : reverse_iterator cases | bitcoin/bitcoin | ac48861815832e924bc355553f677f54ace984c5 | 2018-03-01T05:31:35Z |
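The new assertions above pin down HexStr over std::reverse_iterator ranges: walking the bytes backwards must produce the reversed hex string, with and without space separators, including the empty and single-byte edge cases. A simplified stand-in for the template under test (an illustration, not the repo's util code) reproduces the expected values:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

template <typename It>
std::string HexStr(It begin, It end, bool spaces = false) {
    static const char hexmap[] = "0123456789abcdef";
    std::string out;
    for (It it = begin; it != end; ++it) {
        if (spaces && it != begin) out.push_back(' ');
        const std::uint8_t b = static_cast<std::uint8_t>(*it);
        out.push_back(hexmap[b >> 4]);
        out.push_back(hexmap[b & 15]);
    }
    return out;
}

int main() {
    const std::vector<std::uint8_t> v{0x04, 0x67, 0x8a, 0xfd, 0xb0};
    std::printf("%s\n", HexStr(v.begin(), v.end()).c_str());          // 04678afdb0
    std::printf("%s\n", HexStr(v.rbegin(), v.rend()).c_str());        // b0fd8a6704
    std::printf("%s\n", HexStr(v.rbegin(), v.rend(), true).c_str());  // b0 fd 8a 67 04
    return 0;
}

Because the template is instantiated per iterator type, reverse iterators need no special casing: the same loop simply visits the bytes in the opposite order, which is what the added tests verify.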
mmm a / fdbclient / FileBackupAgent . actor . cpp <nl> ppp b / fdbclient / FileBackupAgent . actor . cpp <nl> namespace fileBackup { <nl> } <nl> <nl> / / Calculate number of shards that should be done before the next interval end <nl> - state Version nextDispatchVersion = recentReadVersion + CLIENT_KNOBS - > CORE_VERSIONSPERSECOND * ( g_network - > isSimulated ( ) ? ( snapshotIntervalSeconds / 5 . 0 ) : CLIENT_KNOBS - > BACKUP_SNAPSHOT_DISPATCH_INTERVAL_SEC ) ; <nl> + state Version nextDispatchVersion = recentReadVersion + CLIENT_KNOBS - > CORE_VERSIONSPERSECOND * std : : min < int > ( ( snapshotIntervalSeconds / 5 . 0 ) , CLIENT_KNOBS - > BACKUP_SNAPSHOT_DISPATCH_INTERVAL_SEC ) ; <nl> Params . nextDispatchVersion ( ) . set ( task , nextDispatchVersion ) ; <nl> / / timeElapsed is between 0 and 1 and represents what portion of the shards we should have completed by now <nl> double timeElapsed ; <nl> | fix : nextDispatchVersion was being set too large if the snapshot interval was small | apple/foundationdb | 570f72ba40efdd6e011ba98ef5f560fbd0c3b141 | 2018-01-19T18:53:58Z |
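The one-line fix above replaces the simulation-only divisor with a clamp: the dispatch window becomes versions-per-second times the smaller of snapshotIntervalSeconds / 5 and the knob's dispatch interval, so a short snapshot interval no longer leaves nextDispatchVersion set too far ahead. A minimal sketch of the arithmetic (the constants are placeholders, not FoundationDB's knob defaults):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    const std::int64_t versionsPerSecond = 1000000;  // stands in for CORE_VERSIONSPERSECOND
    const int dispatchIntervalSec = 300;             // stands in for BACKUP_SNAPSHOT_DISPATCH_INTERVAL_SEC
    const int snapshotIntervalSeconds = 600;         // small interval: 600 / 5 = 120 wins the min

    const std::int64_t recentReadVersion = 42000000;
    const std::int64_t nextDispatchVersion =
        recentReadVersion +
        versionsPerSecond * std::min<int>(snapshotIntervalSeconds / 5.0,
                                          dispatchIntervalSec);

    std::printf("next dispatch version = %lld\n",
                static_cast<long long>(nextDispatchVersion));  // 42000000 + 120 * 1000000
    return 0;
}

Before the fix, a non-simulated run always used the full dispatch-interval knob, so when the snapshot interval was much smaller the computed dispatch point could overshoot the pace needed to finish the snapshot on time.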
mmm a / src / mongo / db / catalog / collection . cpp <nl> ppp b / src / mongo / db / catalog / collection . cpp <nl> namespace mongo { <nl> options . logIfError = false ; <nl> options . dupsAllowed = <nl> ! ( KeyPattern : : isIdKeyPattern ( descriptor - > keyPattern ( ) ) | | descriptor - > unique ( ) ) <nl> - | | replset : : ignoreUniqueIndex ( descriptor ) ; <nl> + | | repl : : ignoreUniqueIndex ( descriptor ) ; <nl> UpdateTicket * updateTicket = new UpdateTicket ( ) ; <nl> updateTickets . mutableMap ( ) [ descriptor ] = updateTicket ; <nl> Status ret = iam - > validateUpdate ( objOld , objNew , oldLocation , options , updateTicket ) ; <nl> mmm a / src / mongo / db / catalog / index_catalog . cpp <nl> ppp b / src / mongo / db / catalog / index_catalog . cpp <nl> namespace mongo { <nl> if ( ! IndexDescriptor : : isIdIndexPattern ( key ) ) { <nl> / / for non _id indexes , we check to see if replication has turned off all indexes <nl> / / we _always_ created _id index <nl> - if ( replset : : theReplSet & & ! replset : : theReplSet - > buildIndexes ( ) ) { <nl> + if ( repl : : theReplSet & & ! repl : : theReplSet - > buildIndexes ( ) ) { <nl> / / this is not exactly the right error code , but I think will make the most sense <nl> return Status ( ErrorCodes : : IndexAlreadyExists , " no indexes per repl " ) ; <nl> } <nl> namespace mongo { <nl> KeyPattern : : isIdKeyPattern ( index - > descriptor ( ) - > keyPattern ( ) ) | | <nl> index - > descriptor ( ) - > unique ( ) ; <nl> <nl> - options . dupsAllowed = replset : : ignoreUniqueIndex ( index - > descriptor ( ) ) | | ! isUnique ; <nl> + options . dupsAllowed = repl : : ignoreUniqueIndex ( index - > descriptor ( ) ) | | ! isUnique ; <nl> <nl> int64_t inserted ; <nl> return index - > accessMethod ( ) - > insert ( txn , obj , loc , options , & inserted ) ; <nl> namespace mongo { <nl> if ( ! descriptor - > unique ( ) ) <nl> continue ; <nl> <nl> - if ( replset : : ignoreUniqueIndex ( descriptor ) ) <nl> + if ( repl : : ignoreUniqueIndex ( descriptor ) ) <nl> continue ; <nl> <nl> IndexAccessMethod * iam = getIndex ( descriptor ) ; <nl> mmm a / src / mongo / db / catalog / index_create . cpp <nl> ppp b / src / mongo / db / catalog / index_create . cpp <nl> namespace mongo { <nl> options . dupsAllowed = true ; <nl> <nl> if ( descriptor - > isIdIndex ( ) | | descriptor - > unique ( ) ) { <nl> - if ( ! replset : : ignoreUniqueIndex ( descriptor ) ) { <nl> + if ( ! repl : : ignoreUniqueIndex ( descriptor ) ) { <nl> options . dupsAllowed = false ; <nl> } <nl> } <nl> namespace mongo { <nl> runner - > saveState ( ) ; <nl> BSONObj toDelete ; <nl> collection - > deleteDocument ( txn , loc , false , true , & toDelete ) ; <nl> - replset : : logOp ( txn , " d " , ns . c_str ( ) , toDelete ) ; <nl> + repl : : logOp ( txn , " d " , ns . c_str ( ) , toDelete ) ; <nl> <nl> if ( ! runner - > restoreState ( txn ) ) { <nl> / / Runner got killed somehow . This probably shouldn ' t happen . <nl> namespace mongo { <nl> false / * cappedOk * / , <nl> true / * noWarn * / , <nl> & toDelete ) ; <nl> - if ( replset : : isMasterNs ( ns . c_str ( ) ) ) { <nl> - replset : : logOp ( txn , " d " , ns . c_str ( ) , toDelete ) ; <nl> + if ( repl : : isMasterNs ( ns . c_str ( ) ) ) { <nl> + repl : : logOp ( txn , " d " , ns . c_str ( ) , toDelete ) ; <nl> } <nl> <nl> txn - > recoveryUnit ( ) - > commitIfNeeded ( ) ; <nl> mmm a / src / mongo / db / client . cpp <nl> ppp b / src / mongo / db / client . 
cpp <nl> namespace mongo { <nl> <nl> void Client : : appendLastOp ( BSONObjBuilder & b ) const { <nl> / / _lastOp is never set if replication is off <nl> - if ( replset : : theReplSet | | ! _lastOp . isNull ( ) ) { <nl> + if ( repl : : theReplSet | | ! _lastOp . isNull ( ) ) { <nl> b . appendTimestamp ( " lastOp " , _lastOp . asDate ( ) ) ; <nl> } <nl> } <nl> namespace mongo { <nl> <nl> _handshake = b . obj ( ) ; <nl> <nl> - if ( ! replset : : theReplSet | | ! o . hasField ( " member " ) ) { <nl> + if ( ! repl : : theReplSet | | ! o . hasField ( " member " ) ) { <nl> return false ; <nl> } <nl> <nl> - return replset : : theReplSet - > registerSlave ( _remoteId , o [ " member " ] . Int ( ) ) ; <nl> + return repl : : theReplSet - > registerSlave ( _remoteId , o [ " member " ] . Int ( ) ) ; <nl> } <nl> <nl> bool ClientBasic : : hasCurrent ( ) { <nl> mmm a / src / mongo / db / clientcursor . cpp <nl> ppp b / src / mongo / db / clientcursor . cpp <nl> namespace mongo { <nl> void ClientCursor : : updateSlaveLocation ( CurOp & curop ) { <nl> if ( _slaveReadTill . isNull ( ) ) <nl> return ; <nl> - mongo : : replset : : updateSlaveLocation ( curop , _ns . c_str ( ) , _slaveReadTill ) ; <nl> + mongo : : repl : : updateSlaveLocation ( curop , _ns . c_str ( ) , _slaveReadTill ) ; <nl> } <nl> <nl> / / <nl> mmm a / src / mongo / db / cloner . cpp <nl> ppp b / src / mongo / db / cloner . cpp <nl> namespace mongo { <nl> } <nl> uassertStatusOK ( loc . getStatus ( ) ) ; <nl> if ( logForRepl ) <nl> - replset : : logOp ( txn , " i " , to_collection , js ) ; <nl> + repl : : logOp ( txn , " i " , to_collection , js ) ; <nl> <nl> txn - > recoveryUnit ( ) - > commitIfNeeded ( ) ; <nl> <nl> namespace mongo { <nl> } <nl> <nl> if ( logForRepl ) <nl> - replset : : logOp ( txn , " i " , to_collection , spec ) ; <nl> + repl : : logOp ( txn , " i " , to_collection , spec ) ; <nl> <nl> txn - > recoveryUnit ( ) - > commitIfNeeded ( ) ; <nl> <nl> namespace mongo { <nl> / / cloner owns _conn in auto_ptr <nl> cloner . setConnection ( tmpConn ) ; <nl> uassert ( 15908 , errmsg , <nl> - tmpConn - > connect ( host , errmsg ) & & replset : : replAuthenticate ( tmpConn ) ) ; <nl> + tmpConn - > connect ( host , errmsg ) & & repl : : replAuthenticate ( tmpConn ) ) ; <nl> <nl> return cloner . copyCollection ( txn , ns , BSONObj ( ) , errmsg , true , false , true , false ) ; <nl> } <nl> namespace mongo { <nl> auto_ptr < DBClientBase > con ( cs . connect ( errmsg ) ) ; <nl> if ( ! con . get ( ) ) <nl> return false ; <nl> - if ( ! replset : : replAuthenticate ( con . get ( ) ) ) <nl> + if ( ! repl : : replAuthenticate ( con . get ( ) ) ) <nl> return false ; <nl> <nl> _conn = con ; <nl> mmm a / src / mongo / db / commands / apply_ops . cpp <nl> ppp b / src / mongo / db / commands / apply_ops . cpp <nl> namespace mongo { <nl> invariant ( Lock : : nested ( ) ) ; <nl> <nl> Client : : Context ctx ( ns ) ; <nl> - bool failed = replset : : applyOperation_inlock ( txn , <nl> + bool failed = repl : : applyOperation_inlock ( txn , <nl> ctx . db ( ) , <nl> temp , <nl> false , <nl> namespace mongo { <nl> } <nl> } <nl> <nl> - replset : : logOp ( txn , " c " , tempNS . c_str ( ) , cmdBuilder . done ( ) ) ; <nl> + repl : : logOp ( txn , " c " , tempNS . c_str ( ) , cmdBuilder . done ( ) ) ; <nl> } <nl> <nl> return errors = = 0 ; <nl> mmm a / src / mongo / db / commands / auth_schema_upgrade_d . cpp <nl> ppp b / src / mongo / db / commands / auth_schema_upgrade_d . 
cpp <nl> namespace mongo { <nl> namespace { <nl> <nl> Status checkReplicaMemberVersions ( ) { <nl> - if ( ! replset : : theReplSet ) <nl> + if ( ! repl : : theReplSet ) <nl> return Status : : OK ( ) ; <nl> <nl> <nl> - std : : list < replset : : Target > rsMembers ; <nl> + std : : list < repl : : Target > rsMembers ; <nl> try { <nl> - const unsigned rsSelfId = replset : : theReplSet - > selfId ( ) ; <nl> - const std : : vector < replset : : ReplSetConfig : : MemberCfg > & rsMemberConfigs = <nl> - replset : : theReplSet - > config ( ) . members ; <nl> + const unsigned rsSelfId = repl : : theReplSet - > selfId ( ) ; <nl> + const std : : vector < repl : : ReplSetConfig : : MemberCfg > & rsMemberConfigs = <nl> + repl : : theReplSet - > config ( ) . members ; <nl> for ( size_t i = 0 ; i < rsMemberConfigs . size ( ) ; + + i ) { <nl> const unsigned otherId = rsMemberConfigs [ i ] . _id ; <nl> if ( rsSelfId = = otherId ) <nl> continue ; <nl> - const replset : : Member * other = replset : : theReplSet - > findById ( otherId ) ; <nl> + const repl : : Member * other = repl : : theReplSet - > findById ( otherId ) ; <nl> if ( ! other ) { <nl> log ( ) < < " During authSchemaUpgrade , no information about replica set member " <nl> " with id " < < otherId < < " ; ignoring . " ; <nl> namespace { <nl> " is down ; ignoring . " ; <nl> continue ; <nl> } <nl> - rsMembers . push_back ( replset : : Target ( other - > fullName ( ) ) ) ; <nl> + rsMembers . push_back ( repl : : Target ( other - > fullName ( ) ) ) ; <nl> } <nl> <nl> multiCommand ( BSON ( " buildInfo " < < 1 ) , rsMembers ) ; <nl> namespace { <nl> return ex . toStatus ( ) ; <nl> } <nl> <nl> - for ( std : : list < replset : : Target > : : const_iterator iter = rsMembers . begin ( ) ; <nl> + for ( std : : list < repl : : Target > : : const_iterator iter = rsMembers . begin ( ) ; <nl> iter ! = rsMembers . end ( ) ; <nl> + + iter ) { <nl> <nl> mmm a / src / mongo / db / commands / collection_to_capped . cpp <nl> ppp b / src / mongo / db / commands / collection_to_capped . cpp <nl> namespace mongo { <nl> <nl> toCollection - > insertDocument ( txn , obj , true ) ; <nl> if ( logForReplication ) <nl> - replset : : logOp ( txn , " i " , toNs . c_str ( ) , obj ) ; <nl> + repl : : logOp ( txn , " i " , toNs . c_str ( ) , obj ) ; <nl> txn - > recoveryUnit ( ) - > commitIfNeeded ( ) ; <nl> } <nl> } <nl> namespace mongo { <nl> return appendCommandStatus ( result , status ) ; <nl> <nl> if ( ! fromRepl ) <nl> - replset : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , jsobj ) ; <nl> + repl : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , jsobj ) ; <nl> return true ; <nl> } <nl> } cmdConvertToCapped ; <nl> mmm a / src / mongo / db / commands / compact . cpp <nl> ppp b / src / mongo / db / commands / compact . cpp <nl> namespace mongo { <nl> return false ; <nl> } <nl> <nl> - if ( replset : : isCurrentlyAReplSetPrimary ( ) & & ! cmdObj [ " force " ] . trueValue ( ) ) { <nl> + if ( repl : : isCurrentlyAReplSetPrimary ( ) & & ! cmdObj [ " force " ] . trueValue ( ) ) { <nl> errmsg = " will not run compact on an active replica set primary as this is a slow blocking operation . use force : true to force " ; <nl> return false ; <nl> } <nl> mmm a / src / mongo / db / commands / create_indexes . cpp <nl> ppp b / src / mongo / db / commands / create_indexes . cpp <nl> namespace mongo { <nl> <nl> if ( ! fromRepl ) { <nl> std : : string systemIndexes = ns . getSystemIndexesCollection ( ) ; <nl> - replset : : logOp ( txn , " i " , systemIndexes . 
c_str ( ) , spec ) ; <nl> + repl : : logOp ( txn , " i " , systemIndexes . c_str ( ) , spec ) ; <nl> } <nl> } <nl> <nl> mmm a / src / mongo / db / commands / drop_indexes . cpp <nl> ppp b / src / mongo / db / commands / drop_indexes . cpp <nl> namespace mongo { <nl> Lock : : DBWrite dbXLock ( dbname ) ; <nl> bool ok = wrappedRun ( txn , dbname , jsobj , errmsg , anObjBuilder ) ; <nl> if ( ok & & ! fromRepl ) <nl> - replset : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , jsobj ) ; <nl> + repl : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , jsobj ) ; <nl> return ok ; <nl> } <nl> bool wrappedRun ( OperationContext * txn , <nl> mmm a / src / mongo / db / commands / get_last_error . cpp <nl> ppp b / src / mongo / db / commands / get_last_error . cpp <nl> namespace mongo { <nl> <nl> / / If we got an electionId , make sure it matches <nl> if ( electionIdPresent ) { <nl> - if ( ! replset : : theReplSet ) { <nl> + if ( ! repl : : theReplSet ) { <nl> / / Ignore electionIds of 0 from mongos . <nl> if ( electionId ! = OID ( ) ) { <nl> errmsg = " wElectionId passed but no replication active " ; <nl> namespace mongo { <nl> } <nl> } <nl> else { <nl> - if ( electionId ! = replset : : theReplSet - > getElectionId ( ) ) { <nl> + if ( electionId ! = repl : : theReplSet - > getElectionId ( ) ) { <nl> LOG ( 3 ) < < " oid passed in is " < < electionId <nl> - < < " , but our id is " < < replset : : theReplSet - > getElectionId ( ) ; <nl> + < < " , but our id is " < < repl : : theReplSet - > getElectionId ( ) ; <nl> errmsg = " election occurred after write " ; <nl> result . append ( " code " , ErrorCodes : : WriteConcernFailed ) ; <nl> return false ; <nl> mmm a / src / mongo / db / commands / mr . cpp <nl> ppp b / src / mongo / db / commands / mr . cpp <nl> namespace mongo { <nl> b . append ( " create " , nsToCollectionSubstring ( _config . tempNamespace ) ) ; <nl> b . appendElements ( options . toBSON ( ) ) ; <nl> string logNs = nsToDatabase ( _config . tempNamespace ) + " . $ cmd " ; <nl> - replset : : logOp ( _txn , " c " , logNs . c_str ( ) , b . obj ( ) ) ; <nl> + repl : : logOp ( _txn , " c " , logNs . c_str ( ) , b . obj ( ) ) ; <nl> } <nl> <nl> for ( vector < BSONObj > : : iterator it = indexesToInsert . begin ( ) ; <nl> namespace mongo { <nl> tempColl - > getIndexCatalog ( ) - > createIndex ( _txn , * it , false ) ; <nl> / / Log the createIndex operation . <nl> string logNs = nsToDatabase ( _config . tempNamespace ) + " . system . indexes " ; <nl> - replset : : logOp ( _txn , " i " , logNs . c_str ( ) , * it ) ; <nl> + repl : : logOp ( _txn , " i " , logNs . c_str ( ) , * it ) ; <nl> } <nl> } <nl> <nl> namespace mongo { <nl> BSONObj bo = b . obj ( ) ; <nl> <nl> coll - > insertDocument ( _txn , bo , true ) ; <nl> - replset : : logOp ( _txn , " i " , ns . c_str ( ) , bo ) ; <nl> + repl : : logOp ( _txn , " i " , ns . c_str ( ) , bo ) ; <nl> } <nl> <nl> / * * <nl> namespace mongo { <nl> / * why ! replset ? <nl> bad things happen with - - slave ( i think because of this ) <nl> * / <nl> - virtual bool slaveOk ( ) const { return ! replset : : replSet ; } <nl> + virtual bool slaveOk ( ) const { return ! repl : : replSet ; } <nl> <nl> virtual bool slaveOverrideOk ( ) const { return true ; } <nl> <nl> namespace mongo { <nl> return false ; <nl> } <nl> <nl> - if ( replset : : replSet & & state . isOnDisk ( ) ) { <nl> + if ( repl : : replSet & & state . 
isOnDisk ( ) ) { <nl> / / this means that it will be doing a write operation , make sure we are on Master <nl> / / ideally this check should be in slaveOk ( ) , but at that point config is not known <nl> - if ( ! replset : : isMasterNs ( dbname . c_str ( ) ) ) { <nl> + if ( ! repl : : isMasterNs ( dbname . c_str ( ) ) ) { <nl> errmsg = " not master " ; <nl> return false ; <nl> } <nl> namespace mongo { <nl> public : <nl> void help ( stringstream & h ) const { h < < " internal " ; } <nl> MapReduceFinishCommand ( ) : Command ( " mapreduce . shardedfinish " ) { } <nl> - virtual bool slaveOk ( ) const { return ! replset : : replSet ; } <nl> + virtual bool slaveOk ( ) const { return ! repl : : replSet ; } <nl> virtual bool slaveOverrideOk ( ) const { return true ; } <nl> virtual bool isWriteCommandForConfigServer ( ) const { return false ; } <nl> virtual void addRequiredPrivileges ( const std : : string & dbname , <nl> mmm a / src / mongo / db / commands / oplog_note . cpp <nl> ppp b / src / mongo / db / commands / oplog_note . cpp <nl> namespace mongo { <nl> string & errmsg , <nl> BSONObjBuilder & result , <nl> bool fromRepl ) { <nl> - if ( ! replset : : replSettings . master ) { <nl> + if ( ! repl : : replSettings . master ) { <nl> return appendCommandStatus ( result , Status ( <nl> ErrorCodes : : NoReplicationEnabled , <nl> " Must have replication set up to run \ " appendOplogNote \ " " ) ) ; <nl> namespace mongo { <nl> return appendCommandStatus ( result , status ) ; <nl> } <nl> <nl> - replset : : logOpComment ( dataElement . Obj ( ) ) ; <nl> + repl : : logOpComment ( dataElement . Obj ( ) ) ; <nl> return true ; <nl> } <nl> <nl> mmm a / src / mongo / db / commands / rename_collection . cpp <nl> ppp b / src / mongo / db / commands / rename_collection . cpp <nl> namespace mongo { <nl> Lock : : GlobalWrite globalWriteLock ; <nl> bool ok = wrappedRun ( txn , dbname , cmdObj , errmsg , result , fromRepl ) ; <nl> if ( ok & & ! fromRepl ) <nl> - replset : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , cmdObj ) ; <nl> + repl : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , cmdObj ) ; <nl> return ok ; <nl> } <nl> virtual bool wrappedRun ( OperationContext * txn , <nl> mmm a / src / mongo / db / commands / test_commands . cpp <nl> ppp b / src / mongo / db / commands / test_commands . cpp <nl> namespace mongo { <nl> IndexBuilder : : restoreIndexes ( indexes ) ; <nl> <nl> if ( ! fromRepl ) <nl> - replset : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , cmdObj ) ; <nl> + repl : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , cmdObj ) ; <nl> return true ; <nl> } <nl> } ; <nl> mmm a / src / mongo / db / commands / write_commands / batch_executor . cpp <nl> ppp b / src / mongo / db / commands / write_commands / batch_executor . cpp <nl> namespace mongo { <nl> response - > setWriteConcernError ( wcError . release ( ) ) ; <nl> } <nl> <nl> - if ( replset : : anyReplEnabled ( ) ) { <nl> + if ( repl : : anyReplEnabled ( ) ) { <nl> response - > setLastOp ( _client - > getLastOp ( ) ) ; <nl> - if ( replset : : theReplSet ) { <nl> - response - > setElectionId ( replset : : theReplSet - > getElectionId ( ) ) ; <nl> + if ( repl : : theReplSet ) { <nl> + response - > setElectionId ( repl : : theReplSet - > getElectionId ( ) ) ; <nl> } <nl> } <nl> <nl> namespace mongo { <nl> } <nl> <nl> static bool checkIsMasterForCollection ( const std : : string & ns , WriteOpResult * result ) { <nl> - if ( ! replset : : isMasterNs ( ns . c_str ( ) ) ) { <nl> + if ( ! 
repl : : isMasterNs ( ns . c_str ( ) ) ) { <nl> WriteErrorDetail * errorDetail = new WriteErrorDetail ; <nl> result - > setError ( errorDetail ) ; <nl> errorDetail - > setErrCode ( ErrorCodes : : NotMaster ) ; <nl> namespace mongo { <nl> result - > setError ( toWriteError ( status . getStatus ( ) ) ) ; <nl> } <nl> else { <nl> - replset : : logOp ( txn , " i " , insertNS . c_str ( ) , docToInsert ) ; <nl> + repl : : logOp ( txn , " i " , insertNS . c_str ( ) , docToInsert ) ; <nl> txn - > recoveryUnit ( ) - > commitIfNeeded ( ) ; <nl> result - > getStats ( ) . n = 1 ; <nl> } <nl> namespace mongo { <nl> result - > setError ( toWriteError ( status ) ) ; <nl> } <nl> else { <nl> - replset : : logOp ( txn , " i " , indexNS . c_str ( ) , indexDesc ) ; <nl> + repl : : logOp ( txn , " i " , indexNS . c_str ( ) , indexDesc ) ; <nl> result - > getStats ( ) . n = 1 ; <nl> } <nl> } <nl> mmm a / src / mongo / db / db . cpp <nl> ppp b / src / mongo / db / db . cpp <nl> namespace mongo { <nl> server - > setupSockets ( ) ; <nl> <nl> logStartup ( ) ; <nl> - replset : : startReplication ( ) ; <nl> + repl : : startReplication ( ) ; <nl> if ( serverGlobalParams . isHttpInterfaceEnabled ) <nl> boost : : thread web ( stdx : : bind ( & webServerThread , <nl> new RestAdminAccess ( ) , / / takes ownership <nl> namespace mongo { <nl> DataFile * p = ctx . db ( ) - > getExtentManager ( ) - > getFile ( & txn , 0 ) ; <nl> DataFileHeader * h = p - > getHeader ( ) ; <nl> <nl> - if ( replset : : replSettings . usingReplSets ( ) ) { <nl> + if ( repl : : replSettings . usingReplSets ( ) ) { <nl> / / we only care about the _id index if we are in a replset <nl> checkForIdIndexes ( ctx . db ( ) ) ; <nl> } <nl> namespace mongo { <nl> * / <nl> unsigned long long checkIfReplMissingFromCommandLine ( ) { <nl> Lock : : GlobalWrite lk ; / / this is helpful for the query below to work as you can ' t open files when readlocked <nl> - if ( ! replset : : replSettings . usingReplSets ( ) ) { <nl> + if ( ! repl : : replSettings . usingReplSets ( ) ) { <nl> DBDirectClient c ; <nl> return c . count ( " local . system . replset " ) ; <nl> } <nl> namespace mongo { <nl> l < < " MongoDB starting : pid = " < < pid <nl> < < " port = " < < serverGlobalParams . port <nl> < < " dbpath = " < < storageGlobalParams . dbpath ; <nl> - if ( replset : : replSettings . master ) l < < " master = " < < replset : : replSettings . master ; <nl> - if ( replset : : replSettings . slave ) l < < " slave = " < < ( int ) replset : : replSettings . slave ; <nl> + if ( repl : : replSettings . master ) l < < " master = " < < repl : : replSettings . master ; <nl> + if ( repl : : replSettings . slave ) l < < " slave = " < < ( int ) repl : : replSettings . slave ; <nl> l < < ( is32bit ? " 32 " : " 64 " ) < < " - bit host = " < < getHostNameCached ( ) < < endl ; <nl> } <nl> DEV log ( ) < < " _DEBUG build ( which is slower ) " < < endl ; <nl> namespace mongo { <nl> / / promotion to primary . On pure slaves , they are only cleared when the oplog tells them to . <nl> / / The local DB is special because it is not replicated . See SERVER - 10927 for more details . <nl> const bool shouldClearNonLocalTmpCollections = ! ( missingRepl <nl> - | | replset : : replSettings . usingReplSets ( ) <nl> - | | replset : : replSettings . slave = = replset : : SimpleSlave ) ; <nl> + | | repl : : replSettings . usingReplSets ( ) <nl> + | | repl : : replSettings . 
slave = = repl : : SimpleSlave ) ; <nl> repairDatabasesAndCheckVersion ( shouldClearNonLocalTmpCollections ) ; <nl> <nl> if ( mongodGlobalParams . upgrade ) <nl> mmm a / src / mongo / db / dbcommands . cpp <nl> ppp b / src / mongo / db / dbcommands . cpp <nl> namespace mongo { <nl> bool force = cmdObj . hasField ( " force " ) & & cmdObj [ " force " ] . trueValue ( ) ; <nl> <nl> if ( ! force & & <nl> - replset : : theReplSet & & <nl> - replset : : theReplSet - > getConfig ( ) . members . size ( ) > 1 & & <nl> - replset : : theReplSet - > isPrimary ( ) ) { <nl> + repl : : theReplSet & & <nl> + repl : : theReplSet - > getConfig ( ) . members . size ( ) > 1 & & <nl> + repl : : theReplSet - > isPrimary ( ) ) { <nl> long long timeout , now , start ; <nl> timeout = now = start = curTimeMicros64 ( ) / 1000000 ; <nl> if ( cmdObj . hasField ( " timeoutSecs " ) ) { <nl> timeout + = cmdObj [ " timeoutSecs " ] . numberLong ( ) ; <nl> } <nl> <nl> - OpTime lastOp = replset : : theReplSet - > lastOpTimeWritten ; <nl> - OpTime closest = replset : : theReplSet - > lastOtherOpTime ( ) ; <nl> + OpTime lastOp = repl : : theReplSet - > lastOpTimeWritten ; <nl> + OpTime closest = repl : : theReplSet - > lastOtherOpTime ( ) ; <nl> long long int diff = lastOp . getSecs ( ) - closest . getSecs ( ) ; <nl> while ( now < = timeout & & ( diff < 0 | | diff > 10 ) ) { <nl> sleepsecs ( 1 ) ; <nl> now + + ; <nl> <nl> - lastOp = replset : : theReplSet - > lastOpTimeWritten ; <nl> - closest = replset : : theReplSet - > lastOtherOpTime ( ) ; <nl> + lastOp = repl : : theReplSet - > lastOpTimeWritten ; <nl> + closest = repl : : theReplSet - > lastOtherOpTime ( ) ; <nl> diff = lastOp . getSecs ( ) - closest . getSecs ( ) ; <nl> } <nl> <nl> namespace mongo { <nl> } <nl> <nl> / / step down <nl> - replset : : theReplSet - > stepDown ( 120 ) ; <nl> + repl : : theReplSet - > stepDown ( 120 ) ; <nl> <nl> log ( ) < < " waiting for secondaries to catch up " < < endl ; <nl> <nl> - lastOp = replset : : theReplSet - > lastOpTimeWritten ; <nl> + lastOp = repl : : theReplSet - > lastOpTimeWritten ; <nl> while ( lastOp ! = closest & & now - start < 60 ) { <nl> - closest = replset : : theReplSet - > lastOtherOpTime ( ) ; <nl> + closest = repl : : theReplSet - > lastOtherOpTime ( ) ; <nl> <nl> now + + ; <nl> sleepsecs ( 1 ) ; <nl> namespace mongo { <nl> log ( ) < < " dropDatabase " < < dbname < < " finished " ; <nl> <nl> if ( ! fromRepl ) <nl> - replset : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , cmdObj ) ; <nl> + repl : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , cmdObj ) ; <nl> } <nl> <nl> result . append ( " dropped " , dbname ) ; <nl> namespace mongo { <nl> <nl> if ( s . isOK ( ) ) { <nl> if ( ! fromRepl ) <nl> - replset : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , cmdObj ) ; <nl> + repl : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , cmdObj ) ; <nl> return true ; <nl> } <nl> <nl> namespace mongo { <nl> CmdCount ( ) : Command ( " count " ) { } <nl> virtual bool slaveOk ( ) const { <nl> / / ok on - - slave setups <nl> - return replset : : replSettings . slave = = replset : : SimpleSlave ; <nl> + return repl : : replSettings . slave = = repl : : SimpleSlave ; <nl> } <nl> virtual bool slaveOverrideOk ( ) const { return true ; } <nl> virtual bool maintenanceOk ( ) const { return false ; } <nl> namespace mongo { <nl> } <nl> <nl> if ( ok & & ! fromRepl ) <nl> - replset : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . 
c_str ( ) , jsobj ) ; <nl> + repl : : logOp ( txn , " c " , ( dbname + " . $ cmd " ) . c_str ( ) , jsobj ) ; <nl> <nl> return ok ; <nl> } <nl> namespace mongo { <nl> assumption needs to be audited and documented . * / <nl> class MaintenanceModeSetter { <nl> public : <nl> - MaintenanceModeSetter ( ) : maintenanceModeSet ( replset : : theReplSet - > setMaintenanceMode ( true ) ) <nl> + MaintenanceModeSetter ( ) : maintenanceModeSet ( repl : : theReplSet - > setMaintenanceMode ( true ) ) <nl> { } <nl> ~ MaintenanceModeSetter ( ) { <nl> if ( maintenanceModeSet ) <nl> - replset : : theReplSet - > setMaintenanceMode ( false ) ; <nl> + repl : : theReplSet - > setMaintenanceMode ( false ) ; <nl> } <nl> private : <nl> bool maintenanceModeSet ; <nl> namespace mongo { <nl> } <nl> <nl> bool canRunHere = <nl> - replset : : isMasterNs ( dbname . c_str ( ) ) | | <nl> + repl : : isMasterNs ( dbname . c_str ( ) ) | | <nl> c - > slaveOk ( ) | | <nl> ( c - > slaveOverrideOk ( ) & & ( queryOptions & QueryOption_SlaveOk ) ) | | <nl> fromRepl ; <nl> namespace mongo { <nl> } <nl> <nl> if ( ! c - > maintenanceOk ( ) <nl> - & & replset : : theReplSet <nl> - & & ! replset : : isMasterNs ( dbname . c_str ( ) ) <nl> - & & ! replset : : theReplSet - > isSecondary ( ) ) { <nl> + & & repl : : theReplSet <nl> + & & ! repl : : isMasterNs ( dbname . c_str ( ) ) <nl> + & & ! repl : : theReplSet - > isSecondary ( ) ) { <nl> result . append ( " note " , " from execCommand " ) ; <nl> appendCommandStatus ( result , false , " node is recovering " ) ; <nl> return ; <nl> namespace mongo { <nl> <nl> client . curop ( ) - > setCommand ( c ) ; <nl> <nl> - if ( c - > maintenanceMode ( ) & & replset : : theReplSet ) { <nl> + if ( c - > maintenanceMode ( ) & & repl : : theReplSet ) { <nl> mmSetter . reset ( new MaintenanceModeSetter ( ) ) ; <nl> } <nl> <nl> namespace mongo { <nl> appendCommandStatus ( result , retval , errmsg ) ; <nl> <nl> / / For commands from mongos , append some info to help getLastError ( w ) work . <nl> - if ( replset : : theReplSet ) { <nl> + if ( repl : : theReplSet ) { <nl> / / Detect mongos connections by looking for setShardVersion to have been run previously <nl> / / on this connection . <nl> if ( shardingState . needCollectionMetadata ( dbname ) ) { <nl> appendGLEHelperData ( result , client . getLastOp ( ) , <nl> - replset : : theReplSet - > getElectionId ( ) ) ; <nl> + repl : : theReplSet - > getElectionId ( ) ) ; <nl> } <nl> } <nl> return ; <nl> mmm a / src / mongo / db / dbhelpers . cpp <nl> ppp b / src / mongo / db / dbhelpers . cpp <nl> namespace mongo { <nl> BSONObj deletedId ; <nl> collection - > deleteDocument ( txn , rloc , false , false , & deletedId ) ; <nl> / / The above throws on failure , and so is not logged <nl> - replset : : logOp ( txn , " d " , ns . c_str ( ) , deletedId , 0 , 0 , fromMigrate ) ; <nl> + repl : : logOp ( txn , " d " , ns . c_str ( ) , deletedId , 0 , 0 , fromMigrate ) ; <nl> numDeleted + + ; <nl> } <nl> <nl> Timer secondaryThrottleTime ; <nl> <nl> if ( secondaryThrottle & & numDeleted > 0 ) { <nl> - if ( ! replset : : waitForReplication ( c . getLastOp ( ) , 2 , 60 / * seconds to wait * / ) ) { <nl> + if ( ! repl : : waitForReplication ( c . getLastOp ( ) , 2 , 60 / * seconds to wait * / ) ) { <nl> warning ( ) < < " replication to secondaries for removeRange at least 60 seconds behind " < < endl ; <nl> } <nl> millisWaitingForReplication + = secondaryThrottleTime . millis ( ) ; <nl> mmm a / src / mongo / db / index / btree_based_bulk_access_method . 
cpp <nl> ppp b / src / mongo / db / index / btree_based_bulk_access_method . cpp <nl> namespace mongo { <nl> IndexCatalogEntry * entry = _real - > _btreeState ; <nl> <nl> bool dupsAllowed = ! entry - > descriptor ( ) - > unique ( ) <nl> - | | replset : : ignoreUniqueIndex ( entry - > descriptor ( ) ) ; <nl> + | | repl : : ignoreUniqueIndex ( entry - > descriptor ( ) ) ; <nl> <nl> bool dropDups = entry - > descriptor ( ) - > dropDups ( ) | | inDBRepair ; <nl> <nl> mmm a / src / mongo / db / index_builder . cpp <nl> ppp b / src / mongo / db / index_builder . cpp <nl> namespace mongo { <nl> Client : : initThread ( name ( ) . c_str ( ) ) ; <nl> Lock : : ParallelBatchWriterMode : : iAmABatchParticipant ( ) ; <nl> <nl> - replset : : replLocalAuth ( ) ; <nl> + repl : : replLocalAuth ( ) ; <nl> <nl> cc ( ) . curop ( ) - > reset ( HostAndPort ( ) , dbInsert ) ; <nl> NamespaceString ns ( _index [ " ns " ] . String ( ) ) ; <nl> mmm a / src / mongo / db / index_rebuilder . cpp <nl> ppp b / src / mongo / db / index_rebuilder . cpp <nl> namespace mongo { <nl> catch ( const DBException & e ) { <nl> warning ( ) < < " Index rebuilding did not complete : " < < e . what ( ) < < endl ; <nl> } <nl> - boost : : unique_lock < boost : : mutex > lk ( replset : : ReplSet : : rss . mtx ) ; <nl> - replset : : ReplSet : : rss . indexRebuildDone = true ; <nl> - replset : : ReplSet : : rss . cond . notify_all ( ) ; <nl> + boost : : unique_lock < boost : : mutex > lk ( repl : : ReplSet : : rss . mtx ) ; <nl> + repl : : ReplSet : : rss . indexRebuildDone = true ; <nl> + repl : : ReplSet : : rss . cond . notify_all ( ) ; <nl> LOG ( 1 ) < < " checking complete " < < endl ; <nl> } <nl> <nl> mmm a / src / mongo / db / instance . cpp <nl> ppp b / src / mongo / db / instance . cpp <nl> namespace mongo { <nl> Database * database = ctx - > db ( ) ; <nl> verify ( database - > name ( ) = = db ) ; <nl> <nl> - replset : : oplogCheckCloseDatabase ( database ) ; / / oplog caches some things , dirty its caches <nl> + repl : : oplogCheckCloseDatabase ( database ) ; / / oplog caches some things , dirty its caches <nl> <nl> if ( BackgroundOperation : : inProgForDb ( db ) ) { <nl> log ( ) < < " warning : bg op in prog during close db ? " < < db < < endl ; <nl> namespace mongo { <nl> last = getLastSetOptime ( ) ; <nl> } <nl> else { <nl> - replset : : waitForOptimeChange ( last , 1000 / * ms * / ) ; <nl> + repl : : waitForOptimeChange ( last , 1000 / * ms * / ) ; <nl> } <nl> } <nl> <nl> namespace mongo { <nl> return ; <nl> <nl> uassertStatusOK ( status ) ; <nl> - replset : : logOp ( txn , " i " , ns , js ) ; <nl> + repl : : logOp ( txn , " i " , ns , js ) ; <nl> return ; <nl> } <nl> <nl> namespace mongo { <nl> <nl> StatusWith < DiskLoc > status = collection - > insertDocument ( txn , js , true ) ; <nl> uassertStatusOK ( status . getStatus ( ) ) ; <nl> - replset : : logOp ( txn , " i " , ns , js ) ; <nl> + repl : : logOp ( txn , " i " , ns , js ) ; <nl> } <nl> <nl> NOINLINE_DECL void insertMulti ( OperationContext * txn , <nl> namespace mongo { <nl> <nl> / / CONCURRENCY TODO : is being read locked in big log sufficient here ? <nl> / / writelock is used to synchronize stepdowns w / writes <nl> - uassert ( 10058 , " not master " , replset : : isMasterNs ( ns ) ) ; <nl> + uassert ( 10058 , " not master " , repl : : isMasterNs ( ns ) ) ; <nl> <nl> if ( handlePossibleShardedMessage ( m , 0 ) ) <nl> return ; <nl> namespace mongo { <nl> return true ; <nl> / / we have a local database . 
return true if oplog isn ' t empty <nl> { <nl> - Lock : : DBRead lk ( replset : : rsoplog ) ; <nl> + Lock : : DBRead lk ( repl : : rsoplog ) ; <nl> BSONObj o ; <nl> - if ( Helpers : : getFirst ( replset : : rsoplog , o ) ) <nl> + if ( Helpers : : getFirst ( repl : : rsoplog , o ) ) <nl> return true ; <nl> } <nl> } <nl> namespace { <nl> <nl> void exitCleanly ( ExitCode code ) { <nl> killCurrentOp . killAll ( ) ; <nl> - if ( replset : : theReplSet ) { <nl> - replset : : theReplSet - > shutdown ( ) ; <nl> + if ( repl : : theReplSet ) { <nl> + repl : : theReplSet - > shutdown ( ) ; <nl> } <nl> <nl> { <nl> mmm a / src / mongo / db / mongod_options . cpp <nl> ppp b / src / mongo / db / mongod_options . cpp <nl> namespace mongo { <nl> storageGlobalParams . noTableScan = params [ " notablescan " ] . as < bool > ( ) ; <nl> } <nl> if ( params . count ( " master " ) ) { <nl> - replset : : replSettings . master = params [ " master " ] . as < bool > ( ) ; <nl> + repl : : replSettings . master = params [ " master " ] . as < bool > ( ) ; <nl> } <nl> if ( params . count ( " slave " ) & & params [ " slave " ] . as < bool > ( ) = = true ) { <nl> - replset : : replSettings . slave = replset : : SimpleSlave ; <nl> + repl : : replSettings . slave = repl : : SimpleSlave ; <nl> } <nl> if ( params . count ( " slavedelay " ) ) { <nl> - replset : : replSettings . slavedelay = params [ " slavedelay " ] . as < int > ( ) ; <nl> + repl : : replSettings . slavedelay = params [ " slavedelay " ] . as < int > ( ) ; <nl> } <nl> if ( params . count ( " fastsync " ) ) { <nl> - replset : : replSettings . fastsync = params [ " fastsync " ] . as < bool > ( ) ; <nl> + repl : : replSettings . fastsync = params [ " fastsync " ] . as < bool > ( ) ; <nl> } <nl> if ( params . count ( " autoresync " ) ) { <nl> - replset : : replSettings . autoresync = params [ " autoresync " ] . as < bool > ( ) ; <nl> + repl : : replSettings . autoresync = params [ " autoresync " ] . as < bool > ( ) ; <nl> } <nl> if ( params . count ( " source " ) ) { <nl> / * specifies what the source in local . sources should be * / <nl> - replset : : replSettings . source = params [ " source " ] . as < string > ( ) . c_str ( ) ; <nl> + repl : : replSettings . source = params [ " source " ] . as < string > ( ) . c_str ( ) ; <nl> } <nl> if ( params . count ( " pretouch " ) ) { <nl> - replset : : replSettings . pretouch = params [ " pretouch " ] . as < int > ( ) ; <nl> + repl : : replSettings . pretouch = params [ " pretouch " ] . as < int > ( ) ; <nl> } <nl> if ( params . count ( " replication . replSetName " ) ) { <nl> - replset : : replSettings . replSet = params [ " replication . replSetName " ] . as < string > ( ) . c_str ( ) ; <nl> + repl : : replSettings . replSet = params [ " replication . replSetName " ] . as < string > ( ) . c_str ( ) ; <nl> } <nl> if ( params . count ( " replication . replSet " ) ) { <nl> / * seed list of hosts for the repl set * / <nl> - replset : : replSettings . replSet = params [ " replication . replSet " ] . as < string > ( ) . c_str ( ) ; <nl> + repl : : replSettings . replSet = params [ " replication . replSet " ] . as < string > ( ) . c_str ( ) ; <nl> } <nl> if ( params . count ( " replication . secondaryIndexPrefetch " ) ) { <nl> - replset : : replSettings . rsIndexPrefetch = <nl> + repl : : replSettings . rsIndexPrefetch = <nl> params [ " replication . secondaryIndexPrefetch " ] . as < std : : string > ( ) ; <nl> } <nl> <nl> namespace mongo { <nl> } <nl> <nl> if ( params . count ( " only " ) ) { <nl> - replset : : replSettings . 
only = params [ " only " ] . as < string > ( ) . c_str ( ) ; <nl> + repl : : replSettings . only = params [ " only " ] . as < string > ( ) . c_str ( ) ; <nl> } <nl> if ( params . count ( " storage . nsSize " ) ) { <nl> int x = params [ " storage . nsSize " ] . as < int > ( ) ; <nl> namespace mongo { <nl> < < " MB is too big for 32 bit version . Use 64 bit build instead . " ; <nl> return Status ( ErrorCodes : : BadValue , sb . str ( ) ) ; <nl> } <nl> - replset : : replSettings . oplogSize = x * 1024 * 1024 ; <nl> - verify ( replset : : replSettings . oplogSize > 0 ) ; <nl> + repl : : replSettings . oplogSize = x * 1024 * 1024 ; <nl> + verify ( repl : : replSettings . oplogSize > 0 ) ; <nl> } <nl> if ( params . count ( " cacheSize " ) ) { <nl> long x = params [ " cacheSize " ] . as < long > ( ) ; <nl> namespace mongo { <nl> params [ " sharding . clusterRole " ] . as < std : : string > ( ) = = " configsvr " ) { <nl> serverGlobalParams . configsvr = true ; <nl> storageGlobalParams . smallfiles = true ; / / config server implies small files <nl> - if ( replset : : replSettings . usingReplSets ( ) <nl> - | | replset : : replSettings . master <nl> - | | replset : : replSettings . slave ) { <nl> + if ( repl : : replSettings . usingReplSets ( ) <nl> + | | repl : : replSettings . master <nl> + | | repl : : replSettings . slave ) { <nl> return Status ( ErrorCodes : : BadValue , <nl> " replication should not be enabled on a config server " ) ; <nl> } <nl> namespace mongo { <nl> <nl> if ( ! params . count ( " storage . dbPath " ) ) <nl> storageGlobalParams . dbpath = " / data / configdb " ; <nl> - replset : : replSettings . master = true ; <nl> + repl : : replSettings . master = true ; <nl> if ( ! params . count ( " replication . oplogSizeMB " ) ) <nl> - replset : : replSettings . oplogSize = 5 * 1024 * 1024 ; <nl> + repl : : replSettings . oplogSize = 5 * 1024 * 1024 ; <nl> } <nl> <nl> if ( params . count ( " sharding . archiveMovedChunks " ) ) { <nl> namespace mongo { <nl> storageGlobalParams . repairpath = storageGlobalParams . dbpath ; <nl> } <nl> <nl> - if ( replset : : replSettings . pretouch ) <nl> - log ( ) < < " - - pretouch " < < replset : : replSettings . pretouch ; <nl> + if ( repl : : replSettings . pretouch ) <nl> + log ( ) < < " - - pretouch " < < repl : : replSettings . pretouch ; <nl> <nl> / / Check if we are 32 bit and have not explicitly specified any journaling options <nl> if ( sizeof ( void * ) = = 4 & & ! params . count ( " storage . journal . enabled " ) ) { <nl> mmm a / src / mongo / db / operation_context_impl . cpp <nl> ppp b / src / mongo / db / operation_context_impl . cpp <nl> namespace mongo { <nl> <nl> bool OperationContextImpl : : isPrimaryFor ( const StringData & ns ) { <nl> string s = ns . toString ( ) ; / / TODO : fix copy <nl> - return replset : : isMasterNs ( s . c_str ( ) ) ; <nl> + return repl : : isMasterNs ( s . c_str ( ) ) ; <nl> } <nl> <nl> OperationContext * OperationContextImpl : : factory ( ) { <nl> mmm a / src / mongo / db / ops / delete_executor . cpp <nl> ppp b / src / mongo / db / ops / delete_executor . cpp <nl> namespace mongo { <nl> <nl> uassert ( ErrorCodes : : NotMaster , <nl> str : : stream ( ) < < " Not primary while removing from " < < ns . ns ( ) , <nl> - ! logop | | replset : : isMasterNs ( ns . ns ( ) . c_str ( ) ) ) ; <nl> + ! logop | | repl : : isMasterNs ( ns . ns ( ) . c_str ( ) ) ) ; <nl> <nl> long long nDeleted = 0 ; <nl> <nl> namespace mongo { <nl> if ( oldYieldCount ! 
= curOp - > numYields ( ) ) { <nl> uassert ( ErrorCodes : : NotMaster , <nl> str : : stream ( ) < < " No longer primary while removing from " < < ns . ns ( ) , <nl> - ! logop | | replset : : isMasterNs ( ns . ns ( ) . c_str ( ) ) ) ; <nl> + ! logop | | repl : : isMasterNs ( ns . ns ( ) . c_str ( ) ) ) ; <nl> oldYieldCount = curOp - > numYields ( ) ; <nl> } <nl> BSONObj toDelete ; <nl> namespace mongo { <nl> } <nl> else { <nl> bool replJustOne = true ; <nl> - replset : : logOp ( txn , " d " , ns . ns ( ) . c_str ( ) , toDelete , 0 , & replJustOne ) ; <nl> + repl : : logOp ( txn , " d " , ns . ns ( ) . c_str ( ) , toDelete , 0 , & replJustOne ) ; <nl> } <nl> } <nl> <nl> mmm a / src / mongo / db / ops / update . cpp <nl> ppp b / src / mongo / db / ops / update . cpp <nl> namespace mongo { <nl> <nl> uassert ( ErrorCodes : : NotMaster , <nl> mongoutils : : str : : stream ( ) < < " Not primary while updating " < < nsString . ns ( ) , <nl> - ! request . shouldCallLogOp ( ) | | replset : : isMasterNs ( nsString . ns ( ) . c_str ( ) ) ) ; <nl> + ! request . shouldCallLogOp ( ) | | repl : : isMasterNs ( nsString . ns ( ) . c_str ( ) ) ) ; <nl> <nl> while ( true ) { <nl> / / Get next doc , and location <nl> namespace mongo { <nl> / / Call logOp if requested . <nl> if ( request . shouldCallLogOp ( ) & & ! logObj . isEmpty ( ) ) { <nl> BSONObj idQuery = driver - > makeOplogEntryQuery ( newObj , request . isMulti ( ) ) ; <nl> - replset : : logOp ( txn , " u " , nsString . ns ( ) . c_str ( ) , logObj , & idQuery , <nl> + repl : : logOp ( txn , " u " , nsString . ns ( ) . c_str ( ) , logObj , & idQuery , <nl> NULL , request . isFromMigration ( ) ) ; <nl> } <nl> <nl> namespace mongo { <nl> ! request . isGod ( ) / * enforceQuota * / ) ; <nl> uassertStatusOK ( newLoc . getStatus ( ) ) ; <nl> if ( request . shouldCallLogOp ( ) ) { <nl> - replset : : logOp ( txn , " i " , nsString . ns ( ) . c_str ( ) , newObj , <nl> + repl : : logOp ( txn , " i " , nsString . ns ( ) . c_str ( ) , newObj , <nl> NULL , NULL , request . isFromMigration ( ) ) ; <nl> } <nl> <nl> mmm a / src / mongo / db / pdfile . cpp <nl> ppp b / src / mongo / db / pdfile . cpp <nl> namespace mongo { <nl> options = b . obj ( ) ; <nl> } <nl> string logNs = nsToDatabase ( ns ) + " . $ cmd " ; <nl> - replset : : logOp ( txn , " c " , logNs . c_str ( ) , options ) ; <nl> + repl : : logOp ( txn , " c " , logNs . c_str ( ) , options ) ; <nl> } <nl> <nl> return Status : : OK ( ) ; <nl> mmm a / src / mongo / db / prefetch . cpp <nl> ppp b / src / mongo / db / prefetch . cpp <nl> namespace mongo { <nl> void prefetchIndexPages ( Collection * collection , const BSONObj & obj ) { <nl> DiskLoc unusedDl ; / / unused <nl> BSONObjSet unusedKeys ; <nl> - replset : : ReplSetImpl : : IndexPrefetchConfig prefetchConfig = <nl> - replset : : theReplSet - > getIndexPrefetchConfig ( ) ; <nl> + repl : : ReplSetImpl : : IndexPrefetchConfig prefetchConfig = <nl> + repl : : theReplSet - > getIndexPrefetchConfig ( ) ; <nl> <nl> / / do we want prefetchConfig to be ( 1 ) as - is , ( 2 ) for update ops only , or ( 3 ) configured per op type ? <nl> / / One might want PREFETCH_NONE for updates , but it ' s more rare that it is a bad idea for inserts . <nl> / / # 3 ( per op ) , a big issue would be " too many knobs " . 
<nl> switch ( prefetchConfig ) { <nl> - case replset : : ReplSetImpl : : PREFETCH_NONE : <nl> + case repl : : ReplSetImpl : : PREFETCH_NONE : <nl> return ; <nl> - case replset : : ReplSetImpl : : PREFETCH_ID_ONLY : <nl> + case repl : : ReplSetImpl : : PREFETCH_ID_ONLY : <nl> { <nl> TimerHolder timer ( & prefetchIndexStats ) ; <nl> / / on the update op case , the call to prefetchRecordPages will touch the _id index . <nl> namespace mongo { <nl> } <nl> break ; <nl> } <nl> - case replset : : ReplSetImpl : : PREFETCH_ALL : <nl> + case repl : : ReplSetImpl : : PREFETCH_ALL : <nl> { <nl> / / indexCount includes all indexes , including ones <nl> / / in the process of being built <nl> mmm a / src / mongo / db / query / new_find . cpp <nl> ppp b / src / mongo / db / query / new_find . cpp <nl> namespace mongo { <nl> / / passing in a query object ( necessary to check SlaveOK query option ) , the only state where <nl> / / reads are allowed is PRIMARY ( or master in master / slave ) . This function uasserts if <nl> / / reads are not okay . <nl> - replset : : replVerifyReadsOk ( ns , NULL ) ; <nl> + repl : : replVerifyReadsOk ( ns , NULL ) ; <nl> <nl> / / A pin performs a CC lookup and if there is a CC , increments the CC ' s pin value so it <nl> / / doesn ' t time out . Also informs ClientCursor that there is somebody actively holding the <nl> namespace mongo { <nl> txn - > checkForInterrupt ( ) ; / / May trigger maxTimeAlwaysTimeOut fail point . <nl> <nl> / / uassert if we are not on a primary , and not a secondary with SlaveOk query parameter set . <nl> - replset : : replVerifyReadsOk ( cq - > ns ( ) , & pq ) ; <nl> + repl : : replVerifyReadsOk ( cq - > ns ( ) , & pq ) ; <nl> <nl> / / If this exists , the collection is sharded . <nl> / / If it doesn ' t exist , we can assume we ' re not sharded . <nl> mmm a / src / mongo / db / range_deleter_db_env . cpp <nl> ppp b / src / mongo / db / range_deleter_db_env . cpp <nl> namespace mongo { <nl> exclusiveUpper , <nl> keyPattern ) , <nl> false , / * maxInclusive * / <nl> - replset : : replSet ? secondaryThrottle : false , <nl> + repl : : replSet ? secondaryThrottle : false , <nl> serverGlobalParams . moveParanoia ? & removeSaver : NULL , <nl> true , / * fromMigrate * / <nl> true ) ; / * onlyRemoveOrphans * / <nl> namespace mongo { <nl> } <nl> } <nl> <nl> - if ( replset : : replSet ) { <nl> + if ( repl : : replSet ) { <nl> Timer elapsedTime ; <nl> ReplTime lastOpApplied = cc ( ) . getLastOp ( ) . asDate ( ) ; <nl> - while ( ! replset : : opReplicatedEnough ( lastOpApplied , <nl> + while ( ! repl : : opReplicatedEnough ( lastOpApplied , <nl> BSON ( " w " < < " majority " ) . firstElement ( ) ) ) { <nl> if ( elapsedTime . seconds ( ) > = 3600 ) { <nl> * errMsg = str : : stream ( ) < < " rangeDeleter timed out after " <nl> mmm a / src / mongo / db / repl / bgsync . cpp <nl> ppp b / src / mongo / db / repl / bgsync . cpp <nl> <nl> # include " mongo / db / stats / timer_stats . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> int SleepToAllowBatchingMillis = 2 ; <nl> const int BatchIsSmallish = 40000 ; / / bytes <nl> namespace replset { <nl> _assumingPrimary = false ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / bgsync . h <nl> ppp b / src / mongo / db / repl / bgsync . h <nl> <nl> # include " mongo / db / jsobj . 
h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / / This interface exists to facilitate easier testing ; <nl> / / the test infrastructure implements these functions with stubs . <nl> namespace replset { <nl> } ; <nl> <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / connections . h <nl> ppp b / src / mongo / db / repl / connections . h <nl> <nl> # include " mongo / db / repl / rs_config . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * * here we keep a single connection ( with reconnect ) for a set of hosts , <nl> one each , and allow one user at a time per host . if in use already for that <nl> namespace replset { <nl> / / Keep trying to connect if we ' re not yet connected <nl> connect ( ) ; <nl> } <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / consensus . cpp <nl> ppp b / src / mongo / db / repl / consensus . cpp <nl> <nl> # include " mongo / db / repl / replset_commands . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * * the first cmd called by a node seeking election and it ' s a basic sanity <nl> test : do any of the nodes it can reach know that it can ' t be the primary ? <nl> namespace replset { <nl> } <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / consensus . h <nl> ppp b / src / mongo / db / repl / consensus . h <nl> <nl> # include " mongo / bson / bsonobj . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> class ReplSetImpl ; <nl> struct Target ; <nl> namespace replset { <nl> OpTime getElectionTime ( ) const { return _electionTime ; } <nl> void setElectionTime ( OpTime electionTime ) { _electionTime = electionTime ; } <nl> } ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / health . cpp <nl> ppp b / src / mongo / db / repl / health . cpp <nl> <nl> # include " mongo / util / ramlog . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> / * decls for connections . h * / <nl> ScopedConn : : M & ScopedConn : : _map = * ( new ScopedConn : : M ( ) ) ; <nl> mutex ScopedConn : : mapMutex ( " ScopedConn : : mapMutex " ) ; <nl> namespace replset { <nl> b . append ( " set " , name ( ) ) ; <nl> b . appendTimeT ( " date " , time ( 0 ) ) ; <nl> b . append ( " myState " , myState . s ) ; <nl> - const Member * syncTarget = replset : : BackgroundSync : : get ( ) - > getSyncTarget ( ) ; <nl> + const Member * syncTarget = repl : : BackgroundSync : : get ( ) - > getSyncTarget ( ) ; <nl> if ( syncTarget & & <nl> ( myState ! = MemberState : : RS_PRIMARY ) & & <nl> ( myState ! = MemberState : : RS_SHUNNED ) ) { <nl> namespace replset { <nl> if ( replSetBlind ) <nl> b . append ( " blind " , true ) ; / / to avoid confusion if set . . . normally never set except for testing . <nl> } <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / health . h <nl> ppp b / src / mongo / db / repl / health . h <nl> <nl> # include " mongo / logger / tee . 
h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> void fillRsLog ( std : : stringstream & ) ; <nl> <nl> namespace replset { <nl> <nl> / / helper function needed by member . cpp <nl> std : : string ago ( time_t t ) ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / heartbeat . cpp <nl> ppp b / src / mongo / db / repl / heartbeat . cpp <nl> <nl> # include " mongo / util / ramlog . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> using namespace bson ; <nl> <nl> namespace replset { <nl> result . append ( " hbmsg " , theReplSet - > hbmsg ( ) ) ; <nl> result . append ( " time " , ( long long ) time ( 0 ) ) ; <nl> result . appendDate ( " opTime " , theReplSet - > lastOpTimeWritten . asDate ( ) ) ; <nl> - const Member * syncTarget = replset : : BackgroundSync : : get ( ) - > getSyncTarget ( ) ; <nl> + const Member * syncTarget = repl : : BackgroundSync : : get ( ) - > getSyncTarget ( ) ; <nl> if ( syncTarget ) { <nl> result . append ( " syncingTo " , syncTarget - > fullName ( ) ) ; <nl> } <nl> namespace replset { <nl> <nl> / / this ensures that will have bgsync ' s s_instance at all points where it is needed <nl> / / so that we needn ' t check for its existence <nl> - replset : : BackgroundSync * sync = replset : : BackgroundSync : : get ( ) ; <nl> + repl : : BackgroundSync * sync = repl : : BackgroundSync : : get ( ) ; <nl> <nl> boost : : thread t ( startSyncThread ) ; <nl> <nl> - boost : : thread producer ( stdx : : bind ( & replset : : BackgroundSync : : producerThread , sync ) ) ; <nl> + boost : : thread producer ( stdx : : bind ( & repl : : BackgroundSync : : producerThread , sync ) ) ; <nl> theReplSet - > syncSourceFeedback . go ( ) ; <nl> <nl> / / member heartbeats are started in ReplSetImpl : : initFromConfig <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> <nl> / * todo : <nl> mmm a / src / mongo / db / repl / heartbeat . h <nl> ppp b / src / mongo / db / repl / heartbeat . h <nl> <nl> namespace mongo { <nl> class BSONObj ; <nl> <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * throws * / <nl> bool requestHeartbeat ( const std : : string & setname , <nl> namespace replset { <nl> int myConfigVersion , <nl> int & theirConfigVersion , <nl> bool checkEmpty = false ) ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / heartbeat_info . cpp <nl> ppp b / src / mongo / db / repl / heartbeat_info . cpp <nl> <nl> # include " mongo / util / fail_point_service . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> unsigned int HeartbeatInfo : : numPings ; <nl> <nl> namespace replset { <nl> } <nl> <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / heartbeat_info . h <nl> ppp b / src / mongo / db / repl / heartbeat_info . h <nl> <nl> # include " mongo / util / concurrency / value . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * this is supposed to be just basic information on a member , <nl> and copy constructable . * / <nl> namespace replset { <nl> unsigned _id ; <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / initial_sync . 
cpp <nl> ppp b / src / mongo / db / repl / initial_sync . cpp <nl> <nl> # include " mongo / db / repl / rs . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> InitialSync : : InitialSync ( BackgroundSyncInterface * q ) : <nl> SyncTail ( q ) { } <nl> <nl> namespace replset { <nl> return oplogApplySegment ( applyGTEObj , minValidObj , multiInitialSyncApply ) ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / initial_sync . h <nl> ppp b / src / mongo / db / repl / initial_sync . h <nl> <nl> # include " mongo / db / repl / sync_tail . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> class BackgroundSyncInterface ; <nl> <nl> namespace replset { <nl> BSONObj oplogApplication ( const BSONObj & applyGTEObj , const BSONObj & minValidObj ) ; <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / is_master . h <nl> ppp b / src / mongo / db / repl / is_master . h <nl> <nl> # include " mongo / util / mongoutils / str . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * We should not allow most operations when not the master <nl> also we report not master if we are " dead " . <nl> namespace replset { <nl> return ns [ 5 ] = = 0 | | ns [ 5 ] = = ' . ' ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / manager . cpp <nl> ppp b / src / mongo / db / repl / manager . cpp <nl> <nl> # include " mongo / db / client . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * check members OTHER THAN US to see if they think they are primary * / <nl> const Member * Manager : : findOtherPrimary ( bool & two ) { <nl> namespace replset { <nl> busyWithElectSelf = false ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / manager . h <nl> ppp b / src / mongo / db / repl / manager . h <nl> <nl> # include " mongo / db / repl / server . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> class Member ; <nl> class ReplSetImpl ; <nl> <nl> namespace replset { <nl> void msgCheckNewState ( ) ; <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / master_slave . cpp <nl> ppp b / src / mongo / db / repl / master_slave . cpp <nl> <nl> # include " mongo / util / exit . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> void pretouchOperation ( const BSONObj & op ) ; <nl> void pretouchN ( vector < BSONObj > & , unsigned a , unsigned b ) ; <nl> namespace replset { <nl> } <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / master_slave . h <nl> ppp b / src / mongo / db / repl / master_slave . h <nl> namespace mongo { <nl> class Database ; <nl> class OperationContext ; <nl> <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / / Main entry point for master / slave at startup time . 
<nl> void startMasterSlave ( ) ; <nl> namespace replset { <nl> std : : map < std : : string , OpTime > _ignores ; <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / member . cpp <nl> ppp b / src / mongo / db / repl / member . cpp <nl> <nl> <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> using namespace mongoutils : : html ; <nl> <nl> namespace replset { <nl> return hbinfo ( ) . up ( ) & & ( config ( ) . buildIndexes | | ! buildIndexes ) & & state ( ) . readable ( ) ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / member . h <nl> ppp b / src / mongo / db / repl / member . h <nl> <nl> # include " mongo / util / concurrency / list . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * member of a replica set * / <nl> class Member : public List1 < Member > : : Base { <nl> namespace replset { <nl> HeartbeatInfo _hbinfo ; <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / member_state . h <nl> ppp b / src / mongo / db / repl / member_state . h <nl> <nl> # pragma once <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> <nl> / * <nl> namespace replset { <nl> return " " ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / multicmd . h <nl> ppp b / src / mongo / db / repl / multicmd . h <nl> <nl> # include " mongo / util / background . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> struct Target { <nl> Target ( std : : string hostport ) : toHost ( hostport ) , ok ( false ) { } <nl> namespace replset { <nl> ( * i ) - > wait ( ) ; <nl> } <nl> } <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / oplog . cpp <nl> ppp b / src / mongo / db / repl / oplog . cpp <nl> <nl> # include " mongo / util / startup_test . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / / cached copies of these . . . so don ' t rename them , drop them , etc . ! ! ! <nl> static Database * localDB = NULL ; <nl> namespace replset { <nl> theReplSet - > lastH = h ; <nl> ctx . getClient ( ) - > setLastOp ( ts ) ; <nl> <nl> - replset : : BackgroundSync : : notify ( ) ; <nl> + repl : : BackgroundSync : : notify ( ) ; <nl> } <nl> } <nl> <nl> namespace replset { <nl> setNewOptime ( lastOp [ " ts " ] . date ( ) ) ; <nl> } <nl> } <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / oplog . h <nl> ppp b / src / mongo / db / repl / oplog . h <nl> namespace mongo { <nl> class OperationContext ; <nl> class OpTime ; <nl> <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / / These functions redefine the function for logOp ( ) , <nl> / / for either master / slave or replica sets . <nl> namespace replset { <nl> * Initializes the global OpTime with the value from the timestamp of the last oplog entry . <nl> * / <nl> void initOpTimeFromOplog ( const std : : string & oplogNS ) ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / oplogreader . 
cpp <nl> ppp b / src / mongo / db / repl / oplogreader . cpp <nl> <nl> # include " mongo / util / log . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / / number of readers created ; <nl> / / this happens when the sync source changes , a reconfig / network - error or the cursor dies <nl> namespace replset { <nl> tailingQuery ( ns , query . done ( ) , fields ) ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / oplogreader . h <nl> ppp b / src / mongo / db / repl / oplogreader . h <nl> namespace mongo { <nl> <nl> extern const BSONObj reverseNaturalObj ; / / { $ natural : - 1 } <nl> <nl> - namespace replset { <nl> + namespace repl { <nl> / * * <nl> * Authenticates conn using the server ' s cluster - membership credentials . <nl> * <nl> namespace replset { <nl> bool passthroughHandshake ( const mongo : : OID & rid , const int f ) ; <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_reads_ok . cpp <nl> ppp b / src / mongo / db / repl / repl_reads_ok . cpp <nl> <nl> # include " mongo / util / assert_util . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * * we allow queries to SimpleSlave ' s * / <nl> void replVerifyReadsOk ( const std : : string & ns , const LiteParsedQuery * pq ) { <nl> namespace replset { <nl> } <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_reads_ok . h <nl> ppp b / src / mongo / db / repl / repl_reads_ok . h <nl> <nl> namespace mongo { <nl> class LiteParsedQuery ; <nl> <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / / Check to see if slaveOk reads are allowed , <nl> / / based on read preference and query options <nl> void replVerifyReadsOk ( const std : : string & ns , const LiteParsedQuery * pq ) ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_set . h <nl> ppp b / src / mongo / db / repl / repl_set . h <nl> <nl> # include " mongo / util / concurrency / thread_pool . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> class ReplSet : public ReplSetImpl { <nl> public : <nl> namespace replset { <nl> ReplSet ( ) ; <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_set_health_poll_task . cpp <nl> ppp b / src / mongo / db / repl / repl_set_health_poll_task . cpp <nl> <nl> # include " mongo / db / repl / rs_config . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> int ReplSetHealthPollTask : : s_try_offset = 0 ; <nl> <nl> namespace replset { <nl> mem . electionTime = info [ " electionTime " ] . Date ( ) ; <nl> } <nl> } <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_set_health_poll_task . h <nl> ppp b / src / mongo / db / repl / repl_set_health_poll_task . h <nl> <nl> # include " mongo / util / net / hostandport . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * * <nl> * Poll every other set member to check its status . 
<nl> namespace replset { <nl> time_t _timeout ; <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_set_impl . cpp <nl> ppp b / src / mongo / db / repl / repl_set_impl . cpp <nl> <nl> # include " mongo / util / exit . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> # ifdef MONGO_PLATFORM_64 <nl> const int ReplSetImpl : : replWriterThreadCount = 16 ; <nl> const int ReplSetImpl : : replPrefetcherThreadCount = 16 ; <nl> namespace { <nl> <nl> / / Wait for replication to stop and buffer to be consumed <nl> LOG ( 1 ) < < " replSet waiting for replication to finish before becoming primary " < < endl ; <nl> - replset : : BackgroundSync : : get ( ) - > stopReplicationAndFlushBuffer ( ) ; <nl> + repl : : BackgroundSync : : get ( ) - > stopReplicationAndFlushBuffer ( ) ; <nl> <nl> / / Lock here to prevent stepping down & becoming primary from getting interleaved <nl> LOG ( 1 ) < < " replSet waiting for global write lock " ; <nl> namespace { <nl> return true ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_set_impl . h <nl> ppp b / src / mongo / db / repl / repl_set_impl . h <nl> <nl> namespace mongo { <nl> class Cloner ; <nl> <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> struct FixUpInfo ; <nl> class ReplSetCmdline ; <nl> namespace replset { <nl> private : <nl> bool _syncDoInitialSync_clone ( Cloner & cloner , const char * master , <nl> const list < string > & dbs , bool dataPass ) ; <nl> - bool _syncDoInitialSync_applyToHead ( replset : : SyncTail & syncer , OplogReader * r , <nl> + bool _syncDoInitialSync_applyToHead ( repl : : SyncTail & syncer , OplogReader * r , <nl> const Member * source , const BSONObj & lastOp , <nl> BSONObj & minValidOut ) ; <nl> void _syncDoInitialSync ( ) ; <nl> namespace replset { <nl> static const char * _initialSyncFlagString ; <nl> static const BSONObj _initialSyncFlag ; <nl> } ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_settings . cpp <nl> ppp b / src / mongo / db / repl / repl_settings . cpp <nl> <nl> # include " mongo / s / write_ops / batched_command_request . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> <nl> / / our config from command line etc . <nl> namespace replset { <nl> <nl> OpCounterServerStatusSection replOpCounterServerStatusSection ( " opcountersRepl " , & replOpCounters ) ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_settings . h <nl> ppp b / src / mongo / db / repl / repl_settings . h <nl> <nl> <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> bool anyReplEnabled ( ) ; <nl> <nl> namespace replset { <nl> } ; <nl> <nl> extern ReplSettings replSettings ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_start . cpp <nl> ppp b / src / mongo / db / repl / repl_start . cpp <nl> <nl> # include " mongo / util / log . 
h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * * @ param cfgString < setname > / < seedhost1 > , < seedhost2 > * / <nl> void parseReplsetCmdLine ( const std : : string & cfgString , <nl> namespace replset { <nl> startMasterSlave ( ) ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / repl_start . h <nl> ppp b / src / mongo / db / repl / repl_start . h <nl> <nl> # include " mongo / util / net / hostandport . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> void parseReplsetCmdLine ( const std : : string & cfgString , <nl> std : : string & setname , <nl> namespace replset { <nl> / / This function starts replica set or master / slave replication <nl> / / based on command line / config parameters . <nl> void startReplication ( ) ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / replset_commands . cpp <nl> ppp b / src / mongo / db / repl / replset_commands . cpp <nl> <nl> using namespace bson ; <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> void checkMembersUpForConfigChange ( const ReplSetConfig & cfg , BSONObjBuilder & result , bool initial ) ; <nl> <nl> namespace replset { <nl> } <nl> <nl> if ( cmdObj . hasElement ( " sethbmsg " ) ) { <nl> - replset : : sethbmsg ( cmdObj [ " sethbmsg " ] . String ( ) ) ; <nl> + repl : : sethbmsg ( cmdObj [ " sethbmsg " ] . String ( ) ) ; <nl> return true ; <nl> } <nl> <nl> namespace replset { <nl> } <nl> } cmdReplSetUpdatePosition ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / replset_commands . h <nl> ppp b / src / mongo / db / repl / replset_commands . h <nl> <nl> # include " mongo / db / repl / rs . h " / / replSet bool <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> extern bool replSetBlind ; <nl> extern unsigned replSetForceInitialSyncFailure ; <nl> namespace replset { <nl> } <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / replset_web_handler . cpp <nl> ppp b / src / mongo / db / repl / replset_web_handler . cpp <nl> <nl> # include " mongo / util / mongoutils / str . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> using namespace bson ; <nl> using namespace mongoutils ; <nl> namespace replset { <nl> <nl> } replSetHandler ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / resync . cpp <nl> ppp b / src / mongo / db / repl / resync . cpp <nl> <nl> # include " mongo / db / operation_context_impl . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / / operator requested resynchronization of replication ( on a slave or secondary ) . { resync : 1 } <nl> class CmdResync : public Command { <nl> namespace replset { <nl> return true ; <nl> } <nl> } cmdResync ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs . cpp <nl> ppp b / src / mongo / db / repl / rs . 
cpp <nl> <nl> using namespace std ; <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> using namespace bson ; <nl> <nl> namespace replset { <nl> } <nl> <nl> void ReplSet : : shutdown ( ) { <nl> - replset : : BackgroundSync : : shutdown ( ) ; <nl> + repl : : BackgroundSync : : shutdown ( ) ; <nl> } <nl> <nl> void replLocalAuth ( ) { <nl> namespace replset { <nl> } <nl> <nl> } replIndexPrefetch ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs . h <nl> ppp b / src / mongo / db / repl / rs . h <nl> <nl> * / <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> extern bool replSet ; / / true if using repl sets <nl> extern class ReplSet * theReplSet ; / / null until initialized <nl> namespace replset { <nl> return true ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs_base . h <nl> ppp b / src / mongo / db / repl / rs_base . h <nl> <nl> # include " mongo / util / log . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * * <nl> * most operations on a ReplSet object should be done while locked . that <nl> namespace replset { <nl> bool lockedByMe ( ) { return _lockedByMe . get ( ) ; } <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs_config . cpp <nl> ppp b / src / mongo / db / repl / rs_config . cpp <nl> <nl> using namespace bson ; <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> mongo : : mutex ReplSetConfig : : groupMx ( " RS tag group " ) ; <nl> const int ReplSetConfig : : DEFAULT_HB_TIMEOUT = 10 ; <nl> namespace replset { <nl> LOG ( level ) < < " replSet load config ok from " < < ( h . isSelf ( ) ? " self " : h . toString ( ) ) < < rsLog ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs_config . h <nl> ppp b / src / mongo / db / repl / rs_config . h <nl> <nl> # include " mongo / util / net / hostandport . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> class Member ; <nl> const std : : string rsConfigNs = " local . system . replset " ; <nl> <nl> namespace replset { <nl> } ; <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs_exception . h <nl> ppp b / src / mongo / db / repl / rs_exception . h <nl> <nl> # pragma once <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> class VoteException : public std : : exception { <nl> public : <nl> namespace replset { <nl> const char * what ( ) const throw ( ) { return " RetryAfterSleepException " ; } <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs_initialsync . cpp <nl> ppp b / src / mongo / db / repl / rs_initialsync . cpp <nl> <nl> # include " mongo / util / mongoutils / str . 
h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> using namespace mongoutils ; <nl> using namespace bson ; <nl> namespace replset { <nl> * @ param syncer either initial sync ( can reclone missing docs ) or " normal " sync ( no recloning ) <nl> * @ param r the oplog reader <nl> * @ param source the sync target <nl> - * @ param lastOp the op to start syncing at . replset : : InitialSync writes this and then moves to <nl> - * the queue . replset : : SyncTail does not write this , it moves directly to the <nl> + * @ param lastOp the op to start syncing at . repl : : InitialSync writes this and then moves to <nl> + * the queue . repl : : SyncTail does not write this , it moves directly to the <nl> * queue . <nl> * @ param minValid populated by this function . The most recent op on the sync target ' s oplog , <nl> * this function syncs to this value ( inclusive ) <nl> * @ return if applying the oplog succeeded <nl> * / <nl> - bool ReplSetImpl : : _syncDoInitialSync_applyToHead ( replset : : SyncTail & syncer , OplogReader * r , <nl> + bool ReplSetImpl : : _syncDoInitialSync_applyToHead ( repl : : SyncTail & syncer , OplogReader * r , <nl> const Member * source , const BSONObj & lastOp , <nl> BSONObj & minValid ) { <nl> / * our cloned copy will be strange until we apply oplog events that occurred <nl> namespace replset { <nl> * closer to the latest op time before it can transition to secondary state . <nl> * / <nl> void ReplSetImpl : : _syncDoInitialSync ( ) { <nl> - replset : : InitialSync init ( replset : : BackgroundSync : : get ( ) ) ; <nl> - replset : : SyncTail tail ( replset : : BackgroundSync : : get ( ) ) ; <nl> + repl : : InitialSync init ( repl : : BackgroundSync : : get ( ) ) ; <nl> + repl : : SyncTail tail ( repl : : BackgroundSync : : get ( ) ) ; <nl> sethbmsg ( " initial sync pending " , 0 ) ; <nl> <nl> / / if this is the first node , it may have already become primary <nl> namespace replset { <nl> <nl> / / If we just cloned & there were no ops applied , we still want the primary to know where <nl> / / we ' re up to <nl> - replset : : BackgroundSync : : notify ( ) ; <nl> + repl : : BackgroundSync : : notify ( ) ; <nl> <nl> changeState ( MemberState : : RS_RECOVERING ) ; <nl> sethbmsg ( " initial sync done " , 0 ) ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs_initiate . cpp <nl> ppp b / src / mongo / db / repl / rs_initiate . cpp <nl> using namespace bson ; <nl> using namespace mongoutils ; <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / * called on a reconfig AND on initiate <nl> throws <nl> namespace replset { <nl> } <nl> } cmdReplSetInitiate ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs_rollback . cpp <nl> ppp b / src / mongo / db / repl / rs_rollback . cpp <nl> <nl> * / <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> using namespace bson ; <nl> <nl> namespace replset { <nl> return 0 ; <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs_sync . cpp <nl> ppp b / src / mongo / db / repl / rs_sync . cpp <nl> <nl> # include " mongo / util / fail_point_service . 
h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> using namespace bson ; <nl> <nl> namespace replset { <nl> } <nl> <nl> / / record the previous member we were syncing from <nl> - const Member * prev = replset : : BackgroundSync : : get ( ) - > getSyncTarget ( ) ; <nl> + const Member * prev = repl : : BackgroundSync : : get ( ) - > getSyncTarget ( ) ; <nl> if ( prev ) { <nl> result . append ( " prevSyncTarget " , prev - > fullName ( ) ) ; <nl> } <nl> namespace replset { <nl> } <nl> <nl> / * we have some data . continue tailing . * / <nl> - replset : : SyncTail tail ( replset : : BackgroundSync : : get ( ) ) ; <nl> + repl : : SyncTail tail ( repl : : BackgroundSync : : get ( ) ) ; <nl> tail . oplogApplication ( ) ; <nl> } <nl> <nl> namespace replset { <nl> changeState ( MemberState : : RS_RECOVERING ) ; <nl> } <nl> } <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / rs_sync . h <nl> ppp b / src / mongo / db / repl / rs_sync . h <nl> <nl> # include " mongo / util / concurrency / thread_pool . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> / / TODO : move hbmsg into an error - keeping class ( SERVER - 4444 ) <nl> void sethbmsg ( const std : : string & s , const int logLevel = 0 ) ; <nl> <nl> extern int maxSyncSourceLagSecs ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / state_box . h <nl> ppp b / src / mongo / db / repl / state_box . h <nl> <nl> # include " mongo / util / concurrency / rwlock . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> class Member ; <nl> <nl> namespace replset { <nl> RWLock m ; <nl> SP sp ; <nl> } ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / sync . cpp <nl> ppp b / src / mongo / db / repl / sync . cpp <nl> <nl> # include " mongo / util / log . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> void Sync : : setHostname ( const string & hostname ) { <nl> hn = hostname ; <nl> namespace replset { <nl> return true ; <nl> } <nl> } <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / sync . h <nl> ppp b / src / mongo / db / repl / sync . h <nl> <nl> namespace mongo { <nl> class Database ; <nl> <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> class Sync { <nl> protected : <nl> namespace replset { <nl> void setHostname ( const std : : string & hostname ) ; <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / sync_source_feedback . cpp <nl> ppp b / src / mongo / db / repl / sync_source_feedback . cpp <nl> <nl> # include " mongo / db / operation_context_impl . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> / / used in replAuthenticate <nl> static const BSONObj userReplQuery = fromjson ( " { \ " user \ " : \ " repl \ " } " ) ; <nl> namespace replset { <nl> if ( state . primary ( ) | | state . fatal ( ) | | state . startup ( ) ) { <nl> continue ; <nl> } <nl> - const Member * target = replset : : BackgroundSync : : get ( ) - > getSyncTarget ( ) ; <nl> + const Member * target = repl : : BackgroundSync : : get ( ) - > getSyncTarget ( ) ; <nl> if ( _syncTarget ! 
= target ) { <nl> _resetConnection ( ) ; <nl> _syncTarget = target ; <nl> namespace replset { <nl> } <nl> cc ( ) . shutdown ( ) ; <nl> } <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / sync_source_feedback . h <nl> ppp b / src / mongo / db / repl / sync_source_feedback . h <nl> <nl> # include " mongo / util / background . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> class Member ; <nl> <nl> namespace replset { <nl> / / used to indicate a connection change which has not yet been shook on <nl> bool _handshakeNeeded ; <nl> } ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / sync_tail . cpp <nl> ppp b / src / mongo / db / repl / sync_tail . cpp <nl> <nl> # include " mongo / util / fail_point_service . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> static Counter64 opsAppliedStats ; <nl> <nl> namespace replset { <nl> } <nl> } <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / sync_tail . h <nl> ppp b / src / mongo / db / repl / sync_tail . h <nl> <nl> # include " mongo / db / repl / sync . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> class BackgroundSyncInterface ; <nl> <nl> namespace replset { <nl> void multiSyncApply ( const std : : vector < BSONObj > & ops , SyncTail * st ) ; <nl> void multiInitialSyncApply ( const std : : vector < BSONObj > & ops , SyncTail * st ) ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / topology_coordinator . h <nl> ppp b / src / mongo / db / repl / topology_coordinator . h <nl> namespace mongo { <nl> <nl> class OpTime ; <nl> <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> class HeartbeatInfo ; <nl> class Member ; <nl> namespace replset { <nl> protected : <nl> TopologyCoordinator ( ) { } <nl> } ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / topology_coordinator_impl . cpp <nl> ppp b / src / mongo / db / repl / topology_coordinator_impl . cpp <nl> <nl> # include " mongo / db / repl / rs_sync . h " / / maxSyncSourceLagSecs <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> TopologyCoordinatorImpl : : TopologyCoordinatorImpl ( ) : <nl> _majorityNumber ( 0 ) { <nl> namespace replset { <nl> } <nl> <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / topology_coordinator_impl . h <nl> ppp b / src / mongo / db / repl / topology_coordinator_impl . h <nl> <nl> # include " mongo / util / concurrency / list . h " <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> class TagSubgroup ; <nl> <nl> namespace replset { <nl> <nl> } ; <nl> <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / write_concern . cpp <nl> ppp b / src / mongo / db / repl / write_concern . cpp <nl> <nl> # define REPLDEBUG ( x ) <nl> <nl> namespace mongo { <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> using namespace mongoutils ; <nl> <nl> namespace replset { <nl> unsigned getSlaveCount ( ) { <nl> return slaveTracking . 
getSlaveCount ( ) ; <nl> } <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / write_concern . h <nl> ppp b / src / mongo / db / repl / write_concern . h <nl> <nl> namespace mongo { <nl> class CurOp ; <nl> <nl> - namespace replset { <nl> + namespace repl { <nl> <nl> bool updateSlaveLocations ( BSONArray optimes ) ; <nl> <nl> namespace replset { <nl> <nl> void resetSlaveCache ( ) ; <nl> unsigned getSlaveCount ( ) ; <nl> - } / / namespace replset <nl> + } / / namespace repl <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / restapi . cpp <nl> ppp b / src / mongo / db / restapi . cpp <nl> namespace mongo { <nl> ss < < " # databases : " < < dbHolder ( ) . sizeInfo ( ) < < ' \ n ' ; <nl> ss < < " # Cursors : " < < ClientCursor : : totalOpen ( ) < < ' \ n ' ; <nl> ss < < " replication : " ; <nl> - if ( * replset : : replInfo ) <nl> - ss < < " \ nreplInfo : " < < replset : : replInfo < < " \ n \ n " ; <nl> - if ( replset : : replSet ) { <nl> + if ( * repl : : replInfo ) <nl> + ss < < " \ nreplInfo : " < < repl : : replInfo < < " \ n \ n " ; <nl> + if ( repl : : replSet ) { <nl> ss < < a ( " " , " see replSetGetStatus link top of page " ) < < " - - replSet < / a > " <nl> - < < replset : : replSettings . replSet ; <nl> + < < repl : : replSettings . replSet ; <nl> } <nl> - if ( replset : : replAllDead ) <nl> - ss < < " \ n < b > replication replAllDead = " < < replset : : replAllDead < < " < / b > \ n " ; <nl> + if ( repl : : replAllDead ) <nl> + ss < < " \ n < b > replication replAllDead = " < < repl : : replAllDead < < " < / b > \ n " ; <nl> else { <nl> - ss < < " \ nmaster : " < < replset : : replSettings . master < < ' \ n ' ; <nl> - ss < < " slave : " < < replset : : replSettings . slave < < ' \ n ' ; <nl> + ss < < " \ nmaster : " < < repl : : replSettings . master < < ' \ n ' ; <nl> + ss < < " slave : " < < repl : : replSettings . slave < < ' \ n ' ; <nl> ss < < ' \ n ' ; <nl> } <nl> <nl> mmm a / src / mongo / db / ttl . cpp <nl> ppp b / src / mongo / db / ttl . cpp <nl> namespace mongo { <nl> <nl> void doTTLForDB ( const string & dbName ) { <nl> <nl> - if ( ! replset : : isMasterNs ( dbName . c_str ( ) ) ) <nl> + if ( ! repl : : isMasterNs ( dbName . c_str ( ) ) ) <nl> return ; <nl> <nl> vector < BSONObj > indexes ; <nl> namespace mongo { <nl> continue ; <nl> } <nl> <nl> - if ( ! replset : : isMasterNs ( dbName . c_str ( ) ) ) { <nl> + if ( ! repl : : isMasterNs ( dbName . c_str ( ) ) ) { <nl> / / we ' ve stepped down since we started this function , <nl> / / so we should stop working as we only do deletes on the primary <nl> break ; <nl> namespace mongo { <nl> } <nl> <nl> / / if part of replSet but not in a readable state ( e . g . during initial sync ) , skip . <nl> - if ( replset : : theReplSet & & ! replset : : theReplSet - > state ( ) . readable ( ) ) <nl> + if ( repl : : theReplSet & & ! repl : : theReplSet - > state ( ) . readable ( ) ) <nl> continue ; <nl> <nl> set < string > dbs ; <nl> mmm a / src / mongo / db / write_concern . cpp <nl> ppp b / src / mongo / db / write_concern . cpp <nl> namespace mongo { <nl> } <nl> <nl> const bool isConfigServer = serverGlobalParams . configsvr ; <nl> - const bool isMasterSlaveNode = replset : : anyReplEnabled ( ) & & ! replset : : theReplSet ; <nl> - const bool isReplSetNode = replset : : anyReplEnabled ( ) & & replset : : theReplSet ; <nl> + const bool isMasterSlaveNode = repl : : anyReplEnabled ( ) & & ! 
repl : : theReplSet ; <nl> + const bool isReplSetNode = repl : : anyReplEnabled ( ) & & repl : : theReplSet ; <nl> <nl> if ( isConfigServer | | ( ! isMasterSlaveNode & & ! isReplSetNode ) ) { <nl> <nl> namespace mongo { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - if ( ! replset : : anyReplEnabled ( ) | | serverGlobalParams . configsvr ) { <nl> + if ( ! repl : : anyReplEnabled ( ) | | serverGlobalParams . configsvr ) { <nl> / / no replication check needed ( validated above ) <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - const bool isMasterSlaveNode = replset : : anyReplEnabled ( ) & & ! replset : : theReplSet ; <nl> + const bool isMasterSlaveNode = repl : : anyReplEnabled ( ) & & ! repl : : theReplSet ; <nl> if ( writeConcern . wMode = = " majority " & & isMasterSlaveNode ) { <nl> / / with master / slave , majority is equivalent to w = 1 <nl> return Status : : OK ( ) ; <nl> namespace mongo { <nl> while ( 1 ) { <nl> <nl> if ( writeConcern . wNumNodes > 0 ) { <nl> - if ( replset : : opReplicatedEnough ( replOpTime , writeConcern . wNumNodes ) ) { <nl> + if ( repl : : opReplicatedEnough ( replOpTime , writeConcern . wNumNodes ) ) { <nl> break ; <nl> } <nl> } <nl> - else if ( replset : : opReplicatedEnough ( replOpTime , writeConcern . wMode ) ) { <nl> + else if ( repl : : opReplicatedEnough ( replOpTime , writeConcern . wMode ) ) { <nl> break ; <nl> } <nl> <nl> namespace mongo { <nl> } <nl> <nl> / / Add stats <nl> - result - > writtenTo = replset : : getHostsWrittenTo ( replOpTime ) ; <nl> + result - > writtenTo = repl : : getHostsWrittenTo ( replOpTime ) ; <nl> result - > wTime = gleTimerHolder . recordMillis ( ) ; <nl> <nl> return replStatus ; <nl> mmm a / src / mongo / dbtests / framework_options . cpp <nl> ppp b / src / mongo / dbtests / framework_options . cpp <nl> namespace mongo { <nl> storageGlobalParams . dur = true ; <nl> } <nl> <nl> - replset : : replSettings . oplogSize = 10 * 1024 * 1024 ; <nl> + repl : : replSettings . oplogSize = 10 * 1024 * 1024 ; <nl> <nl> DEV log ( ) < < " _DEBUG build " < < endl ; <nl> if ( sizeof ( void * ) = = 4 ) <nl> mmm a / src / mongo / dbtests / mock / mock_replica_set . cpp <nl> ppp b / src / mongo / dbtests / mock / mock_replica_set . cpp <nl> namespace mongo { <nl> <nl> MockConnRegistry : : get ( ) - > addServer ( mockServer ) ; <nl> <nl> - replset : : ReplSetConfig : : MemberCfg config ; <nl> + repl : : ReplSetConfig : : MemberCfg config ; <nl> config . h = HostAndPort ( hostName ) ; <nl> replConfig . insert ( std : : make_pair ( hostName , config ) ) ; <nl> } <nl> namespace mongo { <nl> ReplConfigMap : : const_iterator iter = _replConfig . find ( hostAndPort ) ; <nl> fassert ( 16578 , iter ! = _replConfig . end ( ) ) ; <nl> <nl> - const replset : : ReplSetConfig : : MemberCfg & config = iter - > second ; <nl> + const repl : : ReplSetConfig : : MemberCfg & config = iter - > second ; <nl> fassert ( 16579 , ! config . hidden & & config . priority > 0 & & ! config . arbiterOnly ) ; <nl> <nl> _primaryHost = hostAndPort ; <nl> namespace mongo { <nl> <nl> builder . append ( " primary " , getPrimary ( ) ) ; <nl> <nl> - const replset : : ReplSetConfig : : MemberCfg & replConfig = configIter - > second ; <nl> + const repl : : ReplSetConfig : : MemberCfg & replConfig = configIter - > second ; <nl> if ( replConfig . arbiterOnly ) { <nl> builder . append ( " arbiterOnly " , true ) ; <nl> } <nl> namespace mongo { <nl> <nl> int MockReplicaSet : : getState ( const std : : string & hostAndPort ) const { <nl> if ( _replConfig . 
count ( hostAndPort ) < 1 ) { <nl> - return static_cast < int > ( replset : : MemberState : : RS_SHUNNED ) ; <nl> + return static_cast < int > ( repl : : MemberState : : RS_SHUNNED ) ; <nl> } <nl> else if ( hostAndPort = = getPrimary ( ) ) { <nl> - return static_cast < int > ( replset : : MemberState : : RS_PRIMARY ) ; <nl> + return static_cast < int > ( repl : : MemberState : : RS_PRIMARY ) ; <nl> } <nl> else { <nl> - return static_cast < int > ( replset : : MemberState : : RS_SECONDARY ) ; <nl> + return static_cast < int > ( repl : : MemberState : : RS_SECONDARY ) ; <nl> } <nl> } <nl> <nl> mmm a / src / mongo / dbtests / mock / mock_replica_set . h <nl> ppp b / src / mongo / dbtests / mock / mock_replica_set . h <nl> namespace mongo { <nl> * / <nl> class MockReplicaSet { <nl> public : <nl> - typedef std : : map < std : : string , replset : : ReplSetConfig : : MemberCfg > ReplConfigMap ; <nl> + typedef std : : map < std : : string , repl : : ReplSetConfig : : MemberCfg > ReplConfigMap ; <nl> <nl> / * * <nl> * Creates a mock replica set and automatically mocks the isMaster <nl> mmm a / src / mongo / dbtests / replsettests . cpp <nl> ppp b / src / mongo / dbtests / replsettests . cpp <nl> <nl> namespace ReplSetTests { <nl> const int replWriterThreadCount ( 32 ) ; <nl> const int replPrefetcherThreadCount ( 32 ) ; <nl> - class ReplSetTest : public mongo : : replset : : ReplSet { <nl> - mongo : : replset : : ReplSetConfig * _config ; <nl> - mongo : : replset : : ReplSetConfig : : MemberCfg * _myConfig ; <nl> - mongo : : replset : : BackgroundSyncInterface * _syncTail ; <nl> + class ReplSetTest : public mongo : : repl : : ReplSet { <nl> + mongo : : repl : : ReplSetConfig * _config ; <nl> + mongo : : repl : : ReplSetConfig : : MemberCfg * _myConfig ; <nl> + mongo : : repl : : BackgroundSyncInterface * _syncTail ; <nl> public : <nl> static const int replWriterThreadCount ; <nl> static const int replPrefetcherThreadCount ; <nl> namespace ReplSetTests { <nl> ret - > init ( ) ; <nl> / / we need to get ( ) the BackgroundSync so that it has its s_instance initialized <nl> / / since applyOps ( ) eventually calls notify ( ) which makes use of the s_instance <nl> - mongo : : replset : : BackgroundSync : : get ( ) ; <nl> + mongo : : repl : : BackgroundSync : : get ( ) ; <nl> return ret . release ( ) ; <nl> } <nl> virtual ~ ReplSetTest ( ) { <nl> namespace ReplSetTests { <nl> virtual bool tryToGoLiveAsASecondary ( OpTime & minvalid ) { <nl> return false ; <nl> } <nl> - virtual const mongo : : replset : : ReplSetConfig & config ( ) { <nl> + virtual const mongo : : repl : : ReplSetConfig & config ( ) { <nl> return * _config ; <nl> } <nl> - virtual const mongo : : replset : : ReplSetConfig : : MemberCfg & myConfig ( ) { <nl> + virtual const mongo : : repl : : ReplSetConfig : : MemberCfg & myConfig ( ) { <nl> return * _myConfig ; <nl> } <nl> virtual bool buildIndexes ( ) const { <nl> return true ; <nl> } <nl> - void setSyncTail ( replset : : BackgroundSyncInterface * syncTail ) { <nl> + void setSyncTail ( repl : : BackgroundSyncInterface * syncTail ) { <nl> _syncTail = syncTail ; <nl> } <nl> private : <nl> namespace ReplSetTests { <nl> void init ( ) { <nl> BSONArrayBuilder members ; <nl> members . append ( BSON ( " _id " < < 0 < < " host " < < " host1 " ) ) ; <nl> - _config = mongo : : replset : : ReplSetConfig : : make ( BSON ( " _id " < < " foo " <nl> + _config = mongo : : repl : : ReplSetConfig : : make ( BSON ( " _id " < < " foo " <nl> < < " members " < < members . 
arr ( ) ) ) ; <nl> - _myConfig = new mongo : : replset : : ReplSetConfig : : MemberCfg ( ) ; <nl> + _myConfig = new mongo : : repl : : ReplSetConfig : : MemberCfg ( ) ; <nl> } <nl> } ; <nl> <nl> - class BackgroundSyncTest : public replset : : BackgroundSyncInterface { <nl> + class BackgroundSyncTest : public repl : : BackgroundSyncInterface { <nl> std : : queue < BSONObj > _queue ; <nl> public : <nl> BackgroundSyncTest ( ) { } <nl> namespace ReplSetTests { <nl> virtual void consume ( ) { <nl> _queue . pop ( ) ; <nl> } <nl> - virtual mongo : : replset : : Member * getSyncTarget ( ) { <nl> + virtual mongo : : repl : : Member * getSyncTarget ( ) { <nl> return 0 ; <nl> } <nl> void addDoc ( BSONObj doc ) { <nl> namespace ReplSetTests { <nl> static DBDirectClient client_ ; <nl> protected : <nl> static BackgroundSyncTest * _bgsync ; <nl> - static replset : : SyncTail * _tailer ; <nl> + static repl : : SyncTail * _tailer ; <nl> public : <nl> Base ( ) { <nl> } <nl> namespace ReplSetTests { <nl> db - > dropCollection ( & txn , ns ( ) ) ; <nl> } <nl> static void setup ( ) { <nl> - mongo : : replset : : replSettings . replSet = " foo " ; <nl> - mongo : : replset : : replSettings . oplogSize = 5 * 1024 * 1024 ; <nl> - mongo : : replset : : createOplog ( ) ; <nl> + mongo : : repl : : replSettings . replSet = " foo " ; <nl> + mongo : : repl : : replSettings . oplogSize = 5 * 1024 * 1024 ; <nl> + mongo : : repl : : createOplog ( ) ; <nl> <nl> / / setup background sync instance <nl> _bgsync = new BackgroundSyncTest ( ) ; <nl> <nl> / / setup tail <nl> - _tailer = new replset : : SyncTail ( _bgsync ) ; <nl> + _tailer = new repl : : SyncTail ( _bgsync ) ; <nl> <nl> / / setup theReplSet <nl> ReplSetTest * rst = ReplSetTest : : make ( ) ; <nl> rst - > setSyncTail ( _bgsync ) ; <nl> <nl> - delete replset : : theReplSet ; <nl> - replset : : theReplSet = rst ; <nl> + delete repl : : theReplSet ; <nl> + repl : : theReplSet = rst ; <nl> } <nl> } ; <nl> <nl> DBDirectClient Base : : client_ ; <nl> BackgroundSyncTest * Base : : _bgsync = NULL ; <nl> - replset : : SyncTail * Base : : _tailer = NULL ; <nl> + repl : : SyncTail * Base : : _tailer = NULL ; <nl> <nl> - class MockInitialSync : public replset : : InitialSync { <nl> + class MockInitialSync : public repl : : InitialSync { <nl> int step ; <nl> public : <nl> MockInitialSync ( ) : InitialSync ( 0 ) , step ( 0 ) , failOnStep ( SUCCEED ) , retry ( true ) { } <nl> namespace ReplSetTests { <nl> / / all three should succeed <nl> std : : vector < BSONObj > ops ; <nl> ops . push_back ( obj ) ; <nl> - replset : : multiInitialSyncApply ( ops , & mock ) ; <nl> + repl : : multiInitialSyncApply ( ops , & mock ) ; <nl> <nl> mock . failOnStep = MockInitialSync : : FAIL_FIRST_APPLY ; <nl> - replset : : multiInitialSyncApply ( ops , & mock ) ; <nl> + repl : : multiInitialSyncApply ( ops , & mock ) ; <nl> <nl> mock . retry = false ; <nl> - replset : : multiInitialSyncApply ( ops , & mock ) ; <nl> + repl : : multiInitialSyncApply ( ops , & mock ) ; <nl> <nl> drop ( ) ; <nl> } <nl> } ; <nl> <nl> - class SyncTest2 : public replset : : InitialSync { <nl> + class SyncTest2 : public repl : : InitialSync { <nl> public : <nl> bool insertOnRetry ; <nl> SyncTest2 ( ) : InitialSync ( 0 ) , insertOnRetry ( false ) { } <nl> namespace ReplSetTests { <nl> Client : : Context ctx ( _cappedNs ) ; <nl> OperationContextImpl txn ; <nl> / / in an annoying twist of api , returns true on failure <nl> - return ! mongo : : replset : : applyOperation_inlock ( & txn , ctx . 
db ( ) , op , true ) ; <nl> + return ! mongo : : repl : : applyOperation_inlock ( & txn , ctx . db ( ) , op , true ) ; <nl> } <nl> <nl> void run ( ) { <nl> namespace ReplSetTests { <nl> <nl> BSONObj op = updateFail ( ) ; <nl> <nl> - mongo : : replset : : Sync s ( " " ) ; <nl> + mongo : : repl : : Sync s ( " " ) ; <nl> verify ( ! s . shouldRetry ( op ) ) ; <nl> } <nl> } ; <nl> namespace ReplSetTests { <nl> void run ( ) { <nl> const int expected = 100 ; <nl> <nl> - replset : : theReplSet - > syncSourceFeedback . ensureMe ( ) ; <nl> + repl : : theReplSet - > syncSourceFeedback . ensureMe ( ) ; <nl> <nl> drop ( ) ; <nl> addInserts ( 100 ) ; <nl> mmm a / src / mongo / dbtests / repltests . cpp <nl> ppp b / src / mongo / dbtests / repltests . cpp <nl> namespace ReplTests { <nl> mutable OperationContextImpl _txn ; <nl> public : <nl> Base ( ) : _context ( ns ( ) ) { <nl> - mongo : : replset : : oldRepl ( ) ; <nl> - mongo : : replset : : replSettings . replSet = " " ; <nl> - mongo : : replset : : replSettings . oplogSize = 5 * 1024 * 1024 ; <nl> - mongo : : replset : : replSettings . master = true ; <nl> - mongo : : replset : : createOplog ( ) ; <nl> + mongo : : repl : : oldRepl ( ) ; <nl> + mongo : : repl : : replSettings . replSet = " " ; <nl> + mongo : : repl : : replSettings . oplogSize = 5 * 1024 * 1024 ; <nl> + mongo : : repl : : replSettings . master = true ; <nl> + mongo : : repl : : createOplog ( ) ; <nl> <nl> Collection * c = _context . db ( ) - > getCollection ( ns ( ) ) ; <nl> if ( ! c ) { <nl> namespace ReplTests { <nl> } <nl> ~ Base ( ) { <nl> try { <nl> - mongo : : replset : : replSettings . master = false ; <nl> + mongo : : repl : : replSettings . master = false ; <nl> deleteAll ( ns ( ) ) ; <nl> deleteAll ( cllNS ( ) ) ; <nl> } <nl> namespace ReplTests { <nl> BSONObjBuilder b ; <nl> b . append ( " host " , " localhost " ) ; <nl> b . appendTimestamp ( " syncedTo " , 0 ) ; <nl> - mongo : : replset : : ReplSource a ( b . obj ( ) ) ; <nl> + mongo : : repl : : ReplSource a ( b . obj ( ) ) ; <nl> for ( vector < BSONObj > : : iterator i = ops . begin ( ) ; i ! = ops . end ( ) ; + + i ) { <nl> if ( 0 ) { <nl> mongo : : unittest : : log ( ) < < " op : " < < * i < < endl ; <nl> namespace ReplTests { <nl> class DatabaseIgnorerBasic { <nl> public : <nl> void run ( ) { <nl> - mongo : : replset : : DatabaseIgnorer d ; <nl> + mongo : : repl : : DatabaseIgnorer d ; <nl> ASSERT ( ! d . ignoreAt ( " a " , OpTime ( 4 , 0 ) ) ) ; <nl> d . doIgnoreUntilAfter ( " a " , OpTime ( 5 , 0 ) ) ; <nl> ASSERT ( d . ignoreAt ( " a " , OpTime ( 4 , 0 ) ) ) ; <nl> namespace ReplTests { <nl> class DatabaseIgnorerUpdate { <nl> public : <nl> void run ( ) { <nl> - mongo : : replset : : DatabaseIgnorer d ; <nl> + mongo : : repl : : DatabaseIgnorer d ; <nl> d . doIgnoreUntilAfter ( " a " , OpTime ( 5 , 0 ) ) ; <nl> d . doIgnoreUntilAfter ( " a " , OpTime ( 6 , 0 ) ) ; <nl> ASSERT ( d . ignoreAt ( " a " , OpTime ( 5 , 5 ) ) ) ; <nl> namespace ReplTests { <nl> class ReplSetMemberCfgEquality : public Base { <nl> public : <nl> void run ( ) { <nl> - mongo : : replset : : ReplSetConfig : : MemberCfg m1 , m2 ; <nl> + mongo : : repl : : ReplSetConfig : : MemberCfg m1 , m2 ; <nl> verify ( m1 = = m2 ) ; <nl> m1 . tags [ " x " ] = " foo " ; <nl> verify ( m1 ! 
= m2 ) ; <nl> namespace ReplTests { <nl> } <nl> } ; <nl> <nl> - class SyncTest : public mongo : : replset : : Sync { <nl> + class SyncTest : public mongo : : repl : : Sync { <nl> public : <nl> bool returnEmpty ; <nl> - SyncTest ( ) : mongo : : replset : : Sync ( " " ) , returnEmpty ( false ) { } <nl> + SyncTest ( ) : mongo : : repl : : Sync ( " " ) , returnEmpty ( false ) { } <nl> virtual ~ SyncTest ( ) { } <nl> virtual BSONObj getMissingDoc ( Database * db , const BSONObj & o ) { <nl> if ( returnEmpty ) { <nl> namespace ReplTests { <nl> <nl> / / this should fail because we can ' t connect <nl> try { <nl> - mongo : : replset : : Sync badSource ( " localhost : 123 " ) ; <nl> + mongo : : repl : : Sync badSource ( " localhost : 123 " ) ; <nl> badSource . getMissingDoc ( db ( ) , o ) ; <nl> } <nl> catch ( DBException & ) { <nl> mmm a / src / mongo / s / d_migrate . cpp <nl> ppp b / src / mongo / s / d_migrate . cpp <nl> namespace mongo { <nl> / / if we do a w = 2 after every write <nl> bool secondaryThrottle = cmdObj [ " secondaryThrottle " ] . trueValue ( ) ; <nl> if ( secondaryThrottle ) { <nl> - if ( replset : : theReplSet ) { <nl> - if ( replset : : theReplSet - > config ( ) . getMajority ( ) < = 1 ) { <nl> + if ( repl : : theReplSet ) { <nl> + if ( repl : : theReplSet - > config ( ) . getMajority ( ) < = 1 ) { <nl> secondaryThrottle = false ; <nl> warning ( ) < < " not enough nodes in set to use secondaryThrottle : " <nl> - < < " majority : " < < replset : : theReplSet - > config ( ) . getMajority ( ) <nl> + < < " majority : " < < repl : : theReplSet - > config ( ) . getMajority ( ) <nl> < < endl ; <nl> } <nl> } <nl> - else if ( ! replset : : anyReplEnabled ( ) ) { <nl> + else if ( ! repl : : anyReplEnabled ( ) ) { <nl> secondaryThrottle = false ; <nl> warning ( ) < < " secondaryThrottle selected but no replication " < < endl ; <nl> } <nl> namespace mongo { <nl> verify ( ! min . isEmpty ( ) ) ; <nl> verify ( ! max . isEmpty ( ) ) ; <nl> <nl> - replSetMajorityCount = replset : : theReplSet ? <nl> - replset : : theReplSet - > config ( ) . getMajority ( ) : 0 ; <nl> + replSetMajorityCount = repl : : theReplSet ? <nl> + repl : : theReplSet - > config ( ) . getMajority ( ) : 0 ; <nl> <nl> log ( ) < < " starting receiving - end of migration of chunk " < < min < < " - > " < < max < < <nl> " for collection " < < ns < < " from " < < from <nl> namespace mongo { <nl> } <nl> <nl> / / make sure to create index on secondaries as well <nl> - replset : : logOp ( txn , " i " , db - > getSystemIndexesName ( ) . c_str ( ) , idx , <nl> + repl : : logOp ( txn , " i " , db - > getSystemIndexesName ( ) . c_str ( ) , idx , <nl> NULL , NULL , true / * fromMigrate * / ) ; <nl> } <nl> <nl> namespace mongo { <nl> clonedBytes + = o . objsize ( ) ; <nl> <nl> if ( secondaryThrottle & & thisTime > 0 ) { <nl> - if ( ! replset : : waitForReplication ( cc ( ) . getLastOp ( ) , <nl> + if ( ! repl : : waitForReplication ( cc ( ) . 
getLastOp ( ) , <nl> 2 , 60 / * seconds to wait * / ) ) { <nl> warning ( ) < < " secondaryThrottle on , but doc insert timed out after 60 seconds , continuing " < < endl ; <nl> } <nl> namespace mongo { <nl> / / if replication is on , try to force enough secondaries to catch up <nl> / / TODO opReplicatedEnough should eventually honor priorities and geo - awareness <nl> / / for now , we try to replicate to a sensible number of secondaries <nl> - return replset : : opReplicatedEnough ( lastOpApplied , replSetMajorityCount ) ; <nl> + return repl : : opReplicatedEnough ( lastOpApplied , replSetMajorityCount ) ; <nl> } <nl> <nl> bool flushPendingWrites ( const ReplTime & lastOpApplied ) { <nl> namespace mongo { <nl> migrateStatus . shardKeyPattern = keya . getOwned ( ) ; <nl> } <nl> <nl> - if ( migrateStatus . secondaryThrottle & & ! replset : : anyReplEnabled ( ) ) { <nl> + if ( migrateStatus . secondaryThrottle & & ! repl : : anyReplEnabled ( ) ) { <nl> warning ( ) < < " secondaryThrottle asked for , but not replication " < < endl ; <nl> migrateStatus . secondaryThrottle = false ; <nl> } <nl> mmm a / src / mongo / s / d_state . cpp <nl> ppp b / src / mongo / s / d_state . cpp <nl> namespace mongo { <nl> } <nl> <nl> / / we can run on a slave up to here <nl> - if ( ! replset : : _isMaster ( ) ) { <nl> + if ( ! repl : : _isMaster ( ) ) { <nl> result . append ( " errmsg " , " not master " ) ; <nl> result . append ( " note " , " from post init in setShardVersion " ) ; <nl> return false ; <nl> namespace mongo { <nl> if ( ! shardingState . enabled ( ) ) <nl> return true ; <nl> <nl> - if ( ! replset : : isMasterNs ( ns . c_str ( ) ) ) { <nl> + if ( ! repl : : isMasterNs ( ns . c_str ( ) ) ) { <nl> / / right now connections to secondaries aren ' t versioned at all <nl> return true ; <nl> } <nl> mmm a / src / mongo / tools / oplog . cpp <nl> ppp b / src / mongo / tools / oplog . cpp <nl> class OplogTool : public Tool { <nl> <nl> toolInfoLog ( ) < < " going to connect " < < std : : endl ; <nl> <nl> - replset : : OplogReader r ; <nl> + repl : : OplogReader r ; <nl> r . setTailingQueryOptions ( QueryOption_SlaveOk | QueryOption_AwaitData ) ; <nl> r . connect ( mongoOplogGlobalParams . from ) ; <nl> <nl> | SERVER - 14043 s / namespace replset / namespace repl / | mongodb/mongo | e4735eb2de26e12c8048fdd85340f086bd8ab447 | 2014-05-27T09:44:51Z |
mmm a / tests / pthread / test_pthread_mandelbrot . cpp <nl> ppp b / tests / pthread / test_pthread_mandelbrot . cpp <nl> uint32_t ColorMap ( int iter ) <nl> <nl> float r , g , b ; <nl> float h = ( float ) iter ; <nl> - / / h = sqrtf ( h ) ; <nl> h = log ( h ) * 100 . f ; <nl> - h = fmod ( h , 360 . f ) ; <nl> + if ( h < 0 . f ) h = 0 . f ; <nl> + <nl> + / / h = fmod ( h , 360 . f ) ; / / fmod gives weird graphical artifacts ? <nl> + if ( h > = 360 . f ) h - = ( ( int ) ( h / 360 . f ) ) * 360 . f ; <nl> + <nl> float s = 0 . 5f ; <nl> float v = 0 . 5f ; <nl> HSVtoRGB ( & r , & g , & b , h , s , v ) ; <nl> uint32_t ColorMap ( int iter ) <nl> int B = b * 255 . f ; <nl> return 0xFF000000 | ( B ) | ( G < < 8 ) | ( R < < 16 ) ; <nl> <nl> - <nl> - / * <nl> + / * <nl> unsigned int i = ( iter ) * 10 ; <nl> / / unsigned int i = ( iter - si ) * 10 ; <nl> if ( i > 255 ) i = 255 ; <nl> i = 255 - i ; <nl> if ( i < 30 ) i = 30 ; <nl> return 0xFF000000 | ( i ) | ( i < < 8 ) | ( i < < 16 ) ; <nl> - * / <nl> + * / <nl> } <nl> <nl> - int ComputeMandelbrot ( float * srcReal , float * srcImag , uint32_t * dst , int strideSrc , int strideDst , int x , int y , int w , int h , float left , float top , float incrX , float incrY , int numItersBefore , int numIters ) <nl> + unsigned long long ComputeMandelbrot ( float * srcReal , float * srcImag , uint32_t * dst , int strideSrc , int strideDst , int x , int y , int yIncr , int w , int h , float left , float top , float incrX , float incrY , unsigned int numItersBefore , unsigned int numIters ) <nl> { <nl> - for ( int Y = y ; Y < y + h ; + + Y ) <nl> + for ( int Y = y ; Y < h ; Y + = yIncr ) <nl> { <nl> float * sr = ( float * ) ( ( uintptr_t ) srcReal + strideSrc * Y ) + x ; <nl> float * si = ( float * ) ( ( uintptr_t ) srcImag + strideSrc * Y ) + x ; <nl> uint32_t * d = ( uint32_t * ) ( ( uintptr_t ) dst + strideDst * Y ) + x ; <nl> float imag = top + Y * incrY ; <nl> - float real = left + x * incrX ; <nl> for ( int X = 0 ; X < w ; + + X ) <nl> { <nl> + float real = left + ( x + X ) * incrX ; <nl> float v_real = sr [ X ] ; <nl> if ( v_real ! = INFINITY ) <nl> { <nl> float v_imag = si [ X ] ; <nl> - for ( int i = 0 ; i < numIters ; + + i ) <nl> + for ( unsigned int i = 0 ; i < numIters ; + + i ) <nl> { <nl> / / ( x + yi ) ^ 2 = x ^ 2 - y ^ 2 + 2xyi <nl> / / | | x_ + yi | | ^ 2 = x ^ 2 + y ^ 2 <nl> int ComputeMandelbrot ( float * srcReal , float * srcImag , uint32_t * dst , int strideS <nl> sr [ X ] = v_real ; <nl> si [ X ] = v_imag ; <nl> } <nl> - real + = incrX ; <nl> } <nl> } <nl> - return h * w * numIters ; <nl> + return ( unsigned long long ) ( ( h - y ) / yIncr ) * w * numIters ; <nl> } <nl> <nl> / / Not strictly correct anyzero_ps , but faster , and depends on that color alpha channel is always either 0xFF or 0 . 
<nl> int ynotzero_ss ( __m128 m ) { return _mm_ucomineq_ss ( _mm_shuffle_ps ( m , m , _MM_SHUF <nl> int znotzero_ss ( __m128 m ) { return _mm_ucomineq_ss ( _mm_movehl_ps ( m , m ) , _mm_setzero_ps ( ) ) ; } <nl> int wnotzero_ss ( __m128 m ) { return _mm_ucomineq_ss ( _mm_shuffle_ps ( m , m , _MM_SHUFFLE ( 3 , 3 , 3 , 3 ) ) , _mm_setzero_ps ( ) ) ; } <nl> <nl> - int ComputeMandelbrot_SSE ( float * srcReal , float * srcImag , uint32_t * dst , int strideSrc , int strideDst , int x , int y , int w , int h , float left , float top , float incrX , float incrY , int numItersBefore , int numIters ) <nl> + unsigned long long ComputeMandelbrot_SSE ( float * srcReal , float * srcImag , uint32_t * dst , int strideSrc , int strideDst , int x , int y , int yIncr , int w , int h , float left , float top , float incrX , float incrY , unsigned int numItersBefore , unsigned int numIters ) <nl> { <nl> - for ( int Y = y ; Y < y + h ; + + Y ) <nl> + for ( int Y = y ; Y < h ; Y + = yIncr ) <nl> { <nl> float * sr = ( float * ) ( ( uintptr_t ) srcReal + strideSrc * Y ) + x ; <nl> float * si = ( float * ) ( ( uintptr_t ) srcImag + strideSrc * Y ) + x ; <nl> uint32_t * d = ( uint32_t * ) ( ( uintptr_t ) dst + strideDst * Y ) + x ; <nl> float imag = top + Y * incrY ; <nl> __m128 Imag = _mm_set1_ps ( imag ) ; <nl> - float real = left + x * incrX ; <nl> - __m128 Real = _mm_set_ps ( real + 3 * incrX , real + 2 * incrX , real + incrX , real ) ; <nl> __m128 four = _mm_set1_ps ( 4 . f ) ; <nl> for ( int X = 0 ; X < w ; X + = 4 ) <nl> { <nl> + float real = left + ( x + X ) * incrX ; <nl> + __m128 Real = _mm_set_ps ( real + 3 * incrX , real + 2 * incrX , real + incrX , real ) ; <nl> __m128 v_real = _mm_loadu_ps ( sr + X ) ; <nl> / / float v_real = sr [ X ] ; <nl> / / if ( v_real ! = INFINITY ) <nl> int ComputeMandelbrot_SSE ( float * srcReal , float * srcImag , uint32_t * dst , int str <nl> / / if ( d [ X ] = = 0 | | d [ X + 1 ] = = 0 | | d [ X + 2 ] = = 0 | | d [ X + 3 ] = = 0 ) <nl> { <nl> __m128 oldIterating = _mm_cmpeq_ps ( oldColor , _mm_setzero_ps ( ) ) ; <nl> - for ( int i = 0 ; i < numIters ; + + i ) <nl> + for ( unsigned int i = 0 ; i < numIters ; + + i ) <nl> { <nl> / / ( x + yi ) ^ 2 = x ^ 2 - y ^ 2 + 2xyi <nl> / / | | x_ + yi | | ^ 2 = x ^ 2 + y ^ 2 <nl> int ComputeMandelbrot_SSE ( float * srcReal , float * srcImag , uint32_t * dst , int str <nl> _mm_storeu_ps ( si + X , v_imag ) ; <nl> } <nl> } <nl> - real + = incrX * 4 ; <nl> - Real = _mm_set_ps ( real + 3 * incrX , real + 2 * incrX , real + incrX , real ) ; <nl> + / / real + = incrX * 4 ; <nl> + / / Real = _mm_set_ps ( real + 3 * incrX , real + 2 * incrX , real + incrX , real ) ; <nl> } <nl> } <nl> - return h * w * numIters ; <nl> + return ( unsigned long long ) ( ( h - y ) / yIncr ) * w * numIters ; <nl> } <nl> <nl> const int W = 512 ; <nl> float incrY = 3 . f / W ; <nl> float left = - 2 . f ; <nl> float top = 0 . f - incrY * H / 2 . 
f ; <nl> <nl> - <nl> - volatile int numItersBefore = 0 ; <nl> - int numItersPerFrame = 10 ; <nl> + volatile unsigned int numItersDoneOnCanvas = 0 ; <nl> + unsigned int numItersPerFrame = 10 ; <nl> <nl> # define MAX_NUM_THREADS 16 <nl> # define NUM_THREADS 2 <nl> void * mandelbrot_thread ( void * arg ) <nl> double t0 = emscripten_get_now ( ) ; <nl> int ni ; <nl> if ( use_sse ) <nl> - ni = ComputeMandelbrot_SSE ( mandelReal , mandelImag , outputImage , sizeof ( float ) * W , sizeof ( uint32_t ) * W , W * idx / numTasks , 0 , W / numTasks , H , left , top , incrX , incrY , numItersBefore , numItersPerFrame ) ; <nl> + ni = ComputeMandelbrot_SSE ( mandelReal , mandelImag , outputImage , sizeof ( float ) * W , sizeof ( uint32_t ) * W , 0 , idx , numTasks , W , H , left , top , incrX , incrY , numItersDoneOnCanvas , numItersPerFrame ) ; <nl> else <nl> - ni = ComputeMandelbrot ( mandelReal , mandelImag , outputImage , sizeof ( float ) * W , sizeof ( uint32_t ) * W , W * idx / numTasks , 0 , W / numTasks , H , left , top , incrX , incrY , numItersBefore , numItersPerFrame ) ; <nl> + ni = ComputeMandelbrot ( mandelReal , mandelImag , outputImage , sizeof ( float ) * W , sizeof ( uint32_t ) * W , 0 , idx , numTasks , W , H , left , top , incrX , incrY , numItersDoneOnCanvas , numItersPerFrame ) ; <nl> / / emscripten_atomic_add_u32 ( & numIters , ni ) ; <nl> double t1 = emscripten_get_now ( ) ; <nl> numIters [ idx ] + = ni ; <nl> void register_tasks ( ) <nl> { <nl> double t0 = emscripten_get_now ( ) ; <nl> if ( use_sse ) <nl> - numIters [ 0 ] + = ComputeMandelbrot_SSE ( mandelReal , mandelImag , outputImage , sizeof ( float ) * W , sizeof ( uint32_t ) * W , W * i / numTasks , 0 , W / numTasks , H , left , top , incrX , incrY , numItersBefore , numItersPerFrame ) ; <nl> + numIters [ 0 ] + = ComputeMandelbrot_SSE ( mandelReal , mandelImag , outputImage , sizeof ( float ) * W , sizeof ( uint32_t ) * W , W * i / numTasks , 0 , 1 , W / numTasks , H , left , top , incrX , incrY , numItersDoneOnCanvas , numItersPerFrame ) ; <nl> else <nl> - numIters [ 0 ] + = ComputeMandelbrot ( mandelReal , mandelImag , outputImage , sizeof ( float ) * W , sizeof ( uint32_t ) * W , W * i / numTasks , 0 , W / numTasks , H , left , top , incrX , incrY , numItersBefore , numItersPerFrame ) ; <nl> + numIters [ 0 ] + = ComputeMandelbrot ( mandelReal , mandelImag , outputImage , sizeof ( float ) * W , sizeof ( uint32_t ) * W , W * i / numTasks , 0 , 1 , W / numTasks , H , left , top , incrX , incrY , numItersDoneOnCanvas , numItersPerFrame ) ; <nl> double t1 = emscripten_get_now ( ) ; <nl> timeSpentInMandelbrot [ 0 ] + = t1 - t0 ; <nl> } <nl> void wait_tasks ( ) <nl> void main_tick ( ) <nl> { <nl> wait_tasks ( ) ; <nl> - numItersBefore + = numItersPerFrame ; <nl> + numItersDoneOnCanvas + = numItersPerFrame ; <nl> <nl> double t = emscripten_get_now ( ) ; <nl> double dt = t - prevT ; <nl> void main_tick ( ) <nl> # endif <nl> <nl> float iterSize = 1 . f / ( incrX < incrY ? incrX : incrY ) ; <nl> - int minItersBeforeDisplaying = 50 + ( int ) ( iterSize / 250 . f ) ; <nl> + unsigned int minItersBeforeDisplaying = 50 + ( int ) ( iterSize / 10000 . f ) ; <nl> prevT = t ; <nl> - if ( numItersBefore > = minItersBeforeDisplaying ) <nl> + if ( numItersDoneOnCanvas > = minItersBeforeDisplaying ) <nl> { <nl> top + = dt * vScroll * incrX / 5 . f ; <nl> left + = dt * hScroll * incrY / 5 . f ; <nl> void main_tick ( ) <nl> float incrXNew = incrX + dt * zoom * incrX / 1000 . 0 ; <nl> float incrYNew = incrY + dt * zoom * incrX / 1000 . 
0 ; <nl> <nl> - if ( incrXNew > 1 . f / 200000 . f & & incrYNew > 1 . f / 200000 . f ) <nl> + if ( incrXNew > 1 . f / 20000000 . f & & incrYNew > 1 . f / 20000000 . f ) / / Stop zooming in when single - precision floating point accuracy starts to visibly break apart . <nl> { <nl> left + = ( incrX - incrXNew ) * W / 2 . f ; <nl> top + = ( incrY - incrYNew ) * H / 2 . f ; <nl> void main_tick ( ) <nl> } <nl> <nl> # ifndef NO_SDL <nl> - if ( numItersBefore > = minItersBeforeDisplaying ) <nl> + if ( numItersDoneOnCanvas > = minItersBeforeDisplaying ) <nl> { <nl> if ( SDL_MUSTLOCK ( screen ) ) SDL_LockSurface ( screen ) ; <nl> memcpy ( screen - > pixels , outputImage , sizeof ( outputImage ) ) ; <nl> void main_tick ( ) <nl> <nl> int new_use_sse = EM_ASM_INT_V ( return document . getElementById ( ' use_sse ' ) . checked ) ; <nl> <nl> - if ( numItersBefore > = minItersBeforeDisplaying | | new_use_sse ! = use_sse ) <nl> + if ( numItersDoneOnCanvas > = minItersBeforeDisplaying | | new_use_sse ! = use_sse ) <nl> { <nl> if ( hScroll ! = 0 . f | | vScroll ! = 0 . f | | zoom ! = 0 . f | | new_use_sse ! = use_sse ) <nl> { <nl> for ( int i = 0 ; i < W * H ; + + i ) <nl> outputImage [ i ] = 0x00000000 ; <nl> - numItersBefore = 0 ; <nl> + numItersDoneOnCanvas = 0 ; <nl> smallestIterOut = 0x7FFFFFFF ; <nl> memset ( mandelReal , 0 , sizeof ( mandelReal ) ) ; <nl> memset ( mandelImag , 0 , sizeof ( mandelImag ) ) ; <nl> void main_tick ( ) <nl> <nl> numItersPerFrame = EM_ASM_INT_V ( return parseInt ( document . getElementById ( ' updates_per_frame ' ) . value ) ; ) ; <nl> if ( numItersPerFrame < 10 ) numItersPerFrame = 10 ; <nl> - if ( numItersPerFrame > 2000 ) numItersPerFrame = 2000 ; <nl> + if ( numItersPerFrame > 50000 ) numItersPerFrame = 50000 ; <nl> <nl> + + framesRendered ; <nl> t = emscripten_get_now ( ) ; <nl> void main_tick ( ) <nl> double itersPerSecond = numItersAllThreads * 1000 . 0 / ( t - lastFPSPrint ) ; <nl> char str [ 256 ] ; <nl> const char * suffix = " " ; <nl> + double itersNum = itersPerSecond ; <nl> <nl> if ( itersPerSecond > 0 . 9 * 1000 * 1000 * 1000 ) <nl> + { <nl> suffix = " G " ; <nl> + itersNum = itersPerSecond / 1000000000 . 0 ; <nl> + } <nl> else if ( itersPerSecond > 0 . 9 * 1000 * 1000 ) <nl> + { <nl> suffix = " M " ; <nl> + itersNum = itersPerSecond / 1000000 . 0 ; <nl> + } <nl> else if ( itersPerSecond > 0 . 9 * 1000 ) <nl> + { <nl> suffix = " K " ; <nl> + itersNum = itersPerSecond / 1000 . 0 ; <nl> + } <nl> double cpuUsageSeconds = mbTime / 1000 . 0 ; <nl> double cpuUsageRatio = mbTime * 100 . 0 / ( t - lastFPSPrint ) ; <nl> - sprintf ( str , " % . 3f % s iterations / second . FPS : % . 2f . CPU usage : % . 2f % % " , itersPerSecond / 1000000000 . 0 , suffix , fps , cpuUsageRatio ) ; <nl> - / / sprintf ( str , " % . 3f % s iterations / second . FPS : % . 2f . Zoom : % f " , itersPerSecond / 1000000000 . 0 , suffix , fps , 1 . f / ( incrX < incrY ? incrX : incrY ) ) ; <nl> + sprintf ( str , " % . 3f % s iterations / second . FPS : % . 2f . CPU usage : % . 2f % % " , itersNum , suffix , fps , cpuUsageRatio ) ; <nl> + / / sprintf ( str , " % . 3f % s iterations / second . FPS : % . 2f . Zoom : % f " , itersNum , suffix , fps , 1 . f / ( incrX < incrY ? incrX : incrY ) ) ; <nl> char str2 [ 256 ] ; <nl> sprintf ( str2 , " document . getElementById ( ' performance ' ) . innerHTML = ' % s ' ; " , str ) ; <nl> emscripten_run_script_string ( str2 ) ; <nl> mmm a / tests / pthread / test_pthread_mandelbrot_shell . html <nl> ppp b / tests / pthread / test_pthread_mandelbrot_shell . 
html <nl> <nl> < canvas class = " emscripten " id = " canvas " oncontextmenu = " event . preventDefault ( ) " > < / canvas > <nl> < / div > <nl> <nl> - Updates per frame : < input type = " number " id = " updates_per_frame " value = 50 min = 10 max = 2000 > < br / > <nl> + Updates per frame : < input type = " number " id = " updates_per_frame " value = 50 min = 10 max = 50000 > < br / > <nl> # of threads : < input type = " number " id = " num_threads " value = 4 min = 1 max = 16 > < br / > <nl> Use SSE : < input type = " checkbox " id = ' use_sse ' > < br / > <nl> Performance : < span id = ' performance ' > - < / span > < br / > <nl> | Mandelbrot improvements . | emscripten-core/emscripten | b9c53111ec12bed1b5283932da7d5b70477d2cc8 | 2015-06-01T12:11:02Z |
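The mandelbrot patch above also changes how work is split across tasks: instead of giving each task a contiguous vertical strip of the image, task idx now starts at row idx and advances by numTasks rows, which spreads slow-converging regions across all tasks. A minimal self-contained C++ sketch of that interleaved row partitioning (names such as renderRows and NUM_TASKS are illustrative, not from the commit):

#include <cstdint>
#include <functional>
#include <thread>
#include <vector>

constexpr int W = 512, H = 512, NUM_TASKS = 4;

// Task idx handles rows idx, idx + NUM_TASKS, idx + 2 * NUM_TASKS, ...
// Interleaving rows balances load: expensive rows end up scattered across
// tasks rather than clustered inside one task's contiguous strip.
void renderRows(std::vector<uint32_t>& image, int idx) {
    for (int y = idx; y < H; y += NUM_TASKS) {
        for (int x = 0; x < W; ++x) {
            image[y * W + x] = 0xFF000000;  // stand-in for per-pixel work
        }
    }
}

int main() {
    std::vector<uint32_t> image(W * H);
    std::vector<std::thread> tasks;
    for (int i = 0; i < NUM_TASKS; ++i)
        tasks.emplace_back(renderRows, std::ref(image), i);
    for (auto& t : tasks) t.join();
}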
mmm a / atom / browser / atom_speech_recognition_manager_delegate . cc <nl> ppp b / atom / browser / atom_speech_recognition_manager_delegate . cc <nl> void AtomSpeechRecognitionManagerDelegate : : OnAudioLevelsChange ( <nl> <nl> void AtomSpeechRecognitionManagerDelegate : : CheckRecognitionIsAllowed ( <nl> int session_id , <nl> - base : : Callback < void ( bool ask_user , bool is_allowed ) > callback ) { <nl> - callback . Run ( true , true ) ; <nl> + base : : OnceCallback < void ( bool ask_user , bool is_allowed ) > callback ) { <nl> + std : : move ( callback ) . Run ( true , true ) ; <nl> } <nl> <nl> content : : SpeechRecognitionEventListener * <nl> mmm a / atom / browser / atom_speech_recognition_manager_delegate . h <nl> ppp b / atom / browser / atom_speech_recognition_manager_delegate . h <nl> class AtomSpeechRecognitionManagerDelegate <nl> / / content : : SpeechRecognitionManagerDelegate : <nl> void CheckRecognitionIsAllowed ( <nl> int session_id , <nl> - base : : Callback < void ( bool ask_user , bool is_allowed ) > callback ) override ; <nl> + base : : OnceCallback < void ( bool ask_user , bool is_allowed ) > callback ) <nl> + override ; <nl> content : : SpeechRecognitionEventListener * GetEventListener ( ) override ; <nl> bool FilterProfanities ( int render_process_id ) override ; <nl> <nl> | Speech recognition : Use BindOnce / OnceCallback / OnceClosure where applicable | electron/electron | 95669048387d1e1e5f8e28cc1f03d137049de219 | 2017-11-24T01:58:16Z |
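The electron change above migrates CheckRecognitionIsAllowed from base::Callback to base::OnceCallback; in Chromium's base library, OnceCallback::Run is rvalue-ref-qualified, so the callback has to be consumed with std::move and cannot be invoked a second time. A rough standard-C++ analogue of that move-to-consume pattern (OnceFn is a stand-in type, not Chromium's actual API):

#include <functional>
#include <iostream>
#include <utility>

// One-shot callback: Run() is &&-qualified, so call sites must write
// std::move(cb).Run(...), making the single use explicit.
class OnceFn {
 public:
  explicit OnceFn(std::function<void(bool, bool)> f) : f_(std::move(f)) {}
  void Run(bool ask_user, bool is_allowed) && {
    auto f = std::move(f_);  // consume the stored callable
    f(ask_user, is_allowed);
  }
 private:
  std::function<void(bool, bool)> f_;
};

void CheckRecognitionIsAllowed(OnceFn callback) {
  std::move(callback).Run(true, true);  // mirrors the patched call above
}

int main() {
  CheckRecognitionIsAllowed(OnceFn([](bool ask, bool ok) {
    std::cout << "ask_user=" << ask << " is_allowed=" << ok << '\n';
  }));
}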
mmm a / tensorflow / compiler / xla / tests / BUILD <nl> ppp b / tensorflow / compiler / xla / tests / BUILD <nl> xla_test ( <nl> ) <nl> <nl> # Tests the dot operation in some cases that can be performed via a <nl> - # runtime call on some backends - e . g . a runtime call to to Eigen . <nl> + # runtime call on some backends - e . g . a runtime call to Eigen . <nl> xla_test ( <nl> name = " dot_operation_runtime_test " , <nl> srcs = [ " dot_operation_test . cc " ] , <nl> mmm a / tensorflow / contrib / distributions / python / ops / binomial . py <nl> ppp b / tensorflow / contrib / distributions / python / ops / binomial . py <nl> def logits ( self ) : <nl> <nl> @ property <nl> def probs ( self ) : <nl> - " " " Probability of of drawing a ` 1 ` . " " " <nl> + " " " Probability of drawing a ` 1 ` . " " " <nl> return self . _probs <nl> <nl> def _batch_shape_tensor ( self ) : <nl> mmm a / tensorflow / contrib / distributions / python / ops / vector_student_t . py <nl> ppp b / tensorflow / contrib / distributions / python / ops / vector_student_t . py <nl> class _VectorStudentT ( transformed_distribution . TransformedDistribution ) : <nl> # # # # Examples <nl> <nl> A single instance of a " Vector Student ' s t - distribution " is defined by a mean <nl> - vector of of length ` k ` and a scale matrix of shape ` k x k ` . <nl> + vector of length ` k ` and a scale matrix of shape ` k x k ` . <nl> <nl> Extra leading dimensions , if provided , allow for batches . <nl> <nl> mmm a / tensorflow / contrib / layers / python / layers / feature_column . py <nl> ppp b / tensorflow / contrib / layers / python / layers / feature_column . py <nl> class _LinearEmbeddingLookupArguments ( <nl> " combiner " ] ) ) : <nl> " " " Represents the information needed from a column for embedding lookup . <nl> <nl> - Used to to compute DNN inputs and weighted sum . <nl> + Used to compute DNN inputs and weighted sum . <nl> " " " <nl> pass <nl> <nl> class _DeepEmbeddingLookupArguments ( <nl> " trainable " ] ) ) : <nl> " " " Represents the information needed from a column for embedding lookup . <nl> <nl> - Used to to compute DNN inputs and weighted sum . <nl> + Used to compute DNN inputs and weighted sum . <nl> " " " <nl> pass <nl> <nl> mmm a / tensorflow / contrib / layers / python / layers / layers . py <nl> ppp b / tensorflow / contrib / layers / python / layers / layers . py <nl> def convolution ( inputs , <nl> with " NC " . <nl> num_outputs : Integer , the number of output filters . <nl> kernel_size : A sequence of N positive integers specifying the spatial <nl> - dimensions of of the filters . Can be a single integer to specify the same <nl> + dimensions of the filters . Can be a single integer to specify the same <nl> value for all spatial dimensions . <nl> stride : A sequence of N positive integers specifying the stride at which to <nl> compute output . Can be a single integer to specify the same value for all <nl> mmm a / tensorflow / contrib / training / python / training / bucket_ops . py <nl> ppp b / tensorflow / contrib / training / python / training / bucket_ops . py <nl> def bucket ( tensors , <nl> This function is implemented using several queues . A ` QueueRunner ` for the <nl> queues is added to the current ` Graph ` ' s ` QUEUE_RUNNER ` collection . <nl> <nl> - As the returned tensors are the result of of a dequeue operation , evaluating <nl> + As the returned tensors are the result of a dequeue operation , evaluating <nl> them will throw a ` tf . errors . 
OutOfRangeError ` when the input queue is <nl> exhausted . If these tensors are feeding another input queue , its queue runner <nl> will catch this exception , however , if they are used in your main thread <nl> mmm a / tensorflow / contrib / training / python / training / evaluation_test . py <nl> ppp b / tensorflow / contrib / training / python / training / evaluation_test . py <nl> def testEvaluationLoopTimeout ( self ) : <nl> if not gfile . Exists ( checkpoint_dir ) : <nl> gfile . MakeDirs ( checkpoint_dir ) <nl> <nl> - # We need a variable that that the saver will try to restore . <nl> + # We need a variable that the saver will try to restore . <nl> variables . get_or_create_global_step ( ) <nl> <nl> # Run with placeholders . If we actually try to evaluate this , we ' d fail <nl> def testEvaluateWithEvalFeedDict ( self ) : <nl> ' evaluate_with_eval_feed_dict ' ) <nl> self . _train_model ( checkpoint_dir , num_steps = 1 ) <nl> <nl> - # We need a variable that that the saver will try to restore . <nl> + # We need a variable that the saver will try to restore . <nl> variables . get_or_create_global_step ( ) <nl> <nl> # Create a variable and an eval op that increments it with a placeholder . <nl> mmm a / tensorflow / core / framework / op_kernel . h <nl> ppp b / tensorflow / core / framework / op_kernel . h <nl> class OpKernelContext { <nl> StringPiece output_name , const TensorShape & output_shape , <nl> Tensor * * output ) TF_MUST_USE_RESULT ; <nl> <nl> - / / Tries to reuse one of of the inputs given in input_indices as a temporary . <nl> + / / Tries to reuse one of the inputs given in input_indices as a temporary . <nl> / / If none of the given inputs can be forwarded , calls <nl> / / allocate_temp ( ) to allocate a new temporary buffer . <nl> Status forward_input_or_allocate_temp ( <nl> mmm a / tensorflow / core / kernels / fused_batch_norm_op . cc <nl> ppp b / tensorflow / core / kernels / fused_batch_norm_op . cc <nl> struct FusedBatchNormGrad < CPUDevice , T > { <nl> typename TTypes < T > : : Vec scale_backprop ( scale_backprop_output - > vec < T > ( ) ) ; <nl> typename TTypes < T > : : Vec offset_backprop ( offset_backprop_output - > vec < T > ( ) ) ; <nl> <nl> - / / Note : the following formulas are used to to compute the gradients for <nl> + / / Note : the following formulas are used to compute the gradients for <nl> / / back propagation . <nl> / / x_backprop = scale * rsqrt ( variance + epsilon ) * <nl> / / [ y_backprop - mean ( y_backprop ) - ( x - mean ( x ) ) * <nl> mmm a / tensorflow / core / kernels / parameterized_truncated_normal_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / parameterized_truncated_normal_op_gpu . cu . cc <nl> limitations under the License . <nl> # ifdef COMPILER_MSVC <nl> / / msvc does not support unroll . One could try the loop pragma but we need to <nl> / / take a closer look if this generates better code in this case . For now let <nl> - / / the compiler take care of of it . <nl> + / / the compiler take care of it . <nl> # define UNROLL <nl> # else <nl> # define UNROLL _Pragma ( " unroll " ) <nl> mmm a / tensorflow / core / lib / gtl / flatrep . h <nl> ppp b / tensorflow / core / lib / gtl / flatrep . h <nl> namespace internal { <nl> / / <nl> / / The representation is an open - addressed hash table . Conceptually , <nl> / / the representation is a flat array of entries . 
However we <nl> - / / structure it as an array of of buckets where each bucket holds <nl> + / / structure it as an array of buckets where each bucket holds <nl> / / kWidth entries along with metadata for the kWidth entries . The <nl> / / metadata marker is <nl> / / <nl> mmm a / tensorflow / python / layers / convolutional . py <nl> ppp b / tensorflow / python / layers / convolutional . py <nl> class SeparableConv2D ( Conv2D ) : <nl> filters : Integer , the dimensionality of the output space ( i . e . the number <nl> of filters in the convolution ) . <nl> kernel_size : A tuple or list of 2 integers specifying the spatial <nl> - dimensions of of the filters . Can be a single integer to specify the same <nl> + dimensions of the filters . Can be a single integer to specify the same <nl> value for all spatial dimensions . <nl> strides : A tuple or list of 2 positive integers specifying the strides <nl> of the convolution . Can be a single integer to specify the same value for <nl> def separable_conv2d ( inputs , <nl> filters : Integer , the dimensionality of the output space ( i . e . the number <nl> of filters in the convolution ) . <nl> kernel_size : A tuple or list of 2 integers specifying the spatial <nl> - dimensions of of the filters . Can be a single integer to specify the same <nl> + dimensions of the filters . Can be a single integer to specify the same <nl> value for all spatial dimensions . <nl> strides : A tuple or list of 2 positive integers specifying the strides <nl> of the convolution . Can be a single integer to specify the same value for <nl> class Conv2DTranspose ( Conv2D ) : <nl> filters : Integer , the dimensionality of the output space ( i . e . the number <nl> of filters in the convolution ) . <nl> kernel_size : A tuple or list of 2 positive integers specifying the spatial <nl> - dimensions of of the filters . Can be a single integer to specify the same <nl> + dimensions of the filters . Can be a single integer to specify the same <nl> value for all spatial dimensions . <nl> strides : A tuple or list of 2 positive integers specifying the strides <nl> of the convolution . Can be a single integer to specify the same value for <nl> def conv2d_transpose ( inputs , <nl> filters : Integer , the dimensionality of the output space ( i . e . the number <nl> of filters in the convolution ) . <nl> kernel_size : A tuple or list of 2 positive integers specifying the spatial <nl> - dimensions of of the filters . Can be a single integer to specify the same <nl> + dimensions of the filters . Can be a single integer to specify the same <nl> value for all spatial dimensions . <nl> strides : A tuple or list of 2 positive integers specifying the strides <nl> of the convolution . Can be a single integer to specify the same value for <nl> def conv3d_transpose ( inputs , <nl> filters : Integer , the dimensionality of the output space ( i . e . the number <nl> of filters in the convolution ) . <nl> kernel_size : A tuple or list of 3 positive integers specifying the spatial <nl> - dimensions of of the filters . Can be a single integer to specify the same <nl> + dimensions of the filters . Can be a single integer to specify the same <nl> value for all spatial dimensions . <nl> strides : A tuple or list of 3 positive integers specifying the strides <nl> of the convolution . Can be a single integer to specify the same value for <nl> mmm a / tensorflow / python / ops / distributions / dirichlet_multinomial . py <nl> ppp b / tensorflow / python / ops / distributions / dirichlet_multinomial . 
py <nl> class DirichletMultinomial ( distribution . Distribution ) : <nl> The last ` concentration ` dimension parametrizes a single Dirichlet - Multinomial <nl> distribution . When calling distribution functions ( e . g . , ` dist . prob ( counts ) ` ) , <nl> ` concentration ` , ` total_count ` and ` counts ` are broadcast to the same shape . <nl> - The last dimension of of ` counts ` corresponds single Dirichlet - Multinomial <nl> + The last dimension of ` counts ` corresponds single Dirichlet - Multinomial <nl> distributions . <nl> <nl> Distribution parameters are automatically broadcast in all functions ; see <nl> mmm a / tensorflow / python / ops / distributions / multinomial . py <nl> ppp b / tensorflow / python / ops / distributions / multinomial . py <nl> def logits ( self ) : <nl> <nl> @ property <nl> def probs ( self ) : <nl> - " " " Probability of of drawing a ` 1 ` in that coordinate . " " " <nl> + " " " Probability of drawing a ` 1 ` in that coordinate . " " " <nl> return self . _probs <nl> <nl> def _batch_shape_tensor ( self ) : <nl> mmm a / tensorflow / python / ops / distributions / util . py <nl> ppp b / tensorflow / python / ops / distributions / util . py <nl> <nl> <nl> def assert_close ( <nl> x , y , data = None , summarize = None , message = None , name = " assert_close " ) : <nl> - " " " Assert that that x and y are within machine epsilon of each other . <nl> + " " " Assert that x and y are within machine epsilon of each other . <nl> <nl> Args : <nl> x : Floating - point ` Tensor ` <nl> mmm a / tensorflow / python / ops / summary_op_util . py <nl> ppp b / tensorflow / python / ops / summary_op_util . py <nl> def summary_scope ( name , family = None , default_name = None , values = None ) : <nl> If ` family ` is set , then the tag name will be ' < family > / < scope_name > ' , where <nl> ` scope_name ` is ` < outer_scope > / < family > / < name > ` . This ensures that ` family ` <nl> is always the prefix of the tag ( and unmodified ) , while ensuring the scope <nl> - respects the outer scope from this this summary was created . <nl> + respects the outer scope from this summary was created . <nl> <nl> Args : <nl> name : A name for the generated summary node . <nl> mmm a / tensorflow / tools / docs / generate_lib . py <nl> ppp b / tensorflow / tools / docs / generate_lib . py <nl> def add_base_dir_argument ( self , default_base_dir ) : <nl> ' - - base_dir ' , <nl> type = str , <nl> default = default_base_dir , <nl> - help = ' Base directory to to strip from file names referenced in docs . ' ) <nl> + help = ' Base directory to strip from file names referenced in docs . ' ) <nl> <nl> def parse_known_args ( self ) : <nl> flags , _ = self . argument_parser . parse_known_args ( ) <nl> | Merge pull request from taehoonlee / fix_typos | tensorflow/tensorflow | b183be3b4d62c1033bcb3c0fe2b24ba3eef86da7 | 2017-06-28T15:50:37Z |
mmm a / ios / sdk / WeexSDK / Sources / Component / WXTextComponent . m <nl> ppp b / ios / sdk / WeexSDK / Sources / Component / WXTextComponent . m <nl> - ( UIImage * ) drawTextWithBounds : ( CGRect ) bounds padding : ( UIEdgeInsets ) padding <nl> NSMutableAttributedString * attributedStringCopy = [ _attributedString mutableCopy ] ; <nl> CTFramesetterRef framesetter = CTFramesetterCreateWithAttributedString ( ( __bridge CFAttributedStringRef ) attributedStringCopy ) ; <nl> CTFrameRef frame = CTFramesetterCreateFrame ( framesetter , CFRangeMake ( 0 , 0 ) , path , NULL ) ; <nl> - CTFrameDraw ( frame , context ) ; <nl> + <nl> + CFArrayRef lines = CTFrameGetLines ( frame ) ; <nl> + CFIndex lineCount = CFArrayGetCount ( lines ) ; <nl> + CGPoint lineOrigins [ lineCount ] ; <nl> + CTFrameGetLineOrigins ( frame , CFRangeMake ( 0 , 0 ) , lineOrigins ) ; <nl> + <nl> + CGFloat frameY = 0 ; <nl> + for ( CFIndex index = 0 ; index < lineCount ; index + + ) { <nl> + CTLineRef lineRef = CFArrayGetValueAtIndex ( lines , index ) ; <nl> + CGFloat lineAscent ; <nl> + CGFloat lineDescent ; <nl> + CGFloat lineLeading ; <nl> + <nl> + CTLineGetTypographicBounds ( lineRef , & lineAscent , & lineDescent , & lineLeading ) ; <nl> + CGPoint lineOrigin = lineOrigins [ index ] ; <nl> + <nl> + NSLog ( @ " lineAscent = % f " , lineAscent ) ; <nl> + NSLog ( @ " lineDescent = % f " , lineDescent ) ; <nl> + NSLog ( @ " lineLeading = % f " , lineLeading ) ; <nl> + <nl> + if ( index > 0 ) { <nl> + frameY = frameY - lineAscent ; <nl> + } else { <nl> + frameY = lineOrigin . y ; <nl> + } <nl> + lineOrigin . x + = padding . left ; <nl> + lineOrigin . y - = padding . top ; <nl> + NSLog ( @ " lines : % ld origin : % @ " , index , NSStringFromCGPoint ( lineOrigin ) ) ; <nl> + CGContextSetTextPosition ( context , lineOrigin . x , lineOrigin . y ) ; <nl> + / / CTLineDraw ( lineRef , context ) ; <nl> + CFArrayRef runs = CTLineGetGlyphRuns ( lineRef ) ; <nl> + for ( CFIndex runIndex = 0 ; runIndex < CFArrayGetCount ( runs ) ; runIndex + + ) { <nl> + CTRunRef run = CFArrayGetValueAtIndex ( runs , runIndex ) ; <nl> + CTRunDraw ( run , context , CFRangeMake ( 0 , 0 ) ) ; <nl> + } <nl> + frameY = frameY - lineDescent ; <nl> + } <nl> + <nl> UIImage * image = UIGraphicsGetImageFromCurrentImageContext ( ) ; <nl> UIGraphicsEndImageContext ( ) ; <nl> <nl> - ( NSMutableAttributedString * ) buildCTAttributeString { <nl> paragraphStyle . alignment = _textAlign ; <nl> } <nl> <nl> + / / set default lineBreakMode <nl> + paragraphStyle . lineBreakMode = NSLineBreakByClipping ; <nl> + if ( _textOverflow & & [ _textOverflow length ] > 0 ) { <nl> + if ( [ _textOverflow isEqualToString : @ " ellipsis " ] ) <nl> + paragraphStyle . lineBreakMode = NSLineBreakByTruncatingTail ; <nl> + } <nl> + <nl> if ( _lineHeight ) { <nl> paragraphStyle . maximumLineHeight = _lineHeight ; <nl> paragraphStyle . minimumLineHeight = _lineHeight ; <nl> } <nl> - if ( _lineHeight | | _textAlign ) { <nl> + if ( _lineHeight | | _textAlign | | [ _textOverflow length ] > 0 ) { <nl> [ attributedString addAttribute : ( id ) kCTParagraphStyleAttributeName <nl> value : paragraphStyle <nl> range : ( NSRange ) { 0 , attributedString . length } ] ; <nl> - ( NSAttributedString * ) buildAttributeString <nl> <nl> - ( BOOL ) adjustLineHeight <nl> { <nl> - return YES ; <nl> + return ! 
_coretext ; <nl> } <nl> <nl> - ( NSTextStorage * ) textStorageWithWidth : ( CGFloat ) width <nl> | * [ ios ] line by line to draw | apache/incubator-weex | c94e48b6d1ed5bbe1f9b42bd93c5c226b5008be2 | 2017-04-10T04:17:03Z |
mmm a / Telegram / SourceFiles / base / binary_guard . h <nl> ppp b / Telegram / SourceFiles / base / binary_guard . h <nl> inline void binary_guard : : kill ( ) { <nl> } <nl> <nl> inline void binary_guard : : destroy ( ) { <nl> - if ( _bothAlive ) { <nl> + if ( const auto both = base : : take ( _bothAlive ) ) { <nl> auto old = true ; <nl> - if ( ! _bothAlive - > compare_exchange_strong ( old , false ) ) { <nl> - delete _bothAlive ; <nl> + if ( ! both - > compare_exchange_strong ( old , false ) ) { <nl> + delete both ; <nl> } <nl> } <nl> } <nl> | Fix base : : binary_guard . | telegramdesktop/tdesktop | a58c082cfaae08c97a78def43479db4821ed685d | 2018-09-01T15:56:08Z |
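The tdesktop fix above takes the shared pointer out of _bothAlive before racing on it, so a repeated destroy() on the same guard becomes a no-op, and the compare_exchange_strong decides which of the flag's two owners frees it: the side whose exchange fails knows the other side already died and deletes. A simplified standalone C++ model of that last-owner-deletes handshake (assuming, as in the original, that exactly two guards share one heap-allocated std::atomic<bool> that starts out true):

#include <atomic>

class BinaryGuard {
 public:
  explicit BinaryGuard(std::atomic<bool>* bothAlive) : bothAlive_(bothAlive) {}
  ~BinaryGuard() { destroy(); }

  void destroy() {
    // Take the pointer out first (the role base::take plays in the patch),
    // so a second destroy() on this object does nothing.
    auto* both = bothAlive_;
    bothAlive_ = nullptr;
    if (both) {
      bool old = true;
      // CAS succeeds: we die first and just mark the flag false.
      // CAS fails: the other guard already died, so we free the flag.
      if (!both->compare_exchange_strong(old, false)) {
        delete both;
      }
    }
  }

 private:
  std::atomic<bool>* bothAlive_ = nullptr;
};

int main() {
  auto* flag = new std::atomic<bool>(true);
  BinaryGuard a(flag), b(flag);
  a.destroy();  // flips the flag; b's destructor sees false and deletes it
}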
mmm a / tools / jenkins_scripts / mac / android / generate - js - cxx - bindings . sh <nl> ppp b / tools / jenkins_scripts / mac / android / generate - js - cxx - bindings . sh <nl> if [ - z " $ { COCOS2DX_PULL_BASE + aaa } " ] ; then <nl> echo This script will NOT automatically generate pull requests <nl> echo unless this variable is set . <nl> echo example <nl> - echo COCOS2DX_PULL_BASE = \ " cocos2d / cocos2d - x : gles20 \ " <nl> + echo COCOS2DX_PULL_BASE = \ " cocos2d / cocos2d - x : master \ " <nl> echo COCOS2DX_PULL_BASE = \ " username / repository : branch \ " <nl> echo <nl> echo Exiting with success . <nl> mmm a / tools / tojs / genbindings . sh <nl> ppp b / tools / tojs / genbindings . sh <nl> if [ - z " $ { COCOS2DX_ROOT + aaa } " ] ; then <nl> fi <nl> <nl> if [ - z " $ { CXX_GENERATOR_ROOT + aaa } " ] ; then <nl> - CXX_GENERATOR_ROOT = " $ COCOS2DX_ROOT / tools / cxx - generator " <nl> + CXX_GENERATOR_ROOT = " $ COCOS2DX_ROOT / tools / bindings - generator " <nl> fi <nl> <nl> if [ - z " $ { TOJS_ROOT + aaa } " ] ; then <nl> set - x <nl> LD_LIBRARY_PATH = $ { CLANG_ROOT } / lib $ PYTHON_BIN $ { CXX_GENERATOR_ROOT } / generator . py $ { TO_JS_ROOT } / cocos2dx . ini - s cocos2d - x - o $ { COCOS2DX_ROOT } / scripting / javascript / bindings / generated - n jsb_cocos2dx_auto <nl> <nl> echo " Generating bindings for cocos2dx_extension . . . " <nl> - LD_LIBRARY_PATH = $ { CLANG_ROOT } / lib $ PYTHON_BIN $ { CXX_GENERATOR_ROOT } / generator . py $ { TO_JS_ROOT } / cocos2dx_extension . ini - s cocos2dx_extension - o $ { COCOS2DX_ROOT } / scripting / javascript / bindings / generated - n jsb_cocos2dx_extension_auto <nl> \ No newline at end of file <nl> + LD_LIBRARY_PATH = $ { CLANG_ROOT } / lib $ PYTHON_BIN $ { CXX_GENERATOR_ROOT } / generator . py $ { TO_JS_ROOT } / cocos2dx_extension . ini - s cocos2dx_extension - o $ { COCOS2DX_ROOT } / scripting / javascript / bindings / generated - n jsb_cocos2dx_extension_auto <nl> | Merge pull request from dumganhar / master | cocos2d/cocos2d-x | 7114aef7515343396fd563089e8ad4ce0f00699e | 2013-04-09T03:06:04Z |