Dataset schema (five string columns; reported minimum and maximum lengths):

  column   type     min len   max len
  diff     string   41        2.03M
  msg      string   1         1.5k
  repo     string   5         40
  sha      string   40        40
  time     string   20        20
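Each row pairs a tokenized `diff` with its commit message (`msg`) and provenance (`repo`, `sha`, `time`). Below is a minimal loading sketch; it assumes the rows are stored as JSON Lines under these exact field names, and the file name `commits.jsonl` is hypothetical. The sanity checks only restate the column stats above: `sha` is always 40 characters (a full lowercase-hex git hash, as in the sample row) and `time` is always 20 characters (an ISO-8601 UTC timestamp such as 2011-05-13T23:07:22Z).

```python
import json
import re

PATH = "commits.jsonl"  # hypothetical; the dump does not name its storage format

with open(PATH, encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # Lengths follow the column stats above: sha is a 40-char lowercase
        # hex git hash, time is a 20-char ISO-8601 UTC timestamp.
        assert re.fullmatch(r"[0-9a-f]{40}", row["sha"])
        assert len(row["time"]) == 20
        print(row["repo"], row["sha"][:7], row["msg"].splitlines()[0])
```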
new file mode 100644 <nl> index 000000000000 . . 9043c88224d7 <nl> Binary files / dev / null and b / locale / lt / LC_MESSAGES / bitcoin . mo differ <nl> new file mode 100644 <nl> index 000000000000 . . 81dd8615ee73 <nl> mmm / dev / null <nl> ppp b / locale / lt / LC_MESSAGES / bitcoin . po <nl> <nl> + msgid " " <nl> + msgstr " " <nl> + " Project - Id - Version : \ n " <nl> + " Report - Msgid - Bugs - To : \ n " <nl> + " POT - Creation - Date : 2010 - 10 - 05 10 : 53 + 0300 \ n " <nl> + " PO - Revision - Date : 2011 - 05 - 01 08 : 38 - 0000 \ n " <nl> + " Last - Translator : mewantsbitcoins < mewantsbitcoins @ gmail . com > \ n " <nl> + " Language - Team : \ n " <nl> + " MIME - Version : 1 . 0 \ n " <nl> + " Content - Type : text / plain ; charset = UTF - 8 \ n " <nl> + " Content - Transfer - Encoding : 8bit \ n " <nl> + " X - Poedit - KeywordsList : _ ; gettext ; gettext_noop \ n " <nl> + " X - Poedit - Basepath : . \ n " <nl> + " X - Poedit - SearchPath - 0 : . . / . . / . . \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 162 <nl> + msgid " Usage : " <nl> + msgstr " Komandų vartojimas : " <nl> + <nl> + # : . . / . . / . . / init . cpp : 164 <nl> + msgid " Send command to - server or bitcoind \ n " <nl> + msgstr " Siūsti komandą i - server arba bitcoind \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 165 <nl> + msgid " List commands \ n " <nl> + msgstr " Parodyti visas komandas \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 166 <nl> + msgid " Get help for a command \ n " <nl> + msgstr " Komandos paaiškinimas \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 167 <nl> + msgid " Options : \ n " <nl> + msgstr " Parametrai \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 168 <nl> + msgid " Specify configuration file ( default : bitcoin . conf ) \ n " <nl> + msgstr " Nurodyti konfiguracijos failą ( pagal nutylėjimą : bitcoin . conf ) \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 169 <nl> + msgid " Generate coins \ n " <nl> + msgstr " Generuoti monetas \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 170 <nl> + msgid " Don ' t generate coins \ n " <nl> + msgstr " Negeneruoti monetų \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 171 <nl> + msgid " Start minimized \ n " <nl> + msgstr " Paleisti minimizuotą klientą \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 172 <nl> + msgid " Specify data directory \ n " <nl> + msgstr " Nurodyti duomenų direktoriją \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 173 <nl> + msgid " Connect through socks4 proxy \ n " <nl> + msgstr " Prisijungti per socks4 proksį \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 174 <nl> + msgid " Add a node to connect to \ n " <nl> + msgstr " Pridėti nodą prie kurio bus jungiamasi \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 175 <nl> + msgid " Connect only to the specified node \ n " <nl> + msgstr " Prisijungti tik prie šio nodo \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 176 <nl> + msgid " Accept command line and JSON - RPC commands \ n " <nl> + msgstr " Priimti komandas iš terminalo ir JSON - RPC \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 177 <nl> + msgid " Run in the background as a daemon and accept commands \ n " <nl> + msgstr " Paleisti daemon \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 178 <nl> + msgid " This help message \ n " <nl> + msgstr " Ši pagalbos žinutė \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 284 <nl> + msgid " Error loading addr . dat \ n " <nl> + msgstr " Klaida nuskaitant addr . 
dat \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 290 <nl> + msgid " Error loading blkindex . dat \ n " <nl> + msgstr " Klaida nuskaitant blkindex . dat \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 297 <nl> + msgid " Error loading wallet . dat \ n " <nl> + msgstr " Klaida nuskaitant wallet . dat \ n " <nl> + <nl> + # : . . / . . / . . / init . cpp : 365 <nl> + msgid " Invalid - proxy address " <nl> + msgstr " Klaidingas - proxy adresas " <nl> + <nl> + # : . . / . . / . . / init . cpp : 385 <nl> + msgid " Invalid amount for - paytxfee = < amount > " <nl> + msgstr " Klaidinga suma - paytxfee = < amount > " <nl> + <nl> + # : . . / . . / . . / init . cpp : 389 <nl> + msgid " Warning : - paytxfee is set very high . This is the transaction fee you will pay if you send a transaction . " <nl> + msgstr " Perspėjimas : - paytxfee yra nustatyta labai aukšta suma . Ši suma bus nuskaičiuota kaip mokestis darant pervedimą " <nl> + <nl> + # : . . / . . / . . / main . cpp : 1641 <nl> + msgid " Warning : Disk space is low " <nl> + msgstr " Perspėjimas : Diske trūksta vietos " <nl> + <nl> + # : . . / . . / . . / main . cpp : 3505 <nl> + # , c - format <nl> + msgid " Error : This is an oversized transaction that requires a transaction fee of % s " <nl> + msgstr " Klaida : Šis pervedimas yra labai didelis ir reikalauja % s pervedimo mokesčio " <nl> + <nl> + # : . . / . . / . . / main . cpp : 3507 <nl> + msgid " Error : Transaction creation failed " <nl> + msgstr " Klaida : Pervedimo sukurti nepavyko " <nl> + <nl> + # : . . / . . / . . / main . cpp : 3512 <nl> + # : . . / . . / . . / ui . cpp : 1964 <nl> + # : . . / . . / . . / ui . cpp : 1966 <nl> + # : . . / . . / . . / ui . cpp : 2107 <nl> + # : . . / . . / . . / ui . cpp : 2260 <nl> + msgid " Sending . . . " <nl> + msgstr " Siunčia . . . " <nl> + <nl> + # : . . / . . / . . / main . cpp : 3516 <nl> + msgid " Error : The transaction was rejected . This might happen if some of the coins in your wallet were already spent , such as if you used a copy of wallet . dat and coins were spent in the copy but not marked as spent here . " <nl> + msgstr " Klaida : Pervedimas nepavyko . Tai galėjo atsitikti jei dalis jūsų monetų jau buvo išleista . Pvz . : Jei nodojote kopija wallet . dat kitame kliente ir monetos buvo išleistos ten , bet nepažymėtos kaip išleistos čia . " <nl> + <nl> + # : . . / . . / . . / main . cpp : 3528 <nl> + msgid " Invalid amount " <nl> + msgstr " Neteisinga suma " <nl> + <nl> + # : . . / . . / . . / main . cpp : 3530 <nl> + # : . . / . . / . . / ui . cpp : 2174 <nl> + # : . . / . . / . . / ui . cpp : 2245 <nl> + msgid " Insufficient funds " <nl> + msgstr " Nepakankamai monetų " <nl> + <nl> + # : . . / . . / . . / main . cpp : 3535 <nl> + msgid " Invalid bitcoin address " <nl> + msgstr " Klaidingas bitcoin adresas " <nl> + <nl> + # : . . / . . / . . / rpc . cpp : 963 <nl> + # : . . / . . / . . / rpc . cpp : 965 <nl> + # , c - format <nl> + msgid " To use the % s option " <nl> + msgstr " Kad naudoti % s parametrą " <nl> + <nl> + # : . . / . . / . . / rpc . cpp : 967 <nl> + # , c - format <nl> + msgid " " <nl> + " Warning : % s , you must set rpcpassword = < password > \ n " <nl> + " in the configuration file : % s \ n " <nl> + " If the file does not exist , create it with owner - readable - only file permissions . 
\ n " <nl> + msgstr " " <nl> + " Perspėjimas : % s , jūs privalote nustatyti rpcpassword = < slaptažodis > \ n " <nl> + " Konfiguracijos faile : % s \ n " <nl> + " Jei failas neegzistuoja , sukurkite jį su owner - readable - only failo teisėm . \ n " <nl> + <nl> + # : . . / . . / . . / rpc . cpp : 1100 <nl> + # , c - format <nl> + msgid " " <nl> + " You must set rpcpassword = < password > in the configuration file : \ n " <nl> + " % s \ n " <nl> + " If the file does not exist , create it with owner - readable - only file permissions . " <nl> + msgstr " " <nl> + " Jūs privalote nustatyti rpcpassword = < slaptažodis > Konfiguracijos faile : \ n " <nl> + " % s \ n " <nl> + " Jei failas neegzistuoja , sukurkite jį su owner - readable - only failo teisėm . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 202 <nl> + # , c - format <nl> + msgid " This transaction is over the size limit . You can still send it for a fee of % s , which goes to the nodes that process your transaction and helps to support the network . Do you want to pay the fee ? " <nl> + msgstr " Šis pervedimas viršija dydžio limitą . Jūs galite siūsti šią suma sumokėję % s mokestį , kuris bus skirtas nodams už persiuntimą ir padėti tinklui . Ar sutinkate pridėti ši mokestį ? " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 301 <nl> + msgid " Status " <nl> + msgstr " Būklė " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 302 <nl> + msgid " Date " <nl> + msgstr " Data " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 303 <nl> + msgid " Description " <nl> + msgstr " Apibūdinimas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 304 <nl> + msgid " Debit " <nl> + msgstr " Debetas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 305 <nl> + msgid " Credit " <nl> + msgstr " Kreditas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 511 <nl> + # , c - format <nl> + msgid " Open for % d blocks " <nl> + msgstr " Atidaryta % d blokams " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 513 <nl> + # , c - format <nl> + msgid " Open until % s " <nl> + msgstr " Atidaryta iki % s " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 519 <nl> + # , c - format <nl> + msgid " % d / offline ? " <nl> + msgstr " % d / neprisijunges ? " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 521 <nl> + # , c - format <nl> + msgid " % d / unconfirmed " <nl> + msgstr " % d / nepatvirtinta " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 523 <nl> + # , c - format <nl> + msgid " % d confirmations " <nl> + msgstr " % d patvirtinta " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 608 <nl> + msgid " Generated " <nl> + msgstr " Sugeneruota " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 616 <nl> + # , c - format <nl> + msgid " Generated ( % s matures in % d more blocks ) " <nl> + msgstr " Sugeneruota ( % s bus galima naudoti už % d blokų ) " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 620 <nl> + msgid " Generated - Warning : This block was not received by any other nodes and will probably not be accepted ! " <nl> + msgstr " Sugeneruota - Perspėjimas : Šio bloko negavo kiti nodai ir jis tikriausiai nebus priimtas ! " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 624 <nl> + msgid " Generated ( not accepted ) " <nl> + msgstr " Sugeneruota ( nepriimta ) " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 634 <nl> + msgid " From : " <nl> + msgstr " Nuo : " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 658 <nl> + msgid " Received with : " <nl> + msgstr " Priimta su : " <nl> + <nl> + # : . . / . . / . . / ui . 
cpp : 704 <nl> + msgid " Payment to yourself " <nl> + msgstr " Pervedimas sau " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 741 <nl> + msgid " To : " <nl> + msgstr " Kam : " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1049 <nl> + msgid " Generating " <nl> + msgstr " Generuojama " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1051 <nl> + msgid " ( not connected ) " <nl> + msgstr " ( neprijungta ) " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1054 <nl> + # , c - format <nl> + msgid " % d connections % d blocks % d transactions " <nl> + msgstr " % d Jungtys % d Blokai % d pervedimai " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1165 <nl> + # : . . / . . / . . / ui . cpp : 2560 <nl> + msgid " New Receiving Address " <nl> + msgstr " Naujas priėmimo adresas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1166 <nl> + # : . . / . . / . . / ui . cpp : 2561 <nl> + msgid " " <nl> + " You should use a new address for each payment you receive . \ n " <nl> + " \ n " <nl> + " Label " <nl> + msgstr " " <nl> + " Naudokite naują adresas kiekvienam pervedimui . \ n " <nl> + " \ n " <nl> + " Pavadinimas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1235 <nl> + msgid " < b > Status : < / b > " <nl> + msgstr " < b > Būklė : < / b > " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1240 <nl> + msgid " , has not been successfully broadcast yet " <nl> + msgstr " , dar nėra paskelbta " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1242 <nl> + # , c - format <nl> + msgid " , broadcast through % d node " <nl> + msgstr " , paskelbta per % d nodą " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1244 <nl> + # , c - format <nl> + msgid " , broadcast through % d nodes " <nl> + msgstr " , paskelbta per % d nodus " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1248 <nl> + msgid " < b > Date : < / b > " <nl> + msgstr " < b > Data : < / b > " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1256 <nl> + msgid " < b > Source : < / b > Generated < br > " <nl> + msgstr " < b > Iš : < / b > Sugeneruota < br > " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1262 <nl> + # : . . / . . / . . / ui . cpp : 1280 <nl> + msgid " < b > From : < / b > " <nl> + msgstr " < b > Nuo : < / b > " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1280 <nl> + msgid " unknown " <nl> + msgstr " nežinomas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1281 <nl> + # : . . / . . / . . / ui . cpp : 1305 <nl> + # : . . / . . / . . / ui . cpp : 1364 <nl> + msgid " < b > To : < / b > " <nl> + msgstr " < b > Kam : < / b > " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1284 <nl> + msgid " ( yours , label : " <nl> + msgstr " ( jūsų , pavadinimas : " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1286 <nl> + msgid " ( yours ) " <nl> + msgstr " ( jūsų ) " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1323 <nl> + # : . . / . . / . . / ui . cpp : 1335 <nl> + # : . . / . . / . . / ui . cpp : 1398 <nl> + msgid " < b > Credit : < / b > " <nl> + msgstr " < b > Kreditas : < / b > " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1325 <nl> + # , c - format <nl> + msgid " ( % s matures in % d more blocks ) " <nl> + msgstr " ( % s bus galima naudoti už % d blokų ) " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1327 <nl> + msgid " ( not accepted ) " <nl> + msgstr " ( nepriimta ) " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1372 <nl> + # : . . / . . / . . / ui . cpp : 1395 <nl> + msgid " < b > Debit : < / b > " <nl> + msgstr " < b > Debetas : < / b > " <nl> + <nl> + # : . . / . . / . . / ui . 
cpp : 1386 <nl> + msgid " < b > Transaction fee : < / b > " <nl> + msgstr " < b > Pervedimo mokestis : < / b > " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1402 <nl> + msgid " < b > Net amount : < / b > " <nl> + msgstr " < b > Neto suma : < / b > " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1409 <nl> + msgid " Message : " <nl> + msgstr " Žinutė : " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1412 <nl> + msgid " Generated coins must wait 120 blocks before they can be spent . When you generated this block , it was broadcast to the network to be added to the block chain . If it fails to get into the chain , it will change to \ " not accepted \ " and not be spendable . This may occasionally happen if another node generates a block within a few seconds of yours . " <nl> + msgstr " Turite palaukti 120 blokų , kol galėsite išleisti sugeneruotas monetas . Kai sugeneravotė šį blokai , jis buvo paskelbtas tinklui ir bus pridėtas į blokų grandinę . Jei šio bloko tinklas nepriims i grandinė , jis pasikeis į \ " nepriimtas \ " ir negali būti išleidžiamas . Tai gali kartais nutikti jei kitas nodas sugeneravo bloką keliom sekundėm anksčiau . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1593 <nl> + msgid " Cannot write autostart / bitcoin . desktop file " <nl> + msgstr " Nepavyksta įrašyti į autostart / bitcoin . desktop failą " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1629 <nl> + msgid " Main " <nl> + msgstr " Pagrindinis " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1634 <nl> + msgid " & Start Bitcoin on window system startup " <nl> + msgstr " & Paleisti Bitcoin kai pasileižia operacinė sistema " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1641 <nl> + msgid " & Minimize on close " <nl> + msgstr " & Sumažinti kai uždaroma " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1798 <nl> + # , c - format <nl> + msgid " version % s % s BETA " <nl> + msgstr " versija % s % s beta " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1884 <nl> + msgid " n / a " <nl> + msgstr " netaikoma " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1885 <nl> + msgid " Can ' t include a message when sending to a Bitcoin address " <nl> + msgstr " Negalite pridėti žinutės kai siunčiate i Bitcoin adresą " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1938 <nl> + msgid " Error in amount " <nl> + msgstr " Klaidinga suma " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1938 <nl> + # : . . / . . / . . / ui . cpp : 1943 <nl> + # : . . / . . / . . / ui . cpp : 1948 <nl> + # : . . / . . / . . / ui . cpp : 1974 <nl> + # : . . / . . / . . / uibase . cpp : 59 <nl> + msgid " Send Coins " <nl> + msgstr " Siūsti monetas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1943 <nl> + msgid " Amount exceeds your balance " <nl> + msgstr " Suma viršija jūsų balasą " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1948 <nl> + msgid " Total exceeds your balance when the " <nl> + msgstr " Bendra suma viršija jūsu balansą kai " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1948 <nl> + msgid " transaction fee is included " <nl> + msgstr " pervedimo suma įskaičiuota " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1964 <nl> + msgid " Payment sent " <nl> + msgstr " Mokestis išsiųstas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 1974 <nl> + msgid " Invalid address " <nl> + msgstr " Klaidingas adresas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2028 <nl> + # , c - format <nl> + msgid " Sending % s to % s " <nl> + msgstr " Siunčia % s į % s " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2101 <nl> + # : . . / . . / . . / ui . 
cpp : 2134 <nl> + msgid " CANCELLED " <nl> + msgstr " NUTRAUKTA " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2105 <nl> + msgid " Cancelled " <nl> + msgstr " Nutraukta " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2107 <nl> + msgid " Transfer cancelled " <nl> + msgstr " Siuntimas nutrauktas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2160 <nl> + msgid " Error : " <nl> + msgstr " Klaida : " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2179 <nl> + msgid " Connecting . . . " <nl> + msgstr " Jungiasi . . . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2184 <nl> + msgid " Unable to connect " <nl> + msgstr " Negali prisijungti " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2189 <nl> + msgid " Requesting public key . . . " <nl> + msgstr " Prašo \ " viešojo rakto \ " ( public key ) . . . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2201 <nl> + msgid " Received public key . . . " <nl> + msgstr " Gautas \ " viešasis raktas \ " ( public key ) . . . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2215 <nl> + msgid " Recipient is not accepting transactions sent by IP address " <nl> + msgstr " Gavėjas nepriima pervedimų siunčiamų iš IP adresų " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2217 <nl> + msgid " Transfer was not accepted " <nl> + msgstr " Persiuntimas nepriimtas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2226 <nl> + msgid " Invalid response received " <nl> + msgstr " Gautas klaidingas atsakymas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2241 <nl> + msgid " Creating transaction . . . " <nl> + msgstr " Kuriamas pervedimas . . . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2253 <nl> + # , c - format <nl> + msgid " This is an oversized transaction that requires a transaction fee of % s " <nl> + msgstr " Šis pervedimas yra per didelis ir reikalauja pervedimo mokesčio % s " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2255 <nl> + msgid " Transaction creation failed " <nl> + msgstr " Pervedimo sukurti nepavyko " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2262 <nl> + msgid " Transaction aborted " <nl> + msgstr " Pervedimas nutrauktas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2270 <nl> + msgid " Lost connection , transaction cancelled " <nl> + msgstr " Prarastas ryšys , pervedimas nutrauktas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2286 <nl> + msgid " Sending payment . . . " <nl> + msgstr " Monetos siunčiamos . . . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2292 <nl> + msgid " The transaction was rejected . This might happen if some of the coins in your wallet were already spent , such as if you used a copy of wallet . dat and coins were spent in the copy but not marked as spent here . " <nl> + msgstr " Pervedimas buvo atmestas . Tai galėjo atsitikti jei dalis jūsų monetų jau buvo išleista . Pvz . : Jei nodojote kopija wallet . dat kitame kliente ir monetos buvo išleistos ten , bet nepažymėtos kaip išleistos čia . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2301 <nl> + msgid " Waiting for confirmation . . . " <nl> + msgstr " Laukia patvirtinimo . . . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2319 <nl> + msgid " " <nl> + " The payment was sent , but the recipient was unable to verify it . \ n " <nl> + " The transaction is recorded and will credit to the recipient , \ n " <nl> + " but the comment information will be blank . " <nl> + msgstr " " <nl> + " Pervedimas išsiūstas , bet gavėjas negalėjo jo patvirtinti . 
\ n " <nl> + " Pervedimas įrašytas ir suma bus kredituojama gavėjui , \ n " <nl> + " bet komentaro informacija nebus rodoma . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2328 <nl> + msgid " Payment was sent , but an invalid response was received " <nl> + msgstr " Pervedimas išsiūstas , bet klaidingas atsakymas gautas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2334 <nl> + msgid " Payment completed " <nl> + msgstr " Pervedimas pavyko " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2365 <nl> + # : . . / . . / . . / ui . cpp : 2511 <nl> + # : . . / . . / . . / ui . cpp : 2548 <nl> + msgid " Name " <nl> + msgstr " Vardas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2366 <nl> + # : . . / . . / . . / ui . cpp : 2511 <nl> + # : . . / . . / . . / ui . cpp : 2548 <nl> + msgid " Address " <nl> + msgstr " Adresas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2368 <nl> + # : . . / . . / . . / ui . cpp : 2523 <nl> + msgid " Label " <nl> + msgstr " Pavadinimas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2369 <nl> + # : . . / . . / . . / uibase . cpp : 902 <nl> + msgid " Bitcoin Address " <nl> + msgstr " Bitcoin Adresas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2493 <nl> + msgid " This is one of your own addresses for receiving payments and cannot be entered in the address book . " <nl> + msgstr " Tai vienas iš jūsų adresų priimti pervedimams ir negali būti įvestas i adresų knygą " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2511 <nl> + # : . . / . . / . . / ui . cpp : 2517 <nl> + msgid " Edit Address " <nl> + msgstr " Pakeisti Adresą " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2523 <nl> + msgid " Edit Address Label " <nl> + msgstr " Pakeisti adreso pavadinimą " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2548 <nl> + # : . . / . . / . . / ui . cpp : 2554 <nl> + msgid " Add Address " <nl> + msgstr " Pridėti adresą " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2630 <nl> + msgid " Bitcoin " <nl> + msgstr " Bitcoin " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2632 <nl> + msgid " Bitcoin - Generating " <nl> + msgstr " Bitcoin - Generuoja " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2634 <nl> + msgid " Bitcoin - ( not connected ) " <nl> + msgstr " Bitcoin - ( neprijungta ) " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2711 <nl> + msgid " & Open Bitcoin " <nl> + msgstr " & Atidaryti Bitcoin " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2712 <nl> + msgid " O & ptions . . . " <nl> + msgstr " P & arametrai . . . " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2713 <nl> + # : . . / . . / . . / uibase . cpp : 32 <nl> + msgid " & Generate Coins " <nl> + msgstr " & Generuoti monetas " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2716 <nl> + # : . . / . . / . . / uibase . cpp : 25 <nl> + msgid " E & xit " <nl> + msgstr " & Išeiti " <nl> + <nl> + # : . . / . . / . . / ui . cpp : 2931 <nl> + msgid " Program has crashed and will terminate . " <nl> + msgstr " Programa neveikia ir bus uždaryta " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 28 <nl> + msgid " & File " <nl> + msgstr " & Byla " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 36 <nl> + msgid " & Your Receiving Addresses . . . " <nl> + msgstr " & Jūsų priimantys adresai . . . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 40 <nl> + msgid " & Options . . . " <nl> + msgstr " & Parametrai " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 43 <nl> + msgid " & Settings " <nl> + msgstr " N & ustatymai " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 47 <nl> + msgid " & About . . . 
" <nl> + msgstr " & О Apie . . . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 50 <nl> + msgid " & Help " <nl> + msgstr " & Pagalba " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 60 <nl> + msgid " Address Book " <nl> + msgstr " Adresų knyga " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 75 <nl> + msgid " Your Bitcoin Address : " <nl> + msgstr " Jūsų Bitcoin Adresas : " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 82 <nl> + msgid " & New . . . " <nl> + msgstr " & Naujas . . . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 85 <nl> + # : . . / . . / . . / uibase . cpp : 845 <nl> + # : . . / . . / . . / uibase . cpp : 948 <nl> + msgid " & Copy to Clipboard " <nl> + msgstr " & Kopijuoti " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 99 <nl> + msgid " Balance : " <nl> + msgstr " Balansas : " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 115 <nl> + msgid " All " <nl> + msgstr " Visi " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 115 <nl> + msgid " Sent " <nl> + msgstr " Išsiūsta " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 115 <nl> + msgid " Received " <nl> + msgstr " Priimta " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 115 <nl> + msgid " In Progress " <nl> + msgstr " Progrese " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 136 <nl> + msgid " All Transactions " <nl> + msgstr " Visi pervedimai " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 147 <nl> + msgid " Sent / Received " <nl> + msgstr " Išsiųsti / Priimti " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 158 <nl> + msgid " Sent " <nl> + msgstr " Išsiūsti " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 169 <nl> + msgid " Received " <nl> + msgstr " Priimti " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 312 <nl> + # : . . / . . / . . / uibase . cpp : 473 <nl> + # : . . / . . / . . / uibase . cpp : 574 <nl> + # : . . / . . / . . / uibase . cpp : 787 <nl> + # : . . / . . / . . / uibase . cpp : 848 <nl> + # : . . / . . / . . / uibase . cpp : 957 <nl> + # : . . / . . / . . / uibase . cpp : 1046 <nl> + msgid " OK " <nl> + msgstr " Gerai " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 355 <nl> + msgid " Optional transaction fee you give to the nodes that process your transactions . " <nl> + msgstr " Pasirinktinas pervedimo mokestis , kurį duodate tinklo nodams . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 364 <nl> + msgid " Transaction fee : " <nl> + msgstr " Pervedimo mokestis : " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 380 <nl> + msgid " & Limit coin generation to " <nl> + msgstr " & Apriboti monetų generavima iki " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 387 <nl> + msgid " processors " <nl> + msgstr " procesorių " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 393 <nl> + msgid " & Start Bitcoin on system startup " <nl> + msgstr " & Paleisti Bitcoin kai pasileidžia operacinė sistema " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 397 <nl> + msgid " & Minimize to the tray instead of the taskbar " <nl> + msgstr " & Sumažinti į \ " tray \ " vietoj \ " taskbar \ " " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 401 <nl> + msgid " M & inimize to the tray on close " <nl> + msgstr " Su & mažinti į \ " tray \ " kai uždaroma " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 408 <nl> + msgid " & Connect through socks4 proxy : " <nl> + msgstr " & Prisijungti per socks4 proksį : " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 420 <nl> + msgid " Proxy & IP : " <nl> + msgstr " P & roksio IP : " <nl> + <nl> + # : . . 
/ . . / . . / uibase . cpp : 428 <nl> + msgid " & Port : " <nl> + msgstr " & Portas " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 450 <nl> + msgid " / / [ don ' t translate ] Test panel 2 for future expansion " <nl> + msgstr " " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 454 <nl> + msgid " / / [ don ' t translate ] Let ' s not start multiple pages until the first page is filled up " <nl> + msgstr " " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 476 <nl> + # : . . / . . / . . / uibase . cpp : 729 <nl> + # : . . / . . / . . / uibase . cpp : 792 <nl> + # : . . / . . / . . / uibase . cpp : 851 <nl> + # : . . / . . / . . / uibase . cpp : 960 <nl> + # : . . / . . / . . / uibase . cpp : 1049 <nl> + msgid " Cancel " <nl> + msgstr " Nutraukti " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 479 <nl> + msgid " & Apply " <nl> + msgstr " & Nustatyti " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 540 <nl> + msgid " Bitcoin " <nl> + msgstr " Bitcoin " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 546 <nl> + msgid " version " <nl> + msgstr " versija " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 557 <nl> + msgid " " <nl> + " Copyright ( c ) 2009 - 2010 Bitcoin Developers \ n " <nl> + " \ n " <nl> + " This is experimental software . \ n " <nl> + " \ n " <nl> + " Distributed under the MIT / X11 software license , see the accompanying file \ n " <nl> + " license . txt or http : / / www . opensource . org / licenses / mit - license . php . \ n " <nl> + " \ n " <nl> + " This product includes software developed by the OpenSSL Project for use in the \ n " <nl> + " OpenSSL Toolkit ( http : / / www . openssl . org / ) and cryptographic software written by \ n " <nl> + " Eric Young ( eay @ cryptsoft . com ) and UPnP software written by Thomas Bernard . " <nl> + msgstr " " <nl> + " Autorinė teisė ( c ) 2009 - 2010 Bitcoin Developers \ n " <nl> + " \ n " <nl> + " Ši programa yra eksperimentinė . \ n " <nl> + " \ n " <nl> + " Išleista ir teisės saugomos pagal MIT / X11 programų licenziją , kuri detaliau aprašyra faile \ n " <nl> + " license . txt ir http : / / www . opensource . org / licenses / mit - license . php . \ n " <nl> + " \ n " <nl> + " Šis produktas turi programą iš OpenSSL projekto , kuri naudojamas \ n " <nl> + " OpenSSL Toolkit ( http : / / www . openssl . org / ) , kriptografinę programą parašyta \ n " <nl> + " Eric Young ( eay @ cryptsoft . com ) ir UPnP programą parašyta Thomas Bernard . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 613 <nl> + msgid " Enter a Bitcoin address ( e . g . 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJED9L ) or IP address ( e . g . 123 . 45 . 6 . 7 ) " <nl> + msgstr " Įveskite Bitcoin adresą ( pvz . : 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJED9L ) arba IP adresą ( pvz . : 123 . 45 . 6 . 7 ) " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 627 <nl> + msgid " Pay & To : " <nl> + msgstr " & Kam : " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 642 <nl> + msgid " & Paste " <nl> + msgstr " & Įrašyti " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 645 <nl> + msgid " Address & Book . . . " <nl> + msgstr " & Adresų knyga . . . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 652 <nl> + msgid " & Amount : " <nl> + msgstr " S & uma : " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 662 <nl> + msgid " T & ransfer : " <nl> + msgstr " & Pervedimas : " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 668 <nl> + msgid " Standard " <nl> + msgstr " Standartinis " <nl> + <nl> + # : . . / . . / . . / uibase . 
cpp : 690 <nl> + msgid " & From : " <nl> + msgstr " & Nuo : " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 707 <nl> + msgid " & Message : " <nl> + msgstr " Ž & inutė : " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 724 <nl> + msgid " & Send " <nl> + msgstr " & Siūsti " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 776 <nl> + msgid " " <nl> + " \ n " <nl> + " \ n " <nl> + " Connecting . . . " <nl> + msgstr " " <nl> + " \ n " <nl> + " \ n " <nl> + " Jungiasi . . . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 826 <nl> + msgid " These are your Bitcoin addresses for receiving payments . You may want to give a different one to each sender so you can keep track of who is paying you . The highlighted address is displayed in the main window . " <nl> + msgstr " Tai yra jūsų Bitcoin adresai priimti pervedimus . Galite duoti skirtinga adresa kiekvienam siuntėjui , kad žinotumėte kas jums moka . Pažymėtas adresas bus rodomas pagrindiniame lange . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 839 <nl> + # : . . / . . / . . / uibase . cpp : 951 <nl> + msgid " & Edit . . . " <nl> + msgstr " & Pakeisti . . . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 842 <nl> + # : . . / . . / . . / uibase . cpp : 954 <nl> + msgid " & New Address . . . " <nl> + msgstr " & Naujas Adresas . . . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 914 <nl> + msgid " Sending " <nl> + msgstr " Siunčiama " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 922 <nl> + msgid " These are your Bitcoin addresses for receiving payments . You can give a different one to each sender to keep track of who is paying you . The highlighted address will be displayed in the main window . " <nl> + msgstr " Tai yra jūsų Bitcoin adresai priimti pervedimus . Galite duoti skirtinga adresa kiekvienam siuntėjui , kad žinotumėte kas jums moka . Pažymėtas adresas bus rodomas pagrindiniame lange . " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 935 <nl> + msgid " Receiving " <nl> + msgstr " Priėmimo " <nl> + <nl> + # : . . / . . / . . / uibase . cpp : 945 <nl> + msgid " & Delete " <nl> + msgstr " & Ištrinti " <nl> + <nl> + # : . . / . . / . . / util . cpp : 807 <nl> + msgid " Warning : Please check that your computer ' s date and time are correct . If your clock is wrong Bitcoin will not work properly . " <nl> + msgstr " Perspėjimas : Prašome patikrinti kompiuterio laiką ir datą . Jei laikas neteisingai nustatytas , Bitcoin neveiks . " <nl> + <nl> + # : . . / . . / . . / uibase . h : 149 <nl> + msgid " Transaction Details " <nl> + msgstr " Pervedimų detalės " <nl> + <nl> + # : . . / . . / . . / uibase . h : 202 <nl> + msgid " Options " <nl> + msgstr " Parametrai " <nl> + <nl> + # : . . / . . / . . / uibase . h : 230 <nl> + msgid " About Bitcoin " <nl> + msgstr " Apie Bitcoin " <nl> + <nl> + # : . . / . . / . . / uibase . h : 340 <nl> + msgid " Your Bitcoin Addresses " <nl> + msgstr " Jūsų Bitcoin Adresas " <nl> + <nl> + # ~ msgid " " <nl> + # ~ " It ' s good policy to use a new address for each payment you receive . \ n " <nl> + # ~ " \ n " <nl> + # ~ " Label " <nl> + # ~ msgstr " " <nl> + # ~ " Неплохо будет использовать новый адрес для каждого получаемого платежа . \ n " <nl> + # ~ " \ n " <nl> + # ~ " Метка " <nl> + <nl> + # ~ msgid " Will appear as \ " From : Unknown \ " " <nl> + # ~ msgstr " Будет отображаться как \ " От : Аноним \ " " <nl>
msg:  Add Lithuanian translation.
repo: bitcoin/bitcoin
sha:  95f5b3677241bbbdc954b148f104ff272b384a0e
time: 2011-05-13T23:07:22Z
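The `diff` fields are stored pre-tokenized: newlines appear as the `<nl>` token, the unified-diff file headers `--- `/`+++ ` appear as `mmm `/`ppp ` (compare `mmm / dev / null <nl> ppp b / locale / lt / LC_MESSAGES / bitcoin . po` in the row above), and punctuation is padded with spaces. Here is a best-effort decoder for display, under those assumptions; the punctuation padding is deliberately left in place, because the original spacing inside string literals cannot be recovered reliably.

```python
import re

def render_diff(tokenized: str) -> str:
    """Best-effort readable form of a tokenized `diff` field.

    Assumes the conventions visible in this dump: `<nl>` separates
    patch lines, and `mmm `/`ppp ` stand in for the `--- `/`+++ `
    file headers. Spaces padded around punctuation are kept, since
    the original spacing inside strings is not recoverable.
    """
    out = []
    for line in re.split(r"\s*<nl>\s*", tokenized):
        if line.startswith("mmm "):
            line = "---" + line[3:]
        elif line.startswith("ppp "):
            line = "+++" + line[3:]
        out.append(line)
    return "\n".join(out)
```

Applied to a row's `diff`, this restores one patch line per output line, e.g. `mmm / dev / null` becomes `--- / dev / null`. The second row's tokenized diff follows directly below; its trailing `msg`/`repo`/`sha`/`time` fields are cut off in this excerpt.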
mmm a / WORKSPACE <nl> ppp b / WORKSPACE <nl> workspace ( name = " org_tensorflow " ) <nl> <nl> http_archive ( <nl> name = " io_bazel_rules_closure " , <nl> - sha256 = " edc91f556b762fc5212d1050d00b12e40dd0b0b1c1d5d96886b59e9a30a6cae4 " , <nl> - strip_prefix = " rules_closure - 3f07fb6a58870afbb36051bd5d54da4479561cc6 " , <nl> + sha256 = " bc41b80486413aaa551860fc37471dbc0666e1dbb5236fb6177cb83b0c105846 " , <nl> + strip_prefix = " rules_closure - dec425a4ff3faf09a56c85d082e4eed05d8ce38f " , <nl> urls = [ <nl> - " http : / / mirror . bazel . build / github . com / bazelbuild / rules_closure / archive / 3f07fb6a58870afbb36051bd5d54da4479561cc6 . tar . gz " , # 2017 - 05 - 31 <nl> - " https : / / github . com / bazelbuild / rules_closure / archive / 3f07fb6a58870afbb36051bd5d54da4479561cc6 . tar . gz " , <nl> + " http : / / mirror . bazel . build / github . com / bazelbuild / rules_closure / archive / dec425a4ff3faf09a56c85d082e4eed05d8ce38f . tar . gz " , # 2017 - 06 - 02 <nl> + " https : / / github . com / bazelbuild / rules_closure / archive / dec425a4ff3faf09a56c85d082e4eed05d8ce38f . tar . gz " , <nl> ] , <nl> ) <nl> <nl> mmm a / configure <nl> ppp b / configure <nl> if is_windows ; then <nl> write_action_env_to_bazelrc " CUDA_PATH " " $ CUDA_PATH " <nl> write_action_env_to_bazelrc " CUDA_COMPUTE_CAPABILITIES " " $ CUDA_COMPUTE_CAPABILITIES " <nl> write_action_env_to_bazelrc " NO_WHOLE_ARCHIVE_OPTION " " 1 " <nl> + write_to_bazelrc " build - - config = win - cuda " <nl> + write_to_bazelrc " test - - config = win - cuda " <nl> + else <nl> + # If CUDA is enabled , always use GPU during build and test . <nl> + write_to_bazelrc " build - - config = cuda " <nl> + write_to_bazelrc " test - - config = cuda " <nl> fi <nl> <nl> # end of if " $ TF_NEED_CUDA " = = " 1 " <nl> mmm a / tensorflow / BUILD <nl> ppp b / tensorflow / BUILD <nl> filegroup ( <nl> " / / tensorflow / contrib / data / python / framework : all_files " , <nl> " / / tensorflow / contrib / data / python / kernel_tests : all_files " , <nl> " / / tensorflow / contrib / data / python / ops : all_files " , <nl> + " / / tensorflow / contrib / data / python / util : all_files " , <nl> " / / tensorflow / contrib / distributions : all_files " , <nl> " / / tensorflow / contrib / factorization : all_files " , <nl> " / / tensorflow / contrib / factorization / kernels : all_files " , <nl> filegroup ( <nl> " / / tensorflow / tensorboard / components / tf_graph_controls / demo : all_files " , <nl> " / / tensorflow / tensorboard / components / tf_graph_dashboard : all_files " , <nl> " / / tensorflow / tensorboard / components / tf_graph_dashboard / demo : all_files " , <nl> + " / / tensorflow / tensorboard / components / tf_graph_debugger_data_card : all_files " , <nl> + " / / tensorflow / tensorboard / components / tf_graph_debugger_data_card / demo : all_files " , <nl> " / / tensorflow / tensorboard / components / tf_graph_info : all_files " , <nl> " / / tensorflow / tensorboard / components / tf_graph_info / demo : all_files " , <nl> " / / tensorflow / tensorboard / components / tf_graph_loader : all_files " , <nl> mmm a / tensorflow / cc / saved_model / loader . cc <nl> ppp b / tensorflow / cc / saved_model / loader . cc <nl> limitations under the License . <nl> # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / core / platform / protobuf_internal . h " <nl> # include " tensorflow / core / protobuf / saved_model . pb . h " <nl> + # include " tensorflow / core / protobuf / saver . pb . 
h " <nl> # include " tensorflow / core / public / session . h " <nl> # include " tensorflow / core / public / session_options . h " <nl> # include " tensorflow / core / util / tensor_bundle / naming . h " <nl> mmm a / tensorflow / compiler / aot / BUILD <nl> ppp b / tensorflow / compiler / aot / BUILD <nl> cc_library ( <nl> " / / tensorflow / compiler / xla / legacy_flags : compiler_functor_flags " , <nl> " / / tensorflow / compiler / xla / legacy_flags : cpu_compiler_flags " , <nl> " / / tensorflow / compiler / xla / legacy_flags : cpu_runtime_flags " , <nl> + " / / tensorflow / compiler / xla / legacy_flags : debug_options_flags " , <nl> " / / tensorflow / compiler / xla / legacy_flags : hlo_graph_dumper_flags " , <nl> - " / / tensorflow / compiler / xla / legacy_flags : hlo_pass_pipeline_flags " , <nl> " / / tensorflow / compiler / xla / legacy_flags : llvm_util_flags " , <nl> " / / tensorflow / compiler / xla / legacy_flags : service_flags " , <nl> " / / tensorflow / compiler / xla / legacy_flags : util_flags " , <nl> mmm a / tensorflow / compiler / aot / tfcompile_main . cc <nl> ppp b / tensorflow / compiler / aot / tfcompile_main . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / legacy_flags / compiler_functor_flags . h " <nl> # include " tensorflow / compiler / xla / legacy_flags / cpu_compiler_flags . h " <nl> # include " tensorflow / compiler / xla / legacy_flags / cpu_runtime_flags . h " <nl> + # include " tensorflow / compiler / xla / legacy_flags / debug_options_flags . h " <nl> # include " tensorflow / compiler / xla / legacy_flags / hlo_graph_dumper_flags . h " <nl> - # include " tensorflow / compiler / xla / legacy_flags / hlo_pass_pipeline_flags . h " <nl> # include " tensorflow / compiler / xla / legacy_flags / llvm_util_flags . h " <nl> # include " tensorflow / compiler / xla / legacy_flags / service_flags . h " <nl> # include " tensorflow / compiler / xla / legacy_flags / util_flags . h " <nl> int main ( int argc , char * * argv ) { <nl> xla : : legacy_flags : : AppendCpuCompilerFlags ( & flag_list ) ; <nl> xla : : legacy_flags : : AppendCpuRuntimeFlags ( & flag_list ) ; <nl> xla : : legacy_flags : : AppendHloGraphDumperFlags ( & flag_list ) ; <nl> - xla : : legacy_flags : : AppendHloPassPipelineFlags ( & flag_list ) ; <nl> + xla : : legacy_flags : : AppendDebugOptionsFlags ( & flag_list ) ; <nl> xla : : legacy_flags : : AppendLlvmUtilFlags ( & flag_list ) ; <nl> xla : : legacy_flags : : AppendServiceFlags ( & flag_list ) ; <nl> xla : : legacy_flags : : AppendUtilFlags ( & flag_list ) ; <nl> mmm a / tensorflow / compiler / tf2xla / xla_compiler . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_compiler . cc <nl> bool XlaCompiler : : Argument : : operator = = ( <nl> } <nl> <nl> XlaCompiler : : XlaCompiler ( XlaCompiler : : Options options ) <nl> - : options_ ( std : : move ( options ) ) , <nl> + : options_ ( options ) , <nl> initialization_status_ ( Status : : OK ( ) ) , <nl> next_step_id_ ( 1 ) , <nl> device_ ( <nl> mmm a / tensorflow / compiler / xla / client / computation . cc <nl> ppp b / tensorflow / compiler / xla / client / computation . cc <nl> Computation : : Computation ( ServiceInterface * parent , <nl> : handle_ ( handle ) , parent_ ( parent ) { } <nl> <nl> Computation : : Computation ( Computation & & computation ) <nl> - : handle_ ( computation . handle_ ) , parent_ ( computation . parent_ ) { <nl> + : handle_ ( std : : move ( computation . handle_ ) ) , parent_ ( computation . parent_ ) { <nl> computation . 
ResetWithoutFreeing ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / legacy_flags / BUILD <nl> ppp b / tensorflow / compiler / xla / legacy_flags / BUILD <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " debug_options_flags " , <nl> + srcs = [ " debug_options_flags . cc " ] , <nl> + hdrs = [ " debug_options_flags . h " ] , <nl> + deps = <nl> + [ <nl> + " : parse_flags_from_env " , <nl> + " / / tensorflow / compiler / xla : types " , <nl> + " / / tensorflow / compiler / xla : xla_proto " , <nl> + " / / tensorflow / core : framework_internal " , <nl> + " / / tensorflow / core : lib " , <nl> + ] , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " cpu_compiler_flags " , <nl> srcs = [ " cpu_compiler_flags . cc " ] , <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> - cc_library ( <nl> - name = " hlo_pass_pipeline_flags " , <nl> - srcs = [ " hlo_pass_pipeline_flags . cc " ] , <nl> - hdrs = [ " hlo_pass_pipeline_flags . h " ] , <nl> - deps = [ <nl> - " : parse_flags_from_env " , <nl> - " / / tensorflow / compiler / xla : types " , <nl> - " / / tensorflow / core : framework_internal " , <nl> - " / / tensorflow / core : lib " , <nl> - ] , <nl> - ) <nl> - <nl> cc_library ( <nl> name = " alias_analysis_flags " , <nl> srcs = [ " alias_analysis_flags . cc " ] , <nl> new file mode 100644 <nl> index 0000000000000 . . 0211462cb1a9a <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / xla / legacy_flags / debug_options_flags . cc <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / compiler / xla / legacy_flags / debug_options_flags . h " <nl> + <nl> + # include < mutex > / / NOLINT ( build / c + + 11 ) : only using std : : call_once , not mutex . <nl> + # include < vector > <nl> + # include " tensorflow / compiler / xla / legacy_flags / parse_flags_from_env . h " <nl> + # include " tensorflow / core / lib / strings / str_util . h " <nl> + <nl> + namespace xla { <nl> + namespace legacy_flags { <nl> + <nl> + struct DebugOptionsFlags { <nl> + string xla_generate_hlo_graph ; <nl> + <nl> + string xla_disable_hlo_passes ; <nl> + } ; <nl> + <nl> + namespace { <nl> + <nl> + DebugOptionsFlags * flag_values ; <nl> + std : : vector < tensorflow : : Flag > * flag_objects ; <nl> + std : : once_flag flags_init ; <nl> + <nl> + / / Allocates flag_values and flag_objects ; this function must not be called more <nl> + / / than once - its call done via call_once . 
<nl> + void AllocateFlags ( ) { <nl> + flag_values = new DebugOptionsFlags ; <nl> + flag_values - > xla_generate_hlo_graph = " " ; <nl> + flag_values - > xla_disable_hlo_passes = " " ; <nl> + <nl> + flag_objects = new std : : vector < tensorflow : : Flag > ( <nl> + { tensorflow : : Flag ( <nl> + " xla_generate_hlo_graph " , & flag_values - > xla_generate_hlo_graph , <nl> + " HLO modules matching this regex will be dumped to a . dot file " <nl> + " throughout various stages in compilation . " ) , <nl> + <nl> + tensorflow : : Flag ( <nl> + " xla_disable_hlo_passes " , & flag_values - > xla_disable_hlo_passes , <nl> + " Comma - separated list of HLO passes to be disabled . These names " <nl> + " must " <nl> + " exactly match the passes ' names ; no whitespace around commas . " ) } ) ; <nl> + ParseFlagsFromEnv ( * flag_objects ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> + void AppendDebugOptionsFlags ( std : : vector < tensorflow : : Flag > * flag_list ) { <nl> + std : : call_once ( flags_init , & AllocateFlags ) ; <nl> + flag_list - > insert ( flag_list - > end ( ) , flag_objects - > begin ( ) , <nl> + flag_objects - > end ( ) ) ; <nl> + } <nl> + <nl> + xla : : DebugOptions GetDebugOptionsFromFlags ( ) { <nl> + std : : call_once ( flags_init , & AllocateFlags ) ; <nl> + <nl> + DebugOptions options ; <nl> + <nl> + options . set_xla_generate_hlo_graph ( flag_values - > xla_generate_hlo_graph ) ; <nl> + <nl> + std : : vector < string > disabled_passes = <nl> + tensorflow : : str_util : : Split ( flag_values - > xla_disable_hlo_passes , ' , ' ) ; <nl> + for ( const auto & passname : disabled_passes ) { <nl> + options . add_xla_disable_hlo_passes ( passname ) ; <nl> + } <nl> + <nl> + return options ; <nl> + } <nl> + <nl> + } / / namespace legacy_flags <nl> + } / / namespace xla <nl> new file mode 100644 <nl> index 0000000000000 . . d0ef8e66ab0bc <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / xla / legacy_flags / debug_options_flags . h <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef THIRD_PARTY_TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_DEBUG_OPTIONS_FLAGS_H_ <nl> + # define THIRD_PARTY_TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_DEBUG_OPTIONS_FLAGS_H_ <nl> + <nl> + # include < vector > <nl> + <nl> + # include " tensorflow / compiler / xla / xla . pb . h " <nl> + # include " tensorflow / core / util / command_line_flags . h " <nl> + <nl> + namespace xla { <nl> + namespace legacy_flags { <nl> + <nl> + / / Appends flag definitions for debug options to flag_list . <nl> + void AppendDebugOptionsFlags ( std : : vector < tensorflow : : Flag > * flag_list ) ; <nl> + <nl> + / / Fetches a DebugOptions proto message from flags provided to the program . 
<nl> + / / Flags must be registered with the flags parser using AppendDebugOptionsFlags <nl> + / / first . <nl> + xla : : DebugOptions GetDebugOptionsFromFlags ( ) ; <nl> + <nl> + } / / namespace legacy_flags <nl> + } / / namespace xla <nl> + <nl> + # endif / / THIRD_PARTY_TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_DEBUG_OPTIONS_FLAGS_H_ <nl> deleted file mode 100644 <nl> index edc04d51a70f2 . . 0000000000000 <nl> mmm a / tensorflow / compiler / xla / legacy_flags / hlo_pass_pipeline_flags . cc <nl> ppp / dev / null <nl> <nl> - / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - / / Legacy flags for XLA ' s hlo_pass_pipeline module . <nl> - <nl> - # include < mutex > / / NOLINT ( build / c + + 11 ) : only using std : : call_once , not mutex . <nl> - # include < vector > <nl> - <nl> - # include " tensorflow / compiler / xla / legacy_flags / hlo_pass_pipeline_flags . h " <nl> - # include " tensorflow / compiler / xla / legacy_flags / parse_flags_from_env . h " <nl> - # include " tensorflow / core / platform / types . h " <nl> - # include " tensorflow / core / util / command_line_flags . h " <nl> - <nl> - namespace xla { <nl> - namespace legacy_flags { <nl> - <nl> - / / Pointers to the parsed value of the flags and flag descriptors , initialized <nl> - / / via flags_init . <nl> - static HloPassPipelineFlags * flags ; <nl> - static std : : vector < tensorflow : : Flag > * flag_list ; <nl> - static std : : once_flag flags_init ; <nl> - <nl> - / / Allocate * flags . Called via call_once ( & flags_init , . . . ) . <nl> - static void AllocateFlags ( ) { <nl> - flags = new HloPassPipelineFlags ; <nl> - flags - > xla_disable_hlo_passes = " " ; <nl> - flag_list = new std : : vector < tensorflow : : Flag > ( { <nl> - tensorflow : : Flag ( " xla_disable_hlo_passes " , & flags - > xla_disable_hlo_passes , <nl> - " Comma - separated list of HLO passes to disable . " ) , <nl> - } ) ; <nl> - ParseFlagsFromEnv ( * flag_list ) ; <nl> - } <nl> - <nl> - / / Append to * append_to flag definitions associated with XLA ' s hlo_pass_pipeline <nl> - / / module . <nl> - void AppendHloPassPipelineFlags ( std : : vector < tensorflow : : Flag > * append_to ) { <nl> - std : : call_once ( flags_init , & AllocateFlags ) ; <nl> - append_to - > insert ( append_to - > end ( ) , flag_list - > begin ( ) , flag_list - > end ( ) ) ; <nl> - } <nl> - <nl> - / / Return a pointer to the HloPassPipelineFlags struct ; <nl> - / / repeated calls return the same pointer . <nl> - / / This should be called only after Flags : : Parse ( ) has returned . 
<nl> - HloPassPipelineFlags * GetHloPassPipelineFlags ( ) { <nl> - std : : call_once ( flags_init , & AllocateFlags ) ; <nl> - return flags ; <nl> - } <nl> - <nl> - } / / namespace legacy_flags <nl> - } / / namespace xla <nl> deleted file mode 100644 <nl> index 520759bbf0d2f . . 0000000000000 <nl> mmm a / tensorflow / compiler / xla / legacy_flags / hlo_pass_pipeline_flags . h <nl> ppp / dev / null <nl> <nl> - / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> - <nl> - Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - you may not use this file except in compliance with the License . <nl> - You may obtain a copy of the License at <nl> - <nl> - http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - <nl> - Unless required by applicable law or agreed to in writing , software <nl> - distributed under the License is distributed on an " AS IS " BASIS , <nl> - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - See the License for the specific language governing permissions and <nl> - limitations under the License . <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> - <nl> - # ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_PASS_PIPELINE_FLAGS_H_ <nl> - # define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_PASS_PIPELINE_FLAGS_H_ <nl> - <nl> - / / Legacy flags for XLA ' s hlo_pass_pipeline module . <nl> - <nl> - # include < vector > <nl> - <nl> - # include " tensorflow / compiler / xla / types . h " <nl> - # include " tensorflow / core / platform / types . h " <nl> - # include " tensorflow / core / util / command_line_flags . h " <nl> - <nl> - namespace xla { <nl> - namespace legacy_flags { <nl> - <nl> - / / Append to * flag_list flag definitions associated with XLA ' s hlo_pass_pipeline <nl> - / / module . <nl> - void AppendHloPassPipelineFlags ( std : : vector < tensorflow : : Flag > * flag_list ) ; <nl> - <nl> - / / The values of flags associated with XLA ' s hlo_pass_pipeline module . <nl> - typedef struct { <nl> - / / Comma - separated list of HLO passes to disable . <nl> - string xla_disable_hlo_passes ; <nl> - } HloPassPipelineFlags ; <nl> - <nl> - / / Return a pointer to the HloPassPipelineFlags struct ; <nl> - / / repeated calls return the same pointer . <nl> - / / This should be called only after Flags : : Parse ( ) has returned . <nl> - HloPassPipelineFlags * GetHloPassPipelineFlags ( ) ; <nl> - <nl> - } / / namespace legacy_flags <nl> - } / / namespace xla <nl> - <nl> - # endif / / TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_PASS_PIPELINE_FLAGS_H_ <nl> mmm a / tensorflow / compiler / xla / service / BUILD <nl> ppp b / tensorflow / compiler / xla / service / BUILD <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " hlo_dataflow_analysis " , <nl> + srcs = [ <nl> + " hlo_dataflow_analysis . cc " , <nl> + ] , <nl> + hdrs = [ <nl> + " hlo_dataflow_analysis . 
h " , <nl> + ] , <nl> + deps = [ <nl> + " : call_graph " , <nl> + " : hlo " , <nl> + " : liveness_util " , <nl> + " / / tensorflow / compiler / xla : shape_tree " , <nl> + " / / tensorflow / compiler / xla : shape_util " , <nl> + " / / tensorflow / compiler / xla : status " , <nl> + " / / tensorflow / compiler / xla : statusor " , <nl> + " / / tensorflow / compiler / xla : types " , <nl> + " / / tensorflow / compiler / xla : util " , <nl> + " / / tensorflow / compiler / xla : xla_data_proto " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : lib_internal " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " hlo_dataflow_analysis_test " , <nl> + srcs = [ " hlo_dataflow_analysis_test . cc " ] , <nl> + deps = [ <nl> + " : hlo " , <nl> + " : hlo_dataflow_analysis " , <nl> + " : hlo_matchers " , <nl> + " : instruction_fusion " , <nl> + " / / tensorflow / compiler / xla : literal_util " , <nl> + " / / tensorflow / compiler / xla : shape_util " , <nl> + " / / tensorflow / compiler / xla : status_macros " , <nl> + " / / tensorflow / compiler / xla : test " , <nl> + " / / tensorflow / compiler / xla : test_helpers " , <nl> + " / / tensorflow / compiler / xla : xla_data_proto " , <nl> + " / / tensorflow / compiler / xla / tests : hlo_test_base " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : test " , <nl> + " / / tensorflow / core : test_main " , <nl> + ] , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " tuple_points_to_analysis " , <nl> srcs = [ <nl> cc_library ( <nl> " / / tensorflow / compiler / xla : statusor " , <nl> " / / tensorflow / compiler / xla : types " , <nl> " / / tensorflow / compiler / xla : util " , <nl> - " / / tensorflow / compiler / xla / legacy_flags : hlo_pass_pipeline_flags " , <nl> " / / tensorflow / core : lib " , <nl> ] , <nl> ) <nl> mmm a / tensorflow / compiler / xla / service / algebraic_simplifier . cc <nl> ppp b / tensorflow / compiler / xla / service / algebraic_simplifier . cc <nl> Status AlgebraicSimplifierVisitor : : HandleConvolution ( <nl> / / bitcasts_ = = true . <nl> <nl> / / TODO ( cwhipkey ) : b / 31337498 , make this layout insensitive . <nl> - if ( ! is_layout_sensitive_ ) return Status : : OK ( ) ; <nl> + if ( ! is_layout_sensitive_ ) { <nl> + return Status : : OK ( ) ; <nl> + } <nl> <nl> const ConvolutionDimensionNumbers & dnums = <nl> convolution - > convolution_dimension_numbers ( ) ; <nl> mmm a / tensorflow / compiler / xla / service / backend . cc <nl> ppp b / tensorflow / compiler / xla / service / backend . cc <nl> const Eigen : : ThreadPoolDevice * Backend : : eigen_intra_op_thread_pool_device ( ) <nl> } <nl> <nl> tensorflow : : thread : : ThreadPool * Backend : : eigen_intra_op_thread_pool ( ) const { <nl> - if ( intra_op_thread_pool_wrapper_ = = nullptr ) return nullptr ; <nl> + if ( intra_op_thread_pool_wrapper_ = = nullptr ) { <nl> + return nullptr ; <nl> + } <nl> return intra_op_thread_pool_wrapper_ - > pool . get ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / service / buffer_assignment . cc <nl> ppp b / tensorflow / compiler / xla / service / buffer_assignment . 
cc <nl> void BufferAssigner : : BuildColocatedBufferSets ( <nl> const HloOpcode opcode = instruction - > opcode ( ) ; <nl> if ( opcode = = HloOpcode : : kWhile ) { <nl> const HloInstruction * while_hlo = instruction ; <nl> - TF_CHECK_OK ( ShapeUtil : : ForEachSubshape ( <nl> + ShapeUtil : : ForEachSubshape ( <nl> while_hlo - > shape ( ) , <nl> [ this , while_hlo , & points_to_analysis , & buffer_liveness , <nl> buffer_size , computation , colocated_buffer_sets ] ( <nl> void BufferAssigner : : BuildColocatedBufferSets ( <nl> AddWhileSetToColocatedBufferSets ( <nl> colocated_set , init_buffer , while_hlo , * computation , <nl> buffer_liveness , buffer_size , colocated_buffer_sets ) ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> } else if ( opcode = = HloOpcode : : kCall ) { <nl> const HloInstruction * call_hlo = instruction ; <nl> const HloInstruction * root_hlo = <nl> call_hlo - > to_apply ( ) - > root_instruction ( ) ; <nl> - TF_CHECK_OK ( ShapeUtil : : ForEachSubshape ( <nl> + ShapeUtil : : ForEachSubshape ( <nl> call_hlo - > shape ( ) , <nl> [ this , call_hlo , root_hlo , & points_to_analysis , <nl> colocated_buffer_sets ] ( const Shape & / * subshape * / , <nl> void BufferAssigner : : BuildColocatedBufferSets ( <nl> AddBufferToColocatedSet ( root_hlo , index , points_to_analysis , <nl> & colocated_set ) ; <nl> AddSetToColocatedBufferSets ( colocated_set , colocated_buffer_sets ) ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> } <nl> } <nl> } <nl> mmm a / tensorflow / compiler / xla / service / buffer_assignment_test . cc <nl> ppp b / tensorflow / compiler / xla / service / buffer_assignment_test . cc <nl> TEST_F ( BufferAssignmentTest , ExampleWhile ) { <nl> <nl> / / Check that buffer for each subshape of ' while_op ' shares allocation with <nl> / / corresponding buffer from while body computation at same index . <nl> - TF_CHECK_OK ( ShapeUtil : : ForEachSubshape ( <nl> + ShapeUtil : : ForEachSubshape ( <nl> while_op - > shape ( ) , <nl> [ this , & buffers , while_op , body_root ] ( const Shape & / * subshape * / , <nl> const ShapeIndex & index ) { <nl> auto while_op_allocation = GetAllocation ( * buffers , while_op , index ) ; <nl> auto body_root_allocation = GetAllocation ( * buffers , body_root , index ) ; <nl> EXPECT_EQ ( while_op_allocation . index ( ) , body_root_allocation . index ( ) ) ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> <nl> / / Log size information for inspection . <nl> LOG ( INFO ) < < " LogicalBuffer count " < < buffers - > Allocations ( ) . size ( ) <nl> TEST_F ( BufferAssignmentTest , TupleParameterAsOutput ) { <nl> <nl> / / Verify each buffer allocation is marked as an entry computation parameter <nl> / / and is liveout . <nl> - TF_CHECK_OK ( ShapeUtil : : ForEachSubshape ( <nl> + ShapeUtil : : ForEachSubshape ( <nl> tuple_param - > shape ( ) , <nl> [ this , & assignment , tuple_param ] ( const Shape & / * subshape * / , <nl> const ShapeIndex & index ) { <nl> TEST_F ( BufferAssignmentTest , TupleParameterAsOutput ) { <nl> EXPECT_TRUE ( allocation . is_entry_computation_parameter ( ) ) ; <nl> EXPECT_EQ ( 0 , allocation . parameter_number ( ) ) ; <nl> EXPECT_TRUE ( allocation . maybe_live_out ( ) ) ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> } <nl> <nl> TEST_F ( BufferAssignmentTest , ElementOfNestedTupleParameterAsOutput ) { <nl> mmm a / tensorflow / compiler / xla / service / computation_tracker . 
cc <nl> ppp b / tensorflow / compiler / xla / service / computation_tracker . cc <nl> void ComputationTracker : : ComputeComputationPostOrder ( <nl> <nl> visited - > insert ( versioned_handle ) ; <nl> post_order - > push_back ( versioned_handle ) ; <nl> - return ; <nl> } <nl> <nl> StatusOr < std : : unique_ptr < HloModule > > ComputationTracker : : BuildHloModule ( <nl> mmm a / tensorflow / compiler / xla / service / copy_insertion . cc <nl> ppp b / tensorflow / compiler / xla / service / copy_insertion . cc <nl> class InstructionCopier { <nl> <nl> bool InstructionCopier : : HasAllIndicesFalse ( ) const { <nl> bool all_indices_false = true ; <nl> - TF_CHECK_OK ( indices_to_copy_ . ForEachElement ( <nl> - [ & all_indices_false ] ( const ShapeIndex & / * index * / , bool / * is_leaf * / , <nl> - bool data ) { <nl> - if ( data ) all_indices_false = false ; <nl> - return tensorflow : : Status : : OK ( ) ; <nl> - } ) ) ; <nl> + indices_to_copy_ . ForEachElement ( <nl> + [ & all_indices_false ] ( const ShapeIndex & / * index * / , bool data ) { <nl> + if ( data ) { <nl> + all_indices_false = false ; <nl> + } <nl> + } ) ; <nl> return all_indices_false ; <nl> } <nl> <nl> Status InstructionCopier : : RecordIndicesWhichPointToParamOrConstant ( <nl> <nl> / / Multiple buffers within a parameter / constant may be live out , so collect <nl> / / a set of indices at which to copy first . <nl> - TF_RETURN_IF_ERROR ( points_to . ForEachElement ( <nl> - [ this ] ( const ShapeIndex & index , bool / * is_leaf * / , <nl> + points_to . ForEachElement ( <nl> + [ this ] ( const ShapeIndex & index , <nl> const std : : vector < const LogicalBuffer * > & buffers ) { <nl> if ( IsReadOnlyIndex ( index ) ) { <nl> - return Status : : OK ( ) ; <nl> + return ; <nl> } <nl> for ( const LogicalBuffer * buffer : buffers ) { <nl> / / pointee is the HloInstruction producing the buffer which may be <nl> Status InstructionCopier : : RecordIndicesWhichPointToParamOrConstant ( <nl> break ; <nl> } <nl> } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status InstructionCopier : : RecordAmbiguousOrNonDistinctIndices ( <nl> / / Mapping from LogicalBuffer to index ( used to detect non - distinct indices ) . <nl> FlatMap < const LogicalBuffer * , std : : vector < ShapeIndex > > <nl> buffer_to_source_indices ; <nl> - TF_RETURN_IF_ERROR ( points_to . ForEachElement ( <nl> + points_to . ForEachElement ( <nl> [ this , & buffer_to_source_indices ] ( <nl> - const ShapeIndex & index , bool / * is_leaf * / , <nl> + const ShapeIndex & index , <nl> const std : : vector < const LogicalBuffer * > & buffers ) { <nl> if ( buffers . size ( ) > 1 ) { <nl> / / Record ambiguous points - to set at ' index ' . <nl> Status InstructionCopier : : RecordAmbiguousOrNonDistinctIndices ( <nl> for ( const LogicalBuffer * buffer : buffers ) { <nl> buffer_to_source_indices [ buffer ] . push_back ( index ) ; <nl> } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> <nl> / / Record all non - distinct indices detected in ' buffer_to_source_indices ' . <nl> for ( const auto & buff_to_src : buffer_to_source_indices ) { <nl> Status InstructionCopier : : RecordIndicesWhichInterfereWithOtherInstruction ( <nl> ShapeTree < bool > * read_only_indices_out ) { <nl> / / Record all buffer indices for ' instruction_ ' , which interfere with <nl> / / ' other_instruction ' at the same index . 
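<nl> Context for the hunks below: the patch renames the Status-returning traversal helpers to *WithStatus and gives the plain forms void callbacks. A minimal sketch of the two styles, assuming only an arbitrary xla::Shape named 'shape' and the usual shape_util.h includes; the wrapper VisitSubshapes is hypothetical:

Status VisitSubshapes(const Shape& shape) {
  // Infallible visitor: the plain variant now takes a void callback, so no
  // Status plumbing (and no TF_CHECK_OK) is needed at the call site.
  ShapeUtil::ForEachSubshape(
      shape, [](const Shape& /*subshape*/, const ShapeIndex& index) {
        VLOG(2) << "visiting subshape at " << index.ToString();
      });
  // Fallible visitor: the *WithStatus variant keeps the Status-returning
  // callback and propagates the first non-OK Status to the caller.
  TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
      shape,
      [](const Shape& /*subshape*/, const ShapeIndex& /*index*/) -> Status {
        return Status::OK();
      }));
  return Status::OK();
}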
<nl> - TF_RETURN_IF_ERROR ( ShapeUtil : : ForEachSubshape ( <nl> + ShapeUtil : : ForEachSubshape ( <nl> instruction_ - > shape ( ) , <nl> [ this , & liveness , other_instruction , read_only_indices_out ] ( <nl> const Shape & / * subshape * / , const ShapeIndex & index ) { <nl> if ( IsReadOnlyIndex ( index ) ) { <nl> - return Status : : OK ( ) ; <nl> + return ; <nl> } <nl> if ( indices_to_copy_ . element ( index ) ) { <nl> / / Return if previous pass already set index . <nl> - return Status : : OK ( ) ; <nl> + return ; <nl> } <nl> const auto & points_to_analysis = liveness . points_to_analysis ( ) ; <nl> / / Lookup buffers for ' instruction_ ' and ' other_instruction ' . <nl> Status InstructionCopier : : RecordIndicesWhichInterfereWithOtherInstruction ( <nl> if ( read_only_indices_out ! = nullptr ) { <nl> * read_only_indices_out - > mutable_element ( index ) = true ; <nl> } <nl> - return Status : : OK ( ) ; <nl> + return ; <nl> } <nl> / / We can ' t say anything about the ambiguity of ' other_instruction ' at <nl> / / this point , so we need to check interference between the single <nl> Status InstructionCopier : : RecordIndicesWhichInterfereWithOtherInstruction ( <nl> break ; <nl> } <nl> } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status InstructionCopier : : RecordIndicesWhichInterfereWithOtherInstruction ( <nl> Status InstructionCopier : : RecordControlPredecessors ( <nl> const TuplePointsToAnalysis & points_to_analysis , <nl> HloInstruction * parameter ) { <nl> - return indices_to_copy_ . ForEachElement ( <nl> + return indices_to_copy_ . ForEachElementWithStatus ( <nl> [ this , & points_to_analysis , parameter ] ( const ShapeIndex & index , <nl> - bool / * is_leaf * / , bool will_copy ) { <nl> + bool will_copy ) { <nl> if ( will_copy ) { <nl> TF_ASSIGN_OR_RETURN ( <nl> const LogicalBuffer * buffer , <nl> RevertReadOnlyIndicesForEntryParamsAndConstants ( <nl> FlatSet < const LogicalBuffer * > buffer_set ; <nl> <nl> ShapeTree < HloInstruction * > copy_overrides ( init_hlo - > shape ( ) ) ; <nl> - TF_RETURN_IF_ERROR ( points_to . ForEachElement ( <nl> + points_to . ForEachElement ( <nl> [ init_hlo , read_only_indices , shared_copies , & buffer_set , <nl> - & copy_overrides ] ( const ShapeIndex & index , bool / * is_leaf * / , <nl> + & copy_overrides ] ( const ShapeIndex & index , <nl> const std : : vector < const LogicalBuffer * > & buffers ) { <nl> / / Look for read - only entry parameters . <nl> if ( ! read_only_indices - > element ( index ) ) { <nl> - return Status : : OK ( ) ; <nl> + return ; <nl> } <nl> for ( const LogicalBuffer * buffer : buffers ) { <nl> HloInstruction * pointee = buffer - > instruction ( ) ; <nl> RevertReadOnlyIndicesForEntryParamsAndConstants ( <nl> / / single - copy optimization above , so there ' s nothing more to do . <nl> break ; <nl> } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> return copy_overrides ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / service / cpu / cpu_compiler . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / cpu_compiler . cc <nl> StatusOr < std : : unique_ptr < Executable > > CpuCompiler : : Compile ( <nl> llvm : : Function * ir_function , <nl> ir_emitter . 
EmitComputation ( <nl> embedded_computation , embedded_computation - > name ( ) , <nl> - / * is_entry_computation = * / computation_is_parallel ) ) ; <nl> + / * is_entry_computation = * / computation_is_parallel , <nl> + / * instruction_order = * / nullptr ) ) ; <nl> / / If this computation is parallel , remember it in the function name map . <nl> / / This way we know what function to execute when we try to run code for <nl> / / the Call instruction . <nl> CpuCompiler : : CompileAheadOfTime ( std : : vector < std : : unique_ptr < HloModule > > modules , <nl> TF_ASSIGN_OR_RETURN ( <nl> llvm : : Function * entry_function , <nl> ir_emitter . EmitComputation ( computation , entry_point_name , <nl> - / * is_entry_computation = * / true ) ) ; <nl> + / * is_entry_computation = * / true , <nl> + & module_sequence . at ( computation ) ) ) ; <nl> <nl> entry_function - > setName ( llvm_ir : : AsStringRef ( entry_point_name ) ) ; <nl> <nl> mmm a / tensorflow / compiler / xla / service / cpu / cpu_executable . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / cpu_executable . cc <nl> StatusOr < std : : unique_ptr < ShapedBuffer > > CpuExecutable : : ExecuteOnStream ( <nl> std : : vector < bool > buffers_in_result ( assignment_ - > Allocations ( ) . size ( ) , false ) ; <nl> TF_RETURN_IF_ERROR ( <nl> result_buffer - > mutable_shape_index_to_buffer_entry ( ) <nl> - - > ForEachMutableElement ( <nl> + - > ForEachMutableElementWithStatus ( <nl> [ & buffers , & buffers_in_result , & result_buffer , this ] ( <nl> - const ShapeIndex & index , bool is_leaf , size_t * buffer_entry ) { <nl> - if ( is_leaf ) { <nl> + const ShapeIndex & index , size_t * buffer_entry ) { <nl> + if ( ShapeUtil : : IsLeafIndex ( result_buffer - > shape ( ) , index ) ) { <nl> const std : : vector < const LogicalBuffer * > & sources = <nl> this - > GetRootPointsToSet ( ) . element ( index ) ; <nl> / / The points to set is unambiguous so the set should be a <nl> mmm a / tensorflow / compiler / xla / service / cpu / ir_emitter . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / ir_emitter . cc <nl> Status IrEmitter : : HandleWhile ( HloInstruction * xla_while ) { <nl> condition - > root_instruction ( ) - > shape ( ) . element_type ( ) = = PRED ) <nl> < < " While condition computation must return bool " ; <nl> / / Check that all while - related buffers share an allocation slice . <nl> - TF_RETURN_IF_ERROR ( ShapeUtil : : ForEachSubshape ( <nl> + TF_RETURN_IF_ERROR ( ShapeUtil : : ForEachSubshapeWithStatus ( <nl> xla_while - > shape ( ) , <nl> [ this , & xla_while ] ( const Shape & / * subshape * / , <nl> const ShapeIndex & index ) - > Status { <nl> mmm a / tensorflow / compiler / xla / service / cpu / ir_emitter . h <nl> ppp b / tensorflow / compiler / xla / service / cpu / ir_emitter . h <nl> class IrEmitter : public DfsHloVisitorWithDefault { <nl> ~ IrEmitter ( ) override ; <nl> <nl> / / Emit and return the given HLO computation as an LLVM IR <nl> - / / function . function_name_prefix is the desired name of the function . If the <nl> - / / name is not unique among already emitted functions then a suffix is <nl> - / / appended to make the name unique . is_entry_computation indicates that this <nl> - / / is the entry computation of the HLO module . If ' instruction_order ' is given <nl> - / / then the HLO instructions are emitted in the given order . In this case , <nl> - / / ' instruction_order ' must be a topological sort of the set of nodes <nl> - / / accessible from the root of the computation . 
<nl> + / / function . <nl> + / / <nl> + / / function_name_prefix is the desired name of the function . If the name is <nl> + / / not unique among already emitted functions then a suffix is appended to <nl> + / / make the name unique . <nl> + / / <nl> + / / is_entry_computation indicates that this is the entry computation of the <nl> + / / HLO module . <nl> + / / <nl> + / / If ' instruction_order ' is not NULL , then the HLO instructions are emitted <nl> + / / in the given order . In this case , ' instruction_order ' must be a <nl> + / / topological sort of the set of nodes accessible from the root of the <nl> + / / computation . <nl> StatusOr < llvm : : Function * > EmitComputation ( <nl> HloComputation * computation , const string & function_name_prefix , <nl> bool is_entry_computation , <nl> - std : : vector < const HloInstruction * > * instruction_order = nullptr ) ; <nl> + std : : vector < const HloInstruction * > * instruction_order ) ; <nl> <nl> protected : <nl> / / <nl> mmm a / tensorflow / compiler / xla / service / cpu / parallel_cpu_executable . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / parallel_cpu_executable . cc <nl> StatusOr < std : : unique_ptr < ShapedBuffer > > ParallelCpuExecutable : : ExecuteOnStream ( <nl> std : : vector < bool > buffers_in_result ( assignment_ - > Allocations ( ) . size ( ) , false ) ; <nl> TF_RETURN_IF_ERROR ( <nl> result_buffer - > mutable_shape_index_to_buffer_entry ( ) <nl> - - > ForEachMutableElement ( <nl> + - > ForEachMutableElementWithStatus ( <nl> [ & buffers , & buffers_in_result , & result_buffer , this ] ( <nl> - const ShapeIndex & index , bool is_leaf , size_t * buffer_entry ) { <nl> - if ( is_leaf ) { <nl> + const ShapeIndex & index , size_t * buffer_entry ) { <nl> + if ( ShapeUtil : : IsLeafIndex ( result_buffer - > shape ( ) , index ) ) { <nl> const std : : vector < const LogicalBuffer * > & sources = <nl> this - > GetRootPointsToSet ( ) . element ( index ) ; <nl> / / The points to set is unambiguous so the set should be a <nl> mmm a / tensorflow / compiler / xla / service / gpu / gpu_executable . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / gpu_executable . cc <nl> StatusOr < se : : DeviceMemoryBase > GpuExecutable : : ExecuteOnStream ( <nl> / / The points - to set of the root is unambiguous so it ' s known statically <nl> / / which buffers are in the result . Gather these buffers using the root ' s <nl> / / points - to set . <nl> - TF_RETURN_IF_ERROR ( GetRootPointsToSet ( ) . ForEachElement ( <nl> + TF_RETURN_IF_ERROR ( GetRootPointsToSet ( ) . ForEachElementWithStatus ( <nl> [ & referred_by_output , & buffer_allocations , this ] ( <nl> - const ShapeIndex & / * index * / , bool / * is_leaf * / , <nl> + const ShapeIndex & / * index * / , <nl> const std : : vector < const LogicalBuffer * > & buffers ) { <nl> / / The points to set is unambiguous so the set should be a <nl> / / singleton . 
That is , we know exactly which instruction produced <nl> StatusOr < std : : unique_ptr < ShapedBuffer > > GpuExecutable : : ExecuteOnStream ( <nl> std : : set < se : : DeviceMemoryBase > buffers_in_result ; <nl> TF_RETURN_IF_ERROR ( <nl> shaped_buffer - > mutable_shape_index_to_buffer_entry ( ) <nl> - - > ForEachMutableElement ( <nl> + - > ForEachMutableElementWithStatus ( <nl> [ & buffer_allocations , & buffers_in_result , & shaped_buffer , this ] ( <nl> - const ShapeIndex & index , bool is_leaf , size_t * buffer_entry ) { <nl> - if ( is_leaf ) { <nl> + const ShapeIndex & index , size_t * buffer_entry ) { <nl> + if ( ShapeUtil : : IsLeafIndex ( shaped_buffer - > shape ( ) , index ) ) { <nl> const std : : vector < const LogicalBuffer * > & sources = <nl> this - > GetRootPointsToSet ( ) . element ( index ) ; <nl> / / The points to set is unambiguous so the set should be a <nl> mmm a / tensorflow / compiler / xla / service / gpu / ir_emitter_unnested . cc <nl> ppp b / tensorflow / compiler / xla / service / gpu / ir_emitter_unnested . cc <nl> namespace { <nl> Status CheckWhileBuffersShareAllocation ( <nl> const HloInstruction * xla_while , <nl> const BufferAssignment & buffer_assignment ) { <nl> - return ShapeUtil : : ForEachSubshape ( <nl> + return ShapeUtil : : ForEachSubshapeWithStatus ( <nl> xla_while - > shape ( ) , <nl> [ & buffer_assignment , & xla_while ] ( const Shape & / * subshape * / , <nl> const ShapeIndex & index ) - > Status { <nl> mmm a / tensorflow / compiler / xla / service / hlo_computation . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_computation . cc <nl> bool HloComputation : : operator = = ( const HloComputation & other ) const { <nl> / / If < a , b > are visited but not identical , the recursion should have <nl> / / been aborted . So , if < a , b > are visited at this point , they must be <nl> / / identical . <nl> - if ( visited . count ( std : : make_pair ( a , b ) ) > 0 ) return true ; <nl> + if ( visited . count ( std : : make_pair ( a , b ) ) > 0 ) { <nl> + return true ; <nl> + } <nl> visited . emplace ( a , b ) ; <nl> return a - > Identical ( <nl> * b , eq , [ ] ( const HloComputation * a , const HloComputation * b ) { <nl> new file mode 100644 <nl> index 0000000000000 . . ea87eba960a6e <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / xla / service / hlo_dataflow_analysis . cc <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / compiler / xla / service / hlo_dataflow_analysis . 
h " <nl> + <nl> + # include < algorithm > <nl> + # include < iosfwd > <nl> + # include < queue > <nl> + # include < set > <nl> + # include < vector > <nl> + <nl> + # include " tensorflow / compiler / xla / map_util . h " <nl> + # include " tensorflow / compiler / xla / ptr_util . h " <nl> + # include " tensorflow / compiler / xla / service / dfs_hlo_visitor_with_default . h " <nl> + # include " tensorflow / compiler / xla / service / hlo_computation . h " <nl> + # include " tensorflow / compiler / xla / service / hlo_instruction . h " <nl> + # include " tensorflow / compiler / xla / service / hlo_opcode . h " <nl> + # include " tensorflow / compiler / xla / service / liveness_util . h " <nl> + # include " tensorflow / compiler / xla / shape_util . h " <nl> + # include " tensorflow / compiler / xla / status . h " <nl> + # include " tensorflow / compiler / xla / types . h " <nl> + # include " tensorflow / compiler / xla / util . h " <nl> + # include " tensorflow / core / lib / core / errors . h " <nl> + # include " tensorflow / core / lib / strings / str_util . h " <nl> + # include " tensorflow / core / lib / strings / strcat . h " <nl> + # include " tensorflow / core / lib / strings / stringprintf . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + <nl> + namespace xla { <nl> + <nl> + using : : tensorflow : : strings : : StrAppend ; <nl> + using : : tensorflow : : strings : : StrCat ; <nl> + <nl> + string HloLocation : : ToString ( ) const { <nl> + string index_str = <nl> + ShapeUtil : : IsTuple ( instruction - > shape ( ) ) ? ( " " + index . ToString ( ) ) : " " ; <nl> + return StrCat ( instruction - > FullyQualifiedName ( ) , index_str ) ; <nl> + } <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & out , const HloLocation & location ) { <nl> + out < < location . ToString ( ) ; <nl> + return out ; <nl> + } <nl> + <nl> + string HloUse : : ToString ( ) const { <nl> + string index_str = <nl> + ShapeUtil : : IsTuple ( instruction - > operand ( operand_number ) - > shape ( ) ) <nl> + ? ( " " + operand_index . ToString ( ) ) <nl> + : " " ; <nl> + return StrCat ( instruction - > FullyQualifiedName ( ) , " , operand " , operand_number , <nl> + index_str ) ; <nl> + } <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & out , const HloUse & use ) { <nl> + out < < use . ToString ( ) ; <nl> + return out ; <nl> + } <nl> + <nl> + HloValue : : HloValue ( HloValue : : Id id , HloInstruction * instruction , <nl> + const ShapeIndex & index , bool is_phi ) <nl> + : id_ ( id ) , is_phi_ ( is_phi ) { <nl> + / / The defining location is always the first element in the locations_ vector . <nl> + AddLocation ( instruction , index ) ; <nl> + } <nl> + <nl> + bool HloValue : : operator = = ( const HloValue & other ) const { <nl> + bool equal = instruction ( ) = = other . instruction ( ) & & index ( ) = = other . index ( ) ; <nl> + / / If the values are equal they most both be phi ( or non phi ) . <nl> + CHECK ( ! ( equal & & is_phi ( ) ! = other . is_phi ( ) ) ) ; <nl> + return equal ; <nl> + } <nl> + <nl> + bool HloValue : : operator ! = ( const HloValue & other ) const { <nl> + return ! ( * this = = other ) ; <nl> + } <nl> + <nl> + string HloValue : : ToShortString ( ) const { <nl> + string index_str = <nl> + ShapeUtil : : IsTuple ( instruction ( ) - > shape ( ) ) ? index ( ) . ToString ( ) : " " ; <nl> + return StrCat ( is_phi_ ? 
" PHI " : " " , instruction ( ) - > FullyQualifiedName ( ) , <nl> + index_str ) ; <nl> + } <nl> + <nl> + string HloValue : : ToString ( int indent ) const { <nl> + string indentation ( indent , ' ' ) ; <nl> + string out = StrCat ( indentation , ToShortString ( ) , " , locations : \ n " ) ; <nl> + for ( const HloLocation & location : locations ( ) ) { <nl> + StrAppend ( & out , indentation , " " , location . ToString ( ) , " \ n " ) ; <nl> + } <nl> + StrAppend ( & out , indentation , " uses : \ n " ) ; <nl> + for ( const HloUse & use : uses ( ) ) { <nl> + StrAppend ( & out , indentation , " " , use . ToString ( ) , " \ n " ) ; <nl> + } <nl> + return out ; <nl> + } <nl> + <nl> + void HloValue : : AddLocation ( HloInstruction * instruction , <nl> + const ShapeIndex & index ) { <nl> + / / The given location should not already exist in locations_ . <nl> + for ( const HloLocation & location : locations_ ) { <nl> + DCHECK ( ! ( location . instruction = = instruction & & location . index = = index ) ) ; <nl> + } <nl> + <nl> + locations_ . push_back ( HloLocation { instruction , index } ) ; <nl> + <nl> + / / Update uses . <nl> + for ( HloInstruction * user : instruction - > users ( ) ) { <nl> + for ( int64 operand_number : user - > OperandIndices ( instruction ) ) { <nl> + if ( ! DoesNotUseOperandBuffer ( instruction , index , user ) ) { <nl> + for ( const HloUse & use : uses_ ) { <nl> + / / Verify that this use does not already exist . <nl> + DCHECK ( ! ( use . instruction = = user & & <nl> + use . operand_number = = operand_number & & <nl> + use . operand_index = = index ) ) ; <nl> + } <nl> + <nl> + uses_ . push_back ( HloUse { user , operand_number , index } ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Update liveout status of this HloValue . <nl> + const HloModule & module = * instruction - > parent ( ) - > parent ( ) ; <nl> + if ( instruction = = module . entry_computation ( ) - > root_instruction ( ) ) { <nl> + live_out_of_module_ = true ; <nl> + } <nl> + } <nl> + <nl> + void HloValue : : RemoveLocation ( HloInstruction * instruction , <nl> + const ShapeIndex & index ) { <nl> + / / The defining location cannot be removed . <nl> + CHECK ( ! ( instruction = = this - > instruction ( ) & & index = = this - > index ( ) ) ) ; <nl> + <nl> + int64 size_before = locations_ . size ( ) ; <nl> + locations_ . erase ( <nl> + std : : remove_if ( locations_ . begin ( ) , locations_ . end ( ) , <nl> + [ instruction , & index ] ( const HloLocation & location ) { <nl> + return location . instruction = = instruction & & <nl> + location . index = = index ; <nl> + } ) , <nl> + locations_ . end ( ) ) ; <nl> + / / Only a single location should have been removed . <nl> + CHECK_EQ ( locations_ . size ( ) , size_before - 1 ) ; <nl> + <nl> + / / Update uses which referred to this location . <nl> + uses_ . erase ( std : : remove_if ( uses_ . begin ( ) , uses_ . end ( ) , <nl> + [ instruction , & index ] ( const HloUse & use ) { <nl> + return use . instruction - > operand ( <nl> + use . operand_number ) = = instruction & & <nl> + use . operand_index = = index ; <nl> + } ) , <nl> + uses_ . end ( ) ) ; <nl> + <nl> + const HloModule & module = * instruction - > parent ( ) - > parent ( ) ; <nl> + if ( instruction = = module . entry_computation ( ) - > root_instruction ( ) ) { <nl> + / / Value has been removed from a location in the entry root instruction . <nl> + / / Check if the value is still live out of the module by walking all <nl> + / / remaining locations . 
<nl> + live_out_of_module_ = false ; <nl> + for ( const HloLocation & location : locations ( ) ) { <nl> + if ( location . instruction = = <nl> + module . entry_computation ( ) - > root_instruction ( ) ) { <nl> + live_out_of_module_ = true ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & out , const HloValue & value ) { <nl> + out < < value . ToString ( ) ; <nl> + return out ; <nl> + } <nl> + <nl> + void HloValueSet : : SortAndUniquifyValues ( ) { <nl> + std : : sort ( value_ids_ . begin ( ) , value_ids_ . end ( ) ) ; <nl> + value_ids_ . erase ( std : : unique ( value_ids_ . begin ( ) , value_ids_ . end ( ) ) , <nl> + value_ids_ . end ( ) ) ; <nl> + } <nl> + <nl> + string HloValueSet : : ToString ( ) const { <nl> + return StrCat ( " HloValueSet : " , tensorflow : : str_util : : Join ( value_ids_ , " , " ) ) ; <nl> + } <nl> + <nl> + / * static * / <nl> + HloValueSet HloValueSet : : Union ( <nl> + tensorflow : : gtl : : ArraySlice < const HloValueSet * > inputs ) { <nl> + HloValueSet union_set ; <nl> + for ( const HloValueSet * input : inputs ) { <nl> + for ( HloValue : : Id value_id : input - > value_ids ( ) ) { <nl> + union_set . value_ids_ . push_back ( value_id ) ; <nl> + } <nl> + } <nl> + union_set . SortAndUniquifyValues ( ) ; <nl> + return union_set ; <nl> + } <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & out , const HloValueSet & value_set ) { <nl> + out < < value_set . ToString ( ) ; <nl> + return out ; <nl> + } <nl> + <nl> + InstructionValueSet InstructionValueSet : : Union ( <nl> + tensorflow : : gtl : : ArraySlice < const InstructionValueSet * > inputs ) { <nl> + CHECK_GT ( inputs . size ( ) , 0 ) ; <nl> + for ( int i = 1 ; i < inputs . size ( ) ; + + i ) { <nl> + CHECK ( ShapeUtil : : Compatible ( inputs [ 0 ] - > shape ( ) , inputs [ i ] - > shape ( ) ) ) ; <nl> + } <nl> + InstructionValueSet union_set ( inputs [ 0 ] - > shape ( ) ) ; <nl> + union_set . ForEachMutableElement ( <nl> + [ & inputs ] ( const ShapeIndex & index , HloValueSet * value_set ) { <nl> + std : : vector < const HloValueSet * > input_sets ; <nl> + for ( const InstructionValueSet * input : inputs ) { <nl> + input_sets . push_back ( & input - > element ( index ) ) ; <nl> + } <nl> + * value_set = HloValueSet : : Union ( input_sets ) ; <nl> + } ) ; <nl> + return union_set ; <nl> + } <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & out , <nl> + const InstructionValueSet & instruction_value_set ) { <nl> + out < < instruction_value_set . ToString ( ) ; <nl> + return out ; <nl> + } <nl> + <nl> + string InstructionValueSet : : ToString ( ) const { <nl> + string out = <nl> + StrCat ( " InstructionValueSet ( " , ShapeUtil : : HumanString ( shape ( ) ) , " ) \ n " ) ; <nl> + ForEachElement ( [ this , & out ] ( const ShapeIndex & index , <nl> + const HloValueSet & value_set ) { <nl> + StrAppend ( & out , " " , index . ToString ( ) , " : " , value_set . 
ToString ( ) , " \ n " ) ; <nl> + } ) ; <nl> + return out ; <nl> + } <nl> + <nl> + HloDataflowAnalysis : : HloDataflowAnalysis ( HloModule * module , bool ssa_form , <nl> + bool bitcast_defines_value ) <nl> + : module_ ( module ) , <nl> + ssa_form_ ( ssa_form ) , <nl> + bitcast_defines_value_ ( bitcast_defines_value ) , <nl> + call_graph_ ( CallGraph : : Build ( module ) ) { } <nl> + <nl> + bool HloDataflowAnalysis : : ValueIsDefinedAt ( const HloInstruction * instruction , <nl> + const ShapeIndex & index ) const { <nl> + const HloValueSet & value_set = GetValueSet ( instruction , index ) ; <nl> + if ( value_set . value_ids ( ) . size ( ) ! = 1 ) { <nl> + return false ; <nl> + } <nl> + return GetValue ( value_set . GetUniqueValueId ( ) ) . instruction ( ) = = instruction ; <nl> + } <nl> + <nl> + const HloValue & HloDataflowAnalysis : : GetValueDefinedAt ( <nl> + const HloInstruction * instruction , const ShapeIndex & index ) const { <nl> + CHECK ( ValueIsDefinedAt ( instruction , index ) ) ; <nl> + return GetUniqueValueAt ( instruction , index ) ; <nl> + } <nl> + <nl> + HloValue & HloDataflowAnalysis : : GetValueDefinedAt ( <nl> + const HloInstruction * instruction , const ShapeIndex & index ) { <nl> + CHECK ( ValueIsDefinedAt ( instruction , index ) ) ; <nl> + return GetUniqueValueAt ( instruction , index ) ; <nl> + } <nl> + <nl> + HloValue : : Id HloDataflowAnalysis : : NewHloValue ( HloInstruction * instruction , <nl> + const ShapeIndex & index , <nl> + bool is_phi ) { <nl> + int64 value_id = next_value_id_ + + ; <nl> + auto it_added = values_ . emplace ( <nl> + std : : piecewise_construct , std : : forward_as_tuple ( value_id ) , <nl> + std : : forward_as_tuple ( value_id , instruction , index , is_phi ) ) ; <nl> + CHECK ( it_added . second ) ; <nl> + <nl> + / / Clear the vector of values as it is now stale . It will be lazily <nl> + / / reconstructed if needed when HloDataflowAnalysis : : values ( ) is called . <nl> + values_vector_ . clear ( ) ; <nl> + <nl> + return value_id ; <nl> + } <nl> + <nl> + void HloDataflowAnalysis : : DeleteHloValue ( HloValue : : Id value_id ) { <nl> + values_ . erase ( value_id ) ; <nl> + <nl> + / / Clear the vector of values as it is now stale . It will be lazily <nl> + / / reconstructed if needed when HloDataflowAnalysis : : values ( ) is called . <nl> + values_vector_ . clear ( ) ; <nl> + } <nl> + <nl> + string HloDataflowAnalysis : : ToString ( ) const { <nl> + string out = StrCat ( " HloDataflowAnalysis , module " , module_ - > name ( ) , " \ n " ) ; <nl> + StrAppend ( & out , " Instruction value sets : \ n " ) ; <nl> + for ( const std : : unique_ptr < HloComputation > & computation : <nl> + module_ - > computations ( ) ) { <nl> + for ( const std : : unique_ptr < HloInstruction > & instruction : <nl> + computation - > instructions ( ) ) { <nl> + StrAppend ( & out , " " , instruction - > FullyQualifiedName ( ) , " : \ n " ) ; <nl> + if ( ShapeUtil : : IsTuple ( instruction - > shape ( ) ) ) { <nl> + GetInstructionValueSet ( instruction . get ( ) ) <nl> + . ForEachElement ( [ this , & instruction , & out ] ( <nl> + const ShapeIndex & index , <nl> + const HloValueSet & value_set ) { <nl> + StrAppend ( & out , " tuple index " , index . ToString ( ) , " : \ n " ) ; <nl> + for ( HloValue : : Id value_id : value_set . value_ids ( ) ) { <nl> + StrAppend ( <nl> + & out , " " , GetValue ( value_id ) . ToShortString ( ) , <nl> + ValueIsDefinedAt ( instruction . get ( ) , index ) ? 
" ( def ) " : " " , <nl> + " \ n " ) ; <nl> + } <nl> + } ) ; <nl> + } else { <nl> + const HloValueSet & top_level_value_set = <nl> + GetValueSet ( instruction . get ( ) , / * index = * / { } ) ; <nl> + for ( HloValue : : Id value_id : top_level_value_set . value_ids ( ) ) { <nl> + StrAppend ( & out , " " , GetValue ( value_id ) . ToShortString ( ) , <nl> + ValueIsDefinedAt ( instruction . get ( ) ) ? " ( def ) " : " " , " \ n " ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + StrAppend ( & out , " HloValues : \ n " ) ; <nl> + for ( const auto & pair : values_ ) { <nl> + StrAppend ( & out , pair . second . ToString ( / * indent = * / 4 ) ) ; <nl> + } <nl> + return out ; <nl> + } <nl> + <nl> + const HloValue & HloDataflowAnalysis : : GetValue ( HloValue : : Id value_id ) const { <nl> + return values_ . at ( value_id ) ; <nl> + } <nl> + <nl> + HloValue & HloDataflowAnalysis : : GetValue ( HloValue : : Id value_id ) { <nl> + return values_ . at ( value_id ) ; <nl> + } <nl> + <nl> + const HloValueSet & HloDataflowAnalysis : : GetValueSet ( <nl> + const HloInstruction * instruction , const ShapeIndex & index ) const { <nl> + return GetInstructionValueSet ( instruction ) . element ( index ) ; <nl> + } <nl> + <nl> + HloValueSet & HloDataflowAnalysis : : GetValueSet ( const HloInstruction * instruction , <nl> + const ShapeIndex & index ) { <nl> + return * GetInstructionValueSet ( instruction ) . mutable_element ( index ) ; <nl> + } <nl> + <nl> + std : : vector < const HloValue * > & HloDataflowAnalysis : : values ( ) const { <nl> + if ( values_vector_ . empty ( ) ) { <nl> + / / Lazily construct vector of values . <nl> + values_vector_ . reserve ( values_ . size ( ) ) ; <nl> + for ( auto & pair : values_ ) { <nl> + values_vector_ . push_back ( & pair . second ) ; <nl> + } <nl> + std : : sort ( <nl> + values_vector_ . begin ( ) , values_vector_ . end ( ) , <nl> + [ ] ( const HloValue * a , const HloValue * b ) { return a - > id ( ) < b - > id ( ) ; } ) ; <nl> + } else { <nl> + CHECK_EQ ( values_vector_ . size ( ) , values_ . size ( ) ) ; <nl> + for ( const HloValue * value : values_vector_ ) { <nl> + DCHECK ( ContainsKey ( values_ , value - > id ( ) ) ) ; <nl> + DCHECK ( & GetValue ( value - > id ( ) ) = = value ) ; <nl> + } <nl> + } <nl> + return values_vector_ ; <nl> + } <nl> + <nl> + / * static * / <nl> + InstructionValueSet HloDataflowAnalysis : : Phi ( <nl> + HloInstruction * instruction , <nl> + tensorflow : : gtl : : ArraySlice < const InstructionValueSet * > inputs , <nl> + bool skip_top_level ) { <nl> + CHECK ( ssa_form_ ) ; <nl> + <nl> + for ( const InstructionValueSet * input : inputs ) { <nl> + CHECK ( ShapeUtil : : Compatible ( instruction - > shape ( ) , input - > shape ( ) ) ) ; <nl> + } <nl> + InstructionValueSet new_value_set ( instruction - > shape ( ) ) ; <nl> + new_value_set . ForEachMutableElement ( <nl> + [ this , instruction , & inputs , skip_top_level ] ( const ShapeIndex & index , <nl> + HloValueSet * value_set ) { <nl> + / / If we ' re skipping the top level , just copy over the existing <nl> + / / HloValueSet . <nl> + if ( skip_top_level & & index . empty ( ) ) { <nl> + * value_set = GetInstructionValueSet ( instruction ) . element ( index ) ; <nl> + return ; <nl> + } <nl> + <nl> + / / Identify the existing phi value at this index if it exists . <nl> + const HloValue * existing_phi_value = nullptr ; <nl> + if ( ValueIsDefinedAt ( instruction , index ) & & <nl> + GetUniqueValueAt ( instruction , index ) . 
is_phi ( ) ) { <nl> + existing_phi_value = & GetUniqueValueAt ( instruction , index ) ; <nl> + } <nl> + <nl> + / / Construct a vector of unique value IDs of the inputs . <nl> + std : : vector < HloValue : : Id > input_value_ids ; <nl> + for ( const InstructionValueSet * input : inputs ) { <nl> + for ( HloValue : : Id value_id : input - > element ( index ) . value_ids ( ) ) { <nl> + input_value_ids . push_back ( value_id ) ; <nl> + } <nl> + } <nl> + / / Sort first : std : : unique only removes adjacent duplicates . <nl> + std : : sort ( input_value_ids . begin ( ) , input_value_ids . end ( ) ) ; <nl> + input_value_ids . erase ( <nl> + std : : unique ( input_value_ids . begin ( ) , input_value_ids . end ( ) ) , <nl> + input_value_ids . end ( ) ) ; <nl> + <nl> + / / Remove the existing phi value ( if it exists ) . The phi can be its own <nl> + / / input , for example , in while body parameters where the body passes <nl> + / / through the parameter value . <nl> + if ( existing_phi_value ! = nullptr ) { <nl> + auto it = std : : find ( input_value_ids . begin ( ) , input_value_ids . end ( ) , <nl> + existing_phi_value - > id ( ) ) ; <nl> + if ( it ! = input_value_ids . end ( ) ) { <nl> + input_value_ids . erase ( it ) ; <nl> + } <nl> + } <nl> + <nl> + if ( input_value_ids . size ( ) < = 1 ) { <nl> + if ( input_value_ids . size ( ) = = 1 ) { <nl> + * value_set = HloValueSet ( { input_value_ids [ 0 ] } ) ; <nl> + } <nl> + if ( existing_phi_value ) { <nl> + / / The merge point does not have multiple distinct inputs ( which are <nl> + / / not the phi value itself ) . Therefore there is no need to insert a <nl> + / / phi value because there is a single reaching definition ( or no <nl> + / / reaching definition ) . <nl> + DeleteHloValue ( existing_phi_value - > id ( ) ) ; <nl> + } <nl> + } else if ( input_value_ids . size ( ) > 1 ) { <nl> + / / Multiple distinct values reach this point . A phi value is <nl> + / / necessary . <nl> + if ( existing_phi_value ) { <nl> + / / A phi value already exists so reuse it in the new <nl> + / / InstructionValueSet . <nl> + * value_set = HloValueSet ( { existing_phi_value - > id ( ) } ) ; <nl> + } else { <nl> + / / Create a new phi value . <nl> + * value_set = <nl> + HloValueSet ( { NewHloValue ( instruction , index , / * is_phi = * / true ) } ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> + return new_value_set ; <nl> + } <nl> + <nl> + void HloDataflowAnalysis : : UpdateLocationsOfValuesAt ( <nl> + HloInstruction * instruction , const InstructionValueSet & new_value_set , <nl> + const InstructionValueSet * prev_value_set ) { <nl> + if ( prev_value_set ! = nullptr ) { <nl> + / / Remove locations from the old value set . <nl> + prev_value_set - > ForEachElement ( <nl> + [ this , instruction ] ( const ShapeIndex & index , <nl> + const HloValueSet & value_set ) { <nl> + for ( HloValue : : Id value_id : value_set . value_ids ( ) ) { <nl> + / / HloValues in the previous value set may have been deleted . <nl> + if ( ! ContainsKey ( values_ , value_id ) ) { <nl> + continue ; <nl> + } <nl> + / / Don ' t remove the defining location of the value . <nl> + HloValue & value = GetValue ( value_id ) ; <nl> + if ( instruction = = value . instruction ( ) ) { <nl> + CHECK_EQ ( index , value . index ( ) ) ; <nl> + } else { <nl> + value . RemoveLocation ( instruction , index ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> + } <nl> + / / Add locations in the new value set . <nl> + new_value_set . ForEachElement ( <nl> + [ this , instruction ] ( const ShapeIndex & index , <nl> + const HloValueSet & value_set ) { <nl> + for ( HloValue : : Id value_id : value_set . 
value_ids ( ) ) { <nl> + HloValue & value = GetValue ( value_id ) ; <nl> + if ( instruction = = value . instruction ( ) ) { <nl> + CHECK_EQ ( index , value . index ( ) ) ; <nl> + } else { <nl> + value . AddLocation ( instruction , index ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> + } <nl> + <nl> + InstructionValueSet HloDataflowAnalysis : : RecomputeBitcastValueSet ( <nl> + HloInstruction * bitcast ) { <nl> + CHECK_EQ ( bitcast - > opcode ( ) , HloOpcode : : kBitcast ) ; <nl> + if ( bitcast_defines_value_ ) { <nl> + return GetInstructionValueSet ( bitcast ) ; <nl> + } else { <nl> + return GetInstructionValueSet ( bitcast - > operand ( 0 ) ) ; <nl> + } <nl> + } <nl> + <nl> + InstructionValueSet HloDataflowAnalysis : : RecomputeCopyValueSet ( <nl> + HloInstruction * copy ) { <nl> + CHECK_EQ ( copy - > opcode ( ) , HloOpcode : : kCopy ) ; <nl> + InstructionValueSet new_value_set = GetInstructionValueSet ( copy ) ; <nl> + if ( ShapeUtil : : IsTuple ( copy - > shape ( ) ) ) { <nl> + for ( int i = 0 ; i < ShapeUtil : : TupleElementCount ( copy - > shape ( ) ) ; + + i ) { <nl> + new_value_set . CopySubtreeFrom ( GetInstructionValueSet ( copy - > operand ( 0 ) ) , <nl> + / * source_base_index = * / { i } , <nl> + / * target_base_index = * / { i } ) ; <nl> + } <nl> + } <nl> + return new_value_set ; <nl> + } <nl> + <nl> + InstructionValueSet HloDataflowAnalysis : : RecomputeGetTupleElementValueSet ( <nl> + HloInstruction * gte ) { <nl> + CHECK_EQ ( gte - > opcode ( ) , HloOpcode : : kGetTupleElement ) ; <nl> + InstructionValueSet new_value_set ( gte - > shape ( ) ) ; <nl> + new_value_set . CopySubtreeFrom ( GetInstructionValueSet ( gte - > operand ( 0 ) ) , <nl> + / * source_base_index = * / { gte - > tuple_index ( ) } , <nl> + / * target_base_index = * / { } ) ; <nl> + return new_value_set ; <nl> + } <nl> + <nl> + InstructionValueSet HloDataflowAnalysis : : RecomputeSelectValueSet ( <nl> + HloInstruction * select ) { <nl> + CHECK_EQ ( select - > opcode ( ) , HloOpcode : : kSelect ) ; <nl> + std : : vector < const InstructionValueSet * > inputs = { <nl> + & GetInstructionValueSet ( select - > operand ( 1 ) ) , <nl> + & GetInstructionValueSet ( select - > operand ( 2 ) ) } ; <nl> + / / A phi value is not defined at a kSelect instruction because kSelect does <nl> + / / not create a new value . Rather it forwards a value from its operands . This <nl> + / / contrasts with kWhile instruction ( which does define a phi value ) which has <nl> + / / in - place update semantics . <nl> + InstructionValueSet new_value_set = InstructionValueSet : : Union ( inputs ) ; <nl> + * new_value_set . mutable_element ( / * index = * / { } ) = <nl> + GetInstructionValueSet ( select ) . element ( / * index = * / { } ) ; <nl> + return new_value_set ; <nl> + } <nl> + <nl> + InstructionValueSet HloDataflowAnalysis : : RecomputeTupleValueSet ( <nl> + HloInstruction * tuple ) { <nl> + CHECK_EQ ( tuple - > opcode ( ) , HloOpcode : : kTuple ) ; <nl> + InstructionValueSet new_value_set ( tuple - > shape ( ) ) ; <nl> + * new_value_set . mutable_element ( / * index = * / { } ) = <nl> + GetInstructionValueSet ( tuple ) . element ( / * index = * / { } ) ; <nl> + for ( int64 i = 0 ; i < tuple - > operands ( ) . size ( ) ; + + i ) { <nl> + new_value_set . 
CopySubtreeFrom ( GetInstructionValueSet ( tuple - > operand ( i ) ) , <nl> + / * source_base_index = * / { } , <nl> + / * target_base_index = * / { i } ) ; <nl> + } <nl> + return new_value_set ; <nl> + } <nl> + <nl> + InstructionValueSet HloDataflowAnalysis : : RecomputeWhileValueSet ( <nl> + HloInstruction * xla_while ) { <nl> + CHECK_EQ ( xla_while - > opcode ( ) , HloOpcode : : kWhile ) ; <nl> + std : : vector < const InstructionValueSet * > inputs = { <nl> + & GetInstructionValueSet ( xla_while - > while_body ( ) - > root_instruction ( ) ) , <nl> + & GetInstructionValueSet ( xla_while - > operand ( 0 ) ) } ; <nl> + if ( ssa_form_ ) { <nl> + return Phi ( xla_while , inputs ) ; <nl> + } else { <nl> + return InstructionValueSet : : Union ( inputs ) ; <nl> + } <nl> + } <nl> + <nl> + void HloDataflowAnalysis : : UpdateInstructionValueSet ( <nl> + HloInstruction * instruction ) { <nl> + / / Recompute from operands . <nl> + InstructionValueSet & value_set = GetInstructionValueSet ( instruction ) ; <nl> + switch ( instruction - > opcode ( ) ) { <nl> + case HloOpcode : : kBitcast : <nl> + value_set = RecomputeBitcastValueSet ( instruction ) ; <nl> + break ; <nl> + case HloOpcode : : kCopy : <nl> + value_set = RecomputeCopyValueSet ( instruction ) ; <nl> + break ; <nl> + case HloOpcode : : kGetTupleElement : <nl> + value_set = RecomputeGetTupleElementValueSet ( instruction ) ; <nl> + break ; <nl> + case HloOpcode : : kSelect : <nl> + value_set = RecomputeSelectValueSet ( instruction ) ; <nl> + break ; <nl> + case HloOpcode : : kTuple : <nl> + value_set = RecomputeTupleValueSet ( instruction ) ; <nl> + break ; <nl> + case HloOpcode : : kParameter : <nl> + value_set = RecomputeParameterValueSet ( instruction ) ; <nl> + break ; <nl> + case HloOpcode : : kCall : <nl> + / / The output of a kCall instruction is exactly the output of the root of <nl> + / / the subcomputation . <nl> + value_set = <nl> + GetInstructionValueSet ( instruction - > to_apply ( ) - > root_instruction ( ) ) ; <nl> + break ; <nl> + case HloOpcode : : kWhile : <nl> + value_set = RecomputeWhileValueSet ( instruction ) ; <nl> + break ; <nl> + default : <nl> + / / Instruction does not forward HloValues ( it defines all values in its <nl> + / / output ) . No update is necessary . <nl> + return ; <nl> + } <nl> + } <nl> + <nl> + void HloDataflowAnalysis : : UpdateInstructionsAndPropagate ( <nl> + tensorflow : : gtl : : ArraySlice < HloInstruction * > instructions ) { <nl> + std : : queue < HloInstruction * > worklist ; <nl> + for ( HloInstruction * instruction : instructions ) { <nl> + worklist . push ( instruction ) ; <nl> + } <nl> + <nl> + while ( ! worklist . empty ( ) ) { <nl> + HloInstruction * instruction = worklist . front ( ) ; <nl> + worklist . pop ( ) ; <nl> + <nl> + VLOG ( 3 ) < < " Worklist top : " < < instruction - > name ( ) ; <nl> + VLOG ( 3 ) < < ToString ( ) ; <nl> + <nl> + / / Save old value for recomputing uses and live out . <nl> + InstructionValueSet old_value = GetInstructionValueSet ( instruction ) ; <nl> + UpdateInstructionValueSet ( instruction ) ; <nl> + <nl> + if ( GetInstructionValueSet ( instruction ) = = old_value ) { <nl> + / / No change to the instruction ' s value set . <nl> + VLOG ( 4 ) < < " No change . " ; <nl> + continue ; <nl> + } <nl> + <nl> + VLOG ( 4 ) < < " New value set for " < < instruction - > name ( ) < < " : " <nl> + < < GetInstructionValueSet ( instruction ) ; <nl> + VLOG ( 4 ) < < " Previously : " < < old_value ; <nl> + <nl> + / / Instruction value was updated . Add users to work list . 
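<nl> + / / ( This is a standard worklist fixed point : any instruction whose <nl> + / / inputs may have changed is revisited until no value set changes . )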
<nl> + for ( HloInstruction * user : instruction - > users ( ) ) { <nl> + worklist . push ( user ) ; <nl> + <nl> + / / If user calls a computation , then the respective parameter ( s ) of the <nl> + / / computation need to be updated . <nl> + for ( HloComputation * called_computation : user - > called_computations ( ) ) { <nl> + for ( int64 operand_number : user - > OperandIndices ( instruction ) ) { <nl> + worklist . push ( <nl> + called_computation - > parameter_instruction ( operand_number ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / If instruction is a root instruction , then propagate out to any calling <nl> + / / instruction and across any while backedge . <nl> + if ( instruction = = instruction - > parent ( ) - > root_instruction ( ) ) { <nl> + const CallGraphNode & call_graph_node = <nl> + call_graph_ - > GetNode ( instruction - > parent ( ) ) ; <nl> + for ( const CallSite & callsite : call_graph_node . caller_callsites ( ) ) { <nl> + if ( callsite . instruction ( ) - > opcode ( ) = = HloOpcode : : kCall ) { <nl> + worklist . push ( callsite . instruction ( ) ) ; <nl> + } else if ( callsite . instruction ( ) - > opcode ( ) = = HloOpcode : : kWhile ) { <nl> + / / Add the while itself , and the body and condition parameters . <nl> + worklist . push ( callsite . instruction ( ) ) ; <nl> + worklist . push ( <nl> + callsite . instruction ( ) - > while_body ( ) - > parameter_instruction ( 0 ) ) ; <nl> + worklist . push ( <nl> + callsite . instruction ( ) - > while_condition ( ) - > parameter_instruction ( <nl> + 0 ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Update uses . First clear all of the old uses at the particular <nl> + / / operands . Then add the new uses . There may be overlap between the old <nl> + / / uses and new uses . <nl> + UpdateLocationsOfValuesAt ( instruction , GetInstructionValueSet ( instruction ) , <nl> + & old_value ) ; <nl> + } <nl> + } <nl> + <nl> + InstructionValueSet HloDataflowAnalysis : : RecomputeParameterValueSet ( <nl> + HloInstruction * parameter ) { <nl> + CHECK_EQ ( parameter - > opcode ( ) , HloOpcode : : kParameter ) ; <nl> + const CallGraphNode & call_graph_node = <nl> + call_graph_ - > GetNode ( parameter - > parent ( ) ) ; <nl> + <nl> + / / Subcomputations called in a parallel context ( eg , map ) do not have dataflow <nl> + / / from the caller operands . <nl> + if ( call_graph_node . context ( ) = = CallContext : : kParallel | | <nl> + call_graph_node . caller_callsites ( ) . empty ( ) ) { <nl> + return GetInstructionValueSet ( parameter ) ; <nl> + } <nl> + CHECK_EQ ( call_graph_node . context ( ) , CallContext : : kSequential ) ; <nl> + <nl> + std : : vector < const InstructionValueSet * > inputs ; <nl> + for ( const CallSite & callsite : call_graph_node . caller_callsites ( ) ) { <nl> + inputs . push_back ( & GetInstructionValueSet ( <nl> + callsite . instruction ( ) - > operand ( parameter - > parameter_number ( ) ) ) ) ; <nl> + if ( callsite . instruction ( ) - > opcode ( ) = = HloOpcode : : kWhile ) { <nl> + / / In a while instruction , the backedge is also a dataflow input to the <nl> + / / parameter instruction . This code covers the case where the parameter is <nl> + / / in the while body or the parameter is in the while condition . <nl> + inputs . push_back ( & GetInstructionValueSet ( <nl> + callsite . 
instruction ( ) - > while_body ( ) - > root_instruction ( ) ) ) ; <nl> + } <nl> + } <nl> + <nl> + if ( ssa_form_ ) { <nl> + return Phi ( parameter , inputs ) ; <nl> + } else { <nl> + return InstructionValueSet : : Union ( inputs ) ; <nl> + } <nl> + } <nl> + <nl> + const InstructionValueSet & HloDataflowAnalysis : : GetInstructionValueSet ( <nl> + const HloInstruction * instruction ) const { <nl> + return value_sets_ . at ( instruction ) ; <nl> + } <nl> + <nl> + InstructionValueSet & HloDataflowAnalysis : : GetInstructionValueSet ( <nl> + const HloInstruction * instruction ) { <nl> + return value_sets_ . at ( instruction ) ; <nl> + } <nl> + <nl> + Status HloDataflowAnalysis : : InitializeInstructionValueSets ( ) { <nl> + for ( const std : : unique_ptr < HloComputation > & computation : <nl> + module_ - > computations ( ) ) { <nl> + const CallGraphNode & call_graph_node = <nl> + call_graph_ - > GetNode ( computation . get ( ) ) ; <nl> + for ( const std : : unique_ptr < HloInstruction > & instruction : <nl> + computation - > instructions ( ) ) { <nl> + / / Create an empty shape tree . <nl> + value_sets_ . emplace ( std : : piecewise_construct , <nl> + std : : forward_as_tuple ( instruction . get ( ) ) , <nl> + std : : forward_as_tuple ( instruction - > shape ( ) ) ) ; <nl> + <nl> + / / Lambda to set the value set to define all values in the output of the <nl> + / / instruction . <nl> + auto define_all_values = [ this , & instruction ] ( ) { <nl> + GetInstructionValueSet ( instruction . get ( ) ) <nl> + . ForEachMutableElement ( [ this , & instruction ] ( <nl> + const ShapeIndex & index , <nl> + HloValueSet * value_set ) { <nl> + * value_set = HloValueSet ( { NewHloValue ( instruction . get ( ) , index ) } ) ; <nl> + } ) ; <nl> + } ; <nl> + <nl> + / / Lambda to set the value set to define only the top - level buffer in the <nl> + / / output of the instruction . Any other values flow from the operands of <nl> + / / the instruction ( or from cross - computation dataflow ) . <nl> + auto define_top_level_only = [ this , & instruction ] ( ) { <nl> + GetValueSet ( instruction . get ( ) , / * index = * / { } ) = <nl> + HloValueSet ( { NewHloValue ( instruction . get ( ) , / * index = * / { } ) } ) ; <nl> + } ; <nl> + <nl> + switch ( instruction - > opcode ( ) ) { <nl> + case HloOpcode : : kBitcast : <nl> + if ( bitcast_defines_value_ ) { <nl> + define_all_values ( ) ; <nl> + } <nl> + break ; <nl> + case HloOpcode : : kCall : <nl> + case HloOpcode : : kWhile : <nl> + case HloOpcode : : kGetTupleElement : <nl> + / / These instructions define no values . The values in their output <nl> + / / flow from their operands or from cross computation dataflow . <nl> + break ; <nl> + case HloOpcode : : kParameter : <nl> + if ( call_graph_node . caller_callsites ( ) . empty ( ) | | <nl> + call_graph_node . context ( ) = = CallContext : : kParallel ) { <nl> + / / Parameters of computations called in a parallel context ( eg , map <nl> + / / and reduce ) as well as parameters of dead computations define all <nl> + / / values in their output . Otherwise the values of the parameter <nl> + / / come from the caller ( eg , operands to the kCall instruction ) . <nl> + define_all_values ( ) ; <nl> + } else if ( call_graph_node . context ( ) = = CallContext : : kBoth ) { <nl> + / / We do not support a subcomputation that is called from both a <nl> + / / parallel and sequential context . In this case , the parameter <nl> + / / would both define a value and propagate a value from its <nl> + / / caller . 
This limitation is not really a problem because the call <nl> + / / graph is typically flattened . <nl> + return Unimplemented ( <nl> + " Computation % s is called in both a parallel ( eg , kMap ) and " <nl> + " sequential ( eg , kCall ) context " , <nl> + computation - > name ( ) . c_str ( ) ) ; <nl> + } <nl> + break ; <nl> + case HloOpcode : : kCopy : <nl> + case HloOpcode : : kSelect : <nl> + case HloOpcode : : kTuple : <nl> + / / These instructions only define their top - level values . Any other <nl> + / / values flow from their operands . <nl> + define_top_level_only ( ) ; <nl> + break ; <nl> + default : <nl> + define_all_values ( ) ; <nl> + break ; <nl> + } <nl> + UpdateLocationsOfValuesAt ( instruction . get ( ) , <nl> + GetInstructionValueSet ( instruction . get ( ) ) ) ; <nl> + } <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + / * static * / <nl> + StatusOr < std : : unique_ptr < HloDataflowAnalysis > > HloDataflowAnalysis : : Run ( <nl> + HloModule * module , bool ssa_form , bool bitcast_defines_value ) { <nl> + VLOG ( 1 ) < < " HloDataflowAnalysis : : Run on module " < < module - > name ( ) ; <nl> + XLA_VLOG_LINES ( 2 , module - > ToString ( ) ) ; <nl> + <nl> + auto dataflow_analysis = WrapUnique ( <nl> + new HloDataflowAnalysis ( module , ssa_form , bitcast_defines_value ) ) ; <nl> + <nl> + TF_RETURN_IF_ERROR ( dataflow_analysis - > InitializeInstructionValueSets ( ) ) ; <nl> + <nl> + / / Construct list of all instructions to initialize the worklist to propagate <nl> + / / the data flow . For efficiency , sort the instructions in post order so <nl> + / / producers appear before consumers . <nl> + std : : vector < HloInstruction * > all_instructions ; <nl> + for ( const HloComputation * computation : module - > MakeComputationPostOrder ( ) ) { <nl> + for ( HloInstruction * instruction : <nl> + computation - > MakeInstructionPostOrder ( ) ) { <nl> + all_instructions . push_back ( instruction ) ; <nl> + } <nl> + } <nl> + dataflow_analysis - > UpdateInstructionsAndPropagate ( all_instructions ) ; <nl> + <nl> + VLOG ( 1 ) < < dataflow_analysis - > ToString ( ) ; <nl> + return std : : move ( dataflow_analysis ) ; <nl> + } <nl> + <nl> + } / / namespace xla <nl> new file mode 100644 <nl> index 0000000000000 . . 7b692688caca4 <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / xla / service / hlo_dataflow_analysis . h <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + / / Analysis for determining the possible set of values for all locations <nl> + / / ( instructions and ShapeIndexes ) in the HLO module . The analysis is <nl> + / / module - scoped , tracking values across computation boundaries . 
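<nl> + / / <nl> + / / A minimal usage sketch ( module / instruction setup assumed ; names as <nl> + / / declared in this header ) : <nl> + / / <nl> + / / TF_ASSIGN_OR_RETURN ( <nl> + / / std : : unique_ptr < HloDataflowAnalysis > analysis , <nl> + / / HloDataflowAnalysis : : Run ( module , / * ssa_form = * / true , <nl> + / / / * bitcast_defines_value = * / false ) ) ; <nl> + / / if ( analysis - > ValueIsDefinedAt ( instruction , / * index = * / { } ) ) { <nl> + / / const HloValue & value = <nl> + / / analysis - > GetValueDefinedAt ( instruction , / * index = * / { } ) ; <nl> + / / VLOG ( 1 ) < < value . ToString ( ) ; <nl> + / / }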
<nl> + <nl> + # ifndef TENSORFLOW_COMPILER_XLA_SERVICE_HLO_DATAFLOW_ANALYSIS_H_ <nl> + # define TENSORFLOW_COMPILER_XLA_SERVICE_HLO_DATAFLOW_ANALYSIS_H_ <nl> + <nl> + # include < functional > <nl> + # include < string > <nl> + # include < unordered_map > <nl> + # include < vector > <nl> + <nl> + # include " tensorflow / compiler / xla / service / call_graph . h " <nl> + # include " tensorflow / compiler / xla / service / hlo_instruction . h " <nl> + # include " tensorflow / compiler / xla / service / hlo_module . h " <nl> + # include " tensorflow / compiler / xla / shape_tree . h " <nl> + # include " tensorflow / compiler / xla / shape_util . h " <nl> + # include " tensorflow / compiler / xla / status . h " <nl> + # include " tensorflow / compiler / xla / statusor . h " <nl> + # include " tensorflow / compiler / xla / types . h " <nl> + # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> + # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / lib / gtl / array_slice . h " <nl> + # include " tensorflow / core / lib / gtl / flatmap . h " <nl> + # include " tensorflow / core / lib / gtl / flatset . h " <nl> + # include " tensorflow / core / platform / macros . h " <nl> + # include " tensorflow / core / platform / types . h " <nl> + <nl> + namespace xla { <nl> + <nl> + / / Abstraction which identifies a specific point in the XLA graph . An <nl> + / / HloLocation specifies a ShapeIndex within the output of a specific <nl> + / / instruction . <nl> + struct HloLocation { <nl> + HloInstruction * instruction ; <nl> + ShapeIndex index ; <nl> + <nl> + string ToString ( ) const ; <nl> + <nl> + bool operator = = ( const HloLocation & other ) const { <nl> + return instruction = = other . instruction & & index = = other . index ; <nl> + } <nl> + bool operator ! = ( const HloLocation & other ) const { return ! ( * this = = other ) ; } <nl> + } ; <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & out , const HloLocation & location ) ; <nl> + <nl> + / / Defines a single use of an HLO value . <nl> + struct HloUse { <nl> + / / Instruction at which the value is used . <nl> + HloInstruction * instruction ; <nl> + <nl> + / / The operand number in which the value appears . <nl> + int64 operand_number ; <nl> + <nl> + / / The shape index within the operand in which the value appears . <nl> + ShapeIndex operand_index ; <nl> + <nl> + string ToString ( ) const ; <nl> + <nl> + bool operator = = ( const HloUse & other ) const { <nl> + return instruction = = other . instruction & & <nl> + operand_number = = other . operand_number & & <nl> + operand_index = = other . operand_index ; <nl> + } <nl> + <nl> + bool operator ! = ( const HloUse & other ) const { return ! ( * this = = other ) ; } <nl> + } ; <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & out , const HloUse & use ) ; <nl> + <nl> + / / Class describing a value used by the dataflow analysis . XLA arrays are <nl> + / / trivially a single HloValue . Tuples are made up of more than one HloValue : an <nl> + / / HloValue for the pointer vector , and an HloValue for each child element . <nl> + / / <nl> + / / Every HloValue is defined by a particular instruction and most instructions <nl> + / / define only a single HloValue . Instructions which define a single HloValue <nl> + / / include array - shaped instructions such as Add , but also Tuple - shaped <nl> + / / instructions such as Tuple .
The Tuple instruction defines a single HloValue <nl> + / / which is a vector of pointers to the values containing the Tuple <nl> + / / instruction ' s operands . Though the result of the Tuple instruction includes <nl> + / / multiple values only the top - level HloValue ( the vector of pointers ) is <nl> + / / defined by the Tuple instruction . The values containing the tuple elements <nl> + / / are defined by earlier instructions , usually the operands of the Tuple <nl> + / / instruction . <nl> + / / <nl> + / / Instructions which construct both the tuple * and * the tuple elements define <nl> + / / more than one HloValue . This includes ( at least ) tuple - shaped Constant , <nl> + / / Parameter , Infeed and While instructions . These tuple - shaped instructions do <nl> + / / not assemble a tuple from existing HloValues like the Tuple instruction does , <nl> + / / but rather define all the HloValues in the tuple . <nl> + class HloValue { <nl> + public : <nl> + using Id = int64 ; <nl> + <nl> + / / Construct an HloValue defined by ' instruction ' at shape index ' index ' . If <nl> + / / is_phi is true , then this value is a phi value , for example , at the <nl> + / / parameter of a while body computation . Phi values are only used in the SSA <nl> + / / dataflow analysis ( HloDataflowAnalysis : : ssa_form_ is true ) . <nl> + HloValue ( HloValue : : Id id , HloInstruction * instruction , <nl> + const ShapeIndex & index , bool is_phi = false ) ; <nl> + <nl> + / / Return a unique identifier for this HloValue . This value is used for stable <nl> + / / sorting and iteration <nl> + Id id ( ) const { return id_ ; } <nl> + <nl> + / / Returns whether this value is a phi value . <nl> + bool is_phi ( ) const { return is_phi_ ; } <nl> + <nl> + / / Return the location where this value is defined . <nl> + const HloLocation & DefinitionLocation ( ) const { return locations_ [ 0 ] ; } <nl> + <nl> + / / Return the instruction which defines this HloValue . <nl> + HloInstruction * instruction ( ) const { <nl> + return DefinitionLocation ( ) . instruction ; <nl> + } <nl> + <nl> + / / Return the shape index at which this HloValue is defined in the output of <nl> + / / instruction ( ) . <nl> + const ShapeIndex & index ( ) const { return DefinitionLocation ( ) . index ; } <nl> + <nl> + / / Add or remove a location at which the HloValue appears . The definition <nl> + / / location can not be removed . The uses of the HloValue are updated . <nl> + void AddLocation ( HloInstruction * instruction , const ShapeIndex & index ) ; <nl> + void RemoveLocation ( HloInstruction * instruction , const ShapeIndex & index ) ; <nl> + <nl> + / / Return all locations of the HloValue in the module . <nl> + const std : : vector < HloLocation > & locations ( ) const { return locations_ ; } <nl> + <nl> + / / Return all uses of the HloValue . <nl> + const std : : vector < HloUse > & uses ( ) const { return uses_ ; } <nl> + <nl> + / / Set / get whether this HloValue is live out of the module . <nl> + bool live_out_of_module ( ) const { return live_out_of_module_ ; } <nl> + <nl> + bool operator = = ( const HloValue & other ) const ; <nl> + bool operator ! = ( const HloValue & other ) const ; <nl> + <nl> + / / Return a single - line string representation of the value . <nl> + string ToShortString ( ) const ; <nl> + <nl> + string ToString ( int indent = 0 ) const ; <nl> + <nl> + private : <nl> + / / Unique identifier for this HloValue . Used for stable sorting and iteration . 
<nl> + const Id id_ ; <nl> + <nl> + / / Whether this instruction is a phi value . <nl> + const bool is_phi_ ; <nl> + <nl> + / / The set of locations of this HloValue . The first element is always the <nl> + / / location of the definition . <nl> + std : : vector < HloLocation > locations_ ; <nl> + <nl> + / / The set of uses of this HloValue . <nl> + std : : vector < HloUse > uses_ ; <nl> + <nl> + / / Whether this value is live out of the HLO module . <nl> + bool live_out_of_module_ = false ; <nl> + } ; <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & out , const HloValue & hlo_value ) ; <nl> + <nl> + / / A class representing the possible set of HloValues at a particular point <nl> + / / ( shape index in the output of an instruction ) in the XLA graph . This set <nl> + / / contains the set of reaching HloValue definitions . For a simple array - shaped <nl> + / / instruction like Add , the HloValueSet of the top - level of the instruction ' s <nl> + / / output trivially contains only the HloValue defined by the instruction . For <nl> + / / instructions which have non - trivial dataflow such as Tuple or Select , the <nl> + / / HloValueSets of the instruction ' s output contains one or more HloValues <nl> + / / defined by the instruction ' s operands or defined further up in the XLA graph . <nl> + class HloValueSet { <nl> + public : <nl> + HloValueSet ( ) = default ; <nl> + <nl> + explicit HloValueSet ( tensorflow : : gtl : : ArraySlice < HloValue : : Id > value_ids ) <nl> + : value_ids_ ( value_ids . begin ( ) , value_ids . end ( ) ) { <nl> + SortAndUniquifyValues ( ) ; <nl> + } <nl> + <nl> + / / Return the union of the given HloValueSets . <nl> + static HloValueSet Union ( <nl> + tensorflow : : gtl : : ArraySlice < const HloValueSet * > inputs ) ; <nl> + <nl> + / / Return the vector of the IDs of all HloValues in the set . Values in the <nl> + / / vector are unique and sorted . <nl> + const std : : vector < HloValue : : Id > & value_ids ( ) const { return value_ids_ ; } <nl> + <nl> + / / Return the unique HLO value in the set . CHECKs if the set does not contain <nl> + / / exactly one value . <nl> + HloValue : : Id GetUniqueValueId ( ) const { <nl> + CHECK_EQ ( value_ids ( ) . size ( ) , 1 ) ; <nl> + return value_ids ( ) [ 0 ] ; <nl> + } <nl> + <nl> + bool operator = = ( const HloValueSet & other ) const { <nl> + return value_ids ( ) = = other . value_ids ( ) ; <nl> + } <nl> + bool operator ! = ( const HloValueSet & other ) const { return ! ( * this = = other ) ; } <nl> + <nl> + string ToString ( ) const ; <nl> + <nl> + private : <nl> + / / Sorts value_ and removes duplicates . This should be called after adding any <nl> + / / elements to values_ . <nl> + void SortAndUniquifyValues ( ) ; <nl> + <nl> + / / HloValues sorted by HloValue : : Id . <nl> + std : : vector < HloValue : : Id > value_ids_ ; <nl> + } ; <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & out , const HloValueSet & hlo_value ) ; <nl> + <nl> + / / A class collecting the HloValues which might be contained in the output of <nl> + / / an HLO instruction . For array - shaped instructions , an InstructionValueSet <nl> + / / trivially holds a single HloValueSet . Tuple - shaped InstructionValueSets <nl> + / / hold multiple HloValueSets . <nl> + class InstructionValueSet : public ShapeTree < HloValueSet > { <nl> + public : <nl> + InstructionValueSet ( const Shape & shape ) : ShapeTree < HloValueSet > ( shape ) { } <nl> + <nl> + / / Return the union of the given InstructionValueSets . 
<nl> + static InstructionValueSet Union ( <nl> + tensorflow : : gtl : : ArraySlice < const InstructionValueSet * > inputs ) ; <nl> + <nl> + string ToString ( ) const ; <nl> + } ; <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & out , <nl> + const InstructionValueSet & instruction_value_set ) ; <nl> + <nl> + / / Analysis which identifies all HLO values and their uses in an HLO module . <nl> + class HloDataflowAnalysis { <nl> + public : <nl> + / / Run dataflow analysis on the given module . Parameters : <nl> + / / <nl> + / / ssa_form : If true then new values are defined at merge points in the XLA <nl> + / / graph . Abusing nomenclature somewhat , we call these " phi values " . <nl> + / / Merge points exist at While instructions ( formed by the init value and <nl> + / / loop backedge ) , and subcomputations which are called via kCall from <nl> + / / more than one callsite . The SSA form is minimal in that a new phi value <nl> + / / is defined only if the merge point is reachable by multiple different <nl> + / / values . The SSA form is also in loop - closed form in that no value <nl> + / / defined inside of a loop ( while body ) is used outside of the loop . In <nl> + / / SSA form every location in the HLO graph ( instruction and ShapeIndex ) <nl> + / / has a single unique value ( a unique reaching definition ) . <nl> + / / <nl> + / / If ssa_form is false , then merge points do not define new <nl> + / / values . Rather , the HloValueSet for the merge point contains the union <nl> + / / of the merged HloValues . Therefore a location in the HLO graph <nl> + / / ( instruction and ShapeIndex ) may have more than one value ( multiple <nl> + / / reaching definitions ) . <nl> + / / <nl> + / / bitcast_defines_value : If true then the Bitcast HLO instruction defines <nl> + / / a new HLO value in the analysis . If false then Bitcast forwards the <nl> + / / value of its operand . <nl> + static StatusOr < std : : unique_ptr < HloDataflowAnalysis > > Run ( <nl> + HloModule * module , bool ssa_form = false , <nl> + bool bitcast_defines_value = false ) ; <nl> + <nl> + / / Returns true if ' instruction ' defines an HLO value at the given shape index <nl> + / / of its output . <nl> + bool ValueIsDefinedAt ( const HloInstruction * instruction , <nl> + const ShapeIndex & index = { } ) const ; <nl> + <nl> + / / Return the HloValue defined by ' instruction ' at the given shape index of <nl> + / / its output . <nl> + / / <nl> + / / Precondition : ValueIsDefinedAt is true for this instruction and index . <nl> + const HloValue & GetValueDefinedAt ( const HloInstruction * instruction , <nl> + const ShapeIndex & index = { } ) const ; <nl> + HloValue & GetValueDefinedAt ( const HloInstruction * instruction , <nl> + const ShapeIndex & index = { } ) ; <nl> + <nl> + / / Return the InstructionValueSet for the given instruction . <nl> + const InstructionValueSet & GetInstructionValueSet ( <nl> + const HloInstruction * instruction ) const ; <nl> + InstructionValueSet & GetInstructionValueSet ( <nl> + const HloInstruction * instruction ) ; <nl> + <nl> + / / Return the HloValueSet for the given instruction at the given index . <nl> + const HloValueSet & GetValueSet ( const HloInstruction * instruction , <nl> + const ShapeIndex & index = { } ) const ; <nl> + HloValueSet & GetValueSet ( const HloInstruction * instruction , <nl> + const ShapeIndex & index = { } ) ; <nl> + <nl> + / / Return the unique value in the HloValueSet at the given instruction and <nl> + / / shape index . 
CHECKs if the value set does not contain exactly one value . <nl> + const HloValue & GetUniqueValueAt ( const HloInstruction * instruction , <nl> + const ShapeIndex & index = { } ) const { <nl> + return GetValue ( GetValueSet ( instruction , index ) . GetUniqueValueId ( ) ) ; <nl> + } <nl> + HloValue & GetUniqueValueAt ( const HloInstruction * instruction , <nl> + const ShapeIndex & index = { } ) { <nl> + return GetValue ( GetValueSet ( instruction , index ) . GetUniqueValueId ( ) ) ; <nl> + } <nl> + <nl> + / / Return the HloValue with the given Id . <nl> + const HloValue & GetValue ( HloValue : : Id value_id ) const ; <nl> + HloValue & GetValue ( HloValue : : Id value_id ) ; <nl> + <nl> + / / Return the total number of HloValues . <nl> + int64 value_count ( ) const { return values_ . size ( ) ; } <nl> + <nl> + / / Return a vector of all HloValues stably sorted by HloValue : : Id . This <nl> + / / vector is lazily computed . Mutating operations on HloDataflowAnalysis may <nl> + / / invalidate the underlying vector , requiring recomputation . <nl> + std : : vector < const HloValue * > & values ( ) const ; <nl> + <nl> + string ToString ( ) const ; <nl> + <nl> + protected : <nl> + HloDataflowAnalysis ( HloModule * module , bool ssa_form , <nl> + bool bitcast_defines_value = false ) ; <nl> + <nl> + / / Creates a new HloValue defined at the given instruction and shape index and <nl> + / / returns its ID . <nl> + HloValue : : Id NewHloValue ( HloInstruction * instruction , const ShapeIndex & index , <nl> + bool is_phi = false ) ; <nl> + <nl> + / / Delete the HloValue with the given ID . <nl> + void DeleteHloValue ( HloValue : : Id value_id ) ; <nl> + <nl> + / / Constructs and initializes the InstructionValueSets of all instructions to <nl> + / / contain exactly the HloValues defined by each instruction . These values can <nl> + / / then be propagated throughout the HLO graph by calling <nl> + / / UpdateInstructionsAndPropagate . <nl> + Status InitializeInstructionValueSets ( ) ; <nl> + <nl> + / / Updates the value set of the given instruction based on the values flowing <nl> + / / into the instruction ( operands and cross - computation dataflow ) . <nl> + void UpdateInstructionValueSet ( HloInstruction * instruction ) ; <nl> + <nl> + / / Recomputes and returns the value set for the given instruction . <nl> + InstructionValueSet RecomputeBitcastValueSet ( HloInstruction * bitcast ) ; <nl> + InstructionValueSet RecomputeCopyValueSet ( HloInstruction * copy ) ; <nl> + InstructionValueSet RecomputeGetTupleElementValueSet ( HloInstruction * gte ) ; <nl> + InstructionValueSet RecomputeParameterValueSet ( HloInstruction * parameter ) ; <nl> + InstructionValueSet RecomputeSelectValueSet ( HloInstruction * select ) ; <nl> + InstructionValueSet RecomputeTupleValueSet ( HloInstruction * tuple ) ; <nl> + InstructionValueSet RecomputeWhileValueSet ( HloInstruction * xla_while ) ; <nl> + <nl> + / / Update the value sets of the given instructions and propagate the <nl> + / / changes to fixed point . <nl> + void UpdateInstructionsAndPropagate ( <nl> + tensorflow : : gtl : : ArraySlice < HloInstruction * > instructions ) ; <nl> + <nl> + / / Return the result of the SSA Phi function applied to the given inputs at <nl> + / / the given instruction . If skip_top_level is true , then the top level of the <nl> + / / value set of ' instruction ' is not modified . An illustrative sketch of this <nl> + / / merge rule follows . 
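As referenced in the comment above, the phi merge rule can be sketched in isolation. The fragment below is a simplified, self-contained illustration, not the member function itself: PhiSketch and the new_phi_id callback are invented names, it works on bare vectors of value ids per predecessor, and the real Phi operates on InstructionValueSet and the analysis' value table.

```cpp
// Illustrative sketch of the phi merge rule; PhiSketch and new_phi_id are
// invented names, and the real member function differs in types and details.
#include <algorithm>
#include <cstdint>
#include <functional>
#include <vector>

std::vector<int64_t> PhiSketch(
    const std::vector<std::vector<int64_t>>& inputs,
    const std::function<int64_t()>& new_phi_id) {
  // Union the reaching value ids from all predecessors (e.g., a while loop's
  // init value and its backedge).
  std::vector<int64_t> merged;
  for (const auto& input : inputs) {
    merged.insert(merged.end(), input.begin(), input.end());
  }
  std::sort(merged.begin(), merged.end());
  merged.erase(std::unique(merged.begin(), merged.end()), merged.end());
  // Minimal SSA: only mint a phi value when distinct definitions merge.
  if (merged.size() <= 1) {
    return merged;
  }
  return {new_phi_id()};
}
```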
<nl> + InstructionValueSet Phi ( <nl> + HloInstruction * instruction , <nl> + tensorflow : : gtl : : ArraySlice < const InstructionValueSet * > inputs , <nl> + bool skip_top_level = false ) ; <nl> + <nl> + / / Updates the locations of the HloValues in the output of the given <nl> + / / instruction . This should be called after the instruction value set of <nl> + / / ' instruction ' has been changed . ' prev_value_set ' must point to the previous <nl> + / / state of the value set prior to the change . ' prev_value_set ' may be null if <nl> + / / this is the first time locations are being computed . The previous state is <nl> + / / necessary to efficiently remove locations which have been eliminated due to <nl> + / / changes in the instructions ' InstructionValueSet . <nl> + void UpdateLocationsOfValuesAt ( <nl> + HloInstruction * instruction , const InstructionValueSet & new_value_set , <nl> + const InstructionValueSet * prev_value_set = nullptr ) ; <nl> + <nl> + HloModule * const module_ ; <nl> + const bool ssa_form_ ; <nl> + const bool bitcast_defines_value_ ; <nl> + <nl> + std : : unique_ptr < CallGraph > call_graph_ ; <nl> + <nl> + / / The map of all HloValues in the module . <nl> + std : : unordered_map < HloValue : : Id , HloValue > values_ ; <nl> + <nl> + / / A map from instruction to InstructionValueSet . <nl> + std : : unordered_map < const HloInstruction * , InstructionValueSet > value_sets_ ; <nl> + <nl> + / / A lazily constructed vector containing all HloValues sorted by <nl> + / / HloValue : : Id . <nl> + mutable std : : vector < const HloValue * > values_vector_ ; <nl> + <nl> + / / The Id to use for the next HloValue . <nl> + HloValue : : Id next_value_id_ = 0 ; <nl> + } ; <nl> + <nl> + } / / namespace xla <nl> + <nl> + # endif / / TENSORFLOW_COMPILER_XLA_SERVICE_HLO_DATAFLOW_ANALYSIS_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . 0c3208f788284 <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / xla / service / hlo_dataflow_analysis_test . cc <nl> <nl> + / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / compiler / xla / service / hlo_dataflow_analysis . h " <nl> + <nl> + # include " tensorflow / compiler / xla / literal_util . h " <nl> + # include " tensorflow / compiler / xla / service / hlo_computation . h " <nl> + # include " tensorflow / compiler / xla / service / hlo_matchers . h " <nl> + # include " tensorflow / compiler / xla / service / hlo_opcode . h " <nl> + # include " tensorflow / compiler / xla / service / instruction_fusion . h " <nl> + # include " tensorflow / compiler / xla / shape_util . h " <nl> + # include " tensorflow / compiler / xla / status_macros . 
h " <nl> + # include " tensorflow / compiler / xla / test . h " <nl> + # include " tensorflow / compiler / xla / test_helpers . h " <nl> + # include " tensorflow / compiler / xla / tests / hlo_test_base . h " <nl> + # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / test . h " <nl> + <nl> + namespace xla { <nl> + namespace { <nl> + <nl> + using : : testing : : UnorderedElementsAre ; <nl> + <nl> + / / Test is parameterized on a bool which is whether the dataflow analysis is <nl> + / / performed with SSA form . <nl> + class HloDataflowAnalysisTest : public HloTestBase , <nl> + public : : testing : : WithParamInterface < bool > { <nl> + protected : <nl> + HloDataflowAnalysisTest ( ) : module_ ( TestName ( ) ) { } <nl> + <nl> + / / Run dataflow analysis on the member module . For convenience returns a <nl> + / / reference to the generated analysis stored in analysis_ . <nl> + const HloDataflowAnalysis & RunAnalysis ( bool ssa_form , <nl> + bool bitcast_defines_value = false ) { <nl> + analysis_ = <nl> + HloDataflowAnalysis : : Run ( & module_ , ssa_form , bitcast_defines_value ) <nl> + . ConsumeValueOrDie ( ) ; <nl> + return * analysis_ ; <nl> + } <nl> + <nl> + / / Return a vector of the HloValues at the given program location . <nl> + std : : vector < HloValue > HloValuesAt ( const HloInstruction * instruction , <nl> + const ShapeIndex & index = { } ) { <nl> + CHECK ( analysis_ ! = nullptr ) ; <nl> + std : : vector < HloValue > values ; <nl> + for ( HloValue : : Id value_id : <nl> + analysis_ - > GetValueSet ( instruction , index ) . value_ids ( ) ) { <nl> + values . push_back ( analysis_ - > GetValue ( value_id ) ) ; <nl> + } <nl> + return values ; <nl> + } <nl> + <nl> + HloModule module_ ; <nl> + std : : unique_ptr < HloDataflowAnalysis > analysis_ ; <nl> + <nl> + const Shape scalar_shape_ = ShapeUtil : : MakeShape ( F32 , { } ) ; <nl> + } ; <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , BinaryOperation ) { <nl> + / / Test the dataflow for a simple binary operation ( Add ) . <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto add = builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + scalar_shape_ , HloOpcode : : kAdd , constant1 , constant2 ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + / / Each instruction should define a single value . <nl> + EXPECT_EQ ( analysis . values ( ) . size ( ) , 3 ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( constant1 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( constant2 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( add ) ) ; <nl> + <nl> + / / Verify the locations of the values . These locations are all trivial because <nl> + / / there are no instructions which forward values . <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( constant1 ) . locations ( ) , <nl> + UnorderedElementsAre ( HloLocation { constant1 , { } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( constant2 ) . 
locations ( ) , <nl> + UnorderedElementsAre ( HloLocation { constant2 , { } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( add ) . locations ( ) , <nl> + UnorderedElementsAre ( HloLocation { add , { } } ) ) ; <nl> + <nl> + / / Verify the uses of the values . <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( constant1 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { add , 0 , { } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( constant2 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { add , 1 , { } } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( add ) . uses ( ) . empty ( ) ) ; <nl> + <nl> + / / Verify live - out values from the module . <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant1 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant2 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( add ) . live_out_of_module ( ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , TupleAndGtes ) { <nl> + / / Verify the dataflow through Tuple and GetTupleElement instructions . <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto param0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , scalar_shape_ , " param0 " ) ) ; <nl> + auto param1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , scalar_shape_ , " param1 " ) ) ; <nl> + auto tuple = <nl> + builder . AddInstruction ( HloInstruction : : CreateTuple ( { param0 , param1 } ) ) ; <nl> + auto gte0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , tuple , 0 ) ) ; <nl> + auto gte1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , tuple , 1 ) ) ; <nl> + auto add = builder . AddInstruction ( <nl> + HloInstruction : : CreateBinary ( scalar_shape_ , HloOpcode : : kAdd , gte0 , gte1 ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + / / The two params , tuple , and add should each define one value . <nl> + EXPECT_EQ ( analysis . values ( ) . size ( ) , 4 ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( param0 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( param1 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( tuple , / * index = * / { } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( tuple , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( tuple , / * index = * / { 1 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( gte0 ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( gte1 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( add ) ) ; <nl> + <nl> + / / Verify the locations of the values . <nl> + EXPECT_THAT ( <nl> + analysis . GetValueDefinedAt ( param0 ) . locations ( ) , <nl> + UnorderedElementsAre ( HloLocation { param0 , { } } , HloLocation { tuple , { 0 } } , <nl> + HloLocation { gte0 , { } } ) ) ; <nl> + EXPECT_THAT ( <nl> + analysis . GetValueDefinedAt ( param1 ) . locations ( ) , <nl> + UnorderedElementsAre ( HloLocation { param1 , { } } , HloLocation { tuple , { 1 } } , <nl> + HloLocation { gte1 , { } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( tuple ) . locations ( ) , <nl> + UnorderedElementsAre ( HloLocation { tuple , { } } ) ) ; <nl> + <nl> + / / Verify uses . 
Of interest is that a GetTupleElement instruction is only a <nl> + / / use of the top - level value in the tuple operand . <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( param0 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { tuple , 0 , { } } , HloUse { add , 0 , { } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( param1 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { tuple , 1 , { } } , HloUse { add , 1 , { } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( tuple , / * index = * / { } ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { gte0 , 0 , { } } , HloUse { gte1 , 0 , { } } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( add ) . live_out_of_module ( ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , NestedTuple ) { <nl> + / / Verify the dataflow through a nested tuple of the following form for two <nl> + / / constants % constant1 and % constant2 : <nl> + / / <nl> + / / % nested_tuple = { { % constant1 , % constant2 } , <nl> + / / { % constant1 , % constant2 } , <nl> + / / % constant1 } <nl> + / / <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto tuple = builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { constant1 , constant2 } ) ) ; <nl> + auto nested_tuple = builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { tuple , tuple , constant1 } ) ) ; <nl> + auto gte_tuple = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( tuple - > shape ( ) , nested_tuple , 1 ) ) ; <nl> + auto gte_out = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , gte_tuple , 0 ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + EXPECT_EQ ( analysis . values ( ) . size ( ) , 4 ) ; <nl> + <nl> + / / Verify locations and uses . <nl> + EXPECT_THAT ( <nl> + analysis . GetValueDefinedAt ( constant1 ) . locations ( ) , <nl> + UnorderedElementsAre ( <nl> + HloLocation { constant1 , { } } , HloLocation { tuple , { 0 } } , <nl> + HloLocation { nested_tuple , { 0 , 0 } } , HloLocation { nested_tuple , { 1 , 0 } } , <nl> + HloLocation { nested_tuple , { 2 } } , HloLocation { gte_tuple , { 0 } } , <nl> + HloLocation { gte_out , { } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( constant1 ) . uses ( ) , <nl> + UnorderedElementsAre ( <nl> + HloUse { tuple , 0 , { } } , HloUse { nested_tuple , 0 , { 0 } } , <nl> + HloUse { nested_tuple , 1 , { 0 } } , HloUse { nested_tuple , 2 , { } } ) ) ; <nl> + EXPECT_THAT ( <nl> + analysis . GetValueDefinedAt ( constant2 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { tuple , 1 , { } } , HloUse { nested_tuple , 0 , { 1 } } , <nl> + HloUse { nested_tuple , 1 , { 1 } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( tuple , / * index = * / { } ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { nested_tuple , 0 , { } } , <nl> + HloUse { nested_tuple , 1 , { } } , <nl> + HloUse { gte_out , 0 , { } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( nested_tuple , / * index = * / { } ) . 
uses ( ) , <nl> + UnorderedElementsAre ( HloUse { gte_tuple , 0 , { } } ) ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( constant1 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant2 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_FALSE ( <nl> + analysis . GetValueDefinedAt ( tuple , / * index = * / { } ) . live_out_of_module ( ) ) ; <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( nested_tuple , / * index = * / { } ) <nl> + . live_out_of_module ( ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , SingleCall ) { <nl> + / / Test a single call of a subcomputation . The subcomputation adds its two <nl> + / / array - shaped parameters . <nl> + auto subbuilder = HloComputation : : Builder ( " Subcomputation " ) ; <nl> + auto subparam0 = subbuilder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , scalar_shape_ , " param0 " ) ) ; <nl> + auto subparam1 = subbuilder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , scalar_shape_ , " param1 " ) ) ; <nl> + auto add = subbuilder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + scalar_shape_ , HloOpcode : : kAdd , subparam0 , subparam1 ) ) ; <nl> + HloComputation * called_computation = <nl> + module_ . AddEmbeddedComputation ( subbuilder . Build ( ) ) ; <nl> + <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto call = builder . AddInstruction ( HloInstruction : : CreateCall ( <nl> + scalar_shape_ , { constant1 , constant2 } , called_computation ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + EXPECT_EQ ( analysis . values ( ) . size ( ) , 3 ) ; <nl> + <nl> + / / The parameters of the subcomputation and the call instruction itself should <nl> + / / not define values . Their values flow from elsewhere . <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( constant1 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( constant2 ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( subparam0 ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( subparam1 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( add ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( call ) ) ; <nl> + <nl> + EXPECT_EQ ( analysis . GetUniqueValueAt ( subparam0 ) , <nl> + analysis . GetValueDefinedAt ( constant1 ) ) ; <nl> + EXPECT_EQ ( analysis . GetUniqueValueAt ( subparam1 ) , <nl> + analysis . GetValueDefinedAt ( constant2 ) ) ; <nl> + EXPECT_EQ ( analysis . GetUniqueValueAt ( call ) , analysis . GetValueDefinedAt ( add ) ) ; <nl> + <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( constant1 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { add , 0 , { } } , HloUse { call , 0 , { } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( constant2 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { add , 1 , { } } , HloUse { call , 1 , { } } ) ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( add ) . 
live_out_of_module ( ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , ComputationCalledTwiceWithSameArguments ) { <nl> + / / Test a subcomputation which is called twice with identical values . <nl> + auto subbuilder = HloComputation : : Builder ( " Subcomputation " ) ; <nl> + auto subparam0 = subbuilder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , scalar_shape_ , " param0 " ) ) ; <nl> + auto subparam1 = subbuilder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , scalar_shape_ , " param1 " ) ) ; <nl> + auto add = subbuilder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + scalar_shape_ , HloOpcode : : kAdd , subparam0 , subparam1 ) ) ; <nl> + HloComputation * called_computation = <nl> + module_ . AddEmbeddedComputation ( subbuilder . Build ( ) ) ; <nl> + <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto call1 = builder . AddInstruction ( HloInstruction : : CreateCall ( <nl> + scalar_shape_ , { constant1 , constant2 } , called_computation ) ) ; <nl> + auto call2 = builder . AddInstruction ( HloInstruction : : CreateCall ( <nl> + scalar_shape_ , { constant1 , constant2 } , called_computation ) ) ; <nl> + auto sub = builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + scalar_shape_ , HloOpcode : : kSubtract , call1 , call2 ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + EXPECT_EQ ( analysis . values ( ) . size ( ) , 4 ) ; <nl> + <nl> + / / Definitions should be identical to the single callsite case . <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( constant1 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( constant2 ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( subparam0 ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( subparam1 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( add ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( call1 ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( call2 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( sub ) ) ; <nl> + <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( constant1 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { add , 0 , { } } , HloUse { call1 , 0 , { } } , <nl> + HloUse { call2 , 0 , { } } ) ) ; <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( constant2 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { add , 1 , { } } , HloUse { call1 , 1 , { } } , <nl> + HloUse { call2 , 1 , { } } ) ) ; <nl> + / / The Add from the subcomputation is used as both operands of the Subtract . <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( add ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { sub , 0 , { } } , HloUse { sub , 1 , { } } ) ) ; <nl> + <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( add ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( sub ) . live_out_of_module ( ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , ComputationCalledTwiceWithDifferentArguments ) { <nl> + / / Test a subcomputation which is called twice with different argument values . 
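// Aside for readers (summary, not original test logic): this is the case
// where ssa_form matters. Callsite 1 passes {constant1, constant2} while
// callsite 2 passes {call1, constant2}, so subparam0 sees two different
// reaching definitions (constant1 and the add value returned through call1)
// and must become a phi value in SSA form; subparam1 only ever sees
// constant2 and needs no phi. The assertions below verify both behaviors.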
<nl> + auto subbuilder = HloComputation : : Builder ( " Subcomputation " ) ; <nl> + auto subparam0 = subbuilder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , scalar_shape_ , " param0 " ) ) ; <nl> + auto subparam1 = subbuilder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , scalar_shape_ , " param1 " ) ) ; <nl> + auto add = subbuilder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + scalar_shape_ , HloOpcode : : kAdd , subparam0 , subparam1 ) ) ; <nl> + HloComputation * called_computation = <nl> + module_ . AddEmbeddedComputation ( subbuilder . Build ( ) ) ; <nl> + <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto call1 = builder . AddInstruction ( HloInstruction : : CreateCall ( <nl> + scalar_shape_ , { constant1 , constant2 } , called_computation ) ) ; <nl> + auto call2 = builder . AddInstruction ( HloInstruction : : CreateCall ( <nl> + scalar_shape_ , { call1 , constant2 } , called_computation ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( call1 ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( call2 ) ) ; <nl> + <nl> + if ( ssa_form ) { <nl> + / / Argument 0 has different values at the two calls , and argument 1 has the <nl> + / / same value , so only argument 0 should have a phi value . <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( subparam0 ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( subparam0 ) . is_phi ( ) ) ; <nl> + <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( subparam1 ) ) ; <nl> + } else { <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( subparam0 ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( subparam1 ) ) ; <nl> + <nl> + EXPECT_THAT ( HloValuesAt ( subparam0 ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant1 ) , <nl> + analysis . GetValueDefinedAt ( add ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( subparam1 ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant2 ) ) ) ; <nl> + } <nl> + <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( add ) . live_out_of_module ( ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , NestedCalls ) { <nl> + / / Test a module with nested computations . HLO is : <nl> + / / <nl> + / / F32 [ ] inner_computation ( F32 [ ] % param0 , F32 [ ] % param1 ) : <nl> + / / % add = Add ( % param0 , % param1 ) <nl> + / / <nl> + / / F32 [ ] outer_computation ( ( F32 [ ] % param0 , F32 [ ] % param1 ) : <nl> + / / ; ; Note that parameters are interchanged in the call . <nl> + / / % nested_call = Call ( inner_computation , { % param1 , % param0 } ) <nl> + / / <nl> + / / F32 [ ] entry : <nl> + / / % constant1 = Constant ( 1 . 0 ) <nl> + / / % constant2 = Constant ( 2 . 0 ) <nl> + / / % call = Call ( outer_computation , { % constant1 , % constant2 } ) <nl> + / / <nl> + auto inner_builder = HloComputation : : Builder ( " InnerComputation " ) ; <nl> + auto inner_param0 = inner_builder . 
AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , scalar_shape_ , " param0 " ) ) ; <nl> + auto inner_param1 = inner_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , scalar_shape_ , " param1 " ) ) ; <nl> + auto add = inner_builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + scalar_shape_ , HloOpcode : : kAdd , inner_param0 , inner_param1 ) ) ; <nl> + HloComputation * inner_computation = <nl> + module_ . AddEmbeddedComputation ( inner_builder . Build ( ) ) ; <nl> + <nl> + auto outer_builder = HloComputation : : Builder ( " OuterComputation " ) ; <nl> + auto outer_param0 = outer_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , scalar_shape_ , " param0 " ) ) ; <nl> + auto outer_param1 = outer_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , scalar_shape_ , " param1 " ) ) ; <nl> + / / Swizzle parameters . <nl> + auto nested_call = outer_builder . AddInstruction ( HloInstruction : : CreateCall ( <nl> + scalar_shape_ , { outer_param1 , outer_param0 } , inner_computation ) ) ; <nl> + HloComputation * outer_computation = <nl> + module_ . AddEmbeddedComputation ( outer_builder . Build ( ) ) ; <nl> + <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto call = builder . AddInstruction ( HloInstruction : : CreateCall ( <nl> + scalar_shape_ , { constant1 , constant2 } , outer_computation ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + / / Only three values should be defined . Most instructions just pass through <nl> + / / their operand values . <nl> + EXPECT_EQ ( analysis . values ( ) . size ( ) , 3 ) ; <nl> + <nl> + / / Verify that the uses of the constants are properly swizzled by parameter <nl> + / / permutation in nested_call . <nl> + EXPECT_THAT ( <nl> + analysis . GetValueDefinedAt ( constant1 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { call , 0 , { } } , HloUse { nested_call , 1 , { } } , <nl> + HloUse { add , 1 , { } } ) ) ; <nl> + EXPECT_THAT ( <nl> + analysis . GetValueDefinedAt ( constant2 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { call , 1 , { } } , HloUse { nested_call , 0 , { } } , <nl> + HloUse { add , 0 , { } } ) ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( add ) . live_out_of_module ( ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , SingleWhile ) { <nl> + / / Test a simple single while instruction . The while body includes a <nl> + / / pass - through value . HLO : <nl> + / / <nl> + / / body ( ( F32 [ ] , F32 [ ] ) % tuple_param ) : <nl> + / / % add = Add ( % tuple_param { 0 } , % tuple_param { 1 } ) <nl> + / / return Tuple ( % tuple_param { 0 } , % add ) <nl> + / / <nl> + / / condition ( ( F32 [ ] , F32 [ ] ) % tuple_param ) : <nl> + / / return Constant ( false ) <nl> + / / <nl> + / / entry : <nl> + / / % constant1 = Constant ( 1 . 0 ) <nl> + / / % constant2 = Constant ( 2 . 
0 ) <nl> + / / % tuple = Tuple ( % constant1 , % constant2 ) <nl> + / / return While ( % tuple , body , condition ) <nl> + / / <nl> + const Shape tuple_shape = <nl> + ShapeUtil : : MakeTupleShape ( { scalar_shape_ , scalar_shape_ } ) ; <nl> + <nl> + / / Element 0 passes transparently through the body . <nl> + auto body_builder = HloComputation : : Builder ( " body " ) ; <nl> + auto body_param = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + auto body_element_0 = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , body_param , 0 ) ) ; <nl> + auto body_element_1 = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , body_param , 1 ) ) ; <nl> + auto add = body_builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + scalar_shape_ , HloOpcode : : kAdd , body_element_0 , body_element_1 ) ) ; <nl> + auto body_tuple = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { body_element_0 , add } ) ) ; <nl> + HloComputation * body = module_ . AddEmbeddedComputation ( body_builder . Build ( ) ) ; <nl> + <nl> + / / Condition computation trivially returns a constant " false " . <nl> + auto cond_builder = HloComputation : : Builder ( " condition " ) ; <nl> + auto cond_param = cond_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + cond_builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < bool > ( false ) ) ) ; <nl> + HloComputation * condition = <nl> + module_ . AddEmbeddedComputation ( cond_builder . Build ( ) ) ; <nl> + <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto tuple = builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { constant1 , constant2 } ) ) ; <nl> + auto xla_while = builder . AddInstruction ( <nl> + HloInstruction : : CreateWhile ( tuple_shape , condition , body , tuple ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + if ( ssa_form ) { <nl> + / / Element 0 of the tuple passed through the body so no phi value is <nl> + / / defined . <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( xla_while , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( body_param , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( cond_param , / * index = * / { 0 } ) ) ; <nl> + <nl> + / / Element 1 of the tuple should be a phi value . <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( xla_while , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( xla_while , / * index = * / { 1 } ) . is_phi ( ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( body_param , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( body_param , / * index = * / { 1 } ) . is_phi ( ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( cond_param , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( analysis . 
GetValueDefinedAt ( cond_param , / * index = * / { 1 } ) . is_phi ( ) ) ; <nl> + <nl> + EXPECT_THAT ( analysis . GetValueDefinedAt ( constant1 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { add , 0 , { } } , HloUse { tuple , 0 , { } } , <nl> + HloUse { xla_while , 0 , { 0 } } , <nl> + HloUse { body_tuple , 0 , { } } ) ) ; <nl> + <nl> + / / Constant1 passes through the body and out of the module . <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( constant1 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( xla_while , / * index = * / { 1 } ) <nl> + . live_out_of_module ( ) ) ; <nl> + } else { <nl> + / / While instruction and subcomputation parameters should not define values <nl> + / / in non - ssa form . <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( xla_while , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( xla_while , / * index = * / { 1 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( body_param , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( body_param , / * index = * / { 1 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( cond_param , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( cond_param , / * index = * / { 1 } ) ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( constant1 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( add ) . live_out_of_module ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , SequentialWhiles ) { <nl> + / / Test sequential while instructions . The while body includes a <nl> + / / pass - through value . HLO : <nl> + / / <nl> + / / body ( ( F32 [ ] , F32 [ ] ) % tuple_param ) : <nl> + / / % add = Add ( % tuple_param { 0 } , % tuple_param { 1 } ) <nl> + / / return Tuple ( % tuple_param { 0 } , % add ) <nl> + / / <nl> + / / condition ( ( F32 [ ] , F32 [ ] ) % tuple_param ) : <nl> + / / return Constant ( false ) <nl> + / / <nl> + / / entry : <nl> + / / % constant1 = Constant ( 1 . 0 ) <nl> + / / % constant2 = Constant ( 2 . 0 ) <nl> + / / % tuple = Tuple ( % constant1 , % constant2 ) <nl> + / / % while0 = While ( % tuple , body , condition ) <nl> + / / % while1 = While ( % while0 , body , condition ) <nl> + / / return While ( % while1 , body , condition ) <nl> + / / <nl> + const Shape tuple_shape = <nl> + ShapeUtil : : MakeTupleShape ( { scalar_shape_ , scalar_shape_ } ) ; <nl> + <nl> + / / Element 0 passes transparently through the body . <nl> + auto body_builder = HloComputation : : Builder ( " body " ) ; <nl> + auto body_param = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + auto body_element_0 = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , body_param , 0 ) ) ; <nl> + auto body_element_1 = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , body_param , 1 ) ) ; <nl> + auto add = body_builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + scalar_shape_ , HloOpcode : : kAdd , body_element_0 , body_element_1 ) ) ; <nl> + body_builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { body_element_0 , add } ) ) ; <nl> + HloComputation * body = module_ . AddEmbeddedComputation ( body_builder . Build ( ) ) ; <nl> + <nl> + auto cond_builder = HloComputation : : Builder ( " condition " ) ; <nl> + cond_builder . 
AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + cond_builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < bool > ( false ) ) ) ; <nl> + HloComputation * condition = <nl> + module_ . AddEmbeddedComputation ( cond_builder . Build ( ) ) ; <nl> + <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto tuple = builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { constant1 , constant2 } ) ) ; <nl> + auto xla_while0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateWhile ( tuple_shape , condition , body , tuple ) ) ; <nl> + auto xla_while1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateWhile ( tuple_shape , condition , body , xla_while0 ) ) ; <nl> + auto xla_while2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateWhile ( tuple_shape , condition , body , xla_while1 ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + / / Element 0 is passed through all the while instructions and out of the <nl> + / / module . <nl> + EXPECT_EQ ( analysis . GetUniqueValueAt ( xla_while0 , / * index = * / { 0 } ) , <nl> + analysis . GetValueDefinedAt ( constant1 ) ) ; <nl> + EXPECT_EQ ( analysis . GetUniqueValueAt ( xla_while1 , / * index = * / { 0 } ) , <nl> + analysis . GetValueDefinedAt ( constant1 ) ) ; <nl> + EXPECT_EQ ( analysis . GetUniqueValueAt ( xla_while2 , / * index = * / { 0 } ) , <nl> + analysis . GetValueDefinedAt ( constant1 ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( constant1 ) . live_out_of_module ( ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , NestedWhiles ) { <nl> + / / Test nested while instructions . The inner body passes through element 0 of <nl> + / / its parameter , and the outer body passes through element 1 . HLO : <nl> + / / <nl> + / / inner_body ( ( F32 [ ] , F32 [ ] ) % tuple_param ) : <nl> + / / % add = Add ( % tuple_param { 0 } , % tuple_param { 1 } ) <nl> + / / return Tuple ( % tuple_param { 0 } , % add ) <nl> + / / <nl> + / / outer_body ( ( F32 [ ] , F32 [ ] ) % tuple_param ) : <nl> + / / % negate = Negate ( % tuple_param { 0 } ) <nl> + / / % tuple = Tuple ( % negate , % tuple_param { 1 } ) <nl> + / / return While ( % tuple , inner_body , condition ) <nl> + / / <nl> + / / entry : <nl> + / / % constant1 = Constant ( 1 . 0 ) <nl> + / / % constant2 = Constant ( 2 . 0 ) <nl> + / / % tuple = Tuple ( % constant1 , % constant2 ) <nl> + / / return While ( % tuple , outer_body , condition ) <nl> + / / <nl> + const Shape tuple_shape = <nl> + ShapeUtil : : MakeTupleShape ( { scalar_shape_ , scalar_shape_ } ) ; <nl> + <nl> + auto cond_builder = HloComputation : : Builder ( " condition " ) ; <nl> + cond_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + cond_builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < bool > ( false ) ) ) ; <nl> + HloComputation * condition = <nl> + module_ . AddEmbeddedComputation ( cond_builder . 
Build ( ) ) ; <nl> + <nl> + / / Element 0 passes transparently through the body . <nl> + auto inner_builder = HloComputation : : Builder ( " inner_body " ) ; <nl> + auto inner_param = inner_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + auto inner_element_0 = inner_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , inner_param , 0 ) ) ; <nl> + auto inner_element_1 = inner_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , inner_param , 1 ) ) ; <nl> + auto add = inner_builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + scalar_shape_ , HloOpcode : : kAdd , inner_element_0 , inner_element_1 ) ) ; <nl> + inner_builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { inner_element_0 , add } ) ) ; <nl> + HloComputation * inner_body = <nl> + module_ . AddEmbeddedComputation ( inner_builder . Build ( ) ) ; <nl> + <nl> + / / Element 1 passes transparently through the body . <nl> + auto outer_builder = HloComputation : : Builder ( " outer_body " ) ; <nl> + auto outer_param = outer_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + auto outer_element_0 = outer_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , outer_param , 0 ) ) ; <nl> + auto negate = outer_builder . AddInstruction ( HloInstruction : : CreateUnary ( <nl> + scalar_shape_ , HloOpcode : : kNegate , outer_element_0 ) ) ; <nl> + auto outer_element_1 = outer_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , outer_param , 1 ) ) ; <nl> + auto outer_tuple = outer_builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { negate , outer_element_1 } ) ) ; <nl> + auto nested_while = outer_builder . AddInstruction ( HloInstruction : : CreateWhile ( <nl> + tuple_shape , condition , inner_body , outer_tuple ) ) ; <nl> + HloComputation * outer_body = <nl> + module_ . AddEmbeddedComputation ( outer_builder . Build ( ) ) ; <nl> + <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto tuple = builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { constant1 , constant2 } ) ) ; <nl> + auto entry_while = builder . AddInstruction ( <nl> + HloInstruction : : CreateWhile ( tuple_shape , condition , outer_body , tuple ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + EXPECT_THAT ( HloValuesAt ( inner_param , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( negate ) ) ) ; <nl> + if ( ssa_form ) { <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( inner_param , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( <nl> + analysis . GetValueDefinedAt ( inner_param , / * index = * / { 1 } ) . is_phi ( ) ) ; <nl> + <nl> + / / Element 0 of the nested while is % negate . <nl> + EXPECT_FALSE ( analysis . 
ValueIsDefinedAt ( nested_while , / * index = * / { 0 } ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( inner_param , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( negate ) ) ) ; <nl> + / / Element 1 is a phi value ( join of % add and % constant2 ) . <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( nested_while , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( <nl> + analysis . GetValueDefinedAt ( nested_while , / * index = * / { 1 } ) . is_phi ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( entry_while , / * index = * / { 0 } ) ) ; <nl> + EXPECT_TRUE ( <nl> + analysis . GetValueDefinedAt ( entry_while , / * index = * / { 0 } ) . is_phi ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( entry_while , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( <nl> + analysis . GetValueDefinedAt ( entry_while , / * index = * / { 1 } ) . is_phi ( ) ) ; <nl> + } else { <nl> + EXPECT_THAT ( HloValuesAt ( inner_param , / * index = * / { 1 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( add ) , <nl> + analysis . GetValueDefinedAt ( constant2 ) ) ) ; <nl> + <nl> + EXPECT_THAT ( HloValuesAt ( nested_while , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( negate ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( nested_while , / * index = * / { 1 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( add ) , <nl> + analysis . GetValueDefinedAt ( constant2 ) ) ) ; <nl> + <nl> + EXPECT_THAT ( HloValuesAt ( entry_while , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( negate ) , <nl> + analysis . GetValueDefinedAt ( constant1 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( entry_while , / * index = * / { 1 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( add ) , <nl> + analysis . GetValueDefinedAt ( constant2 ) ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , SwizzlingWhile ) { <nl> + / / Test a while instruction with a body which permutes its tuple parameter <nl> + / / elements . HLO : <nl> + / / <nl> + / / body ( ( F32 [ ] , F32 [ ] ) % tuple_param ) : <nl> + / / return Tuple ( % tuple_param { 1 } , % tuple_param { 0 } ) <nl> + / / <nl> + / / condition ( ( F32 [ ] , F32 [ ] ) % tuple_param ) : <nl> + / / return Constant ( false ) <nl> + / / <nl> + / / entry : <nl> + / / % constant1 = Constant ( 1 . 0 ) <nl> + / / % constant2 = Constant ( 2 . 0 ) <nl> + / / % tuple = Tuple ( % constant1 , % constant2 ) <nl> + / / return While ( % tuple , body , condition ) <nl> + / / <nl> + const Shape tuple_shape = <nl> + ShapeUtil : : MakeTupleShape ( { scalar_shape_ , scalar_shape_ } ) ; <nl> + <nl> + auto body_builder = HloComputation : : Builder ( " body " ) ; <nl> + auto body_param = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + auto body_element_0 = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , body_param , 0 ) ) ; <nl> + auto body_element_1 = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , body_param , 1 ) ) ; <nl> + body_builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { body_element_1 , body_element_0 } ) ) ; <nl> + HloComputation * body = module_ . AddEmbeddedComputation ( body_builder . Build ( ) ) ; <nl> + <nl> + auto cond_builder = HloComputation : : Builder ( " condition " ) ; <nl> + auto cond_param = cond_builder . 
AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + cond_builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < bool > ( false ) ) ) ; <nl> + HloComputation * condition = <nl> + module_ . AddEmbeddedComputation ( cond_builder . Build ( ) ) ; <nl> + <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto tuple = builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { constant1 , constant2 } ) ) ; <nl> + auto xla_while = builder . AddInstruction ( <nl> + HloInstruction : : CreateWhile ( tuple_shape , condition , body , tuple ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + if ( ssa_form ) { <nl> + / / Elements 0 and 1 in the while should both be phi values . <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( body_param , / * index = * / { 0 } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( body_param , / * index = * / { 0 } ) . is_phi ( ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( body_param , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( body_param , / * index = * / { 1 } ) . is_phi ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( xla_while , / * index = * / { 0 } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( xla_while , / * index = * / { 0 } ) . is_phi ( ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( xla_while , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( xla_while , / * index = * / { 1 } ) . is_phi ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( cond_param , / * index = * / { 0 } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( cond_param , / * index = * / { 0 } ) . is_phi ( ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( cond_param , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( cond_param , / * index = * / { 1 } ) . is_phi ( ) ) ; <nl> + <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant1 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant2 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( xla_while , / * index = * / { } ) <nl> + . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( xla_while , / * index = * / { 0 } ) <nl> + . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( xla_while , / * index = * / { 1 } ) <nl> + . live_out_of_module ( ) ) ; <nl> + } else { <nl> + / / Elements 0 and 1 have both constants as reaching definitions . <nl> + EXPECT_THAT ( HloValuesAt ( xla_while , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant1 ) , <nl> + analysis . GetValueDefinedAt ( constant2 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( xla_while , / * index = * / { 1 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant1 ) , <nl> + analysis . GetValueDefinedAt ( constant2 ) ) ) ; <nl> + EXPECT_TRUE ( analysis . 
GetValueDefinedAt ( constant1 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( constant2 ) . live_out_of_module ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , ArraySelect ) { <nl> + / / Test a kSelect of an array value . <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto pred = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < bool > ( false ) ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto select = builder . AddInstruction ( HloInstruction : : CreateTernary ( <nl> + scalar_shape_ , HloOpcode : : kSelect , pred , constant1 , constant2 ) ) ; <nl> + <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( select ) ) ; <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant1 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant2 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( select ) . live_out_of_module ( ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , TupleSelect ) { <nl> + / / Test a kSelect of a tuple value . Non - top - level elements flow through the <nl> + / / instruction . <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto pred = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < bool > ( false ) ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto constant3 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 3 . 0 ) ) ) ; <nl> + auto constant4 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 4 . 0 ) ) ) ; <nl> + auto tuple1 = <nl> + builder . AddInstruction ( HloInstruction : : CreateTuple ( { constant1 } ) ) ; <nl> + auto tuple2 = <nl> + builder . AddInstruction ( HloInstruction : : CreateTuple ( { constant2 } ) ) ; <nl> + auto tuple3 = <nl> + builder . AddInstruction ( HloInstruction : : CreateTuple ( { constant3 } ) ) ; <nl> + auto tuple4 = <nl> + builder . AddInstruction ( HloInstruction : : CreateTuple ( { constant4 } ) ) ; <nl> + const Shape tuple_shape = tuple1 - > shape ( ) ; <nl> + auto select11 = builder . AddInstruction ( HloInstruction : : CreateTernary ( <nl> + tuple_shape , HloOpcode : : kSelect , pred , tuple1 , tuple1 ) ) ; <nl> + auto select12 = builder . AddInstruction ( HloInstruction : : CreateTernary ( <nl> + tuple_shape , HloOpcode : : kSelect , pred , tuple1 , tuple2 ) ) ; <nl> + auto select34 = builder . AddInstruction ( HloInstruction : : CreateTernary ( <nl> + tuple_shape , HloOpcode : : kSelect , pred , tuple3 , tuple4 ) ) ; <nl> + auto select1234 = builder . 
AddInstruction ( HloInstruction : : CreateTernary ( <nl> + tuple_shape , HloOpcode : : kSelect , pred , select12 , select34 ) ) ; <nl> + <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + / / Top - level value is always defined by a kSelect . <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( select11 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( select12 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( select34 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( select1234 ) ) ; <nl> + <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( select11 , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( select12 , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( select34 , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( select1234 , / * index = * / { 0 } ) ) ; <nl> + <nl> + EXPECT_THAT ( HloValuesAt ( select11 , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant1 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( select12 , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant1 ) , <nl> + analysis . GetValueDefinedAt ( constant2 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( select34 , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant3 ) , <nl> + analysis . GetValueDefinedAt ( constant4 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( select1234 , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant1 ) , <nl> + analysis . GetValueDefinedAt ( constant2 ) , <nl> + analysis . GetValueDefinedAt ( constant3 ) , <nl> + analysis . GetValueDefinedAt ( constant4 ) ) ) ; <nl> + <nl> + EXPECT_THAT ( <nl> + analysis . GetValueDefinedAt ( constant1 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { tuple1 , 0 , { } } , HloUse { select11 , 1 , { 0 } } , <nl> + HloUse { select11 , 2 , { 0 } } , HloUse { select12 , 1 , { 0 } } , <nl> + HloUse { select1234 , 1 , { 0 } } ) ) ; <nl> + EXPECT_THAT ( <nl> + analysis . GetValueDefinedAt ( constant2 ) . uses ( ) , <nl> + UnorderedElementsAre ( HloUse { tuple2 , 0 , { } } , HloUse { select12 , 2 , { 0 } } , <nl> + HloUse { select1234 , 1 , { 0 } } ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , NestedTupleSelect ) { <nl> + / / Test kSelect of a nested tuple . <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto pred = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < bool > ( false ) ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto constant3 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 3 . 0 ) ) ) ; <nl> + auto constant4 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 4 . 0 ) ) ) ; <nl> + auto constant5 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 5 . 0 ) ) ) ; <nl> + auto inner_tuple1 = builder . 
AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { constant2 , constant3 } ) ) ; <nl> + auto tuple1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { constant1 , inner_tuple1 } ) ) ; <nl> + auto inner_tuple2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { constant5 , constant3 } ) ) ; <nl> + auto tuple2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { constant4 , inner_tuple2 } ) ) ; <nl> + auto select = builder . AddInstruction ( HloInstruction : : CreateTernary ( <nl> + tuple1 - > shape ( ) , HloOpcode : : kSelect , pred , tuple1 , tuple2 ) ) ; <nl> + <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( select ) ) ; <nl> + <nl> + EXPECT_THAT ( HloValuesAt ( select , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant1 ) , <nl> + analysis . GetValueDefinedAt ( constant4 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( select , / * index = * / { 1 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( inner_tuple1 ) , <nl> + analysis . GetValueDefinedAt ( inner_tuple2 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( select , / * index = * / { 1 , 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant2 ) , <nl> + analysis . GetValueDefinedAt ( constant5 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( select , / * index = * / { 1 , 1 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant3 ) ) ) ; <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , TupleSelectToWhile ) { <nl> + / / Test a tuple - shaped kSelect feeding a kWhile instruction . HLO : <nl> + / / <nl> + / / body ( ( F32 [ ] , F32 [ ] ) % tuple_param ) : <nl> + / / % add = Add ( % tuple_param { 0 } , % tuple_param { 1 } ) <nl> + / / return Tuple ( % tuple_param { 0 } , % add ) <nl> + / / <nl> + / / condition ( ( F32 [ ] , F32 [ ] ) % tuple_param ) : <nl> + / / return Constant ( false ) <nl> + / / <nl> + / / entry : <nl> + / / % constant1 = Constant ( 1 . 0 ) <nl> + / / % constant2 = Constant ( 2 . 0 ) <nl> + / / % constant3 = Constant ( 3 . 0 ) <nl> + / / % tuple1 = Tuple ( % constant1 ) <nl> + / / % tuple2 = Tuple ( % constant2 ) <nl> + / / % select = Select ( % tuple1 , % tuple2 ) <nl> + / / % gte = GetTupleElement ( % select , 0 ) <nl> + / / % tuple = Tuple ( % gte , % constant3 ) <nl> + / / return While ( % tuple , body , condition ) <nl> + / / <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + <nl> + const Shape tuple_shape = <nl> + ShapeUtil : : MakeTupleShape ( { scalar_shape_ , scalar_shape_ } ) ; <nl> + <nl> + / / Element 0 passes transparently through the body . <nl> + auto body_builder = HloComputation : : Builder ( " body " ) ; <nl> + auto body_param = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + auto body_element_0 = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , body_param , 0 ) ) ; <nl> + auto body_element_1 = body_builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , body_param , 1 ) ) ; <nl> + auto add = body_builder . AddInstruction ( HloInstruction : : CreateBinary ( <nl> + scalar_shape_ , HloOpcode : : kAdd , body_element_0 , body_element_1 ) ) ; <nl> + body_builder . 
AddInstruction ( <nl> + HloInstruction : : CreateTuple ( { body_element_0 , add } ) ) ; <nl> + HloComputation * body = module_ . AddEmbeddedComputation ( body_builder . Build ( ) ) ; <nl> + <nl> + auto cond_builder = HloComputation : : Builder ( " condition " ) ; <nl> + cond_builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , tuple_shape , " param " ) ) ; <nl> + cond_builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < bool > ( false ) ) ) ; <nl> + HloComputation * condition = <nl> + module_ . AddEmbeddedComputation ( cond_builder . Build ( ) ) ; <nl> + <nl> + auto pred = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < bool > ( false ) ) ) ; <nl> + auto constant1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto constant2 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 2 . 0 ) ) ) ; <nl> + auto constant3 = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 3 . 0 ) ) ) ; <nl> + auto tuple1 = <nl> + builder . AddInstruction ( HloInstruction : : CreateTuple ( { constant1 } ) ) ; <nl> + auto tuple2 = <nl> + builder . AddInstruction ( HloInstruction : : CreateTuple ( { constant2 } ) ) ; <nl> + auto select = builder . AddInstruction ( HloInstruction : : CreateTernary ( <nl> + tuple1 - > shape ( ) , HloOpcode : : kSelect , pred , tuple1 , tuple2 ) ) ; <nl> + auto gte = builder . AddInstruction ( <nl> + HloInstruction : : CreateGetTupleElement ( scalar_shape_ , select , 0 ) ) ; <nl> + auto tuple = <nl> + builder . AddInstruction ( HloInstruction : : CreateTuple ( { gte , constant3 } ) ) ; <nl> + auto xla_while = builder . AddInstruction ( <nl> + HloInstruction : : CreateWhile ( tuple - > shape ( ) , condition , body , tuple ) ) ; <nl> + <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + if ( ssa_form ) { <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( xla_while , / * index = * / { 0 } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( xla_while , / * index = * / { 0 } ) . is_phi ( ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( xla_while , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( xla_while , / * index = * / { 1 } ) . is_phi ( ) ) ; <nl> + <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( select , / * index = * / { 0 } ) ) ; <nl> + <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant1 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant2 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant3 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( xla_while , / * index = * / { 1 } ) <nl> + . live_out_of_module ( ) ) ; <nl> + } else { <nl> + EXPECT_THAT ( HloValuesAt ( gte ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant1 ) , <nl> + analysis . GetValueDefinedAt ( constant2 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( xla_while , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( constant1 ) , <nl> + analysis . 
GetValueDefinedAt ( constant2 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( xla_while , / * index = * / { 1 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( add ) , <nl> + analysis . GetValueDefinedAt ( constant3 ) ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( constant1 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( constant2 ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( constant3 ) . live_out_of_module ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , BitcastDefinesValue ) { <nl> + / / Test the bitcast_defines_value flag to the dataflow analysis . <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto constant = builder . AddInstruction ( <nl> + HloInstruction : : CreateConstant ( LiteralUtil : : CreateR0 < float > ( 1 . 0 ) ) ) ; <nl> + auto bitcast = builder . AddInstruction ( HloInstruction : : CreateUnary ( <nl> + scalar_shape_ , HloOpcode : : kBitcast , constant ) ) ; <nl> + <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + { <nl> + const HloDataflowAnalysis & analysis = <nl> + RunAnalysis ( ssa_form , / * bitcast_defines_value = * / true ) ; <nl> + <nl> + EXPECT_EQ ( analysis . values ( ) . size ( ) , 2 ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( constant ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( bitcast ) ) ; <nl> + EXPECT_FALSE ( analysis . GetValueDefinedAt ( constant ) . live_out_of_module ( ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( bitcast ) . live_out_of_module ( ) ) ; <nl> + } <nl> + { <nl> + const HloDataflowAnalysis & analysis = <nl> + RunAnalysis ( ssa_form , / * bitcast_defines_value = * / false ) ; <nl> + EXPECT_EQ ( analysis . values ( ) . size ( ) , 1 ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( constant ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( bitcast ) ) ; <nl> + EXPECT_TRUE ( analysis . GetValueDefinedAt ( constant ) . live_out_of_module ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_P ( HloDataflowAnalysisTest , TupleCopy ) { <nl> + / / Test that a tuple - shaped copy only copies ( defines ) the top - level value . <nl> + auto builder = HloComputation : : Builder ( TestName ( ) ) ; <nl> + auto param0 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 0 , scalar_shape_ , " param0 " ) ) ; <nl> + auto param1 = builder . AddInstruction ( <nl> + HloInstruction : : CreateParameter ( 1 , scalar_shape_ , " param1 " ) ) ; <nl> + auto tuple = <nl> + builder . AddInstruction ( HloInstruction : : CreateTuple ( { param0 , param1 } ) ) ; <nl> + auto copy = builder . AddInstruction ( <nl> + HloInstruction : : CreateUnary ( tuple - > shape ( ) , HloOpcode : : kCopy , tuple ) ) ; <nl> + module_ . AddEntryComputation ( builder . Build ( ) ) ; <nl> + <nl> + bool ssa_form = GetParam ( ) ; <nl> + const HloDataflowAnalysis & analysis = RunAnalysis ( ssa_form ) ; <nl> + <nl> + EXPECT_EQ ( analysis . values ( ) . size ( ) , 4 ) ; <nl> + <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( param0 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( param1 ) ) ; <nl> + EXPECT_TRUE ( analysis . ValueIsDefinedAt ( tuple , / * index = * / { } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( tuple , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( tuple , / * index = * / { 1 } ) ) ; <nl> + EXPECT_TRUE ( analysis . 
ValueIsDefinedAt ( copy , / * index = * / { } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( copy , / * index = * / { 0 } ) ) ; <nl> + EXPECT_FALSE ( analysis . ValueIsDefinedAt ( copy , / * index = * / { 1 } ) ) ; <nl> + <nl> + EXPECT_THAT ( HloValuesAt ( copy , / * index = * / { 0 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( param0 ) ) ) ; <nl> + EXPECT_THAT ( HloValuesAt ( copy , / * index = * / { 1 } ) , <nl> + UnorderedElementsAre ( analysis . GetValueDefinedAt ( param1 ) ) ) ; <nl> + EXPECT_TRUE ( <nl> + analysis . GetValueDefinedAt ( copy , / * index = * / { } ) . live_out_of_module ( ) ) ; <nl> + } <nl> + <nl> + INSTANTIATE_TEST_CASE_P ( HloDataflowAnalysisInstantiation , <nl> + HloDataflowAnalysisTest , <nl> + : : testing : : Values ( false , true ) ) ; <nl> + <nl> + } / / namespace <nl> + } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / hlo_instruction . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_instruction . cc <nl> string HloInstruction : : ToString ( bool compact_operands , <nl> / / Concatenate elements in " v " with spaces separating them , but ignoring <nl> / / empty entries . <nl> for ( const auto & s : v ) { <nl> - if ( s . empty ( ) ) continue ; <nl> + if ( s . empty ( ) ) { <nl> + continue ; <nl> + } <nl> StrAppend ( & operands , ( first ? " " : " " ) , s ) ; <nl> first = false ; <nl> } <nl> HloInstruction : : UseKind HloInstruction : : OperandElementUse ( int64 i ) const { <nl> std : : function < UseKind ( const HloInstruction & ) > reuses_parameter_elements = <nl> [ i , & cache , & reuses_parameter_elements ] ( const HloInstruction & hlo ) { <nl> auto plus = [ ] ( const UseKind & a , const UseKind & b ) { <nl> - if ( a = = UseKind : : kNoUse ) return b ; <nl> - if ( b = = UseKind : : kNoUse ) return a ; <nl> - if ( a = = UseKind : : kReuse | | b = = UseKind : : kReuse ) { <nl> + if ( a = = UseKind : : kNoUse ) { <nl> + return b ; <nl> + } else if ( b = = UseKind : : kNoUse ) { <nl> + return a ; <nl> + } else if ( a = = UseKind : : kReuse | | b = = UseKind : : kReuse ) { <nl> return UseKind : : kReuse ; <nl> - } <nl> - if ( a = = UseKind : : kUsePermutingElements | | <nl> - b = = UseKind : : kUsePermutingElements ) { <nl> + } else if ( a = = UseKind : : kUsePermutingElements | | <nl> + b = = UseKind : : kUsePermutingElements ) { <nl> return UseKind : : kReuse ; <nl> } <nl> CHECK ( UseKind : : kUse = = a & & UseKind : : kUse = = b ) ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_pass_pipeline . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_pass_pipeline . cc <nl> limitations under the License . <nl> <nl> # include < functional > <nl> <nl> - # include " tensorflow / compiler / xla / legacy_flags / hlo_pass_pipeline_flags . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> # include " tensorflow / compiler / xla / types . h " <nl> # include " tensorflow / compiler / xla / util . h " <nl> StatusOr < bool > HloPassPipeline : : Run ( HloModule * module ) { <nl> <nl> VLOG ( 1 ) < < " Running HLO pass pipeline " < < name ( ) ; <nl> <nl> - legacy_flags : : HloPassPipelineFlags * flags = <nl> - legacy_flags : : GetHloPassPipelineFlags ( ) ; <nl> - std : : vector < string > tmp = <nl> - tensorflow : : str_util : : Split ( flags - > xla_disable_hlo_passes , ' , ' ) ; <nl> - tensorflow : : gtl : : FlatSet < string > disabled_passes ( tmp . begin ( ) , tmp . end ( ) ) ; <nl> + auto repeated_field = <nl> + module - > config ( ) . debug_options ( ) . 
xla_disable_hlo_passes ( ) ; <nl> + tensorflow : : gtl : : FlatSet < string > disabled_passes ( repeated_field . begin ( ) , <nl> + repeated_field . end ( ) ) ; <nl> if ( ! disabled_passes . empty ( ) ) { <nl> VLOG ( 1 ) < < " Passes disabled by - - xla_disable_hlo_passes : " <nl> < < tensorflow : : str_util : : Join ( disabled_passes , " , " ) ; <nl> StatusOr < bool > HloPassPipeline : : Run ( HloModule * module ) { <nl> bool changed = false ; <nl> string message ; <nl> for ( auto & pass : passes_ ) { <nl> - if ( ! disabled_passes . empty ( ) & & <nl> - disabled_passes . count ( pass - > name ( ) . ToString ( ) ) > 0 ) { <nl> + if ( disabled_passes . count ( pass - > name ( ) . ToString ( ) ) > 0 ) { <nl> VLOG ( 1 ) < < " Skipping HLO pass " < < pass - > name ( ) <nl> < < " , disabled by - - xla_disable_hlo_passes " ; <nl> continue ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_rematerialization . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_rematerialization . cc <nl> StatusOr < bool > HloRematerialization : : Run ( <nl> [ & module_output_size , this ] ( const Shape & subshape , <nl> const ShapeIndex & / * index * / ) { <nl> module_output_size + = size_function_ ( subshape ) ; <nl> - return Status : : OK ( ) ; <nl> - } ) <nl> - . IgnoreError ( ) ; <nl> + } ) ; <nl> <nl> const int64 adjusted_memory_limit_bytes = <nl> memory_limit_bytes - module_output_size ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_tfgraph_builder . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_tfgraph_builder . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / literal_util . h " <nl> # include " tensorflow / compiler / xla / service / hlo_opcode . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> + # include " tensorflow / core / framework / attr_value . pb . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / tensor_shape . pb . h " <nl> # include " tensorflow / core / lib / strings / str_util . h " <nl> mmm a / tensorflow / compiler / xla / service / layout_assignment . cc <nl> ppp b / tensorflow / compiler / xla / service / layout_assignment . cc <nl> Status LayoutConstraints : : SetInstructionLayout ( <nl> <nl> / / Create a BufferLayoutConstraint for each array shape in the output of the <nl> / / instruction . <nl> - return ShapeUtil : : ForEachSubshape ( <nl> + return ShapeUtil : : ForEachSubshapeWithStatus ( <nl> shape_with_layout , <nl> [ this , instruction ] ( const Shape & subshape , <nl> const ShapeIndex & index ) - > Status { <nl> Status CheckLayouts ( <nl> / / which could be the source of the subshape value . <nl> const PointsToSet & points_to_set = <nl> points_to_analysis - > GetPointsToSet ( instruction . get ( ) ) ; <nl> - TF_RETURN_IF_ERROR ( points_to_set . ForEachElement ( <nl> + TF_RETURN_IF_ERROR ( points_to_set . ForEachElementWithStatus ( <nl> [ & instruction ] ( <nl> - ShapeIndex index , bool is_leaf , <nl> + ShapeIndex index , <nl> const std : : vector < const LogicalBuffer * > & buffers ) - > Status { <nl> - if ( is_leaf ) { <nl> + if ( ShapeUtil : : IsLeafIndex ( instruction - > shape ( ) , index ) ) { <nl> const Shape & instruction_subshape = <nl> ShapeUtil : : GetSubshape ( instruction - > shape ( ) , index ) ; <nl> for ( const LogicalBuffer * buffer : buffers ) { <nl> Status LayoutAssignment : : PropagateUseConstraintToDefs ( <nl> / / match the given layout . 
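The hunk below, like the CheckLayouts hunk above, shows the migration pattern this change applies throughout the XLA service code: the visitor callbacks lose their bool is_leaf parameter, and callers that still need leaf detection recover it from the shape itself via ShapeUtil::IsLeafIndex. A minimal sketch of the new calling convention, using a hypothetical CountLeaves helper over a ShapeTree<int> (the helper is illustrative only, not part of the patch):

int64 CountLeaves(const ShapeTree<int>& tree) {
  int64 leaf_count = 0;
  // The void ForEachElement visitor now takes only (index, data).
  tree.ForEachElement(
      [&tree, &leaf_count](const ShapeIndex& index, const int& /*data*/) {
        // Leafness is no longer passed in as a bool; it is derived from
        // the shape: a leaf index names an array, not a (nested) tuple.
        if (ShapeUtil::IsLeafIndex(tree.shape(), index)) {
          ++leaf_count;
        }
      });
  return leaf_count;
}

The same IsLeafIndex test replaces the old is_leaf argument at every converted call site in this diff.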
<nl> const PointsToSet & points_to_set = <nl> constraints - > points_to_analysis ( ) . GetPointsToSet ( instruction ) ; <nl> - return points_to_set . ForEachElement ( <nl> + return points_to_set . ForEachElementWithStatus ( <nl> [ this , & shape_layout , constraints ] ( <nl> - const ShapeIndex & index , bool is_leaf , <nl> + const ShapeIndex & index , <nl> const std : : vector < const LogicalBuffer * > & buffers ) - > Status { <nl> - if ( is_leaf ) { <nl> + if ( ShapeUtil : : IsLeafIndex ( shape_layout . shape ( ) , index ) ) { <nl> for ( const LogicalBuffer * buffer : buffers ) { <nl> if ( constraints - > BufferLayout ( * buffer ) = = nullptr & & <nl> ShapeUtil : : IsArray ( buffer - > shape ( ) ) ) { <nl> Status LayoutAssignment : : AssignLayouts ( const LayoutConstraints & constraints , <nl> <nl> / / Any remaining layouts in the output of the instruction must be <nl> / / inferrable using points - to analysis . <nl> - TF_RETURN_IF_ERROR ( ShapeUtil : : ForEachMutableSubshape ( <nl> + TF_RETURN_IF_ERROR ( ShapeUtil : : ForEachMutableSubshapeWithStatus ( <nl> instruction - > mutable_shape ( ) , <nl> [ instruction , & constraints ] ( Shape * subshape , const ShapeIndex & index ) { <nl> if ( subshape - > has_layout ( ) | | ! ShapeUtil : : IsArray ( * subshape ) ) { <nl> mmm a / tensorflow / compiler / xla / service / liveness_util . cc <nl> ppp b / tensorflow / compiler / xla / service / liveness_util . cc <nl> limitations under the License . <nl> <nl> namespace xla { <nl> <nl> + bool DoesNotUseOperandBuffer ( const HloInstruction * operand , <nl> + const ShapeIndex & index , <nl> + const HloInstruction * user ) { <nl> + CHECK ( user - > IsUserOf ( operand ) ) <nl> + < < " user : " < < user - > ToString ( ) < < " operand : " < < operand - > ToString ( ) ; <nl> + <nl> + / / GetTupleElement instructions only access the top - level buffer of their <nl> + / / operand . <nl> + return ( user - > opcode ( ) = = HloOpcode : : kGetTupleElement & & ! index . empty ( ) ) ; <nl> + } <nl> + <nl> bool DoesNotUseOperandBuffer ( const HloInstruction * operand , <nl> const ShapeIndex & index , <nl> const HloInstruction * user , <nl> mmm a / tensorflow / compiler / xla / service / liveness_util . h <nl> ppp b / tensorflow / compiler / xla / service / liveness_util . h <nl> limitations under the License . <nl> # ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LIVENESS_UTIL_H_ <nl> # define TENSORFLOW_COMPILER_XLA_SERVICE_LIVENESS_UTIL_H_ <nl> <nl> - # include < utility > <nl> - # include < vector > <nl> - <nl> # include " tensorflow / compiler / xla / service / hlo_instruction . h " <nl> # include " tensorflow / compiler / xla / service / tuple_points_to_analysis . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> bool DoesNotUseOperandBuffer ( const HloInstruction * operand , <nl> const HloInstruction * user , <nl> const TuplePointsToAnalysis & points_to_analysis ) ; <nl> <nl> + / / Overload which does not require points - to analysis . The result is more <nl> + / / conservative ( returns false more often ) . <nl> + bool DoesNotUseOperandBuffer ( const HloInstruction * operand , <nl> + const ShapeIndex & index , <nl> + const HloInstruction * user ) ; <nl> + <nl> / / Returns true if ' user ' ( at ' user_index ' ) can share a buffer with its operand <nl> / / ' operand ' ( at ' operand_index ' ) . <nl> / / Returns false otherwise . <nl> mmm a / tensorflow / compiler / xla / service / llvm_ir / ir_array . cc <nl> ppp b / tensorflow / compiler / xla / service / llvm_ir / ir_array . 
cc <nl> llvm : : Value * IrArray : : EmitReadArrayElement ( const Index & index , <nl> llvm : : LoadInst * load = ir_builder - > CreateLoad ( element_address ) ; <nl> llvm_ir : : SetTbaaForInstruction ( load , GetShape ( ) , <nl> / * is_pointer_to = * / false ) ; <nl> - for ( const std : : pair < int , llvm : : MDNode * > & kind_md_pair : metadata_ ) { <nl> - int kind = kind_md_pair . first ; <nl> - llvm : : MDNode * md = kind_md_pair . second ; <nl> - load - > setMetadata ( kind , md ) ; <nl> + for ( const auto & kind_md_pair : metadata_ ) { <nl> + load - > setMetadata ( kind_md_pair . first , kind_md_pair . second ) ; <nl> } <nl> return load ; <nl> } <nl> void IrArray : : EmitWriteArrayElement ( const Index & index , llvm : : Value * value , <nl> llvm : : StoreInst * store = ir_builder - > CreateStore ( value , element_address ) ; <nl> llvm_ir : : SetTbaaForInstruction ( store , GetShape ( ) , <nl> / * is_pointer_to = * / false ) ; <nl> - for ( const std : : pair < int , llvm : : MDNode * > & kind_md_pair : metadata_ ) { <nl> - int kind = kind_md_pair . first ; <nl> - CHECK_NE ( kind , llvm : : LLVMContext : : MD_invariant_load ) ; <nl> - llvm : : MDNode * md = kind_md_pair . second ; <nl> - store - > setMetadata ( kind , md ) ; <nl> + for ( const auto & kind_md_pair : metadata_ ) { <nl> + CHECK_NE ( kind_md_pair . first , llvm : : LLVMContext : : MD_invariant_load ) ; <nl> + store - > setMetadata ( kind_md_pair . first , kind_md_pair . second ) ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / service / local_service . cc <nl> ppp b / tensorflow / compiler / xla / service / local_service . cc <nl> int64 RequiredSpace ( const Shape & shape , bool allocate_space_for_deep_copy , <nl> / / TODO ( b / 33492279 ) remove once no devices represent result tuples as <nl> / / contiguous buffers . <nl> if ( allocate_space_for_deep_copy ) { <nl> - TF_CHECK_OK ( ShapeUtil : : ForEachSubshape ( <nl> + ShapeUtil : : ForEachSubshape ( <nl> shape , [ & size , transfer_manager ] ( const Shape & subshape , <nl> const ShapeIndex & / * index * / ) { <nl> size + = transfer_manager - > GetByteSizeRequirement ( subshape ) ; <nl> - return tensorflow : : Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> } <nl> return size ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / shaped_buffer . cc <nl> ppp b / tensorflow / compiler / xla / service / shaped_buffer . cc <nl> ShapedBuffer : : MakeUnnestedTupleShapedBuffer ( <nl> } <nl> TF_ASSIGN_OR_RETURN ( std : : unique_ptr < ShapedBuffer > shaped_buffer , <nl> MakeShapedBuffer ( shape , platform , device_ordinal ) ) ; <nl> - TF_CHECK_OK ( shaped_buffer - > mutable_shape_index_to_buffer_entry ( ) <nl> - - > ForEachMutableElement ( <nl> - [ ] ( const ShapeIndex & index , bool is_leaf , <nl> - size_t * buffer_element ) - > tensorflow : : Status { <nl> - if ( is_leaf ) { <nl> - CHECK_EQ ( index . size ( ) , 1 ) ; <nl> - * buffer_element = index [ 0 ] ; <nl> - } <nl> - return tensorflow : : Status : : OK ( ) ; <nl> - } ) ) ; <nl> + shaped_buffer - > mutable_shape_index_to_buffer_entry ( ) - > ForEachMutableElement ( <nl> + [ & shaped_buffer ] ( const ShapeIndex & index , size_t * buffer_element ) { <nl> + if ( ShapeUtil : : IsLeafIndex ( shaped_buffer - > shape ( ) , index ) ) { <nl> + CHECK_EQ ( index . size ( ) , 1 ) ; <nl> + * buffer_element = index [ 0 ] ; <nl> + } <nl> + } ) ; <nl> shaped_buffer - > mutable_buffers ( ) - > reserve ( buffers . 
size ( ) ) ; <nl> for ( const perftools : : gputools : : DeviceMemoryBase & memory_base : buffers ) { <nl> shaped_buffer - > mutable_buffers ( ) - > push_back ( memory_base ) ; <nl> ScopedShapedBuffer : : MakeScopedShapedBuffer ( const Shape & shape , <nl> <nl> / / Allocate an appropriate sized buffer for each array element in the shape . <nl> TF_RETURN_IF_ERROR ( <nl> - shaped_buffer - > shape_index_to_buffer_entry_ . ForEachMutableElement ( <nl> - [ & shaped_buffer ] ( const ShapeIndex & index , bool is_leaf , <nl> - size_t * buffer_entry ) - > tensorflow : : Status { <nl> - if ( is_leaf ) { <nl> + shaped_buffer - > shape_index_to_buffer_entry_ <nl> + . ForEachMutableElementWithStatus ( [ & shaped_buffer ] ( <nl> + const ShapeIndex & index , <nl> + size_t * buffer_entry ) <nl> + - > tensorflow : : Status { <nl> + if ( ShapeUtil : : IsLeafIndex ( shaped_buffer - > shape ( ) , index ) ) { <nl> TF_ASSIGN_OR_RETURN ( <nl> perftools : : gputools : : DeviceMemoryBase memory_base , <nl> shaped_buffer - > allocator_ - > Allocate ( <nl> mmm a / tensorflow / compiler / xla / service / tuple_points_to_analysis . cc <nl> ppp b / tensorflow / compiler / xla / service / tuple_points_to_analysis . cc <nl> std : : ostream & operator < < ( std : : ostream & out , const BufferAlias & buffer_alias ) { <nl> <nl> bool PointsToSet : : IsAmbiguous ( ) const { <nl> bool ambiguous = false ; <nl> - TF_CHECK_OK ( ForEachElement ( <nl> - [ & ambiguous ] ( const ShapeIndex & / * index * / , bool / * is_leaf * / , <nl> + ForEachElement ( <nl> + [ & ambiguous ] ( const ShapeIndex & / * index * / , <nl> const std : : vector < const LogicalBuffer * > & points_to ) { <nl> ambiguous | = points_to . size ( ) > 1 ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> return ambiguous ; <nl> } <nl> <nl> bool PointsToSet : : IsDistinct ( ) const { <nl> bool distinct = true ; <nl> std : : set < const LogicalBuffer * > all_points_to ; <nl> - TF_CHECK_OK ( ForEachElement ( [ & distinct , & all_points_to ] ( <nl> - const ShapeIndex & / * index * / , bool / * is_leaf * / , <nl> - const std : : vector < const LogicalBuffer * > & points_to ) { <nl> + ForEachElement ( [ & distinct , & all_points_to ] ( <nl> + const ShapeIndex & / * index * / , <nl> + const std : : vector < const LogicalBuffer * > & points_to ) { <nl> for ( auto & buffer : points_to ) { <nl> if ( all_points_to . count ( buffer ) ! = 0 ) { <nl> distinct = false ; <nl> } <nl> all_points_to . insert ( buffer ) ; <nl> } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> return distinct ; <nl> } <nl> <nl> size_t PointsToSet : : size ( ) const { <nl> tensorflow : : gtl : : FlatSet < const LogicalBuffer * > PointsToSet : : CreateFlattenedSet ( ) <nl> const { <nl> tensorflow : : gtl : : FlatSet < const LogicalBuffer * > flat_set ; <nl> - TF_CHECK_OK ( ForEachElement ( <nl> - [ & flat_set ] ( const ShapeIndex & / * index * / , bool / * is_leaf * / , <nl> - const std : : vector < const LogicalBuffer * > & buffers ) { <nl> - flat_set . insert ( buffers . begin ( ) , buffers . end ( ) ) ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + ForEachElement ( [ & flat_set ] ( const ShapeIndex & / * index * / , <nl> + const std : : vector < const LogicalBuffer * > & buffers ) { <nl> + flat_set . insert ( buffers . begin ( ) , buffers . 
end ( ) ) ; <nl> + } ) ; <nl> return flat_set ; <nl> } <nl> <nl> bool PointsToSet : : ContainsBuffer ( const LogicalBuffer & buffer ) const { <nl> bool found = false ; <nl> - TF_CHECK_OK ( ForEachElement ( [ & found , & buffer ] ( <nl> - const ShapeIndex & / * index * / , bool / * is_leaf * / , <nl> - const std : : vector < const LogicalBuffer * > & pointed_to_buffers ) { <nl> + ForEachElement ( [ & found , & buffer ] ( const ShapeIndex & / * index * / , <nl> + const std : : vector < const LogicalBuffer * > & <nl> + pointed_to_buffers ) { <nl> if ( ! found & & <nl> std : : find ( pointed_to_buffers . begin ( ) , pointed_to_buffers . end ( ) , <nl> & buffer ) ! = pointed_to_buffers . end ( ) ) { <nl> found = true ; <nl> } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> return found ; <nl> } <nl> <nl> Status TuplePointsToAnalysis : : PopulateDefinedBuffersAndAliases ( <nl> instruction . get ( ) , & instruction_defined_buffers_ [ instruction . get ( ) ] ) ) ; <nl> <nl> const PointsToSet & points_to_set = GetPointsToSet ( instruction . get ( ) ) ; <nl> - TF_RETURN_IF_ERROR ( points_to_set . ForEachElement ( [ this , & instruction ] ( <nl> - const ShapeIndex & index , bool / * is_leaf * / , <nl> - const std : : vector < const LogicalBuffer * > & pointed_to_buffers ) { <nl> - for ( const LogicalBuffer * buffer : pointed_to_buffers ) { <nl> - if ( buffer_aliases_ . count ( buffer ) = = 0 ) { <nl> - buffer_aliases_ . insert ( { buffer , std : : vector < BufferAlias > ( ) } ) ; <nl> - } <nl> - buffer_aliases_ [ buffer ] . emplace_back ( instruction . get ( ) , index ) ; <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + points_to_set . ForEachElement ( <nl> + [ this , & instruction ] ( <nl> + const ShapeIndex & index , <nl> + const std : : vector < const LogicalBuffer * > & pointed_to_buffers ) { <nl> + for ( const LogicalBuffer * buffer : pointed_to_buffers ) { <nl> + if ( buffer_aliases_ . count ( buffer ) = = 0 ) { <nl> + buffer_aliases_ . insert ( { buffer , std : : vector < BufferAlias > ( ) } ) ; <nl> + } <nl> + buffer_aliases_ [ buffer ] . emplace_back ( instruction . get ( ) , index ) ; <nl> + } <nl> + } ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> Status TuplePointsToAnalysis : : DefaultAction ( HloInstruction * hlo_instruction ) { <nl> / / contains a single element LogicalBuffer ( hlo_instruction , i ) . This indicates <nl> / / that this instruction is the source of all buffers in its own output . <nl> PointsToSet & points_to_set = CreateEmptyPointsToSet ( hlo_instruction ) ; <nl> - TF_RETURN_IF_ERROR ( points_to_set . ForEachMutableElement ( <nl> - [ this , hlo_instruction ] ( const ShapeIndex & index , bool / * is_leaf * / , <nl> + points_to_set . ForEachMutableElement ( <nl> + [ this , hlo_instruction ] ( const ShapeIndex & index , <nl> std : : vector < const LogicalBuffer * > * buffers ) { <nl> const LogicalBuffer & buffer = NewLogicalBuffer ( hlo_instruction , index ) ; <nl> buffers - > push_back ( & buffer ) ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> <nl> if ( ShapeUtil : : IsTuple ( hlo_instruction - > shape ( ) ) ) { <nl> / / If the hlo instruction is a tuple - shaped , then trivially the instruction <nl> Status TuplePointsToAnalysis : : HandleGetTupleElement ( <nl> <nl> / / Copy the points - to set ( and tuple sources ) at index { element_index } of the <nl> / / operand to the points - to set for this GetTupleElement instruction . <nl> - TF_RETURN_IF_ERROR ( points_to_set . 
ForEachMutableElement ( [ & , this ] ( <nl> - const ShapeIndex & target_index , bool / * is_leaf * / , <nl> - std : : vector < const LogicalBuffer * > * points_to ) { <nl> - / / Construct an index into the operand by prepending element_index to the <nl> - / / index for the GetTupleElement instruction ' s points - to set . <nl> - ShapeIndex src_index ; <nl> - src_index . push_back ( element_index ) ; <nl> - for ( auto element : target_index ) { <nl> - src_index . push_back ( element ) ; <nl> - } <nl> + points_to_set . ForEachMutableElement ( <nl> + [ & , this ] ( const ShapeIndex & target_index , <nl> + std : : vector < const LogicalBuffer * > * points_to ) { <nl> + / / Construct an index into the operand by prepending element_index to <nl> + / / the index for the GetTupleElement instruction ' s points - to set . <nl> + ShapeIndex src_index ; <nl> + src_index . push_back ( element_index ) ; <nl> + for ( auto element : target_index ) { <nl> + src_index . push_back ( element ) ; <nl> + } <nl> <nl> - * points_to = operand_points_to_set . element ( src_index ) ; <nl> - for ( HloInstruction * tuple : <nl> - operand_points_to_set . tuple_sources ( src_index ) ) { <nl> - points_to_set . add_tuple_source ( target_index , tuple ) ; <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + * points_to = operand_points_to_set . element ( src_index ) ; <nl> + for ( HloInstruction * tuple : <nl> + operand_points_to_set . tuple_sources ( src_index ) ) { <nl> + points_to_set . add_tuple_source ( target_index , tuple ) ; <nl> + } <nl> + } ) ; <nl> <nl> return Status : : OK ( ) ; <nl> } <nl> Status TuplePointsToAnalysis : : HandleTuple ( <nl> <nl> / / Copy the points - to set ( and tuple sources ) of the operand into the <nl> / / respective subtree of the tuple instructions points - to set . <nl> - TF_RETURN_IF_ERROR ( operand_points_to_set . ForEachElement ( <nl> + operand_points_to_set . ForEachElement ( <nl> [ & points_to_set , & operand_points_to_set , i ] ( <nl> - const ShapeIndex & src_index , bool / * is_leaf * / , <nl> + const ShapeIndex & src_index , <nl> const std : : vector < const LogicalBuffer * > & points_to ) { <nl> ShapeIndex target_index ; <nl> target_index . push_back ( i ) ; <nl> Status TuplePointsToAnalysis : : HandleTuple ( <nl> operand_points_to_set . tuple_sources ( src_index ) ) { <nl> points_to_set . add_tuple_source ( target_index , tuple ) ; <nl> } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> } <nl> <nl> points_to_set . add_tuple_source ( { } , tuple ) ; <nl> Status TuplePointsToAnalysis : : HandleSelect ( HloInstruction * select , <nl> / / add in elements of the on_false points - to set ( tuple sources ) . <nl> PointsToSet & points_to_set = CreateCopiedPointsToSet ( select , on_true ) ; <nl> const PointsToSet & false_points_to_set = * FindOrDie ( points_to_ , on_false ) ; <nl> - TF_RETURN_IF_ERROR ( points_to_set . ForEachMutableElement ( <nl> - [ & ] ( const ShapeIndex & index , bool / * is_leaf * / , <nl> - std : : vector < const LogicalBuffer * > * buffers ) { <nl> + points_to_set . ForEachMutableElement ( <nl> + [ & ] ( const ShapeIndex & index , std : : vector < const LogicalBuffer * > * buffers ) { <nl> for ( const LogicalBuffer * false_buffer : <nl> false_points_to_set . element ( index ) ) { <nl> points_to_set . AddPointedToBuffer ( * false_buffer , index ) ; <nl> Status TuplePointsToAnalysis : : HandleSelect ( HloInstruction * select , <nl> for ( HloInstruction * tuple : false_points_to_set . tuple_sources ( index ) ) { <nl> points_to_set . 
add_tuple_source ( index , tuple ) ; <nl> } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> <nl> / / Select creates a new ( top - level ) buffer to store its result , so its <nl> / / respective element in the points - to set should contain only itself . <nl> TuplePointsToAnalysis : : GetBuffersDefinedByInstruction ( <nl> Status TuplePointsToAnalysis : : GatherBuffersDefinedByInstruction ( <nl> const HloInstruction * instruction , <nl> std : : vector < const LogicalBuffer * > * buffers ) { <nl> - return GetPointsToSet ( instruction ) <nl> - . ForEachElement ( [ this , buffers , instruction ] ( <nl> - const ShapeIndex & index , bool / * is_leaf * / , <nl> - const std : : vector < const LogicalBuffer * > & source_buffers ) { <nl> - / / Add buffers which ' instruction ' is the source of . <nl> - CHECK ( ! source_buffers . empty ( ) ) ; <nl> - if ( source_buffers . size ( ) = = 1 & & <nl> - source_buffers [ 0 ] - > instruction ( ) = = instruction ) { <nl> - / / If this instruction is the source of this buffer the <nl> - / / indices must match . <nl> - DCHECK ( source_buffers [ 0 ] - > index ( ) = = index ) ; <nl> - buffers - > push_back ( source_buffers [ 0 ] ) ; <nl> - } else { <nl> - / / If the points - to set includes more than one buffer then <nl> - / / necessarily this instruction did not produce the <nl> - / / buffer . <nl> - for ( const LogicalBuffer * source_buffer : source_buffers ) { <nl> - DCHECK ( source_buffer - > instruction ( ) ! = instruction ) ; <nl> - } <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } ) ; <nl> + GetPointsToSet ( instruction ) <nl> + . ForEachElement ( <nl> + [ this , buffers , instruction ] ( <nl> + const ShapeIndex & index , <nl> + const std : : vector < const LogicalBuffer * > & source_buffers ) { <nl> + / / Add buffers which ' instruction ' is the source of . <nl> + CHECK ( ! source_buffers . empty ( ) ) ; <nl> + if ( source_buffers . size ( ) = = 1 & & <nl> + source_buffers [ 0 ] - > instruction ( ) = = instruction ) { <nl> + / / If this instruction is the source of this buffer the <nl> + / / indices must match . <nl> + DCHECK ( source_buffers [ 0 ] - > index ( ) = = index ) ; <nl> + buffers - > push_back ( source_buffers [ 0 ] ) ; <nl> + } else { <nl> + / / If the points - to set includes more than one buffer then <nl> + / / necessarily this instruction did not produce the <nl> + / / buffer . <nl> + for ( const LogicalBuffer * source_buffer : source_buffers ) { <nl> + DCHECK ( source_buffer - > instruction ( ) ! = instruction ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> + return Status : : OK ( ) ; <nl> } <nl> <nl> PointsToSet & TuplePointsToAnalysis : : CreateCopiedPointsToSet ( <nl> PointsToSet & TuplePointsToAnalysis : : CreateCopiedPointsToSet ( <nl> / / from src PointsToSet . <nl> PointsToSet & dst_points_to_set = CreateEmptyPointsToSet ( instruction ) ; <nl> const PointsToSet & src_points_to_set = GetPointsToSet ( src ) ; <nl> - TF_CHECK_OK ( dst_points_to_set . ForEachMutableElement ( <nl> + dst_points_to_set . ForEachMutableElement ( <nl> [ this , & dst_points_to_set , & src_points_to_set ] ( <nl> - const ShapeIndex & index , bool / * is_leaf * / , <nl> - std : : vector < const LogicalBuffer * > * buffers ) { <nl> + const ShapeIndex & index , std : : vector < const LogicalBuffer * > * buffers ) { <nl> * buffers = src_points_to_set . element ( index ) ; <nl> for ( auto & tuple_source : src_points_to_set . tuple_sources ( index ) ) { <nl> dst_points_to_set . 
add_tuple_source ( index , tuple_source ) ; <nl> } <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> return * FindOrDie ( points_to_ , instruction ) ; <nl> } <nl> <nl> void TuplePointsToAnalysis : : InstructionToString ( <nl> tensorflow : : strings : : StrAppend ( output , prefix , " instruction " , <nl> instruction - > ToShortString ( ) , " : \ n " ) ; <nl> const PointsToSet & points_to_set = GetPointsToSet ( instruction ) ; <nl> - TF_CHECK_OK ( points_to_set . ForEachElement ( [ & prefix , & output ] ( <nl> - const ShapeIndex & index , bool / * is_leaf * / , <nl> - const std : : vector < const LogicalBuffer * > & points_to ) { <nl> + points_to_set . ForEachElement ( [ & prefix , & output ] ( <nl> + const ShapeIndex & index , <nl> + const std : : vector < const LogicalBuffer * > & <nl> + points_to ) { <nl> tensorflow : : strings : : StrAppend ( <nl> output , prefix , " { " , tensorflow : : str_util : : Join ( index , " , " ) , " } : " , <nl> tensorflow : : str_util : : Join ( <nl> void TuplePointsToAnalysis : : InstructionToString ( <nl> out - > append ( source - > ToString ( ) ) ; <nl> } ) , <nl> " \ n " ) ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> } <nl> <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / shape_tree . h <nl> ppp b / tensorflow / compiler / xla / shape_tree . h <nl> class ShapeTree { <nl> / / <nl> / / index : the index of the element in the shape . See ShapeUtil : : GetSubshape <nl> / / for definition of index . <nl> - / / is_leaf : Whether this element is a leaf element in the shape . That is , <nl> - / / whether this index corresponds to an array and not a ( nested ) <nl> - / / tuple element . <nl> / / data : The data value at this element . <nl> - / / <nl> - / / If any call to the given function returns a non - OK status , then traversal <nl> - / / is aborted and the status value is returned . <nl> - using VisitorFunction = std : : function < Status ( <nl> - const ShapeIndex & / * index * / , bool / * is_leaf * / , const T & / * data * / ) > ; <nl> - Status ForEachElement ( const VisitorFunction & func ) const ; <nl> + using VisitorFunction = <nl> + std : : function < void ( const ShapeIndex & / * index * / , const T & / * data * / ) > ; <nl> + void ForEachElement ( const VisitorFunction & func ) const ; <nl> + <nl> + using MutableVisitorFunction = <nl> + std : : function < void ( const ShapeIndex & / * index * / , T * / * data * / ) > ; <nl> + void ForEachMutableElement ( const MutableVisitorFunction & func ) ; <nl> + <nl> + / / Variants of ForEach ( Mutable ) Element which propagate a Status value from the <nl> + / / visitor . 
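Concretely, the void visitors suit infallible walks, while a WithStatus visitor can abort the traversal by returning an error. A rough usage sketch, assuming xla::InvalidArgument from util.h (ValidatePositive is an invented example, not part of this header):

Status ValidatePositive(const ShapeTree<int>& tree) {
  // The first non-OK Status returned by the visitor stops the walk and
  // becomes the return value of ForEachElementWithStatus.
  return tree.ForEachElementWithStatus(
      [](const ShapeIndex& /*index*/, const int& data) -> Status {
        if (data <= 0) {
          return InvalidArgument("ShapeTree element is not positive");
        }
        return Status::OK();
      });
}

As the definitions later in this file show, the void variants are implemented on top of these Status helpers by wrapping the visitor to return Status::OK() and ignoring the resulting OK status.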
<nl> + using StatusVisitorFunction = <nl> + std : : function < Status ( const ShapeIndex & / * index * / , const T & / * data * / ) > ; <nl> + Status ForEachElementWithStatus ( const StatusVisitorFunction & func ) const ; <nl> <nl> - using MutableVisitorFunction = std : : function < Status ( <nl> - const ShapeIndex & / * index * / , bool / * is_leaf * / , T * / * data * / ) > ; <nl> - Status ForEachMutableElement ( const MutableVisitorFunction & func ) ; <nl> + using MutableStatusVisitorFunction = <nl> + std : : function < Status ( const ShapeIndex & / * index * / , T * / * data * / ) > ; <nl> + Status ForEachMutableElementWithStatus ( <nl> + const MutableStatusVisitorFunction & func ) ; <nl> <nl> / / Copy the subtree of values from ' other ' rooted at ShapeIndex <nl> / / ' source_base_index ' into the subtree of value in this ShapeTree rooted at <nl> class ShapeTree { <nl> / / Helpers for traversing the shape via ForEachElement . The helpers <nl> / / recursively traverse the subtree rooted at " index " ( defined as in <nl> / / ShapeUtil : : GetSubshape ) . <nl> - static Status ForEachHelper ( const VisitorFunction & func , const Node & node , <nl> - ShapeIndex * index ) ; <nl> - static Status ForEachMutableHelper ( const MutableVisitorFunction & func , <nl> + static Status ForEachHelper ( const StatusVisitorFunction & func , <nl> + const Node & node , ShapeIndex * index ) ; <nl> + static Status ForEachMutableHelper ( const MutableStatusVisitorFunction & func , <nl> Node * node , ShapeIndex * index ) ; <nl> <nl> / / Return the tree node at the given index . <nl> const internal : : ShapeTreeNode < T > * ShapeTree < T > : : Lookup ( <nl> <nl> / * static * / <nl> template < typename T > <nl> - Status ShapeTree < T > : : ForEachHelper ( const VisitorFunction & func , <nl> + Status ShapeTree < T > : : ForEachHelper ( const StatusVisitorFunction & func , <nl> const Node & node , ShapeIndex * index ) { <nl> - TF_RETURN_IF_ERROR ( func ( * index , node . children . empty ( ) , node . data ) ) ; <nl> + TF_RETURN_IF_ERROR ( func ( * index , node . data ) ) ; <nl> for ( int64 i = 0 ; i < node . children . size ( ) ; + + i ) { <nl> index - > push_back ( i ) ; <nl> TF_RETURN_IF_ERROR ( ForEachHelper ( func , * node . children [ i ] , index ) ) ; <nl> Status ShapeTree < T > : : ForEachHelper ( const VisitorFunction & func , <nl> <nl> / * static * / <nl> template < typename T > <nl> - Status ShapeTree < T > : : ForEachMutableHelper ( const MutableVisitorFunction & func , <nl> - Node * node , ShapeIndex * index ) { <nl> - TF_RETURN_IF_ERROR ( func ( * index , node - > children . empty ( ) , & node - > data ) ) ; <nl> + Status ShapeTree < T > : : ForEachMutableHelper ( <nl> + const MutableStatusVisitorFunction & func , Node * node , ShapeIndex * index ) { <nl> + TF_RETURN_IF_ERROR ( func ( * index , & node - > data ) ) ; <nl> for ( int64 i = 0 ; i < node - > children . 
size ( ) ; + + i ) { <nl> index - > push_back ( i ) ; <nl> TF_RETURN_IF_ERROR ( <nl> Status ShapeTree < T > : : ForEachMutableHelper ( const MutableVisitorFunction & func , <nl> } <nl> <nl> template < typename T > <nl> - Status ShapeTree < T > : : ForEachElement ( const VisitorFunction & func ) const { <nl> + Status ShapeTree < T > : : ForEachElementWithStatus ( <nl> + const StatusVisitorFunction & func ) const { <nl> ShapeIndex index ; <nl> return ForEachHelper ( func , root_ , & index ) ; <nl> } <nl> <nl> template < typename T > <nl> - Status ShapeTree < T > : : ForEachMutableElement ( const MutableVisitorFunction & func ) { <nl> + Status ShapeTree < T > : : ForEachMutableElementWithStatus ( <nl> + const MutableStatusVisitorFunction & func ) { <nl> ShapeIndex index ; <nl> return ForEachMutableHelper ( func , & root_ , & index ) ; <nl> } <nl> <nl> + template < typename T > <nl> + void ShapeTree < T > : : ForEachElement ( const VisitorFunction & func ) const { <nl> + ShapeIndex index ; <nl> + return ForEachHelper ( <nl> + [ & func ] ( const ShapeIndex & index , const T & data ) { <nl> + func ( index , data ) ; <nl> + return Status : : OK ( ) ; <nl> + } , <nl> + root_ , & index ) <nl> + . IgnoreError ( ) ; <nl> + } <nl> + <nl> + template < typename T > <nl> + void ShapeTree < T > : : ForEachMutableElement ( const MutableVisitorFunction & func ) { <nl> + ShapeIndex index ; <nl> + return ForEachMutableHelper ( <nl> + [ & func ] ( const ShapeIndex & index , T * data ) { <nl> + func ( index , data ) ; <nl> + return Status : : OK ( ) ; <nl> + } , <nl> + & root_ , & index ) <nl> + . IgnoreError ( ) ; <nl> + } <nl> + <nl> template < typename T > <nl> void ShapeTree < T > : : CopySubtreeFrom ( const ShapeTree < T > & other , <nl> const ShapeIndex & source_base_index , <nl> void ShapeTree < T > : : CopySubtreeFrom ( const ShapeTree < T > & other , <nl> CHECK ( ShapeUtil : : Compatible ( <nl> ShapeUtil : : GetSubshape ( shape ( ) , target_base_index ) , <nl> ShapeUtil : : GetSubshape ( other . shape ( ) , source_base_index ) ) ) ; <nl> - ForEachMutableElement ( <nl> - [ this , & other , & source_base_index , & target_base_index ] ( <nl> - const ShapeIndex & index , bool / * is_leaf * / , T * data ) { <nl> - / / Copy the data element only if index is in the <nl> - / / subtree rooted at target_base_index . <nl> - for ( int i = 0 ; i < target_base_index . size ( ) ; + + i ) { <nl> - if ( i > = index . size ( ) | | index [ i ] ! = target_base_index [ i ] ) { <nl> - return Status : : OK ( ) ; <nl> - } <nl> - } <nl> - / / Construct source element index to copy from . <nl> - ShapeIndex source_index = source_base_index ; <nl> - for ( int i = target_base_index . size ( ) ; i < index . size ( ) ; + + i ) { <nl> - source_index . push_back ( index [ i ] ) ; <nl> - } <nl> - * data = other . element ( source_index ) ; <nl> - return Status : : OK ( ) ; <nl> - } ) <nl> - . IgnoreError ( ) ; <nl> + ForEachMutableElement ( [ this , & other , & source_base_index , & target_base_index ] ( <nl> + const ShapeIndex & index , T * data ) { <nl> + / / Copy the data element only if index is in the <nl> + / / subtree rooted at target_base_index . <nl> + for ( int i = 0 ; i < target_base_index . size ( ) ; + + i ) { <nl> + if ( i > = index . size ( ) | | index [ i ] ! = target_base_index [ i ] ) { <nl> + return ; <nl> + } <nl> + } <nl> + / / Construct source element index to copy from . <nl> + ShapeIndex source_index = source_base_index ; <nl> + for ( int i = target_base_index . size ( ) ; i < index . 
size ( ) ; + + i ) { <nl> + source_index . push_back ( index [ i ] ) ; <nl> + } <nl> + * data = other . element ( source_index ) ; <nl> + } ) ; <nl> } <nl> <nl> template < typename T > <nl> bool ShapeTree < T > : : operator = = ( const ShapeTree < T > & other ) const { <nl> bool equal = true ; <nl> - ForEachElement ( [ this , & other , & equal ] ( const ShapeIndex & index , <nl> - bool / * is_leaf * / , const T & data ) { <nl> - if ( data ! = other . element ( index ) ) { <nl> - equal = false ; <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } ) <nl> - . IgnoreError ( ) ; <nl> + ForEachElement ( <nl> + [ this , & other , & equal ] ( const ShapeIndex & index , const T & data ) { <nl> + if ( data ! = other . element ( index ) ) { <nl> + equal = false ; <nl> + } <nl> + } ) ; <nl> return equal ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / shape_tree_test . cc <nl> ppp b / tensorflow / compiler / xla / shape_tree_test . cc <nl> void ShapeTreeTest : : TestShapeConstructor ( const Shape & shape , <nl> int expected_num_nodes ) { <nl> ShapeTree < int > int_tree ( shape ) ; <nl> int num_nodes = 0 ; <nl> - TF_CHECK_OK ( int_tree . ForEachElement ( <nl> - [ & num_nodes ] ( const ShapeIndex & / * index * / , bool / * is_leaf * / , int data ) { <nl> - EXPECT_EQ ( 0 , data ) ; <nl> - + + num_nodes ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + int_tree . ForEachElement ( [ & num_nodes ] ( const ShapeIndex & / * index * / , int data ) { <nl> + EXPECT_EQ ( 0 , data ) ; <nl> + + + num_nodes ; <nl> + } ) ; <nl> EXPECT_EQ ( expected_num_nodes , num_nodes ) ; <nl> <nl> ShapeTree < bool > bool_tree ( shape ) ; <nl> num_nodes = 0 ; <nl> - TF_CHECK_OK ( bool_tree . ForEachElement ( <nl> - [ & num_nodes ] ( const ShapeIndex & / * index * / , bool / * is_leaf * / , bool data ) { <nl> + bool_tree . ForEachElement ( <nl> + [ & num_nodes ] ( const ShapeIndex & / * index * / , bool data ) { <nl> EXPECT_EQ ( false , data ) ; <nl> + + num_nodes ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> EXPECT_EQ ( expected_num_nodes , num_nodes ) ; <nl> } <nl> <nl> void ShapeTreeTest : : TestInitValueConstructor ( const Shape & shape , <nl> int expected_num_nodes ) { <nl> ShapeTree < int > tree ( shape , 42 ) ; <nl> int num_nodes = 0 ; <nl> - TF_CHECK_OK ( tree . ForEachElement ( <nl> - [ & num_nodes ] ( const ShapeIndex & / * index * / , bool / * is_leaf * / , int data ) { <nl> - EXPECT_EQ ( 42 , data ) ; <nl> - + + num_nodes ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + tree . ForEachElement ( [ & num_nodes ] ( const ShapeIndex & / * index * / , int data ) { <nl> + EXPECT_EQ ( 42 , data ) ; <nl> + + + num_nodes ; <nl> + } ) ; <nl> EXPECT_EQ ( expected_num_nodes , num_nodes ) ; <nl> <nl> num_nodes = 0 ; <nl> - TF_CHECK_OK ( tree . ForEachMutableElement ( <nl> - [ & num_nodes ] ( const ShapeIndex & / * index * / , bool / * is_leaf * / , int * data ) { <nl> + tree . ForEachMutableElement ( <nl> + [ & num_nodes ] ( const ShapeIndex & / * index * / , int * data ) { <nl> EXPECT_EQ ( 42 , * data ) ; <nl> * data = num_nodes ; <nl> + + num_nodes ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> EXPECT_EQ ( expected_num_nodes , num_nodes ) ; <nl> <nl> num_nodes = 0 ; <nl> - TF_CHECK_OK ( tree . ForEachElement ( <nl> - [ & num_nodes ] ( const ShapeIndex & / * index * / , bool / * is_leaf * / , int data ) { <nl> - EXPECT_EQ ( num_nodes , data ) ; <nl> - + + num_nodes ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + tree . 
ForEachElement ( [ & num_nodes ] ( const ShapeIndex & / * index * / , int data ) { <nl> + EXPECT_EQ ( num_nodes , data ) ; <nl> + + + num_nodes ; <nl> + } ) ; <nl> EXPECT_EQ ( expected_num_nodes , num_nodes ) ; <nl> } <nl> <nl> TEST_F ( ShapeTreeTest , TupleShape ) { <nl> <nl> / / Sum all elements in the shape . <nl> int sum = 0 ; <nl> - TF_CHECK_OK ( shape_tree . ForEachElement ( <nl> - [ & sum ] ( const ShapeIndex & / * index * / , bool / * is_leaf * / , int data ) { <nl> - sum + = data ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + shape_tree . ForEachElement ( <nl> + [ & sum ] ( const ShapeIndex & / * index * / , int data ) { sum + = data ; } ) ; <nl> EXPECT_EQ ( 66 , sum ) ; <nl> <nl> / / Test the copy constructor . <nl> TEST_F ( ShapeTreeTest , TupleShape ) { <nl> EXPECT_EQ ( - 100 , copy . element ( { 2 } ) ) ; <nl> <nl> / / Write zero to all data elements . <nl> - TF_CHECK_OK ( shape_tree . ForEachMutableElement ( <nl> - [ & sum ] ( const ShapeIndex & / * index * / , bool / * is_leaf * / , int * data ) { <nl> - * data = 0 ; <nl> - return Status : : OK ( ) ; <nl> - } ) ) ; <nl> + shape_tree . ForEachMutableElement ( <nl> + [ & sum ] ( const ShapeIndex & / * index * / , int * data ) { * data = 0 ; } ) ; <nl> EXPECT_EQ ( 0 , shape_tree . element ( { } ) ) ; <nl> EXPECT_EQ ( 0 , shape_tree . element ( { 0 } ) ) ; <nl> EXPECT_EQ ( 0 , shape_tree . element ( { 1 } ) ) ; <nl> mmm a / tensorflow / compiler / xla / shape_util . cc <nl> ppp b / tensorflow / compiler / xla / shape_util . cc <nl> bool CompareShapes ( const Shape & lhs , const Shape & rhs , bool compare_layouts ) { <nl> return return_shape ; <nl> } <nl> <nl> + / * static * / <nl> + bool ShapeUtil : : IsLeafIndex ( const Shape & shape , const ShapeIndex & index ) { <nl> + return ! IsTuple ( GetSubshape ( shape , index ) ) ; <nl> + } <nl> + <nl> / * static * / Shape ShapeUtil : : StripDegenerateDimensions ( const Shape & shape ) { <nl> std : : vector < int64 > dimension_sizes ; <nl> std : : vector < int64 > degenerate_dimensions ; <nl> namespace { <nl> / / Helper for ForEachSubshape which visits the subshapes of the given shape in <nl> / / DFS pre - order starting with the index . <nl> Status ForEachSubshapeHelper ( const Shape & shape , <nl> - const ShapeUtil : : VisitorFunction & func , <nl> + const ShapeUtil : : StatusVisitorFunction & func , <nl> ShapeIndex * index ) { <nl> TF_RETURN_IF_ERROR ( func ( shape , * index ) ) ; <nl> if ( ShapeUtil : : IsTuple ( shape ) ) { <nl> Status ForEachSubshapeHelper ( const Shape & shape , <nl> / / Helper for ForEachMutableSubshape which visits the subshapes of the given <nl> / / shape in DFS pre - order starting with the index . 
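The CopySubtreeFrom rewrite above keeps its behavior under the void visitor: indices outside the target subtree are now skipped with a bare return rather than an OK Status. A usage sketch under assumed shapes (source_shape and target_shape are placeholders whose subtrees at the indices shown are compatible):

ShapeTree<int> source(source_shape, /*init_value=*/1);
ShapeTree<int> target(target_shape, /*init_value=*/0);
// Overwrite target's subtree rooted at {1} with source's subtree rooted at
// {0}; ShapeUtil::Compatible on the two subshapes is CHECKed internally.
target.CopySubtreeFrom(source, /*source_base_index=*/{0},
                       /*target_base_index=*/{1});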
<nl> Status ForEachMutableSubshapeHelper ( <nl> - Shape * shape , const ShapeUtil : : MutatingVisitorFunction & func , <nl> + Shape * shape , const ShapeUtil : : MutatingStatusVisitorFunction & func , <nl> ShapeIndex * index ) { <nl> TF_RETURN_IF_ERROR ( func ( shape , * index ) ) ; <nl> if ( ShapeUtil : : IsTuple ( * shape ) ) { <nl> Status ForEachMutableSubshapeHelper ( <nl> <nl> } / / namespace <nl> <nl> - / * static * / Status ShapeUtil : : ForEachSubshape ( const Shape & shape , <nl> - const VisitorFunction & func ) { <nl> + / * static * / void ShapeUtil : : ForEachSubshape ( const Shape & shape , <nl> + const VisitorFunction & func ) { <nl> ShapeIndex index ; <nl> - return ForEachSubshapeHelper ( shape , func , & index ) ; <nl> + ForEachSubshapeHelper ( <nl> + shape , <nl> + [ & func ] ( const Shape & subshape , const ShapeIndex & index ) { <nl> + func ( subshape , index ) ; <nl> + return Status : : OK ( ) ; <nl> + } , <nl> + & index ) <nl> + . IgnoreError ( ) ; <nl> } <nl> <nl> - / * static * / Status ShapeUtil : : ForEachMutableSubshape ( <nl> + / * static * / void ShapeUtil : : ForEachMutableSubshape ( <nl> Shape * shape , const MutatingVisitorFunction & func ) { <nl> ShapeIndex index ; <nl> + ForEachMutableSubshapeHelper ( <nl> + shape , <nl> + [ & func ] ( Shape * subshape , const ShapeIndex & index ) { <nl> + func ( subshape , index ) ; <nl> + return Status : : OK ( ) ; <nl> + } , <nl> + & index ) <nl> + . IgnoreError ( ) ; <nl> + } <nl> + <nl> + / * static * / Status ShapeUtil : : ForEachSubshapeWithStatus ( <nl> + const Shape & shape , const StatusVisitorFunction & func ) { <nl> + ShapeIndex index ; <nl> + return ForEachSubshapeHelper ( shape , func , & index ) ; <nl> + } <nl> + <nl> + / * static * / Status ShapeUtil : : ForEachMutableSubshapeWithStatus ( <nl> + Shape * shape , const MutatingStatusVisitorFunction & func ) { <nl> + ShapeIndex index ; <nl> return ForEachMutableSubshapeHelper ( shape , func , & index ) ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / shape_util . h <nl> ppp b / tensorflow / compiler / xla / shape_util . h <nl> class ShapeUtil { <nl> static const Shape & GetSubshape ( const Shape & shape , const ShapeIndex & index ) ; <nl> static Shape * GetMutableSubshape ( Shape * shape , const ShapeIndex & index ) ; <nl> <nl> + / / Returns whether the given index in the given shape is a leaf element of the <nl> + / / shape . <nl> + static bool IsLeafIndex ( const Shape & shape , const ShapeIndex & index ) ; <nl> + <nl> / / Calls the given visitor function for each subshape of the given shape . <nl> - / / Returns early if an error status is returned . Subshapes are visited in DFS <nl> - / / pre - order starting with the entire shape ( index { } ) . <nl> - using VisitorFunction = std : : function < Status ( const Shape & / * subshape * / , <nl> - const ShapeIndex & / * index * / ) > ; <nl> - static Status ForEachSubshape ( const Shape & shape , <nl> - const VisitorFunction & func ) ; <nl> - <nl> - / / Mutating variant of ForEachSubshape . <nl> + / / Subshapes are visited in DFS pre - order starting with the entire shape <nl> + / / ( index { } ) . 
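ShapeUtil gets the same void/WithStatus split below. A short sketch, assuming only the public ShapeUtil API that appears in this patch, combining the new ForEachSubshape with IsLeafIndex to count array (leaf) subshapes:

Shape shape = ShapeUtil::MakeTupleShape(
    {ShapeUtil::MakeShape(F32, {2, 3}),
     ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(PRED, {4})})});
int leaves = 0;
ShapeUtil::ForEachSubshape(
    shape,
    [&shape, &leaves](const Shape& /*subshape*/, const ShapeIndex& index) {
      if (ShapeUtil::IsLeafIndex(shape, index)) {
        ++leaves;  // Hit at {0} and {1, 0}; {} and {1} are tuples.
      }
    });
// leaves == 2 after the DFS pre-order walk.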
<nl> + using VisitorFunction = std : : function < void ( const Shape & / * subshape * / , <nl> + const ShapeIndex & / * index * / ) > ; <nl> + static void ForEachSubshape ( const Shape & shape , const VisitorFunction & func ) ; <nl> using MutatingVisitorFunction = <nl> + std : : function < void ( Shape * / * subshape * / , const ShapeIndex & / * index * / ) > ; <nl> + static void ForEachMutableSubshape ( Shape * shape , <nl> + const MutatingVisitorFunction & func ) ; <nl> + <nl> + / / Variants of ForEach ( Mutable ) Subshape which propagate Status from the <nl> + / / visitor function . <nl> + using StatusVisitorFunction = std : : function < Status ( <nl> + const Shape & / * subshape * / , const ShapeIndex & / * index * / ) > ; <nl> + static Status ForEachSubshapeWithStatus ( const Shape & shape , <nl> + const StatusVisitorFunction & func ) ; <nl> + using MutatingStatusVisitorFunction = <nl> std : : function < Status ( Shape * / * subshape * / , const ShapeIndex & / * index * / ) > ; <nl> - static Status ForEachMutableSubshape ( Shape * shape , <nl> - const MutatingVisitorFunction & func ) ; <nl> + static Status ForEachMutableSubshapeWithStatus ( <nl> + Shape * shape , const MutatingStatusVisitorFunction & func ) ; <nl> <nl> / / Removes all degenerate dimensions ( size one ) from the given shape . The <nl> / / stripped minor_to_major preserves the relative ordering of non - degenerate <nl> mmm a / tensorflow / compiler / xla / shape_util_test . cc <nl> ppp b / tensorflow / compiler / xla / shape_util_test . cc <nl> TEST ( ShapeUtilTest , GetSubshape ) { <nl> ShapeUtil : : GetSubshape ( nested_tuple_shape , { 2 , 0 } ) ) ) ; <nl> } <nl> <nl> + TEST ( ShapeUtilTest , IsLeafIndex ) { <nl> + / / Test array shape . <nl> + Shape array_shape = ShapeUtil : : MakeShape ( F32 , { 42 , 42 , 123 } ) ; <nl> + EXPECT_TRUE ( ShapeUtil : : IsLeafIndex ( array_shape , { } ) ) ; <nl> + <nl> + / / Test tuple shape . <nl> + Shape tuple_shape = ShapeUtil : : MakeTupleShape ( { array_shape , array_shape } ) ; <nl> + EXPECT_FALSE ( ShapeUtil : : IsLeafIndex ( tuple_shape , { } ) ) ; <nl> + EXPECT_TRUE ( ShapeUtil : : IsLeafIndex ( tuple_shape , { 0 } ) ) ; <nl> + EXPECT_TRUE ( ShapeUtil : : IsLeafIndex ( tuple_shape , { 1 } ) ) ; <nl> + <nl> + / / Test nested tuple shape . 
<nl> + Shape nested_tuple_shape = ShapeUtil : : MakeTupleShape ( <nl> + { array_shape , ShapeUtil : : MakeTupleShape ( { array_shape , array_shape } ) , <nl> + ShapeUtil : : MakeTupleShape ( <nl> + { ShapeUtil : : MakeTupleShape ( { array_shape , array_shape } ) , <nl> + array_shape } ) } ) ; <nl> + EXPECT_FALSE ( ShapeUtil : : IsLeafIndex ( nested_tuple_shape , { } ) ) ; <nl> + EXPECT_TRUE ( ShapeUtil : : IsLeafIndex ( nested_tuple_shape , { 0 } ) ) ; <nl> + EXPECT_FALSE ( ShapeUtil : : IsLeafIndex ( nested_tuple_shape , { 1 } ) ) ; <nl> + EXPECT_TRUE ( ShapeUtil : : IsLeafIndex ( nested_tuple_shape , { 1 , 0 } ) ) ; <nl> + EXPECT_TRUE ( ShapeUtil : : IsLeafIndex ( nested_tuple_shape , { 1 , 1 } ) ) ; <nl> + } <nl> + <nl> TEST ( ShapeUtilTest , HumanString ) { <nl> Shape opaque = ShapeUtil : : MakeOpaqueShape ( ) ; <nl> Shape scalar = ShapeUtil : : MakeShape ( F32 , { } ) ; <nl> TEST ( ShapeUtilTest , HumanString ) { <nl> TEST ( ShapeUtilTest , ForEachSubshapeArray ) { <nl> const Shape shape = ShapeUtil : : MakeShape ( F32 , { 2 , 3 } ) ; <nl> int calls = 0 ; <nl> - EXPECT_IS_OK ( ShapeUtil : : ForEachSubshape ( <nl> + ShapeUtil : : ForEachSubshape ( <nl> shape , [ & calls , & shape ] ( const Shape & subshape , const ShapeIndex & index ) { <nl> EXPECT_EQ ( & shape , & subshape ) ; <nl> EXPECT_TRUE ( index . empty ( ) ) ; <nl> + + calls ; <nl> - return tensorflow : : Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> EXPECT_EQ ( 1 , calls ) ; <nl> } <nl> <nl> TEST ( ShapeUtilTest , ForEachSubshapeNestedTuple ) { <nl> ShapeUtil : : MakeTupleShape ( { ShapeUtil : : MakeShape ( F32 , { 101 } ) , <nl> ShapeUtil : : MakeShape ( PRED , { 33 } ) } ) } ) ; <nl> int calls = 0 ; <nl> - EXPECT_IS_OK ( ShapeUtil : : ForEachSubshape ( <nl> + ShapeUtil : : ForEachSubshape ( <nl> shape , [ & calls , & shape ] ( const Shape & subshape , const ShapeIndex & index ) { <nl> EXPECT_TRUE ( <nl> ShapeUtil : : Equal ( subshape , ShapeUtil : : GetSubshape ( shape , index ) ) ) ; <nl> TEST ( ShapeUtilTest , ForEachSubshapeNestedTuple ) { <nl> EXPECT_EQ ( 33 , ShapeUtil : : ElementsIn ( subshape ) ) ; <nl> } <nl> + + calls ; <nl> - return tensorflow : : Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> EXPECT_EQ ( 5 , calls ) ; <nl> } <nl> <nl> TEST ( ShapeUtilTest , ForEachMutableSubshapeNestedTuple ) { <nl> ShapeUtil : : MakeTupleShape ( { ShapeUtil : : MakeShape ( F32 , { 101 } ) , <nl> ShapeUtil : : MakeShape ( PRED , { 33 } ) } ) } ) ; <nl> int calls = 0 ; <nl> - EXPECT_IS_OK ( ShapeUtil : : ForEachMutableSubshape ( <nl> + ShapeUtil : : ForEachMutableSubshape ( <nl> & shape , [ & calls , & shape ] ( const Shape * subshape , const ShapeIndex & index ) { <nl> / / Pointer values should be equal <nl> EXPECT_EQ ( subshape , ShapeUtil : : GetMutableSubshape ( & shape , index ) ) ; <nl> TEST ( ShapeUtilTest , ForEachMutableSubshapeNestedTuple ) { <nl> EXPECT_EQ ( 33 , ShapeUtil : : ElementsIn ( * subshape ) ) ; <nl> } <nl> + + calls ; <nl> - return tensorflow : : Status : : OK ( ) ; <nl> - } ) ) ; <nl> + } ) ; <nl> EXPECT_EQ ( 5 , calls ) ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / statusor_test . cc <nl> ppp b / tensorflow / compiler / xla / statusor_test . cc <nl> class BenchmarkType { <nl> <nl> / / Calibrate the amount of time spent just calling DoWork , since each of our <nl> / / tests will do this , we can subtract this out of benchmark results . 
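For orientation before the benchmark bodies: the two factory idioms being timed differ only in how the error channel is carried. The member signatures sketched here are assumed from the surrounding test file, not part of a public API.

// Style 1, Status plus out-parameter (see BM_ArgumentFactory* below):
//   Status ArgumentFactory(BenchmarkType** result);
// Style 2, StatusOr carrying the pointer (see BM_StatusOrFactory* below):
//   StatusOr<BenchmarkType*> StatusOrFactory();
StatusOr<BenchmarkType*> result = factory.StatusOrFactory();
if (result.ok()) {
  result.ValueOrDie()->DoWork();
}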
<nl> - static void BM_CalibrateWorkLoop ( int iters ) { <nl> + void BM_CalibrateWorkLoop ( int iters ) { <nl> tensorflow : : testing : : StopTiming ( ) ; <nl> BenchmarkFactory < BenchmarkType > factory ; <nl> BenchmarkType * result = factory . TrivialFactory ( ) ; <nl> tensorflow : : testing : : StartTiming ( ) ; <nl> for ( int i = 0 ; i ! = iters ; + + i ) { <nl> - if ( result ! = nullptr ) result - > DoWork ( ) ; <nl> + if ( result ! = nullptr ) { <nl> + result - > DoWork ( ) ; <nl> + } <nl> } <nl> } <nl> BENCHMARK ( BM_CalibrateWorkLoop ) ; <nl> <nl> / / Measure the time taken to call into the factory , return the value , <nl> / / determine that it is OK , and invoke a trivial function . <nl> - static void BM_TrivialFactory ( int iters ) { <nl> + void BM_TrivialFactory ( int iters ) { <nl> tensorflow : : testing : : StopTiming ( ) ; <nl> BenchmarkFactory < BenchmarkType > factory ; <nl> tensorflow : : testing : : StartTiming ( ) ; <nl> for ( int i = 0 ; i ! = iters ; + + i ) { <nl> BenchmarkType * result = factory . TrivialFactory ( ) ; <nl> - if ( result ! = nullptr ) result - > DoWork ( ) ; <nl> + if ( result ! = nullptr ) { <nl> + result - > DoWork ( ) ; <nl> + } <nl> } <nl> } <nl> BENCHMARK ( BM_TrivialFactory ) ; <nl> BENCHMARK ( BM_TrivialFactory ) ; <nl> / / Measure the time taken to call into the factory , providing an <nl> / / out - param for the result , evaluating the status result and the <nl> / / result pointer , and invoking the trivial function . <nl> - static void BM_ArgumentFactory ( int iters ) { <nl> + void BM_ArgumentFactory ( int iters ) { <nl> tensorflow : : testing : : StopTiming ( ) ; <nl> BenchmarkFactory < BenchmarkType > factory ; <nl> tensorflow : : testing : : StartTiming ( ) ; <nl> BENCHMARK ( BM_ArgumentFactory ) ; <nl> <nl> / / Measure the time to use the StatusOr < T * > factory , evaluate the result , <nl> / / and invoke the trivial function . <nl> - static void BM_StatusOrFactory ( int iters ) { <nl> + void BM_StatusOrFactory ( int iters ) { <nl> tensorflow : : testing : : StopTiming ( ) ; <nl> BenchmarkFactory < BenchmarkType > factory ; <nl> tensorflow : : testing : : StartTiming ( ) ; <nl> BENCHMARK ( BM_StatusOrFactory ) ; <nl> / / Measure the time taken to call into the factory , providing an <nl> / / out - param for the result , evaluating the status result and the <nl> / / result pointer , and invoking the trivial function . <nl> - static void BM_ArgumentFactoryFail ( int iters ) { <nl> + void BM_ArgumentFactoryFail ( int iters ) { <nl> tensorflow : : testing : : StopTiming ( ) ; <nl> BenchmarkFactory < BenchmarkType > factory ; <nl> tensorflow : : testing : : StartTiming ( ) ; <nl> BENCHMARK ( BM_ArgumentFactoryFail ) ; <nl> <nl> / / Measure the time to use the StatusOr < T * > factory , evaluate the result , <nl> / / and invoke the trivial function . <nl> - static void BM_StatusOrFactoryFail ( int iters ) { <nl> + void BM_StatusOrFactoryFail ( int iters ) { <nl> tensorflow : : testing : : StopTiming ( ) ; <nl> BenchmarkFactory < BenchmarkType > factory ; <nl> tensorflow : : testing : : StartTiming ( ) ; <nl> BENCHMARK ( BM_StatusOrFactoryFail ) ; <nl> / / Measure the time taken to call into the factory , providing an <nl> / / out - param for the result , evaluating the status result and the <nl> / / result pointer , and invoking the trivial function . 
<nl> - static void BM_ArgumentFactoryFailShortMsg ( int iters ) { <nl> + void BM_ArgumentFactoryFailShortMsg ( int iters ) { <nl> tensorflow : : testing : : StopTiming ( ) ; <nl> BenchmarkFactory < BenchmarkType > factory ; <nl> tensorflow : : testing : : StartTiming ( ) ; <nl> BENCHMARK ( BM_ArgumentFactoryFailShortMsg ) ; <nl> <nl> / / Measure the time to use the StatusOr < T * > factory , evaluate the result , <nl> / / and invoke the trivial function . <nl> - static void BM_StatusOrFactoryFailShortMsg ( int iters ) { <nl> + void BM_StatusOrFactoryFailShortMsg ( int iters ) { <nl> tensorflow : : testing : : StopTiming ( ) ; <nl> BenchmarkFactory < BenchmarkType > factory ; <nl> tensorflow : : testing : : StartTiming ( ) ; <nl> BENCHMARK ( BM_StatusOrFactoryFailShortMsg ) ; <nl> / / Measure the time taken to call into the factory , providing an <nl> / / out - param for the result , evaluating the status result and the <nl> / / result pointer , and invoking the trivial function . <nl> - static void BM_ArgumentFactoryFailLongMsg ( int iters ) { <nl> + void BM_ArgumentFactoryFailLongMsg ( int iters ) { <nl> tensorflow : : testing : : StopTiming ( ) ; <nl> BenchmarkFactory < BenchmarkType > factory ; <nl> tensorflow : : testing : : StartTiming ( ) ; <nl> BENCHMARK ( BM_ArgumentFactoryFailLongMsg ) ; <nl> <nl> / / Measure the time to use the StatusOr < T * > factory , evaluate the result , <nl> / / and invoke the trivial function . <nl> - static void BM_StatusOrFactoryFailLongMsg ( int iters ) { <nl> + void BM_StatusOrFactoryFailLongMsg ( int iters ) { <nl> tensorflow : : testing : : StopTiming ( ) ; <nl> BenchmarkFactory < BenchmarkType > factory ; <nl> tensorflow : : testing : : StartTiming ( ) ; <nl> mmm a / tensorflow / compiler / xla / tests / BUILD <nl> ppp b / tensorflow / compiler / xla / tests / BUILD <nl> cc_library ( <nl> " / / tensorflow / compiler / xla / client : computation_builder " , <nl> " / / tensorflow / compiler / xla / client : global_data " , <nl> " / / tensorflow / compiler / xla / client : local_client " , <nl> - " / / tensorflow / compiler / xla / legacy_flags : hlo_pass_pipeline_flags " , <nl> + " / / tensorflow / compiler / xla / legacy_flags : debug_options_flags " , <nl> " / / tensorflow / compiler / xla / tests : literal_test_util " , <nl> " / / tensorflow / compiler / xla / tests : test_utils " , <nl> " / / tensorflow / core : lib " , <nl> xla_test ( <nl> " / / tensorflow / compiler / xla / client : computation_builder " , <nl> " / / tensorflow / compiler / xla / client : local_client " , <nl> " / / tensorflow / compiler / xla / legacy_flags : cpu_compiler_flags " , <nl> + " / / tensorflow / compiler / xla / legacy_flags : debug_options_flags " , <nl> " / / tensorflow / compiler / xla / tests : client_library_test_base " , <nl> " / / tensorflow / compiler / xla / tests : literal_test_util " , <nl> " / / tensorflow / core : lib " , <nl> xla_test ( <nl> " / / tensorflow / compiler / xla / client : computation_builder " , <nl> " / / tensorflow / compiler / xla / client : global_data " , <nl> " / / tensorflow / compiler / xla / legacy_flags : cpu_compiler_flags " , <nl> - " / / tensorflow / compiler / xla / legacy_flags : hlo_pass_pipeline_flags " , <nl> " / / tensorflow / compiler / xla / tests : literal_test_util " , <nl> " / / tensorflow / compiler / xla / tests : test_utils " , <nl> " / / tensorflow / core : lib " , <nl> mmm a / tensorflow / compiler / xla / tests / client_library_test_base . 
cc <nl> ppp b / tensorflow / compiler / xla / tests / client_library_test_base . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / client / client_library . h " <nl> # include " tensorflow / compiler / xla / client / computation . h " <nl> # include " tensorflow / compiler / xla / client / local_client . h " <nl> - # include " tensorflow / compiler / xla / legacy_flags / hlo_pass_pipeline_flags . h " <nl> + # include " tensorflow / compiler / xla / legacy_flags / debug_options_flags . h " <nl> # include " tensorflow / compiler / xla / literal_util . h " <nl> # include " tensorflow / compiler / xla / ptr_util . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> Client * GetOrCreateLocalClientOrDie ( se : : Platform * platform ) { <nl> } <nl> } / / namespace <nl> <nl> - ClientLibraryTestBase : : ClientLibraryTestBase ( <nl> - se : : Platform * platform , <nl> - tensorflow : : gtl : : ArraySlice < string > disabled_pass_names ) <nl> + ClientLibraryTestBase : : ClientLibraryTestBase ( se : : Platform * platform ) <nl> : client_ ( GetOrCreateLocalClientOrDie ( platform ) ) { <nl> - legacy_flags : : HloPassPipelineFlags * flags = <nl> - legacy_flags : : GetHloPassPipelineFlags ( ) ; <nl> - flags - > xla_disable_hlo_passes = <nl> - tensorflow : : str_util : : Join ( disabled_pass_names , " , " ) ; <nl> + * ( execution_options_ . mutable_debug_options ( ) ) = <nl> + legacy_flags : : GetDebugOptionsFromFlags ( ) ; <nl> } <nl> <nl> string ClientLibraryTestBase : : TestName ( ) const { <nl> mmm a / tensorflow / compiler / xla / tests / client_library_test_base . h <nl> ppp b / tensorflow / compiler / xla / tests / client_library_test_base . h <nl> namespace xla { <nl> class ClientLibraryTestBase : public : : testing : : Test { <nl> protected : <nl> explicit ClientLibraryTestBase ( <nl> - perftools : : gputools : : Platform * platform = nullptr , <nl> - tensorflow : : gtl : : ArraySlice < string > disabled_pass_names = { } ) ; <nl> + perftools : : gputools : : Platform * platform = nullptr ) ; <nl> <nl> / / Returns the name of the test currently being run . <nl> string TestName ( ) const ; <nl> class ClientLibraryTestBase : public : : testing : : Test { <nl> <nl> void SetSeed ( uint64 seed ) { execution_options_ . set_seed ( seed ) ; } <nl> <nl> + / / Provides mutable access to the execution DebugOptions field ; this lets <nl> + / / tests tweak the options that will be used to compile / run the graph . <nl> + DebugOptions * mutable_debug_options ( ) { <nl> + return execution_options_ . mutable_debug_options ( ) ; <nl> + } <nl> + <nl> / / TODO ( b / 25566808 ) : Add helper that populates a literal from a testdata file . <nl> <nl> / / Convenience methods for building and running a computation from a builder . <nl> mmm a / tensorflow / compiler / xla / tests / compute_constant_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / compute_constant_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / client / global_data . h " <nl> # include " tensorflow / compiler / xla / layout_util . h " <nl> # include " tensorflow / compiler / xla / legacy_flags / cpu_compiler_flags . h " <nl> - # include " tensorflow / compiler / xla / legacy_flags / hlo_pass_pipeline_flags . h " <nl> # include " tensorflow / compiler / xla / literal_util . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> # include " tensorflow / compiler / xla / status_macros . 
h " <nl> ClientType client_types [ ] = { ClientType : : kLocal , ClientType : : kCompileOnly } ; <nl> class ComputeConstantTest : public : : testing : : Test { <nl> public : <nl> explicit ComputeConstantTest ( <nl> - perftools : : gputools : : Platform * platform = nullptr , <nl> - tensorflow : : gtl : : ArraySlice < string > disabled_pass_names = { } ) <nl> - : platform_ ( platform ) { <nl> - legacy_flags : : HloPassPipelineFlags * flags = <nl> - legacy_flags : : GetHloPassPipelineFlags ( ) ; <nl> - flags - > xla_disable_hlo_passes = <nl> - tensorflow : : str_util : : Join ( disabled_pass_names , " , " ) ; <nl> - } <nl> + perftools : : gputools : : Platform * platform = nullptr ) <nl> + : platform_ ( platform ) { } <nl> <nl> string TestName ( ) const { <nl> return : : testing : : UnitTest : : GetInstance ( ) - > current_test_info ( ) - > name ( ) ; <nl> mmm a / tensorflow / compiler / xla / tests / convert_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / convert_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / client / computation_builder . h " <nl> # include " tensorflow / compiler / xla / client / local_client . h " <nl> # include " tensorflow / compiler / xla / legacy_flags / cpu_compiler_flags . h " <nl> + # include " tensorflow / compiler / xla / legacy_flags / debug_options_flags . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> # include " tensorflow / compiler / xla / tests / client_library_test_base . h " <nl> # include " tensorflow / compiler / xla / tests / literal_test_util . h " <nl> namespace { <nl> class ConvertTest : public ClientLibraryTestBase { <nl> public : <nl> explicit ConvertTest ( perftools : : gputools : : Platform * platform = nullptr ) <nl> - : ClientLibraryTestBase ( platform , <nl> - / * disabled_pass_names = * / { " algsimp " , " inline " } ) { } <nl> + : ClientLibraryTestBase ( platform ) { <nl> + mutable_debug_options ( ) - > add_xla_disable_hlo_passes ( " algsimp " ) ; <nl> + mutable_debug_options ( ) - > add_xla_disable_hlo_passes ( " inline " ) ; <nl> + } <nl> } ; <nl> <nl> TEST_F ( ConvertTest , ConvertR1S32ToR1S32 ) { <nl> TEST_F ( ConvertTest , ConvertReshape ) { <nl> int main ( int argc , char * * argv ) { <nl> std : : vector < tensorflow : : Flag > flag_list ; <nl> xla : : legacy_flags : : AppendCpuCompilerFlags ( & flag_list ) ; <nl> + xla : : legacy_flags : : AppendDebugOptionsFlags ( & flag_list ) ; <nl> xla : : string usage = tensorflow : : Flags : : Usage ( argv [ 0 ] , flag_list ) ; <nl> const bool parse_result = tensorflow : : Flags : : Parse ( & argc , argv , flag_list ) ; <nl> if ( ! parse_result ) { <nl> mmm a / tensorflow / compiler / xla / tests / hlo_test_base . cc <nl> ppp b / tensorflow / compiler / xla / tests / hlo_test_base . cc <nl> std : : unique_ptr < Literal > HloTestBase : : ExecuteAndTransfer ( <nl> return TransferFromDevice ( result_shape , device_base ) ; <nl> } <nl> <nl> - string HloTestBase : : TestName ( ) const { <nl> + / * static * / <nl> + string HloTestBase : : TestName ( ) { <nl> return : : testing : : UnitTest : : GetInstance ( ) - > current_test_info ( ) - > name ( ) ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / tests / hlo_test_base . h <nl> ppp b / tensorflow / compiler / xla / tests / hlo_test_base . 
h <nl> class HloTestBase : public : : testing : : Test { <nl> - > Clear ( ) ; <nl> } <nl> <nl> - string TestName ( ) const ; <nl> + static string TestName ( ) ; <nl> <nl> std : : unique_ptr < Backend > backend_ ; <nl> <nl> mmm a / tensorflow / compiler / xla / tests / local_client_test_base . cc <nl> ppp b / tensorflow / compiler / xla / tests / local_client_test_base . cc <nl> LocalClientTestBase : : ShapedBufferToScopedShapedBuffer ( <nl> } <nl> * scoped_buffer - > mutable_buffers ( ) = shaped_buffer - > buffers ( ) ; <nl> <nl> - TF_CHECK_OK ( <nl> - scoped_buffer - > mutable_shape_index_to_buffer_entry ( ) <nl> - - > ForEachMutableElement ( <nl> - [ & shaped_buffer ] ( const ShapeIndex & index , bool is_leaf , <nl> - size_t * buffer_entry ) - > : : tensorflow : : Status { <nl> - if ( is_leaf ) { <nl> - * buffer_entry = <nl> - shaped_buffer - > shape_index_to_buffer_entry ( ) . element ( <nl> - index ) ; <nl> - } <nl> - return tensorflow : : Status : : OK ( ) ; <nl> - } ) ) ; <nl> + scoped_buffer - > mutable_shape_index_to_buffer_entry ( ) - > ForEachMutableElement ( <nl> + [ & shaped_buffer ] ( const ShapeIndex & index , size_t * buffer_entry ) { <nl> + if ( ShapeUtil : : IsLeafIndex ( shaped_buffer - > shape ( ) , index ) ) { <nl> + * buffer_entry = <nl> + shaped_buffer - > shape_index_to_buffer_entry ( ) . element ( index ) ; <nl> + } <nl> + } ) ; <nl> return scoped_buffer ; <nl> } <nl> <nl> mmm a / tensorflow / compiler / xla / tests / map_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / map_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / client / lib / arithmetic . h " <nl> # include " tensorflow / compiler / xla / client / local_client . h " <nl> # include " tensorflow / compiler / xla / legacy_flags / cpu_compiler_flags . h " <nl> + # include " tensorflow / compiler / xla / legacy_flags / debug_options_flags . h " <nl> # include " tensorflow / compiler / xla / literal_util . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> # include " tensorflow / compiler / xla / statusor . h " <nl> namespace { <nl> class MapTest : public ClientLibraryTestBase { <nl> public : <nl> explicit MapTest ( perftools : : gputools : : Platform * platform = nullptr ) <nl> - : ClientLibraryTestBase ( platform , <nl> - / * disabled_pass_names = * / { " algsimp " , " inline " } ) { } <nl> + : ClientLibraryTestBase ( platform ) { <nl> + mutable_debug_options ( ) - > add_xla_disable_hlo_passes ( " algsimp " ) ; <nl> + mutable_debug_options ( ) - > add_xla_disable_hlo_passes ( " inline " ) ; <nl> + } <nl> <nl> / / Creates a function that adds its scalar argument with the constant 1 . 0 . <nl> / / <nl> class MapTest : public ClientLibraryTestBase { <nl> / / Creates a function that adds its scalar argument with the constant 1 . 0 and <nl> / / then multiplies by the original element . <nl> / / <nl> - / / / mmmmmmmmmmmmmmm \ <nl> - / / / \ <nl> + / / / mmmmmmmmmmmmmmmmmm | <nl> + / / / | <nl> / / x { R0F32 } mmm - > ( add ) mmm - > ( mul ) <nl> / / / <nl> / / 1 . 
0f mmmmmmmmm / <nl> class MapTest : public ClientLibraryTestBase { <nl> <nl> / / Creates a function that adds three scalar arguments <nl> / / <nl> - / / x { R0F32 } mmm - \ <nl> - / / \ <nl> + / / x { R0F32 } mmmmmm - | <nl> + / / | <nl> / / y { R0F32 } mmm - > ( add ) mmm > ( add ) <nl> / / / <nl> / / z { R0F32 } mmmmmmmmmmmmmmm / <nl> TEST_F ( MapTestWithFullOpt , MapSquare ) { <nl> int main ( int argc , char * * argv ) { <nl> std : : vector < tensorflow : : Flag > flag_list ; <nl> xla : : legacy_flags : : AppendCpuCompilerFlags ( & flag_list ) ; <nl> + xla : : legacy_flags : : AppendDebugOptionsFlags ( & flag_list ) ; <nl> xla : : string usage = tensorflow : : Flags : : Usage ( argv [ 0 ] , flag_list ) ; <nl> const bool parse_result = tensorflow : : Flags : : Parse ( & argc , argv , flag_list ) ; <nl> if ( ! parse_result ) { <nl> mmm a / tensorflow / compiler / xla / tests / round_trip_transfer_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / round_trip_transfer_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / tests / client_library_test_base . h " <nl> # include " tensorflow / compiler / xla / tests / literal_test_util . h " <nl> # include " tensorflow / compiler / xla / tests / test_macros . h " <nl> - # include " tensorflow / compiler / xla / xla_data . pb . h " <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> mmm a / tensorflow / compiler / xla / tests / vector_ops_simple_test . cc <nl> ppp b / tensorflow / compiler / xla / tests / vector_ops_simple_test . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / client / lib / arithmetic . h " <nl> # include " tensorflow / compiler / xla / client / local_client . h " <nl> # include " tensorflow / compiler / xla / legacy_flags / cpu_compiler_flags . h " <nl> + # include " tensorflow / compiler / xla / legacy_flags / debug_options_flags . h " <nl> # include " tensorflow / compiler / xla / shape_util . h " <nl> # include " tensorflow / compiler / xla / statusor . h " <nl> # include " tensorflow / compiler / xla / test_helpers . h " <nl> namespace { <nl> class VecOpsSimpleTest : public ClientLibraryTestBase { <nl> public : <nl> explicit VecOpsSimpleTest ( perftools : : gputools : : Platform * platform = nullptr ) <nl> - : ClientLibraryTestBase ( platform , <nl> - / * disabled_pass_names = * / { " algsimp " , " inline " } ) { } <nl> + : ClientLibraryTestBase ( platform ) { <nl> + mutable_debug_options ( ) - > add_xla_disable_hlo_passes ( " algsimp " ) ; <nl> + mutable_debug_options ( ) - > add_xla_disable_hlo_passes ( " inline " ) ; <nl> + } <nl> <nl> ErrorSpec error_spec_ { 0 . 0001 } ; <nl> } ; <nl> XLA_TEST_F ( VecOpsSimpleTest , VectorPredicateNotEqual ) { <nl> int main ( int argc , char * * argv ) { <nl> std : : vector < tensorflow : : Flag > flag_list ; <nl> xla : : legacy_flags : : AppendCpuCompilerFlags ( & flag_list ) ; <nl> + xla : : legacy_flags : : AppendDebugOptionsFlags ( & flag_list ) ; <nl> xla : : string usage = tensorflow : : Flags : : Usage ( argv [ 0 ] , flag_list ) ; <nl> const bool parse_result = tensorflow : : Flags : : Parse ( & argc , argv , flag_list ) ; <nl> if ( ! parse_result ) { <nl> mmm a / tensorflow / compiler / xla / xla . proto <nl> ppp b / tensorflow / compiler / xla / xla . proto <nl> message DebugOptions { <nl> / / various stages in compilation ( file names are LOG ( INFO ) ' d ) . Set to " . * " to <nl> / / dump * all * HLO modules . 
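Tying the test changes in this patch together: per-test pass disabling now flows through the per-execution DebugOptions, which ClientLibraryTestBase seeds from the legacy flags, instead of through the removed constructor argument. A condensed fixture sketch; the class name is hypothetical, but the calls mirror ConvertTest, MapTest, and VecOpsSimpleTest above and target the repeated field added below.

class FooTest : public ClientLibraryTestBase {  // Hypothetical fixture name.
 public:
  explicit FooTest(perftools::gputools::Platform* platform = nullptr)
      : ClientLibraryTestBase(platform) {
    // Pass names must match HloPassInterface::name(), per the field comment.
    mutable_debug_options()->add_xla_disable_hlo_passes("algsimp");
    mutable_debug_options()->add_xla_disable_hlo_passes("inline");
  }
};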
<nl> string xla_generate_hlo_graph = 1 ; <nl> + <nl> + / / List of HLO passes to disable . These names must exactly match the pass <nl> + / / names as specified by the HloPassInterface : : name ( ) method . <nl> + repeated string xla_disable_hlo_passes = 2 ; <nl> } <nl> <nl> / / These settings control how XLA compiles and / or runs code . Not all settings <nl> mmm a / tensorflow / contrib / cmake / tf_python . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_python . cmake <nl> add_python_module ( " tensorflow / contrib / data / python " ) <nl> add_python_module ( " tensorflow / contrib / data / python / framework " ) <nl> add_python_module ( " tensorflow / contrib / data / python / kernel_tests " ) <nl> add_python_module ( " tensorflow / contrib / data / python / ops " ) <nl> + add_python_module ( " tensorflow / contrib / data / python / util " ) <nl> add_python_module ( " tensorflow / contrib / deprecated " ) <nl> add_python_module ( " tensorflow / contrib / distributions " ) <nl> add_python_module ( " tensorflow / contrib / distributions / python " ) <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / batch_dataset_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / batch_dataset_op_test . py <nl> def testBatchDataset ( self ) : <nl> " " " Test a dataset that maps a TF function across its input elements . " " " <nl> # The pipeline is TensorSliceDataset - > MapDataset ( square_3 ) - > <nl> # RepeatDataset ( count ) - > BatchDataset ( batch_size ) . <nl> - components = [ np . arange ( 7 ) , <nl> + components = ( np . arange ( 7 ) , <nl> np . array ( [ [ 1 , 2 , 3 ] ] ) * np . arange ( 7 ) [ : , np . newaxis ] , <nl> - np . array ( 37 . 0 ) * np . arange ( 7 ) ] <nl> + np . array ( 37 . 0 ) * np . arange ( 7 ) ) <nl> <nl> count = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> batch_size = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> def testUnbatchDataset ( self ) : <nl> op = iter . get_next ( ) <nl> <nl> with self . test_session ( ) as sess : <nl> - for i in range ( 10 ) : <nl> - self . assertAllClose ( sess . run ( op ) , ( i , ) * 3 ) <nl> + for i in range ( 3 ) : <nl> + self . assertAllClose ( [ range ( 10 ) ] , sess . run ( op ) ) <nl> <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( op ) <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / dataset_constructor_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / dataset_constructor_op_test . py <nl> class DatasetConstructorTest ( test . TestCase ) : <nl> <nl> def testTensorDataset ( self ) : <nl> " " " Test a dataset that represents a single tuple of tensors . " " " <nl> - components = [ np . array ( 1 ) , np . array ( [ 1 , 2 , 3 ] ) , np . array ( 37 . 0 ) ] <nl> + components = ( np . array ( 1 ) , np . array ( [ 1 , 2 , 3 ] ) , np . array ( 37 . 0 ) ) <nl> <nl> iterator = ( dataset_ops . Dataset . from_tensors ( components ) <nl> . make_initializable_iterator ( ) ) <nl> def testTensorDataset ( self ) : <nl> <nl> def testTensorSliceDataset ( self ) : <nl> " " " Test a dataset that represents the slices from a tuple of tensors . " " " <nl> - components = [ <nl> + components = ( <nl> np . tile ( np . array ( [ [ 1 ] , [ 2 ] , [ 3 ] , [ 4 ] ] ) , 20 ) , np . tile ( <nl> np . array ( [ [ 12 ] , [ 13 ] , [ 14 ] , [ 15 ] ] ) , 22 ) , <nl> np . array ( [ 37 . 0 , 38 . 0 , 39 . 0 , 40 . 0 ] ) <nl> - ] <nl> + ) <nl> <nl> iterator = ( dataset_ops . Dataset .
from_tensor_slices ( components ) <nl> . make_initializable_iterator ( ) ) <nl> def testTensorSliceDataset ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( get_next ) <nl> <nl> + def testTensorSliceDatasetWithDict ( self ) : <nl> + components = { " foo " : [ 1 , 2 , 3 ] , " bar " : [ [ 4 . 0 ] , [ 5 . 0 ] , [ 6 . 0 ] ] } <nl> + iterator = ( dataset_ops . Dataset . from_tensor_slices ( components ) <nl> + . make_initializable_iterator ( ) ) <nl> + init_op = iterator . initializer <nl> + get_next = iterator . get_next ( ) <nl> + <nl> + self . assertEqual ( dtypes . int32 , iterator . output_types [ " foo " ] ) <nl> + self . assertEqual ( dtypes . float32 , iterator . output_types [ " bar " ] ) <nl> + self . assertEqual ( ( ) , iterator . output_shapes [ " foo " ] ) <nl> + self . assertEqual ( ( 1 , ) , iterator . output_shapes [ " bar " ] ) <nl> + <nl> + with self . test_session ( ) as sess : <nl> + sess . run ( init_op ) <nl> + for i in range ( 3 ) : <nl> + results = sess . run ( get_next ) <nl> + self . assertEqual ( components [ " foo " ] [ i ] , results [ " foo " ] ) <nl> + self . assertEqual ( components [ " bar " ] [ i ] , results [ " bar " ] ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( get_next ) <nl> + <nl> def testSparseTensorSliceDataset ( self ) : <nl> " " " Test a dataset based on slices of a ` tf . SparseTensor ` . " " " <nl> st = array_ops . sparse_placeholder ( dtypes . float64 ) <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / filter_dataset_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / filter_dataset_op_test . py <nl> <nl> class FilterDatasetTest ( test . TestCase ) : <nl> <nl> def testFilterDataset ( self ) : <nl> - components = [ <nl> + components = ( <nl> np . arange ( 7 , dtype = np . int64 ) , <nl> np . array ( [ [ 1 , 2 , 3 ] ] , dtype = np . int64 ) * np . arange ( <nl> 7 , dtype = np . int64 ) [ : , np . newaxis ] , <nl> np . array ( 37 . 0 , dtype = np . float64 ) * np . arange ( 7 ) <nl> - ] <nl> + ) <nl> count = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> modulus = array_ops . placeholder ( dtypes . int64 ) <nl> <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / flat_map_dataset_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / flat_map_dataset_op_test . py <nl> class FlatMapDatasetTest ( test . TestCase ) : <nl> # pylint : disable = g - long - lambda <nl> def testFlatMapDataset ( self ) : <nl> repeats = [ 1 , 2 , 3 , 4 , 5 , 0 , 1 ] <nl> - components = [ np . array ( repeats , dtype = np . int64 ) ] <nl> + components = np . array ( repeats , dtype = np . int64 ) <nl> iterator = ( <nl> dataset_ops . Dataset . from_tensor_slices ( components ) <nl> . flat_map ( lambda x : dataset_ops . Dataset . from_tensors ( [ x ] ) . repeat ( x ) ) <nl> . make_initializable_iterator ( ) ) <nl> init_op = iterator . initializer <nl> - get_next , = iterator . get_next ( ) <nl> + get_next = iterator . get_next ( ) <nl> <nl> with self . test_session ( ) as sess : <nl> sess . run ( init_op ) <nl> def testFlatMapDataset ( self ) : <nl> <nl> def testNestedFlatMapDataset ( self ) : <nl> repeats = [ [ 1 , 2 ] , [ 3 , 4 ] , [ 5 , 0 ] , [ 1 , 7 ] ] <nl> - components = [ np . array ( repeats , dtype = np . int64 ) ] <nl> + components = np . array ( repeats , dtype = np . int64 ) <nl> iterator = ( <nl> dataset_ops . Dataset . from_tensor_slices ( components ) <nl> - . 
flat_map ( lambda x : dataset_ops . Dataset . from_tensor_slices ( [ x ] ) <nl> - . flat_map ( lambda y : dataset_ops . Dataset . from_tensors ( [ y ] ) <nl> + . flat_map ( lambda x : dataset_ops . Dataset . from_tensor_slices ( x ) <nl> + . flat_map ( lambda y : dataset_ops . Dataset . from_tensors ( y ) <nl> . repeat ( y ) ) ) . make_initializable_iterator ( ) ) <nl> init_op = iterator . initializer <nl> - get_next , = iterator . get_next ( ) <nl> + get_next = iterator . get_next ( ) <nl> <nl> with self . test_session ( ) as sess : <nl> sess . run ( init_op ) <nl> def testNestedFlatMapDataset ( self ) : <nl> <nl> def testSharedResourceNestedFlatMapDataset ( self ) : <nl> repeats = [ [ 1 , 2 ] , [ 3 , 4 ] , [ 5 , 0 ] , [ 1 , 7 ] ] <nl> - components = [ np . array ( repeats , dtype = np . int64 ) ] <nl> + components = np . array ( repeats , dtype = np . int64 ) <nl> iterator = ( <nl> dataset_ops . Dataset . from_tensor_slices ( components ) <nl> - . flat_map ( lambda x : dataset_ops . Dataset . from_tensor_slices ( [ x ] ) <nl> - . flat_map ( lambda y : dataset_ops . Dataset . from_tensors ( [ y ] ) <nl> + . flat_map ( lambda x : dataset_ops . Dataset . from_tensor_slices ( x ) <nl> + . flat_map ( lambda y : dataset_ops . Dataset . from_tensors ( y ) <nl> . repeat ( y ) ) ) . make_initializable_iterator ( <nl> shared_name = " shared_flat_map_iterator " ) ) <nl> init_op = iterator . initializer <nl> - get_next , = iterator . get_next ( ) <nl> + get_next = iterator . get_next ( ) <nl> <nl> # Create two concurrent sessions that share the same iterator <nl> # resource on the same server , and verify that a random <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / iterator_ops_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / iterator_ops_test . py <nl> def testAttemptingGradientsRaiseExceptions ( self ) : <nl> gradients_impl . gradients ( value , [ component , side ] ) <nl> <nl> def testOneShotIterator ( self ) : <nl> - components = [ np . arange ( 7 ) , <nl> + components = ( np . arange ( 7 ) , <nl> np . array ( [ [ 1 , 2 , 3 ] ] ) * np . arange ( 7 ) [ : , np . newaxis ] , <nl> - np . array ( 37 . 0 ) * np . arange ( 7 ) ] <nl> + np . array ( 37 . 0 ) * np . arange ( 7 ) ) <nl> <nl> def _map_fn ( x , y , z ) : <nl> return math_ops . square ( x ) , math_ops . square ( y ) , math_ops . square ( z ) <nl> def _map_fn ( x , y , z ) : <nl> sess . run ( get_next ) <nl> <nl> def testOneShotIteratorCaptureByValue ( self ) : <nl> - components = [ np . arange ( 7 ) , <nl> + components = ( np . arange ( 7 ) , <nl> np . array ( [ [ 1 , 2 , 3 ] ] ) * np . arange ( 7 ) [ : , np . newaxis ] , <nl> - np . array ( 37 . 0 ) * np . arange ( 7 ) ] <nl> - tensor_components = [ ops . convert_to_tensor ( c ) for c in components ] <nl> + np . array ( 37 . 0 ) * np . arange ( 7 ) ) <nl> + tensor_components = tuple ( [ ops . convert_to_tensor ( c ) for c in components ] ) <nl> <nl> def _map_fn ( x , y , z ) : <nl> return math_ops . square ( x ) , math_ops . square ( y ) , math_ops . square ( z ) <nl> def _map_fn ( x , y , z ) : <nl> sess . run ( get_next ) <nl> <nl> def testOneShotIteratorInsideContainer ( self ) : <nl> - components = [ np . arange ( 7 ) , <nl> + components = ( np . arange ( 7 ) , <nl> np . array ( [ [ 1 , 2 , 3 ] ] ) * np . arange ( 7 ) [ : , np . newaxis ] , <nl> - np . array ( 37 . 0 ) * np . arange ( 7 ) ] <nl> + np . array ( 37 . 0 ) * np . 
arange ( 7 ) ) <nl> <nl> def within_container ( ) : <nl> def _map_fn ( x , y , z ) : <nl> def _map_fn ( x , y , z ) : <nl> sess . run ( get_next ) <nl> <nl> def testSimpleSharedResource ( self ) : <nl> - components = [ <nl> + components = ( <nl> np . array ( 1 , dtype = np . int64 ) , <nl> np . array ( [ 1 , 2 , 3 ] , dtype = np . int64 ) , <nl> np . array ( 37 . 0 , dtype = np . float64 ) <nl> - ] <nl> + ) <nl> <nl> server = server_lib . Server . create_local_server ( ) <nl> <nl> def testSimpleSharedResource ( self ) : <nl> # new graph . <nl> iterator = dataset_ops . Iterator . from_structure ( <nl> shared_name = " shared_iterator " , <nl> - output_types = [ dtypes . int64 , dtypes . int64 , dtypes . float64 ] , <nl> - output_shapes = [ [ ] , [ 3 ] , [ ] ] ) <nl> + output_types = ( dtypes . int64 , dtypes . int64 , dtypes . float64 ) , <nl> + output_shapes = ( [ ] , [ 3 ] , [ ] ) ) <nl> get_next = iterator . get_next ( ) <nl> <nl> with session . Session ( server . target ) as sess : <nl> def testSimpleSharedResource ( self ) : <nl> sess . run ( get_next ) <nl> <nl> def testNotInitializedError ( self ) : <nl> - components = [ np . array ( 1 ) , np . array ( [ 1 , 2 , 3 ] ) , np . array ( 37 . 0 ) ] <nl> + components = ( np . array ( 1 ) , np . array ( [ 1 , 2 , 3 ] ) , np . array ( 37 . 0 ) ) <nl> iterator = ( dataset_ops . Dataset . from_tensors ( components ) <nl> . make_initializable_iterator ( ) ) <nl> get_next = iterator . get_next ( ) <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / map_dataset_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / map_dataset_op_test . py <nl> def testMapDataset ( self ) : <nl> " " " Test an dataset that maps a TF function across its input elements . " " " <nl> # The pipeline is TensorSliceDataset - > MapDataset ( square_3 ) - > <nl> # RepeatDataset ( count ) . <nl> - components = [ np . arange ( 7 ) , <nl> + components = ( np . arange ( 7 ) , <nl> np . array ( [ [ 1 , 2 , 3 ] ] ) * np . arange ( 7 ) [ : , np . newaxis ] , <nl> - np . array ( 37 . 0 ) * np . arange ( 7 ) ] <nl> + np . array ( 37 . 0 ) * np . arange ( 7 ) ) <nl> count = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> <nl> dataset = self . _buildMapDataset ( components , count ) <nl> def testParallelMapDataset ( self ) : <nl> " " " Test an dataset that maps a TF function across its input elements . " " " <nl> # The pipeline is TensorSliceDataset - > ParallelMapDataset ( square_3 ) - > <nl> # RepeatDataset ( count ) . <nl> - components = [ np . arange ( 7 ) , <nl> + components = ( np . arange ( 7 ) , <nl> np . array ( [ [ 1 , 2 , 3 ] ] ) * np . arange ( 7 ) [ : , np . newaxis ] , <nl> - np . array ( 37 . 0 ) * np . arange ( 7 ) ] <nl> + np . array ( 37 . 0 ) * np . arange ( 7 ) ) <nl> count = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> num_threads = array_ops . placeholder ( dtypes . int32 , shape = [ ] ) <nl> output_buffer_size = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> def iterator_thread ( ) : <nl> def _testDisposeParallelMapDataset ( self , explicit_dispose ) : <nl> # The pipeline is TensorSliceDataset - > MapDataset ( square_3 ) - > <nl> # RepeatDataset ( 1000 ) . <nl> - components = [ np . arange ( 1000 ) , <nl> + components = ( np . arange ( 1000 ) , <nl> np . array ( [ [ 1 , 2 , 3 ] ] ) * np . arange ( 1000 ) [ : , np . newaxis ] , <nl> - np . array ( 37 . 0 ) * np . arange ( 1000 ) ] <nl> + np . array ( 37 . 0 ) * np . arange ( 1000 ) ) <nl> <nl> dataset = self . 
_buildParallelMapDataset ( components , 1000 , 100 , 100 ) <nl> iterator = dataset . make_initializable_iterator ( ) <nl> def testImplicitDisposeParallelMapDataset ( self ) : <nl> self . _testDisposeParallelMapDataset ( False ) <nl> <nl> def testParallelMapError ( self ) : <nl> - components = [ np . array ( [ 1 . , 2 . , 3 . , np . nan , 5 . ] ) . astype ( np . float32 ) ] <nl> + components = np . array ( [ 1 . , 2 . , 3 . , np . nan , 5 . ] ) . astype ( np . float32 ) <nl> <nl> dataset = ( dataset_ops . Dataset . from_tensor_slices ( components ) <nl> . map ( lambda x : array_ops . check_numerics ( x , " message " ) ) ) <nl> def testCaptureHashTable ( self ) : <nl> lookup_ops . KeyValueTensorInitializer ( keys , values ) , default_val ) <nl> <nl> input_sentences = dataset_ops . Dataset . from_tensor_slices ( <nl> - constant_op . constant ( [ <nl> - " brain brain tank salad surgery " , <nl> - " surgery brain " , <nl> - ] ) ) <nl> + [ " brain brain tank salad surgery " , " surgery brain " ] ) <nl> <nl> iterator = ( input_sentences <nl> . map ( lambda x : string_ops . string_split ( [ x ] ) . values ) <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / range_dataset_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / range_dataset_op_test . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> - import numpy as np <nl> - <nl> from tensorflow . contrib . data . python . ops import dataset_ops <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> def testStopLessThanStartWithNegativeStep ( self ) : <nl> sess . run ( get_next ) <nl> <nl> def testEnumerateDataset ( self ) : <nl> - components = [ np . array ( [ " a " , " b " ] ) , np . array ( [ 1 , 2 ] ) , np . array ( [ 37 . 0 , 38 ] ) ] <nl> + components = ( [ " a " , " b " ] , [ 1 , 2 ] , [ 37 . 0 , 38 ] ) <nl> start = constant_op . constant ( 20 , dtype = dtypes . int64 ) <nl> <nl> iterator = ( dataset_ops . Dataset . from_tensor_slices ( components ) . enumerate ( <nl> def testEnumerateDataset ( self ) : <nl> <nl> with self . test_session ( ) as sess : <nl> sess . run ( init_op ) <nl> - self . assertEqual ( ( 20 , [ b " a " , 1 , 37 . 0 ] ) , sess . run ( get_next ) ) <nl> - self . assertEqual ( ( 21 , [ b " b " , 2 , 38 . 0 ] ) , sess . run ( get_next ) ) <nl> + self . assertEqual ( ( 20 , ( b " a " , 1 , 37 . 0 ) ) , sess . run ( get_next ) ) <nl> + self . assertEqual ( ( 21 , ( b " b " , 2 , 38 . 0 ) ) , sess . run ( get_next ) ) <nl> <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> sess . run ( get_next ) <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / reader_dataset_ops_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / reader_dataset_ops_test . py <nl> def testRead ( self ) : <nl> with self . assertRaises ( errors . OutOfRangeError ) : <nl> self . _next_actual_batch ( sess ) <nl> <nl> + def testReadWithEquivalentDataset ( self ) : <nl> + # TODO ( mrry ) : Add support for tf . SparseTensor as a Dataset component . <nl> + features = { <nl> + " file " : parsing_ops . FixedLenFeature ( [ ] , dtypes . int64 ) , <nl> + " record " : parsing_ops . FixedLenFeature ( [ ] , dtypes . int64 ) , <nl> + } <nl> + dataset = ( dataset_ops . TFRecordDataset ( self . test_filenames ) <nl> + . map ( lambda x : parsing_ops . parse_single_example ( x , features ) ) <nl> + . repeat ( 10 ) <nl> + . batch ( 2 ) ) <nl> + iterator = dataset . 
make_initializable_iterator ( ) <nl> + init_op = iterator . initializer <nl> + next_element = iterator . get_next ( ) <nl> + <nl> + with self . test_session ( ) as sess : <nl> + sess . run ( init_op ) <nl> + for file_batch , _ , _ , _ , record_batch in self . _next_expected_batch ( <nl> + range ( self . _num_files ) , 2 , 10 ) : <nl> + actual_batch = sess . run ( next_element ) <nl> + self . assertAllEqual ( file_batch , actual_batch [ " file " ] ) <nl> + self . assertAllEqual ( record_batch , actual_batch [ " record " ] ) <nl> + with self . assertRaises ( errors . OutOfRangeError ) : <nl> + sess . run ( next_element ) <nl> + <nl> + <nl> if __name__ = = " __main__ " : <nl> test . main ( ) <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / sequence_dataset_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / sequence_dataset_op_test . py <nl> class SequenceDatasetTest ( test . TestCase ) : <nl> <nl> def testRepeatTensorDataset ( self ) : <nl> " " " Test a dataset that repeats its input multiple times . " " " <nl> - components = [ np . array ( 1 ) , np . array ( [ 1 , 2 , 3 ] ) , np . array ( 37 . 0 ) ] <nl> + components = ( np . array ( 1 ) , np . array ( [ 1 , 2 , 3 ] ) , np . array ( 37 . 0 ) ) <nl> # This placeholder can be fed when dataset - definition subgraph <nl> # runs ( i . e . ` init_op ` below ) to configure the number of <nl> # repetitions used in a particular iterator . <nl> def testRepeatTensorDataset ( self ) : <nl> self . assertAllEqual ( component , result_component ) <nl> <nl> def testTakeTensorDataset ( self ) : <nl> - components = [ np . arange ( 10 ) ] <nl> + components = ( np . arange ( 10 ) , ) <nl> count_placeholder = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> <nl> iterator = ( dataset_ops . Dataset . from_tensor_slices ( components ) <nl> def testTakeTensorDataset ( self ) : <nl> sess . run ( get_next ) <nl> <nl> def testSkipTensorDataset ( self ) : <nl> - components = [ np . arange ( 10 ) ] <nl> + components = ( np . arange ( 10 ) , ) <nl> count_placeholder = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> <nl> iterator = ( dataset_ops . Dataset . from_tensor_slices ( components ) <nl> def testSkipTensorDataset ( self ) : <nl> <nl> def testRepeatRepeatTensorDataset ( self ) : <nl> " " " Test the composition of repeat datasets . " " " <nl> - components = [ np . array ( 1 ) , np . array ( [ 1 , 2 , 3 ] ) , np . array ( 37 . 0 ) ] <nl> + components = ( np . array ( 1 ) , np . array ( [ 1 , 2 , 3 ] ) , np . array ( 37 . 0 ) ) <nl> inner_count = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> outer_count = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / shuffle_dataset_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / shuffle_dataset_op_test . py <nl> <nl> class ShuffleDatasetTest ( test . TestCase ) : <nl> <nl> def testShuffleDataset ( self ) : <nl> - components = [ <nl> + components = ( <nl> np . array ( [ 1 , 2 , 3 , 4 ] ) , np . array ( [ 5 , 6 , 7 , 8 ] ) , <nl> np . array ( [ 9 . 0 , 10 . 0 , 11 . 0 , 12 . 0 ] ) <nl> - ] <nl> + ) <nl> count_placeholder = array_ops . placeholder_with_default ( <nl> constant_op . constant ( 5 , dtypes . int64 ) , shape = [ ] ) <nl> buffer_size_placeholder = array_ops . placeholder ( dtypes . int64 , shape = [ ] ) <nl> def testShuffleDataset ( self ) : <nl> shuffle_dataset = repeat_dataset . 
shuffle ( buffer_size_placeholder , <nl> seed_placeholder ) <nl> <nl> - self . assertEqual ( [ c . shape [ 1 : ] for c in components ] , <nl> + self . assertEqual ( tuple ( [ c . shape [ 1 : ] for c in components ] ) , <nl> shuffle_dataset . output_shapes ) <nl> <nl> # Create initialization ops for iterators without and with <nl> def testShuffleDataset ( self ) : <nl> sess . run ( get_next ) <nl> <nl> def testDefaultArguments ( self ) : <nl> - components = np . array ( [ 0 , 1 , 2 , 3 , 4 ] ) <nl> + components = [ 0 , 1 , 2 , 3 , 4 ] <nl> iterator = ( dataset_ops . Dataset . from_tensor_slices ( components ) . shuffle ( 5 ) <nl> . repeat ( ) . make_one_shot_iterator ( ) ) <nl> <nl> mmm a / tensorflow / contrib / data / python / kernel_tests / zip_dataset_op_test . py <nl> ppp b / tensorflow / contrib / data / python / kernel_tests / zip_dataset_op_test . py <nl> def testZipDataset ( self ) : <nl> array_ops . placeholder ( dtypes . float64 ) <nl> ] <nl> <nl> - datasets = [ <nl> + datasets = tuple ( [ <nl> dataset_ops . Dataset . from_tensor_slices ( component_placeholder ) <nl> for component_placeholder in component_placeholders <nl> - ] <nl> + ] ) <nl> zipped = dataset_ops . Dataset . zip ( datasets ) <nl> <nl> iterator = zipped . make_initializable_iterator ( ) <nl> mmm a / tensorflow / contrib / data / python / ops / BUILD <nl> ppp b / tensorflow / contrib / data / python / ops / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " / / tensorflow / contrib / data / python / framework : function " , <nl> + " / / tensorflow / contrib / data / python / util : nest " , <nl> " / / tensorflow / contrib / util : util_py " , <nl> " / / tensorflow / python : dataset_ops_gen " , <nl> " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : parsing_ops " , <nl> - " / / tensorflow / python : util " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / contrib / data / python / ops / dataset_ops . py <nl> ppp b / tensorflow / contrib / data / python / ops / dataset_ops . py <nl> <nl> import numpy as np <nl> <nl> from tensorflow . contrib . data . python . framework import function <nl> + from tensorflow . contrib . data . python . util import nest <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> <nl> from tensorflow . python . ops import random_ops <nl> from tensorflow . python . ops import resource_variable_ops <nl> from tensorflow . python . platform import gfile <nl> - from tensorflow . python . util import nest <nl> <nl> <nl> class Iterator ( object ) : <nl> def _parse_example ( serialized , features ) : <nl> result . extend ( [ val . indices , val . values , val . dense_shape ] ) <nl> else : <nl> result . append ( val ) <nl> - return result <nl> + return tuple ( result ) <nl> <nl> <nl> def _get_file_names ( file_pattern , randomize_input ) : <nl> new file mode 100644 <nl> index 0000000000000 . . b9691c8e49120 <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / data / python / util / BUILD <nl> <nl> + package ( default_visibility = [ " / / tensorflow : internal " ] ) <nl> + <nl> + licenses ( [ " notice " ] ) # Apache 2 . 0 <nl> + <nl> + exports_files ( [ " LICENSE " ] ) <nl> + <nl> + load ( " / / tensorflow : tensorflow . bzl " , " py_test " ) <nl> + <nl> + py_library ( <nl> + name = " nest " , <nl> + srcs = [ " nest . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow / python : util " , <nl> + ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " nest_test " , <nl> + size = " small " , <nl> + srcs = [ " nest_test . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : nest " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : framework_for_generated_wrappers " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : util " , <nl> + " / / third_party / py / numpy " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " all_files " , <nl> + srcs = glob ( <nl> + [ " * * / * " ] , <nl> + exclude = [ <nl> + " * * / METADATA " , <nl> + " * * / OWNERS " , <nl> + ] , <nl> + ) , <nl> + visibility = [ " / / tensorflow : __subpackages__ " ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . 91c8416d5aebe <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / data / python / util / nest . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + " " " # # Functions for working with arbitrarily nested sequences of elements . <nl> + <nl> + NOTE ( mrry ) : This fork of the ` tensorflow . python . util . nest ` module <nl> + makes two changes : <nl> + <nl> + 1 . It adds support for dictionaries as a level of nesting in nested structures . <nl> + 2 . It removes support for lists as a level of nesting in nested structures . <nl> + <nl> + The motivation for this change is twofold : <nl> + <nl> + 1 . Many input - processing functions ( e . g . ` tf . parse_example ( ) ` ) return <nl> + dictionaries , and we would like to support them natively in datasets . <nl> + 2 . It seems more natural for lists to be treated ( e . g . in Dataset constructors ) <nl> + as tensors , rather than lists of ( lists of . . . ) tensors . <nl> + " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + import collections as _collections <nl> + <nl> + import six as _six <nl> + <nl> + from tensorflow . python . util . all_util import remove_undocumented <nl> + <nl> + <nl> + def _sequence_like ( instance , args ) : <nl> + " " " Converts the sequence ` args ` to the same type as ` instance ` . <nl> + <nl> + Args : <nl> + instance : an instance of ` tuple ` , ` list ` , or a ` namedtuple ` class . <nl> + args : elements to be converted to a sequence . <nl> + <nl> + Returns : <nl> + ` args ` with the type of ` instance ` . <nl> + " " " <nl> + if isinstance ( instance , dict ) : <nl> + # This is a dict . 
Iterate over the keys in sorted order to make <nl> + # this deterministic . <nl> + return { k : v for k , v in zip ( sorted ( instance . keys ( ) ) , args ) } <nl> + elif ( isinstance ( instance , tuple ) and <nl> + hasattr ( instance , " _fields " ) and <nl> + isinstance ( instance . _fields , _collections . Sequence ) and <nl> + all ( isinstance ( f , _six . string_types ) for f in instance . _fields ) ) : <nl> + # This is a namedtuple <nl> + return type ( instance ) ( * args ) <nl> + else : <nl> + # Not a namedtuple <nl> + return type ( instance ) ( args ) <nl> + <nl> + <nl> + def _elements_of ( nest ) : <nl> + if isinstance ( nest , dict ) : <nl> + # Iterate over dict keys in sorted order to make this deterministic . <nl> + return [ v for _ , v in sorted ( nest . items ( ) ) ] <nl> + else : <nl> + return nest <nl> + <nl> + <nl> + def _yield_flat_nest ( nest ) : <nl> + for n in _elements_of ( nest ) : <nl> + if is_sequence ( n ) : <nl> + for ni in _yield_flat_nest ( n ) : <nl> + yield ni <nl> + else : <nl> + yield n <nl> + <nl> + <nl> + def is_sequence ( seq ) : <nl> + " " " Returns True if ` seq ` is a Sequence or dict ( except strings / lists ) . <nl> + <nl> + NOTE ( mrry ) : This differs from ` tensorflow . python . util . nest . is_sequence ( ) ` , <nl> + which * does * treat a Python list as a sequence . For ergonomic <nl> + reasons , ` tf . contrib . data ` users would prefer to treat lists as <nl> + implicit ` tf . Tensor ` objects , and dicts as ( nested ) sequences . <nl> + <nl> + Args : <nl> + seq : an input sequence . <nl> + <nl> + Returns : <nl> + True if the sequence is not a string or list and is a <nl> + collections . Sequence or a dict . <nl> + " " " <nl> + return ( isinstance ( seq , ( _collections . Sequence , dict ) ) <nl> + and not isinstance ( seq , ( list , _six . string_types ) ) ) <nl> + <nl> + <nl> + def flatten ( nest ) : <nl> + " " " Returns a flat sequence from a given nested structure . <nl> + <nl> + If ` nest ` is not a sequence , this returns a single - element list : ` [ nest ] ` . <nl> + <nl> + Args : <nl> + nest : an arbitrarily nested structure or a scalar object . <nl> + Note , numpy arrays are considered scalars . <nl> + <nl> + Returns : <nl> + A Python list , the flattened version of the input . <nl> + " " " <nl> + return list ( _yield_flat_nest ( nest ) ) if is_sequence ( nest ) else [ nest ] <nl> + <nl> + <nl> + def _recursive_assert_same_structure ( nest1 , nest2 , check_types ) : <nl> + is_sequence_nest1 = is_sequence ( nest1 ) <nl> + if is_sequence_nest1 ! = is_sequence ( nest2 ) : <nl> + raise ValueError ( <nl> + " The two structures don ' t have the same nested structure . " <nl> + " First structure : % s , second structure : % s . " % ( nest1 , nest2 ) ) <nl> + <nl> + if is_sequence_nest1 : <nl> + type_nest1 = type ( nest1 ) <nl> + type_nest2 = type ( nest2 ) <nl> + if check_types and type_nest1 ! = type_nest2 : <nl> + raise TypeError ( <nl> + " The two structures don ' t have the same sequence type . First " <nl> + " structure has type % s , while second structure has type % s . " <nl> + % ( type_nest1 , type_nest2 ) ) <nl> + <nl> + for n1 , n2 in zip ( _elements_of ( nest1 ) , _elements_of ( nest2 ) ) : <nl> + _recursive_assert_same_structure ( n1 , n2 , check_types ) <nl> + <nl> + <nl> + def assert_same_structure ( nest1 , nest2 , check_types = True ) : <nl> + " " " Asserts that two structures are nested in the same way . <nl> + <nl> + Args : <nl> + nest1 : an arbitrarily nested structure . <nl> + nest2 : an arbitrarily nested structure .
<nl> + check_types : if ` True ` ( default ) types of sequences are checked as <nl> + well . If set to ` False ` , for example a list and a tuple of objects will <nl> + look same if they have the same size . <nl> + <nl> + Raises : <nl> + ValueError : If the two structures do not have the same number of elements or <nl> + if the two structures are not nested in the same way . <nl> + TypeError : If the two structures differ in the type of sequence in any of <nl> + their substructures . Only possible if ` check_types ` is ` True ` . <nl> + " " " <nl> + len_nest1 = len ( flatten ( nest1 ) ) if is_sequence ( nest1 ) else 1 <nl> + len_nest2 = len ( flatten ( nest2 ) ) if is_sequence ( nest2 ) else 1 <nl> + if len_nest1 ! = len_nest2 : <nl> + raise ValueError ( " The two structures don ' t have the same number of " <nl> + " elements . First structure : % s , second structure : % s . " <nl> + % ( nest1 , nest2 ) ) <nl> + _recursive_assert_same_structure ( nest1 , nest2 , check_types ) <nl> + <nl> + <nl> + def _packed_nest_with_indices ( structure , flat , index ) : <nl> + " " " Helper function for pack_nest_as . <nl> + <nl> + Args : <nl> + structure : Substructure ( tuple of elements and / or tuples ) to mimic <nl> + flat : Flattened values to output substructure for . <nl> + index : Index at which to start reading from flat . <nl> + <nl> + Returns : <nl> + The tuple ( new_index , child ) , where : <nl> + * new_index - the updated index into ` flat ` having processed ` structure ` . <nl> + * packed - the subset of ` flat ` corresponding to ` structure ` , <nl> + having started at ` index ` , and packed into the same nested <nl> + format . <nl> + <nl> + Raises : <nl> + ValueError : if ` structure ` contains more elements than ` flat ` <nl> + ( assuming indexing starts from ` index ` ) . <nl> + " " " <nl> + packed = [ ] <nl> + for s in structure : <nl> + if is_sequence ( s ) : <nl> + new_index , child = _packed_nest_with_indices ( s , flat , index ) <nl> + packed . append ( _sequence_like ( s , child ) ) <nl> + index = new_index <nl> + else : <nl> + packed . append ( flat [ index ] ) <nl> + index + = 1 <nl> + return index , packed <nl> + <nl> + <nl> + def pack_sequence_as ( structure , flat_sequence ) : <nl> + " " " Returns a given flattened sequence packed into a nest . <nl> + <nl> + If ` structure ` is a scalar , ` flat_sequence ` must be a single - element list ; <nl> + in this case the return value is ` flat_sequence [ 0 ] ` . <nl> + <nl> + Args : <nl> + structure : tuple or list constructed of scalars and / or other tuples / lists , <nl> + or a scalar . Note : numpy arrays are considered scalars . <nl> + flat_sequence : flat sequence to pack . <nl> + <nl> + Returns : <nl> + packed : ` flat_sequence ` converted to have the same recursive structure as <nl> + ` structure ` . <nl> + <nl> + Raises : <nl> + ValueError : If nest and structure have different element counts . <nl> + " " " <nl> + if not ( is_sequence ( flat_sequence ) or isinstance ( flat_sequence , list ) ) : <nl> + raise TypeError ( " flat_sequence must be a sequence " ) <nl> + <nl> + if not is_sequence ( structure ) : <nl> + if len ( flat_sequence ) ! = 1 : <nl> + raise ValueError ( " Structure is a scalar but len ( flat_sequence ) = = % d > 1 " <nl> + % len ( flat_sequence ) ) <nl> + return flat_sequence [ 0 ] <nl> + <nl> + flat_structure = flatten ( structure ) <nl> + if len ( flat_structure ) ! = len ( flat_sequence ) : <nl> + raise ValueError ( <nl> + " Could not pack sequence . 
Structure had % d elements , but flat_sequence " <nl> + " had % d elements . Structure : % s , flat_sequence : % s . " <nl> + % ( len ( flat_structure ) , len ( flat_sequence ) , structure , flat_sequence ) ) <nl> + <nl> + _ , packed = _packed_nest_with_indices ( structure , flat_sequence , 0 ) <nl> + return _sequence_like ( structure , packed ) <nl> + <nl> + <nl> + def map_structure ( func , * structure , * * check_types_dict ) : <nl> + " " " Applies ` func ` to each entry in ` structure ` and returns a new structure . <nl> + <nl> + Applies ` func ( x [ 0 ] , x [ 1 ] , . . . ) ` where x [ i ] is an entry in <nl> + ` structure [ i ] ` . All structures in ` structure ` must have the same arity , <nl> + and the return value will contain the results in the same structure . <nl> + <nl> + Args : <nl> + func : A callable that accepts as many arguments as there are structures . <nl> + * structure : scalar , or tuple / dict of scalars and / or other tuples / dicts . <nl> + Note : numpy arrays are considered scalars . <nl> + * * check_types_dict : only valid keyword argument is ` check_types ` . If set to <nl> + ` True ` ( default ) the types of iterables within the structures have to be <nl> + the same ( e . g . ` map_structure ( func , ( 1 , 2 ) , { " a " : 1 , " b " : 2 } ) ` raises <nl> + a ` TypeError ` exception ) . To allow mismatched types , set this argument <nl> + to ` False ` . <nl> + <nl> + Returns : <nl> + A new structure with the same arity as ` structure ` , whose values correspond <nl> + to ` func ( x [ 0 ] , x [ 1 ] , . . . ) ` where ` x [ i ] ` is a value in the corresponding <nl> + location in ` structure [ i ] ` . If there are different sequence types and <nl> + ` check_types ` is ` False ` the sequence types of the first structure will be <nl> + used . <nl> + <nl> + Raises : <nl> + TypeError : If ` func ` is not callable or if the structures do not match <nl> + each other by depth tree . <nl> + ValueError : If no structure is provided or if the structures do not match <nl> + each other by type . <nl> + ValueError : If wrong keyword arguments are provided . <nl> + " " " <nl> + if not callable ( func ) : <nl> + raise TypeError ( " func must be callable , got : % s " % func ) <nl> + <nl> + if not structure : <nl> + raise ValueError ( " Must provide at least one structure " ) <nl> + <nl> + if check_types_dict : <nl> + if " check_types " not in check_types_dict or len ( check_types_dict ) > 1 : <nl> + raise ValueError ( " Only valid keyword argument is check_types " ) <nl> + check_types = check_types_dict [ " check_types " ] <nl> + else : <nl> + check_types = True <nl> + <nl> + for other in structure [ 1 : ] : <nl> + assert_same_structure ( structure [ 0 ] , other , check_types = check_types ) <nl> + <nl> + flat_structure = [ flatten ( s ) for s in structure ] <nl> + entries = zip ( * flat_structure ) <nl> + <nl> + return pack_sequence_as ( <nl> + structure [ 0 ] , [ func ( * x ) for x in entries ] ) <nl> + <nl> + <nl> + def _yield_flat_up_to ( shallow_tree , input_tree ) : <nl> + " " " Yields elements ` input_tree ` partially flattened up to ` shallow_tree ` .
" " " <nl> + if is_sequence ( shallow_tree ) : <nl> + for shallow_branch , input_branch in zip ( shallow_tree , input_tree ) : <nl> + for input_leaf in _yield_flat_up_to ( shallow_branch , input_branch ) : <nl> + yield input_leaf <nl> + else : <nl> + yield input_tree <nl> + <nl> + <nl> + def assert_shallow_structure ( shallow_tree , input_tree , check_types = True ) : <nl> + " " " Asserts that ` shallow_tree ` is a shallow structure of ` input_tree ` . <nl> + <nl> + That is , this function tests if the ` input_tree ` structure can be created from <nl> + the ` shallow_tree ` structure by replacing its leaf nodes with deeper <nl> + tree structures . <nl> + <nl> + Examples : <nl> + <nl> + The following code will raise an exception : <nl> + ` ` ` python <nl> + shallow_tree = [ " a " , " b " ] <nl> + input_tree = [ " c " , [ " d " , " e " ] , " f " ] <nl> + assert_shallow_structure ( shallow_tree , input_tree ) <nl> + ` ` ` <nl> + <nl> + The following code will not raise an exception : <nl> + ` ` ` python <nl> + shallow_tree = [ " a " , " b " ] <nl> + input_tree = [ " c " , [ " d " , " e " ] ] <nl> + assert_shallow_structure ( shallow_tree , input_tree ) <nl> + ` ` ` <nl> + <nl> + Args : <nl> + shallow_tree : an arbitrarily nested structure . <nl> + input_tree : an arbitrarily nested structure . <nl> + check_types : if ` True ` ( default ) the sequence types of ` shallow_tree ` and <nl> + ` input_tree ` have to be the same . <nl> + <nl> + Raises : <nl> + TypeError : If ` shallow_tree ` is a sequence but ` input_tree ` is not . <nl> + TypeError : If the sequence types of ` shallow_tree ` are different from <nl> + ` input_tree ` . Only raised if ` check_types ` is ` True ` . <nl> + ValueError : If the sequence lengths of ` shallow_tree ` are different from <nl> + ` input_tree ` . <nl> + " " " <nl> + if is_sequence ( shallow_tree ) : <nl> + if not is_sequence ( input_tree ) : <nl> + raise TypeError ( <nl> + " If shallow structure is a sequence , input must also be a sequence . " <nl> + " Input has type : % s . " % type ( input_tree ) ) <nl> + <nl> + if check_types and not isinstance ( input_tree , type ( shallow_tree ) ) : <nl> + raise TypeError ( <nl> + " The two structures don ' t have the same sequence type . Input " <nl> + " structure has type % s , while shallow structure has type % s . " <nl> + % ( type ( input_tree ) , type ( shallow_tree ) ) ) <nl> + <nl> + if len ( input_tree ) ! = len ( shallow_tree ) : <nl> + raise ValueError ( <nl> + " The two structures don ' t have the same sequence length . Input " <nl> + " structure has length % s , while shallow structure has length % s . " <nl> + % ( len ( input_tree ) , len ( shallow_tree ) ) ) <nl> + <nl> + for shallow_branch , input_branch in zip ( shallow_tree , input_tree ) : <nl> + assert_shallow_structure ( shallow_branch , input_branch , <nl> + check_types = check_types ) <nl> + <nl> + <nl> + def flatten_up_to ( shallow_tree , input_tree ) : <nl> + " " " Flattens ` input_tree ` up to ` shallow_tree ` . <nl> + <nl> + Any further depth in structure in ` input_tree ` is retained as elements in the <nl> + partially flatten output . <nl> + <nl> + If ` shallow_tree ` and ` input_tree ` are not sequences , this returns a <nl> + single - element list : ` [ input_tree ] ` . <nl> + <nl> + Use Case : <nl> + <nl> + Sometimes we may wish to partially flatten a nested sequence , retaining some <nl> + of the nested structure . We achieve this by specifying a shallow structure , <nl> + ` shallow_tree ` , we wish to flatten up to . 
<nl> + <nl> + The input , ` input_tree ` , can be thought of as having the same structure as <nl> + ` shallow_tree ` , but with leaf nodes that are themselves tree structures . <nl> + <nl> + Examples : <nl> + <nl> + ` ` ` python <nl> + input_tree = [ [ [ 2 , 2 ] , [ 3 , 3 ] ] , [ [ 4 , 9 ] , [ 5 , 5 ] ] ] <nl> + shallow_tree = [ [ True , True ] , [ False , True ] ] <nl> + <nl> + flattened_input_tree = flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = flatten_up_to ( shallow_tree , shallow_tree ) <nl> + <nl> + # Output is : <nl> + # [ [ 2 , 2 ] , [ 3 , 3 ] , [ 4 , 9 ] , [ 5 , 5 ] ] <nl> + # [ True , True , False , True ] <nl> + ` ` ` <nl> + <nl> + ` ` ` python <nl> + input_tree = [ [ ( ' a ' , 1 ) , [ ( ' b ' , 2 ) , [ ( ' c ' , 3 ) , [ ( ' d ' , 4 ) ] ] ] ] ] <nl> + shallow_tree = [ [ ' level_1 ' , [ ' level_2 ' , [ ' level_3 ' , [ ' level_4 ' ] ] ] ] ] <nl> + <nl> + input_tree_flattened_as_shallow_tree = flatten_up_to ( shallow_tree , input_tree ) <nl> + input_tree_flattened = flatten ( input_tree ) <nl> + <nl> + # Output is : <nl> + # [ ( ' a ' , 1 ) , ( ' b ' , 2 ) , ( ' c ' , 3 ) , ( ' d ' , 4 ) ] <nl> + # [ ' a ' , 1 , ' b ' , 2 , ' c ' , 3 , ' d ' , 4 ] <nl> + ` ` ` <nl> + <nl> + Non - Sequence Edge Cases : <nl> + <nl> + ` ` ` python <nl> + flatten_up_to ( 0 , 0 ) # Output : [ 0 ] <nl> + flatten_up_to ( 0 , [ 0 , 1 , 2 ] ) # Output : [ [ 0 , 1 , 2 ] ] <nl> + flatten_up_to ( [ 0 , 1 , 2 ] , 0 ) # Output : TypeError <nl> + flatten_up_to ( [ 0 , 1 , 2 ] , [ 0 , 1 , 2 ] ) # Output : [ 0 , 1 , 2 ] <nl> + ` ` ` <nl> + <nl> + Args : <nl> + shallow_tree : a possibly pruned structure of input_tree . <nl> + input_tree : an arbitrarily nested structure or a scalar object . <nl> + Note , numpy arrays are considered scalars . <nl> + <nl> + Returns : <nl> + A Python list , the partially flattened version of ` input_tree ` according to <nl> + the structure of ` shallow_tree ` . <nl> + <nl> + Raises : <nl> + TypeError : If ` shallow_tree ` is a sequence but ` input_tree ` is not . <nl> + TypeError : If the sequence types of ` shallow_tree ` are different from <nl> + ` input_tree ` . <nl> + ValueError : If the sequence lengths of ` shallow_tree ` are different from <nl> + ` input_tree ` . <nl> + " " " <nl> + assert_shallow_structure ( shallow_tree , input_tree ) <nl> + return list ( _yield_flat_up_to ( shallow_tree , input_tree ) ) <nl> + <nl> + <nl> + def map_structure_up_to ( shallow_tree , func , * inputs ) : <nl> + " " " Applies a function or op to a number of partially flattened inputs . <nl> + <nl> + The ` inputs ` are flattened up to ` shallow_tree ` before being mapped . <nl> + <nl> + Use Case : <nl> + <nl> + Sometimes we wish to apply a function to a partially flattened <nl> + sequence ( for example when the function itself takes sequence inputs ) . We <nl> + achieve this by specifying a shallow structure , ` shallow_tree ` we wish to <nl> + flatten up to . <nl> + <nl> + The ` inputs ` , can be thought of as having the same structure as <nl> + ` shallow_tree ` , but with leaf nodes that are themselves tree structures . <nl> + <nl> + This function therefore will return something with the same base structure as <nl> + ` shallow_tree ` . <nl> + <nl> + Examples : <nl> + <nl> + ` ` ` python <nl> + ab_tuple = collections . namedtuple ( " ab_tuple " , " a , b " ) <nl> + op_tuple = collections . 
namedtuple ( " op_tuple " , " add , mul " ) <nl> + inp_val = ab_tuple ( a = 2 , b = 3 ) <nl> + inp_ops = ab_tuple ( a = op_tuple ( add = 1 , mul = 2 ) , b = op_tuple ( add = 2 , mul = 3 ) ) <nl> + out = map_structure_up_to ( inp_val , lambda val , ops : ( val + ops . add ) * ops . mul , <nl> + inp_val , inp_ops ) <nl> + <nl> + # Output is : ab_tuple ( a = 6 , b = 15 ) <nl> + ` ` ` <nl> + <nl> + ` ` ` python <nl> + data_list = [ [ 2 , 4 , 6 , 8 ] , [ [ 1 , 3 , 5 , 7 , 9 ] , [ 3 , 5 , 7 ] ] ] <nl> + name_list = [ ' evens ' , [ ' odds ' , ' primes ' ] ] <nl> + out = map_structure_up_to ( <nl> + name_list , <nl> + lambda name , sec : " first_ { } _ { } " . format ( len ( sec ) , name ) , <nl> + name_list , data_list ) <nl> + <nl> + # Output is : [ ' first_4_evens ' , [ ' first_5_odds ' , ' first_3_primes ' ] ] <nl> + ` ` ` <nl> + <nl> + Args : <nl> + shallow_tree : a shallow tree , common to all the inputs . <nl> + func : callable which will be applied to each input individually . <nl> + * inputs : arbitrarily nested combination of objects that are compatible with <nl> + shallow_tree . The function ` func ` is applied to corresponding <nl> + partially flattened elements of each input , so the function must support <nl> + arity of ` len ( inputs ) ` . <nl> + <nl> + Raises : <nl> + TypeError : If ` shallow_tree ` is a sequence but ` input_tree ` is not . <nl> + TypeError : If the sequence types of ` shallow_tree ` are different from <nl> + ` input_tree ` . <nl> + ValueError : If the sequence lengths of ` shallow_tree ` are different from <nl> + ` input_tree ` . <nl> + <nl> + Returns : <nl> + result of repeatedly applying ` func ` , with same structure as <nl> + ` shallow_tree ` . <nl> + " " " <nl> + if not inputs : <nl> + raise ValueError ( " Cannot map over no sequences " ) <nl> + for input_tree in inputs : <nl> + assert_shallow_structure ( shallow_tree , input_tree ) <nl> + <nl> + # Flatten each input separately , apply the function to corresponding elements , <nl> + # then repack based on the structure of the first input . <nl> + all_flattened_up_to = [ flatten_up_to ( shallow_tree , input_tree ) <nl> + for input_tree in inputs ] <nl> + results = [ func ( * tensors ) for tensors in zip ( * all_flattened_up_to ) ] <nl> + return pack_sequence_as ( structure = shallow_tree , flat_sequence = results ) <nl> + <nl> + <nl> + _allowed_symbols = [ <nl> + " assert_same_structure " , <nl> + " is_sequence " , <nl> + " flatten " , <nl> + " pack_sequence_as " , <nl> + " map_structure " , <nl> + " assert_shallow_structure " , <nl> + " flatten_up_to " , <nl> + " map_structure_up_to " , <nl> + ] <nl> + <nl> + remove_undocumented ( __name__ , _allowed_symbols ) <nl> new file mode 100644 <nl> index 0000000000000 . . 7852e4f86176c <nl> mmm / dev / null <nl> ppp b / tensorflow / contrib / data / python / util / nest_test . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . 
<nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Tests for utilities working with arbitrarily nested structures . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + import collections <nl> + <nl> + import numpy as np <nl> + <nl> + from tensorflow . contrib . data . python . util import nest <nl> + from tensorflow . python . framework import constant_op <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import math_ops <nl> + from tensorflow . python . platform import test <nl> + <nl> + <nl> + class NestTest ( test . TestCase ) : <nl> + <nl> + def testFlattenAndPack ( self ) : <nl> + structure = ( ( 3 , 4 ) , 5 , ( 6 , 7 , ( 9 , 10 ) , 8 ) ) <nl> + flat = [ " a " , " b " , " c " , " d " , " e " , " f " , " g " , " h " ] <nl> + self . assertEqual ( nest . flatten ( structure ) , [ 3 , 4 , 5 , 6 , 7 , 9 , 10 , 8 ] ) <nl> + self . assertEqual ( <nl> + nest . pack_sequence_as ( structure , flat ) , ( ( " a " , " b " ) , " c " , <nl> + ( " d " , " e " , ( " f " , " g " ) , " h " ) ) ) <nl> + point = collections . namedtuple ( " Point " , [ " x " , " y " ] ) <nl> + structure = ( point ( x = 4 , y = 2 ) , ( ( point ( x = 1 , y = 0 ) , ) , ) ) <nl> + flat = [ 4 , 2 , 1 , 0 ] <nl> + self . assertEqual ( nest . flatten ( structure ) , flat ) <nl> + restructured_from_flat = nest . pack_sequence_as ( structure , flat ) <nl> + self . assertEqual ( restructured_from_flat , structure ) <nl> + self . assertEqual ( restructured_from_flat [ 0 ] . x , 4 ) <nl> + self . assertEqual ( restructured_from_flat [ 0 ] . y , 2 ) <nl> + self . assertEqual ( restructured_from_flat [ 1 ] [ 0 ] [ 0 ] . x , 1 ) <nl> + self . assertEqual ( restructured_from_flat [ 1 ] [ 0 ] [ 0 ] . y , 0 ) <nl> + <nl> + self . assertEqual ( [ 5 ] , nest . flatten ( 5 ) ) <nl> + self . assertEqual ( [ np . array ( [ 5 ] ) ] , nest . flatten ( np . array ( [ 5 ] ) ) ) <nl> + <nl> + self . assertEqual ( " a " , nest . pack_sequence_as ( 5 , [ " a " ] ) ) <nl> + self . assertEqual ( <nl> + np . array ( [ 5 ] ) , nest . pack_sequence_as ( " scalar " , [ np . array ( [ 5 ] ) ] ) ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , " Structure is a scalar " ) : <nl> + nest . pack_sequence_as ( " scalar " , [ 4 , 5 ] ) <nl> + <nl> + with self . assertRaisesRegexp ( TypeError , " flat_sequence " ) : <nl> + nest . pack_sequence_as ( [ 4 , 5 ] , " bad_sequence " ) <nl> + <nl> + with self . assertRaises ( ValueError ) : <nl> + nest . pack_sequence_as ( [ 5 , 6 , [ 7 , 8 ] ] , [ " a " , " b " , " c " ] ) <nl> + <nl> + def testIsSequence ( self ) : <nl> + self . assertFalse ( nest . is_sequence ( " 1234 " ) ) <nl> + self . assertFalse ( nest . is_sequence ( [ 1 , 3 , [ 4 , 5 ] ] ) ) <nl> + self . assertTrue ( nest . is_sequence ( ( ( 7 , 8 ) , ( 5 , 6 ) ) ) ) <nl> + self . assertFalse ( nest . is_sequence ( [ ] ) ) <nl> + self . assertFalse ( nest . is_sequence ( set ( [ 1 , 2 ] ) ) ) <nl> + ones = array_ops . ones ( [ 2 , 3 ] ) <nl> + self . assertFalse ( nest . is_sequence ( ones ) ) <nl> + self . assertFalse ( nest . is_sequence ( math_ops . tanh ( ones ) ) ) <nl> + self . assertFalse ( nest . is_sequence ( np . ones ( ( 4 , 5 ) ) ) ) <nl> + self . 
assertTrue ( nest . is_sequence ( { " foo " : 1 , " bar " : 2 } ) ) <nl> + <nl> + def testAssertSameStructure ( self ) : <nl> + structure1 = ( ( ( 1 , 2 ) , 3 ) , 4 , ( 5 , 6 ) ) <nl> + structure2 = ( ( ( " foo1 " , " foo2 " ) , " foo3 " ) , " foo4 " , ( " foo5 " , " foo6 " ) ) <nl> + structure_different_num_elements = ( " spam " , " eggs " ) <nl> + structure_different_nesting = ( ( ( 1 , 2 ) , 3 ) , 4 , 5 , ( 6 , ) ) <nl> + nest . assert_same_structure ( structure1 , structure2 ) <nl> + nest . assert_same_structure ( " abc " , 1 . 0 ) <nl> + nest . assert_same_structure ( " abc " , np . array ( [ 0 , 1 ] ) ) <nl> + nest . assert_same_structure ( " abc " , constant_op . constant ( [ 0 , 1 ] ) ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " don ' t have the same number of elements " ) : <nl> + nest . assert_same_structure ( structure1 , structure_different_num_elements ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " don ' t have the same number of elements " ) : <nl> + nest . assert_same_structure ( ( 0 , 1 ) , np . array ( [ 0 , 1 ] ) ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " don ' t have the same number of elements " ) : <nl> + nest . assert_same_structure ( 0 , ( 0 , 1 ) ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " don ' t have the same nested structure " ) : <nl> + nest . assert_same_structure ( structure1 , structure_different_nesting ) <nl> + <nl> + named_type_0 = collections . namedtuple ( " named_0 " , ( " a " , " b " ) ) <nl> + named_type_1 = collections . namedtuple ( " named_1 " , ( " a " , " b " ) ) <nl> + self . assertRaises ( TypeError , nest . assert_same_structure , ( 0 , 1 ) , <nl> + named_type_0 ( " a " , " b " ) ) <nl> + <nl> + nest . assert_same_structure ( named_type_0 ( 3 , 4 ) , named_type_0 ( " a " , " b " ) ) <nl> + <nl> + self . assertRaises ( TypeError , nest . assert_same_structure , <nl> + named_type_0 ( 3 , 4 ) , named_type_1 ( 3 , 4 ) ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " don ' t have the same nested structure " ) : <nl> + nest . assert_same_structure ( named_type_0 ( 3 , 4 ) , named_type_0 ( ( 3 , ) , 4 ) ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , <nl> + " don ' t have the same nested structure " ) : <nl> + nest . assert_same_structure ( ( ( 3 , ) , 4 ) , ( 3 , ( 4 , ) ) ) <nl> + <nl> + structure1_list = { " a " : ( ( 1 , 2 ) , 3 ) , " b " : 4 , " c " : ( 5 , 6 ) } <nl> + with self . assertRaisesRegexp ( TypeError , <nl> + " don ' t have the same sequence type " ) : <nl> + nest . assert_same_structure ( structure1 , structure1_list ) <nl> + nest . assert_same_structure ( structure1 , structure2 , check_types = False ) <nl> + nest . assert_same_structure ( structure1 , structure1_list , check_types = False ) <nl> + <nl> + def testMapStructure ( self ) : <nl> + structure1 = ( ( ( 1 , 2 ) , 3 ) , 4 , ( 5 , 6 ) ) <nl> + structure2 = ( ( ( 7 , 8 ) , 9 ) , 10 , ( 11 , 12 ) ) <nl> + structure1_plus1 = nest . map_structure ( lambda x : x + 1 , structure1 ) <nl> + nest . assert_same_structure ( structure1 , structure1_plus1 ) <nl> + self . assertAllEqual ( <nl> + [ 2 , 3 , 4 , 5 , 6 , 7 ] , <nl> + nest . flatten ( structure1_plus1 ) ) <nl> + structure1_plus_structure2 = nest . map_structure ( <nl> + lambda x , y : x + y , structure1 , structure2 ) <nl> + self . assertEqual ( <nl> + ( ( ( 1 + 7 , 2 + 8 ) , 3 + 9 ) , 4 + 10 , ( 5 + 11 , 6 + 12 ) ) , <nl> + structure1_plus_structure2 ) <nl> + <nl> + self . 
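assertEqual ( { " a " : 2 , " b " : 4 } , <nl> + nest . map_structure ( lambda x : x * 2 , { " a " : 1 , " b " : 2 } ) ) <nl> + # Dicts are a supported nesting level in this fork : map_structure maps over <nl> + # the dict values ( in sorted key order ) and repacks the result as a dict . <nl> + self .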
assertEqual ( 3 , nest . map_structure ( lambda x : x - 1 , 4 ) ) <nl> + <nl> + self . assertEqual ( 7 , nest . map_structure ( lambda x , y : x + y , 3 , 4 ) ) <nl> + <nl> + with self . assertRaisesRegexp ( TypeError , " callable " ) : <nl> + nest . map_structure ( " bad " , structure1_plus1 ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , " same nested structure " ) : <nl> + nest . map_structure ( lambda x , y : None , 3 , ( 3 , ) ) <nl> + <nl> + with self . assertRaisesRegexp ( TypeError , " same sequence type " ) : <nl> + nest . map_structure ( lambda x , y : None , ( ( 3 , 4 ) , 5 ) , { " a " : ( 3 , 4 ) , " b " : 5 } ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , " same nested structure " ) : <nl> + nest . map_structure ( lambda x , y : None , ( ( 3 , 4 ) , 5 ) , ( 3 , ( 4 , 5 ) ) ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , " same nested structure " ) : <nl> + nest . map_structure ( lambda x , y : None , ( ( 3 , 4 ) , 5 ) , ( 3 , ( 4 , 5 ) ) , <nl> + check_types = False ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , " Only valid keyword argument " ) : <nl> + nest . map_structure ( lambda x : None , structure1 , foo = " a " ) <nl> + <nl> + with self . assertRaisesRegexp ( ValueError , " Only valid keyword argument " ) : <nl> + nest . map_structure ( lambda x : None , structure1 , check_types = False , foo = " a " ) <nl> + <nl> + def testAssertShallowStructure ( self ) : <nl> + inp_ab = ( " a " , " b " ) <nl> + inp_abc = ( " a " , " b " , " c " ) <nl> + expected_message = ( <nl> + " The two structures don ' t have the same sequence length . Input " <nl> + " structure has length 2 , while shallow structure has length 3 . " ) <nl> + with self . assertRaisesRegexp ( ValueError , expected_message ) : <nl> + nest . assert_shallow_structure ( inp_abc , inp_ab ) <nl> + <nl> + inp_ab1 = ( ( 1 , 1 ) , ( 2 , 2 ) ) <nl> + inp_ab2 = { " a " : ( 1 , 1 ) , " b " : ( 2 , 2 ) } <nl> + expected_message = ( <nl> + " The two structures don ' t have the same sequence type . Input structure " <nl> + " has type < ( type | class ) ' tuple ' > , while shallow structure has type " <nl> + " < ( type | class ) ' dict ' > . " ) <nl> + with self . assertRaisesRegexp ( TypeError , expected_message ) : <nl> + nest . assert_shallow_structure ( inp_ab2 , inp_ab1 ) <nl> + nest . assert_shallow_structure ( inp_ab2 , inp_ab1 , check_types = False ) <nl> + <nl> + def testFlattenUpTo ( self ) : <nl> + input_tree = ( ( ( 2 , 2 ) , ( 3 , 3 ) ) , ( ( 4 , 9 ) , ( 5 , 5 ) ) ) <nl> + shallow_tree = ( ( True , True ) , ( False , True ) ) <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_input_tree , [ ( 2 , 2 ) , ( 3 , 3 ) , ( 4 , 9 ) , ( 5 , 5 ) ] ) <nl> + self . assertEqual ( flattened_shallow_tree , [ True , True , False , True ] ) <nl> + <nl> + input_tree = ( ( ( " a " , 1 ) , ( ( " b " , 2 ) , ( ( " c " , 3 ) , ( ( " d " , 4 ) ) ) ) ) ) <nl> + shallow_tree = ( ( " level_1 " , ( " level_2 " , ( " level_3 " , ( " level_4 " ) ) ) ) ) <nl> + input_tree_flattened_as_shallow_tree = nest . flatten_up_to ( shallow_tree , <nl> + input_tree ) <nl> + input_tree_flattened = nest . flatten ( input_tree ) <nl> + self . assertEqual ( input_tree_flattened_as_shallow_tree , <nl> + [ ( " a " , 1 ) , ( " b " , 2 ) , ( " c " , 3 ) , ( " d " , 4 ) ] ) <nl> + self . 
assertEqual ( input_tree_flattened , [ " a " , 1 , " b " , 2 , " c " , 3 , " d " , 4 ] ) <nl> + <nl> + # # Shallow non - list edge - case . <nl> + # Using iterable elements . <nl> + input_tree = [ " input_tree " ] <nl> + shallow_tree = " shallow_tree " <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_input_tree , [ input_tree ] ) <nl> + self . assertEqual ( flattened_shallow_tree , [ shallow_tree ] ) <nl> + <nl> + input_tree = ( " input_tree_0 " , " input_tree_1 " ) <nl> + shallow_tree = " shallow_tree " <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_input_tree , [ input_tree ] ) <nl> + self . assertEqual ( flattened_shallow_tree , [ shallow_tree ] ) <nl> + <nl> + # Using non - iterable elements . <nl> + input_tree = ( 0 , ) <nl> + shallow_tree = 9 <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_input_tree , [ input_tree ] ) <nl> + self . assertEqual ( flattened_shallow_tree , [ shallow_tree ] ) <nl> + <nl> + input_tree = ( 0 , 1 ) <nl> + shallow_tree = 9 <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_input_tree , [ input_tree ] ) <nl> + self . assertEqual ( flattened_shallow_tree , [ shallow_tree ] ) <nl> + <nl> + # # Both non - list edge - case . <nl> + # Using iterable elements . <nl> + input_tree = " input_tree " <nl> + shallow_tree = " shallow_tree " <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_input_tree , [ input_tree ] ) <nl> + self . assertEqual ( flattened_shallow_tree , [ shallow_tree ] ) <nl> + <nl> + # Using non - iterable elements . <nl> + input_tree = 0 <nl> + shallow_tree = 0 <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_input_tree , [ input_tree ] ) <nl> + self . assertEqual ( flattened_shallow_tree , [ shallow_tree ] ) <nl> + <nl> + # # Input non - list edge - case . <nl> + # Using iterable elements . <nl> + input_tree = " input_tree " <nl> + shallow_tree = ( " shallow_tree " , ) <nl> + expected_message = ( " If shallow structure is a sequence , input must also " <nl> + " be a sequence . Input has type : < ( type | class ) ' str ' > . " ) <nl> + with self . assertRaisesRegexp ( TypeError , expected_message ) : <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_shallow_tree , list ( shallow_tree ) ) <nl> + <nl> + input_tree = " input_tree " <nl> + shallow_tree = ( " shallow_tree_9 " , " shallow_tree_8 " ) <nl> + with self . assertRaisesRegexp ( TypeError , expected_message ) : <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . 
flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_shallow_tree , list ( shallow_tree ) ) <nl> + <nl> + # Using non - iterable elements . <nl> + input_tree = 0 <nl> + shallow_tree = ( 9 , ) <nl> + expected_message = ( " If shallow structure is a sequence , input must also " <nl> + " be a sequence . Input has type : < ( type | class ) ' int ' > . " ) <nl> + with self . assertRaisesRegexp ( TypeError , expected_message ) : <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_shallow_tree , list ( shallow_tree ) ) <nl> + <nl> + input_tree = 0 <nl> + shallow_tree = ( 9 , 8 ) <nl> + with self . assertRaisesRegexp ( TypeError , expected_message ) : <nl> + flattened_input_tree = nest . flatten_up_to ( shallow_tree , input_tree ) <nl> + flattened_shallow_tree = nest . flatten_up_to ( shallow_tree , shallow_tree ) <nl> + self . assertEqual ( flattened_shallow_tree , list ( shallow_tree ) ) <nl> + <nl> + def testMapStructureUpTo ( self ) : <nl> + ab_tuple = collections . namedtuple ( " ab_tuple " , " a , b " ) <nl> + op_tuple = collections . namedtuple ( " op_tuple " , " add , mul " ) <nl> + inp_val = ab_tuple ( a = 2 , b = 3 ) <nl> + inp_ops = ab_tuple ( a = op_tuple ( add = 1 , mul = 2 ) , b = op_tuple ( add = 2 , mul = 3 ) ) <nl> + out = nest . map_structure_up_to ( <nl> + inp_val , lambda val , ops : ( val + ops . add ) * ops . mul , inp_val , inp_ops ) <nl> + self . assertEqual ( out . a , 6 ) <nl> + self . assertEqual ( out . b , 15 ) <nl> + <nl> + data_list = ( ( 2 , 4 , 6 , 8 ) , ( ( 1 , 3 , 5 , 7 , 9 ) , ( 3 , 5 , 7 ) ) ) <nl> + name_list = ( " evens " , ( " odds " , " primes " ) ) <nl> + out = nest . map_structure_up_to ( <nl> + name_list , lambda name , sec : " first_ { } _ { } " . format ( len ( sec ) , name ) , <nl> + name_list , data_list ) <nl> + self . assertEqual ( out , ( " first_4_evens " , ( " first_5_odds " , " first_3_primes " ) ) ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + test . main ( ) <nl> mmm a / tensorflow / contrib / factorization / python / ops / wals . py <nl> ppp b / tensorflow / contrib / factorization / python / ops / wals . py <nl> <nl> from tensorflow . python . ops import state_ops <nl> from tensorflow . python . ops import variable_scope <nl> from tensorflow . python . platform import tf_logging as logging <nl> + from tensorflow . python . summary import summary <nl> from tensorflow . python . training import session_run_hook <nl> <nl> <nl> def __init__ ( self , <nl> processed_col_indices , <nl> row_prep_ops , <nl> col_prep_ops , <nl> - cache_init_ops ) : <nl> + cache_init_ops , <nl> + completed_sweeps_var ) : <nl> " " " Initializes SweepHook . <nl> <nl> Args : <nl> def __init__ ( self , <nl> cache_init_ops : list of ops , to be run once before training , in the given <nl> order . These are typically local initialization ops ( such as cache <nl> initialization ) . <nl> + completed_sweeps_var : An integer tf . Variable , indicates the number of <nl> + completed sweeps . It is updated by the hook . <nl> " " " <nl> - # TODO ( walidk ) : Provide a counter for the number of completed sweeps . <nl> self . _num_rows = num_rows <nl> self . _num_cols = num_cols <nl> self . _row_prep_ops = row_prep_ops <nl> self . _col_prep_ops = col_prep_ops <nl> self . _cache_init_ops = cache_init_ops <nl> self . _is_row_sweep_var = is_row_sweep_var <nl> + self . 
_completed_sweeps_var = completed_sweeps_var <nl> # Boolean variable that determines whether the cache_init_ops have been run . <nl> self . _is_initialized = False <nl> # Boolean variable that is set to True when a sweep is completed . <nl> # Used to run the prep_ops at the beginning of a sweep , in before_run ( ) . <nl> self . _is_sweep_done = False <nl> # Ops to run jointly with train_op , responsible for updating <nl> - # _is_row_sweep_var and incrementing the global_step counter . They have <nl> - # control_dependencies on train_op . <nl> + # _is_row_sweep_var and incrementing the global_step and completed_sweeps <nl> + # counters . They have control_dependencies on train_op . <nl> self . _fetches = self . _create_switch_ops ( processed_row_indices , <nl> processed_col_indices , <nl> train_op ) <nl> def _create_switch_ops ( self , <nl> processed_row_indices , <nl> processed_col_indices , <nl> train_op ) : <nl> - " " " Creates ops to update is_row_sweep_var and to increment global_step . <nl> + " " " Creates ops to update is_row_sweep_var , global_step and completed_sweeps . <nl> <nl> Creates two boolean tensors processed_rows and processed_cols , which keep <nl> track of which rows / cols have been processed during the current sweep . <nl> def _create_switch_ops ( self , <nl> processed_rows [ processed_row_indices ] to True . <nl> - When is_row_sweep_var is False , it sets <nl> processed_cols [ processed_col_indices ] to True . <nl> - When all rows or all cols have been processed , negates is_row_sweep_var and <nl> - resets processed_rows and processed_cols to False . <nl> + When all rows or all cols have been processed , negates is_row_sweep_var , <nl> + increments the completed_sweeps counter , and resets processed_rows and <nl> + processed_cols to False . <nl> All of the ops created by this function have control_dependencies on <nl> train_op . <nl> <nl> def _create_switch_ops ( self , <nl> sweep ) have been processed . <nl> switch_ops : An op that updates is_row_sweep_var when is_sweep_done is <nl> True . Has control_dependencies on train_op . <nl> - global_step_incr_op : An op that increments the global_step counter . Has <nl> - control_dependenciens on switch_ops . <nl> + incr_ops : An op that increments the global_step and completed_sweeps <nl> + counters . Has control_dependencies on switch_ops . <nl> " " " <nl> + <nl> processed_rows_init = array_ops . fill ( dims = [ self . _num_rows ] , value = False ) <nl> with ops . colocate_with ( processed_rows_init ) : <nl> processed_rows = variable_scope . variable ( <nl> def get_reset_op ( ) : <nl> switch_ops = control_flow_ops . group ( switch_op , reset_op , <nl> name = " sweep_hook_switch_ops " ) <nl> <nl> - # Op to increment the global step <nl> - global_step = framework_variables . get_global_step ( ) <nl> with ops . control_dependencies ( [ switch_ops ] ) : <nl> + # Op to increment the completed_sweeps counter . <nl> + completed_sweeps_incr_op = control_flow_ops . cond ( <nl> + is_sweep_done , <nl> + lambda : state_ops . assign_add ( self . _completed_sweeps_var , 1 ) . op , <nl> + control_flow_ops . no_op , <nl> + name = " completed_sweeps_incr " ) <nl> + <nl> + # Op to increment the global_step counter . <nl> + global_step = framework_variables . get_global_step ( ) <nl> if global_step is not None : <nl> + global_step_incr_op = state_ops . assign_add ( <nl> global_step , 1 , name = " global_step_incr " ) . op <nl> def get_reset_op ( ) : <nl> global_step_incr_op = control_flow_ops .
no_op ( <nl> name = " global_step_incr " ) <nl> <nl> - return [ is_sweep_done , switch_ops , global_step_incr_op ] <nl> + incr_ops = control_flow_ops . group ( <nl> + completed_sweeps_incr_op , global_step_incr_op , <nl> + name = " counter_incr_ops " ) <nl> + <nl> + return [ is_sweep_done , switch_ops , incr_ops ] <nl> <nl> def begin ( self ) : <nl> pass <nl> def before_run ( self , run_context ) : <nl> <nl> self . _is_initialized = True <nl> <nl> - # Request running the switch_ops and the global_step_incr_op <nl> + # Request running the switch_ops and the incr_ops <nl> logging . info ( " Partial fit starting . " ) <nl> return session_run_hook . SessionRunArgs ( fetches = self . _fetches ) <nl> <nl> def after_run ( self , run_context , run_values ) : <nl> logging . info ( " Partial fit done . " ) <nl> <nl> <nl> + class _StopAtSweepHook ( session_run_hook . SessionRunHook ) : <nl> + " " " Hook that requests stop at a given sweep . " " " <nl> + <nl> + def __init__ ( self , last_sweep ) : <nl> + " " " Initializes a ` StopAtSweepHook ` . <nl> + <nl> + This hook requests stop at a given sweep . Relies on the tensor named <nl> + COMPLETED_SWEEPS in the default graph . <nl> + <nl> + Args : <nl> + last_sweep : Integer , number of the last sweep to run . <nl> + " " " <nl> + self . _last_sweep = last_sweep <nl> + <nl> + def begin ( self ) : <nl> + try : <nl> + self . _completed_sweeps_var = ops . get_default_graph ( ) . get_tensor_by_name ( <nl> + WALSMatrixFactorization . COMPLETED_SWEEPS + " : 0 " ) <nl> + except KeyError : <nl> + raise RuntimeError ( WALSMatrixFactorization . COMPLETED_SWEEPS + <nl> + " counter should be created to use StopAtSweepHook . " ) <nl> + <nl> + def before_run ( self , run_context ) : <nl> + return session_run_hook . SessionRunArgs ( self . _completed_sweeps_var ) <nl> + <nl> + def after_run ( self , run_context , run_values ) : <nl> + completed_sweeps = run_values . results <nl> + if completed_sweeps > = self . _last_sweep : <nl> + run_context . request_stop ( ) <nl> + <nl> + <nl> def _wals_factorization_model_function ( features , labels , mode , params ) : <nl> " " " Model function for the WALSFactorization estimator . <nl> <nl> def _wals_factorization_model_function ( features , labels , mode , params ) : <nl> use_gramian_cache = ( <nl> params [ " use_gramian_cache_for_training " ] <nl> and mode = = model_fn . ModeKeys . TRAIN ) <nl> + max_sweeps = params [ " max_sweeps " ] <nl> model = factorization_ops . WALSModel ( <nl> params [ " num_rows " ] , <nl> params [ " num_cols " ] , <nl> def _wals_factorization_model_function ( features , labels , mode , params ) : <nl> # update_col_factors_op <nl> <nl> is_row_sweep_var = variable_scope . variable ( <nl> - True , " is_row_sweep " , <nl> + True , <nl> + trainable = False , <nl> + name = " is_row_sweep " , <nl> + collections = [ ops . GraphKeys . GLOBAL_VARIABLES ] ) <nl> + completed_sweeps_var = variable_scope . variable ( <nl> + 0 , <nl> + trainable = False , <nl> + name = WALSMatrixFactorization . COMPLETED_SWEEPS , <nl> collections = [ ops . GraphKeys . GLOBAL_VARIABLES ] ) <nl> + <nl> # The row sweep is determined by is_row_sweep_var ( controlled by the <nl> # sweep_hook ) in TRAIN mode , and manually in EVAL mode . <nl> is_row_sweep = ( features [ WALSMatrixFactorization . PROJECT_ROW ] <nl> def update_col_factors ( ) : <nl> row_prep_ops , <nl> col_prep_ops , <nl> cache_init_ops , <nl> + completed_sweeps_var , <nl> ) <nl> + training_hooks = [ sweep_hook ] <nl> + if max_sweeps is not None : <nl> + training_hooks . 
append ( _StopAtSweepHook ( max_sweeps ) ) <nl> + <nl> + summary . scalar ( " loss " , loss ) <nl> + summary . scalar ( " completed_sweeps " , completed_sweeps_var ) <nl> <nl> # Prediction ops ( only return predictions in INFER mode ) <nl> predictions = { } <nl> def get_col_projection ( ) : <nl> loss = loss , <nl> eval_metric_ops = { } , <nl> train_op = train_op , <nl> - training_hooks = [ sweep_hook ] ) <nl> + training_hooks = training_hooks ) <nl> <nl> <nl> class WALSMatrixFactorization ( estimator . Estimator ) : <nl> class WALSMatrixFactorization ( estimator . Estimator ) : <nl> PROJECTION_WEIGHTS = " projection_weights " <nl> # Predictions key <nl> PROJECTION_RESULT = " projection " <nl> + # Name of the completed_sweeps variable <nl> + COMPLETED_SWEEPS = " completed_sweeps " <nl> <nl> def __init__ ( self , <nl> num_rows , <nl> def __init__ ( self , <nl> col_weights = 1 , <nl> use_factors_weights_cache_for_training = True , <nl> use_gramian_cache_for_training = True , <nl> + max_sweeps = None , <nl> model_dir = None , <nl> config = None ) : <nl> " " " Creates a model for matrix factorization using the WALS method . <nl> def __init__ ( self , <nl> use_gramian_cache_for_training : Boolean , whether the Gramians will be <nl> cached on the workers before the updates start , during training . <nl> Defaults to True . Note that caching is disabled during prediction . <nl> + max_sweeps : integer , optional . Specifies the number of sweeps for which <nl> + to train the model , where a sweep is defined as a full update of all the <nl> + row factors ( resp . column factors ) . <nl> + If ` steps ` or ` max_steps ` is also specified in model . fit ( ) , training <nl> + stops when either the steps condition or the sweeps condition is met . <nl> model_dir : The directory to save the model results and log files . <nl> config : A Configuration object . See Estimator . <nl> <nl> def __init__ ( self , <nl> " num_col_shards " : num_col_shards , <nl> " row_weights " : row_weights , <nl> " col_weights " : col_weights , <nl> + " max_sweeps " : max_sweeps , <nl> " use_factors_weights_cache_for_training " : <nl> use_factors_weights_cache_for_training , <nl> " use_gramian_cache_for_training " : use_gramian_cache_for_training <nl> mmm a / tensorflow / contrib / factorization / python / ops / wals_test . py <nl> ppp b / tensorflow / contrib / factorization / python / ops / wals_test . py <nl> def batch_size ( self ) : <nl> def use_cache ( self ) : <nl> return False <nl> <nl> + @ property <nl> + def max_sweeps ( self ) : <nl> + return None <nl> + <nl> def setUp ( self ) : <nl> self . _num_rows = 5 <nl> self . _num_cols = 7 <nl> def setUp ( self ) : <nl> num_col_shards = self . _num_col_shards , <nl> row_weights = self . _row_weights , <nl> col_weights = self . _col_weights , <nl> + max_sweeps = self . max_sweeps , <nl> use_factors_weights_cache_for_training = self . use_cache , <nl> use_gramian_cache_for_training = self . use_cache ) <nl> <nl> def test_eval ( self ) : <nl> loss = { } . " " " . format ( loss , true_loss ) ) <nl> <nl> <nl> + class WALSMatrixFactorizationTestSweeps ( WALSMatrixFactorizationTest ) : <nl> + <nl> + @ property <nl> + def max_sweeps ( self ) : <nl> + return 2 <nl> + <nl> + # We set the column steps to None so that we rely only on max_sweeps to stop <nl> + training .
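The _StopAtSweepHook reads the <nl> + # completed_sweeps counter after each training step and requests a stop <nl> + # once the counter reaches max_sweeps .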
<nl> + @ property <nl> + def col_steps ( self ) : <nl> + return None <nl> + <nl> + <nl> class WALSMatrixFactorizationTestCached ( WALSMatrixFactorizationTest ) : <nl> <nl> @ property <nl> def ind_feed ( row_indices , col_indices ) : <nl> <nl> with self . test_session ( ) as sess : <nl> is_row_sweep_var = variables . Variable ( True ) <nl> + completed_sweeps_var = variables . Variable ( 0 ) <nl> sweep_hook = wals_lib . _SweepHook ( <nl> is_row_sweep_var , <nl> self . _train_op , <nl> def ind_feed ( row_indices , col_indices ) : <nl> self . _input_col_indices_ph , <nl> self . _row_prep_ops , <nl> self . _col_prep_ops , <nl> - self . _init_ops ) <nl> + self . _init_ops , <nl> + completed_sweeps_var ) <nl> mon_sess = monitored_session . _HookedSession ( sess , [ sweep_hook ] ) <nl> sess . run ( [ variables . global_variables_initializer ( ) ] ) <nl> <nl> def ind_feed ( row_indices , col_indices ) : <nl> mon_sess . run ( self . _train_op , ind_feed ( [ 3 , 4 ] , [ 0 , 1 , 2 , 3 , 4 , 5 , 6 ] ) ) <nl> self . assertFalse ( sess . run ( is_row_sweep_var ) , <nl> msg = ' Row sweep is complete but is_row_sweep is True . ' ) <nl> + self . assertTrue ( sess . run ( completed_sweeps_var ) = = 1 , <nl> + msg = ' Completed sweeps should be equal to 1 . ' ) <nl> self . assertTrue ( sweep_hook . _is_sweep_done , <nl> msg = ' Sweep is complete but is_sweep_done is False . ' ) <nl> # Col init ops should run . Col sweep not completed . <nl> def ind_feed ( row_indices , col_indices ) : <nl> msg = ' Col sweep is complete but is_row_sweep is False ' ) <nl> self . assertTrue ( sweep_hook . _is_sweep_done , <nl> msg = ' Sweep is complete but is_sweep_done is False . ' ) <nl> + self . assertTrue ( sess . run ( completed_sweeps_var ) = = 2 , <nl> + msg = ' Completed sweeps should be equal to 2 . ' ) <nl> + <nl> + <nl> + class StopAtSweepHookTest ( test . TestCase ) : <nl> + <nl> + def test_stop ( self ) : <nl> + hook = wals_lib . _StopAtSweepHook ( last_sweep = 10 ) <nl> + completed_sweeps = variables . Variable ( <nl> + 8 , name = wals_lib . WALSMatrixFactorization . COMPLETED_SWEEPS ) <nl> + train_op = state_ops . assign_add ( completed_sweeps , 1 ) <nl> + hook . begin ( ) <nl> + <nl> + with self . test_session ( ) as sess : <nl> + sess . run ( [ variables . global_variables_initializer ( ) ] ) <nl> + mon_sess = monitored_session . _HookedSession ( sess , [ hook ] ) <nl> + mon_sess . run ( train_op ) <nl> + # completed_sweeps is 9 after running train_op . <nl> + self . assertFalse ( mon_sess . should_stop ( ) ) <nl> + mon_sess . run ( train_op ) <nl> + # completed_sweeps is 10 after running train_op . <nl> + self . assertTrue ( mon_sess . should_stop ( ) ) <nl> <nl> <nl> if __name__ = = ' __main__ ' : <nl> mmm a / tensorflow / contrib / layers / python / layers / regularizers . py <nl> ppp b / tensorflow / contrib / layers / python / layers / regularizers . py <nl> def l1_l2_regularizer ( scale_l1 = 1 . 0 , scale_l2 = 1 . 0 , scope = None ) : <nl> Raises : <nl> ValueError : If scale is negative or if scale is not a float . <nl> " " " <nl> + if isinstance ( scale_l1 , numbers . Integral ) : <nl> + raise ValueError ( ' scale_l1 cannot be an integer : % s ' % ( scale_l1 , ) ) <nl> + if isinstance ( scale_l2 , numbers . Integral ) : <nl> + raise ValueError ( ' scale_l2 cannot be an integer : % s ' % ( scale_l2 , ) ) <nl> scope = scope or ' l1_l2_regularizer ' <nl> + if scale_l1 = = 0 . : <nl> + return l2_regularizer ( scale_l2 , scope ) <nl> + if scale_l2 = = 0 . 
: <nl> + return l1_regularizer ( scale_l1 , scope ) <nl> return sum_regularizer ( [ l1_regularizer ( scale_l1 ) , <nl> l2_regularizer ( scale_l2 ) ] , <nl> scope = scope ) <nl> mmm a / tensorflow / contrib / layers / python / layers / regularizers_test . py <nl> ppp b / tensorflow / contrib / layers / python / layers / regularizers_test . py <nl> def test_l1_l2 ( self ) : <nl> self . assertEquals ( loss . op . name , ' l1_l2_regularizer ' ) <nl> self . assertAlmostEqual ( loss . eval ( ) , num_elem + num_elem / 2 , 5 ) <nl> <nl> + def test_l1_l2_scale_l1Zero ( self ) : <nl> + shape = [ 5 , 5 , 5 ] <nl> + num_elem = 5 * 5 * 5 <nl> + tensor = constant_op . constant ( 1 . 0 , shape = shape ) <nl> + loss = regularizers . l1_l2_regularizer ( 0 . 0 , 1 . 0 ) ( tensor ) <nl> + with self . test_session ( ) : <nl> + self . assertEquals ( loss . op . name , ' l1_l2_regularizer ' ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , num_elem / 2 , 5 ) <nl> + <nl> + def test_l1_l2_scale_l2Zero ( self ) : <nl> + shape = [ 5 , 5 , 5 ] <nl> + num_elem = 5 * 5 * 5 <nl> + tensor = constant_op . constant ( 1 . 0 , shape = shape ) <nl> + loss = regularizers . l1_l2_regularizer ( 1 . 0 , 0 . 0 ) ( tensor ) <nl> + with self . test_session ( ) : <nl> + self . assertEquals ( loss . op . name , ' l1_l2_regularizer ' ) <nl> + self . assertAlmostEqual ( loss . eval ( ) , num_elem , 5 ) <nl> + <nl> + def test_l1_l2_scales_Zero ( self ) : <nl> + shape = [ 5 , 5 , 5 ] <nl> + tensor = constant_op . constant ( 1 . 0 , shape = shape ) <nl> + loss = regularizers . l1_l2_regularizer ( 0 . 0 , 0 . 0 ) ( tensor ) <nl> + self . assertEquals ( loss , None ) <nl> + <nl> def testL1L2RegularizerWithScope ( self ) : <nl> with self . test_session ( ) : <nl> shape = [ 5 , 5 , 5 ] <nl> mmm a / tensorflow / contrib / lookup / BUILD <nl> ppp b / tensorflow / contrib / lookup / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : constant_op " , <nl> " / / tensorflow / python : control_flow_ops " , <nl> " / / tensorflow / python : framework " , <nl> " / / tensorflow / python : framework_for_generated_wrappers " , <nl> mmm a / tensorflow / contrib / lookup / lookup_ops . py <nl> ppp b / tensorflow / contrib / lookup / lookup_ops . py <nl> <nl> import collections <nl> import functools <nl> <nl> + from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import sparse_tensor <nl> def initialize ( self , table ) : <nl> name = scope ) <nl> # pylint : enable = protected - access <nl> ops . add_to_collection ( ops . GraphKeys . TABLE_INITIALIZERS , init_op ) <nl> - ops . add_to_collection ( ops . GraphKeys . ASSET_FILEPATHS , filename ) <nl> + # If the filename tensor is anything other than a string constant ( e . g . , if <nl> + # it is a placeholder ) then it does not make sense to track it as an asset . <nl> + if constant_op . is_constant ( filename ) : <nl> + ops . add_to_collection ( ops . GraphKeys . ASSET_FILEPATHS , filename ) <nl> return init_op <nl> <nl> <nl> mmm a / tensorflow / contrib / lookup / lookup_ops_test . py <nl> ppp b / tensorflow / contrib / lookup / lookup_ops_test . py <nl> def test_string_index_table_from_file_tensor_filename ( self ) : <nl> self . assertRaises ( errors_impl . OpError , ids . eval ) <nl> lookup_ops . tables_initializer ( ) . run ( ) <nl> self . 
assertAllEqual ( ( 1 , 2 , 3 ) , ids . eval ( ) ) <nl> + self . assertEqual ( 1 , <nl> + len ( ops . get_collection ( ops . GraphKeys . ASSET_FILEPATHS ) ) ) <nl> + <nl> + def test_string_index_table_from_file_placeholder_filename ( self ) : <nl> + vocabulary_file = self . _createVocabFile ( " f2i_vocab1 . txt " ) <nl> + with self . test_session ( ) : <nl> + vocabulary_placeholder = array_ops . placeholder ( dtypes . string , [ ] ) <nl> + table = lookup . index_table_from_file ( <nl> + vocabulary_file = vocabulary_placeholder , num_oov_buckets = 1 ) <nl> + ids = table . lookup ( constant_op . constant ( [ " salad " , " surgery " , " tarkus " ] ) ) <nl> + <nl> + self . assertRaises ( errors_impl . OpError , ids . eval ) <nl> + <nl> + feed_dict = { vocabulary_placeholder . name : vocabulary_file } <nl> + lookup_ops . tables_initializer ( ) . run ( feed_dict = feed_dict ) <nl> + self . assertAllEqual ( ( 1 , 2 , 3 ) , ids . eval ( ) ) <nl> + self . assertEqual ( 0 , <nl> + len ( ops . get_collection ( ops . GraphKeys . ASSET_FILEPATHS ) ) ) <nl> <nl> def test_int32_index_table_from_file ( self ) : <nl> vocabulary_file = self . _createVocabFile ( <nl> mmm a / tensorflow / contrib / session_bundle / bundle_shim_test . cc <nl> ppp b / tensorflow / contrib / session_bundle / bundle_shim_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / contrib / session_bundle / bundle_shim . h " <nl> <nl> + # include " google / protobuf / any . pb . h " <nl> # include " tensorflow / cc / saved_model / signature_constants . h " <nl> # include " tensorflow / cc / saved_model / tag_constants . h " <nl> # include " tensorflow / contrib / session_bundle / test_util . h " <nl> mmm a / tensorflow / contrib / session_bundle / signature . cc <nl> ppp b / tensorflow / contrib / session_bundle / signature . cc <nl> limitations under the License . <nl> # include " tensorflow / core / platform / protobuf_internal . h " <nl> # include " tensorflow / core / platform / types . h " <nl> # include " tensorflow / core / protobuf / meta_graph . pb . h " <nl> - # include " tensorflow / core / protobuf / saver . pb . h " <nl> # include " tensorflow / core / public / session . h " <nl> <nl> namespace tensorflow { <nl> mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> cc_library ( <nl> deps = [ <nl> " : lib " , <nl> " : op_gen_overrides_proto_cc " , <nl> + " : protos_all_cc " , <nl> ] , <nl> ) <nl> <nl> tf_cuda_library ( <nl> " : lib_internal " , <nl> " : proto_text " , <nl> " : protos_all_cc " , <nl> + " / / tensorflow / core / kernels : bounds_check " , <nl> " / / tensorflow / core / kernels : required " , <nl> " / / third_party / eigen3 " , <nl> ] , <nl> mmm a / tensorflow / core / common_runtime / device . h <nl> ppp b / tensorflow / core / common_runtime / device . h <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / control_flow . h " <nl> - # include " tensorflow / core / framework / device_attributes . pb . h " <nl> # include " tensorflow / core / framework / device_attributes . pb_text . h " <nl> + # include " tensorflow / core / framework / device_attributes . pb . h " <nl> # include " tensorflow / core / framework / device_base . h " <nl> # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> mmm a / tensorflow / core / common_runtime / direct_session . 
cc <nl> ppp b / tensorflow / core / common_runtime / direct_session . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / simple_placer . h " <nl> # include " tensorflow / core / common_runtime / step_stats_collector . h " <nl> # include " tensorflow / core / framework / function . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / graph . pb_text . h " <nl> + # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / graph_def_util . h " <nl> # include " tensorflow / core / framework / log_memory . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / common_runtime / shape_refiner . cc <nl> ppp b / tensorflow / core / common_runtime / shape_refiner . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / common_shape_fns . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> + # include " tensorflow / core / kernels / bounds_check . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / lib / gtl / stl_util . h " <nl> # include " tensorflow / core / public / session . h " <nl> Status ShapeRefiner : : EvaluateConstantTensorForEdge ( const Node * node , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status ShapeRefiner : : TryToInferTensorOutputFromInputShapes ( const Edge * edge , <nl> + Tensor * output , <nl> + bool * success ) { <nl> + * success = false ; <nl> + const Node * node = edge - > src ( ) ; <nl> + auto it = node_to_context_ . find ( node ) ; <nl> + if ( it = = node_to_context_ . end ( ) ) { <nl> + return errors : : FailedPrecondition ( " Node does not have context . " ) ; <nl> + } <nl> + InferenceContext * c = it - > second . get ( ) ; <nl> + <nl> + if ( node - > def ( ) . op ( ) = = " Shape " ) { <nl> + / / If input shapes to the shape op are fully defined , <nl> + / / we can infer the shape op ' s output tensor . <nl> + bool fully_defined_inputs = c - > FullyDefined ( c - > input ( 0 ) ) ; <nl> + if ( fully_defined_inputs ) { <nl> + int input_rank = c - > Rank ( c - > input ( 0 ) ) ; <nl> + Tensor t ( node - > output_type ( 0 ) , TensorShape ( { input_rank } ) ) ; <nl> + if ( node - > output_type ( 0 ) = = DT_INT32 ) { <nl> + auto flat = t . flat < int > ( ) ; <nl> + for ( int i = 0 ; i < input_rank ; i + + ) { <nl> + int64 dimension = c - > Value ( c - > Dim ( c - > input ( 0 ) , i ) ) ; <nl> + if ( ! FastBoundsCheck ( dimension , std : : numeric_limits < int32 > : : max ( ) ) ) { <nl> + return errors : : FailedPrecondition ( <nl> + " Shape has output type int32 , but dimension exceeds maximum " <nl> + " int32 value " ) ; <nl> + } <nl> + flat ( i ) = static_cast < int32 > ( dimension ) ; <nl> + } <nl> + } else if ( node - > output_type ( 0 ) = = DT_INT64 ) { <nl> + auto flat = t . flat < int64 > ( ) ; <nl> + for ( int i = 0 ; i < input_rank ; i + + ) { <nl> + flat ( i ) = c - > Value ( c - > Dim ( c - > input ( 0 ) , i ) ) ; <nl> + } <nl> + } else { <nl> + return errors : : FailedPrecondition ( <nl> + " Shape has output type that is not int32 or int64 " ) ; <nl> + } <nl> + * output = t ; <nl> + * success = true ; <nl> + } <nl> + } else if ( node - > def ( ) . 
op ( ) = = " Rank " ) { <nl> + bool rank_known = c - > RankKnown ( c - > input ( 0 ) ) ; <nl> + if ( rank_known ) { <nl> + int32 input_rank = c - > Rank ( c - > input ( 0 ) ) ; <nl> + Tensor t ( node - > output_type ( 0 ) , TensorShape ( { } ) ) ; <nl> + t . flat < int32 > ( ) ( 0 ) = input_rank ; <nl> + * output = t ; <nl> + * success = true ; <nl> + } <nl> + } else if ( node - > def ( ) . op ( ) = = " Size " ) { <nl> + bool fully_defined_inputs = c - > FullyDefined ( c - > input ( 0 ) ) ; <nl> + if ( fully_defined_inputs ) { <nl> + int32 rank = c - > Rank ( c - > input ( 0 ) ) ; <nl> + Tensor t ( node - > output_type ( 0 ) , TensorShape ( { } ) ) ; <nl> + int64 size = 1 ; <nl> + for ( int i = 0 ; i < rank ; i + + ) { <nl> + size * = c - > Value ( c - > Dim ( c - > input ( 0 ) , i ) ) ; <nl> + } <nl> + if ( node - > output_type ( 0 ) = = DT_INT32 ) { <nl> + if ( ! FastBoundsCheck ( size , std : : numeric_limits < int32 > : : max ( ) ) ) { <nl> + return errors : : FailedPrecondition ( <nl> + " Size has output type int32 , but size exceeds maximum int32 " <nl> + " value " ) ; <nl> + } <nl> + t . flat < int32 > ( ) ( 0 ) = static_cast < int32 > ( size ) ; <nl> + } else if ( node - > output_type ( 0 ) = = DT_INT64 ) { <nl> + t . flat < int64 > ( ) ( 0 ) = size ; <nl> + } else { <nl> + return errors : : FailedPrecondition ( <nl> + " Size has output type that is not int32 or int64 " ) ; <nl> + } <nl> + * output = t ; <nl> + * success = true ; <nl> + } <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> Status ShapeRefiner : : ExtractConstantSubgraph ( <nl> Node * target_node , Graph * out_graph , bool * is_constant_graph , <nl> std : : vector < std : : pair < string , Tensor > > * const_inputs ) { <nl> Status ShapeRefiner : : ExtractConstantSubgraph ( <nl> dst_copy , current_edge - > dst_input ( ) ) ; <nl> } <nl> <nl> - / / If we have a copy of the input tensor materialized already , <nl> - / / then add to the list of inputs to feed and do not recurse further . <nl> const string & output_tensor_name = <nl> strings : : StrCat ( current_node - > name ( ) , " : " , current_edge - > src_output ( ) ) ; <nl> + <nl> + / / Some tensor values can be inferred . For example , a shape op <nl> + / / with input shapes fully defined can have its output tensor inferred . <nl> + Tensor tensor_inferred ; <nl> + bool successfully_inferred_tensor = false ; <nl> + TF_RETURN_IF_ERROR ( TryToInferTensorOutputFromInputShapes ( <nl> + current_edge , & tensor_inferred , & successfully_inferred_tensor ) ) ; <nl> + if ( successfully_inferred_tensor ) { <nl> + const_inputs - > emplace_back ( output_tensor_name , tensor_inferred ) ; <nl> + const_inputs_added . insert ( output_tensor_name ) ; <nl> + continue ; <nl> + } <nl> + <nl> + / / If we have a copy of the input tensor materialized already , <nl> + / / then add to the list of inputs to feed and do not recurse further . <nl> auto it = const_tensor_map_ . find ( output_tensor_name ) ; <nl> if ( it ! = const_tensor_map_ . end ( ) & & <nl> const_inputs_added . count ( output_tensor_name ) = = 0 ) { <nl> - const_inputs - > emplace_back ( <nl> - std : : make_pair ( output_tensor_name , it - > second ) ) ; <nl> + const_inputs - > emplace_back ( output_tensor_name , it - > second ) ; <nl> const_inputs_added . insert ( output_tensor_name ) ; <nl> continue ; <nl> } <nl> mmm a / tensorflow / core / common_runtime / shape_refiner . h <nl> ppp b / tensorflow / core / common_runtime / shape_refiner . 
h <nl> class ShapeRefiner { <nl> } <nl> <nl> private : <nl> + / / Tries to infer tensor output based on the input shapes of the node . In some <nl> + / / cases , the shapes of the inputs are sufficient for inferring the contents <nl> + / / of the output tensor . For example , a Shape op with fully defined input <nl> + / / shapes can have its output tensor inferred . <nl> + Status TryToInferTensorOutputFromInputShapes ( const Edge * edge , Tensor * output , <nl> + bool * success ) ; <nl> + <nl> / / Extracts the subgraph ending at ' node ' that is statically <nl> / / computable and inserts into ' out_graph ' . If statically computable , <nl> / / ' is_constant_graph ' will be true . <nl> mmm a / tensorflow / core / common_runtime / shape_refiner_test . cc <nl> ppp b / tensorflow / core / common_runtime / shape_refiner_test . cc <nl> REGISTER_OP ( " ShapeData " ) <nl> return Status : : OK ( ) ; <nl> } ) ; <nl> <nl> + REGISTER_OP ( " ShapeDataInt64 " ) <nl> + . Input ( " a : int64 " ) <nl> + . Output ( " o : int64 " ) <nl> + . SetShapeFn ( [ ] ( shape_inference : : InferenceContext * c ) { <nl> + const Tensor * shape_data = c - > input_tensor ( 0 ) ; <nl> + if ( shape_data = = nullptr ) { <nl> + return shape_inference : : UnknownShape ( c ) ; <nl> + } <nl> + <nl> + std : : vector < shape_inference : : DimensionHandle > dims ; <nl> + dims . reserve ( shape_data - > NumElements ( ) ) ; <nl> + for ( int i = 0 ; i < shape_data - > NumElements ( ) ; + + i ) { <nl> + dims . emplace_back ( c - > MakeDim ( shape_data - > flat < int64 > ( ) ( i ) ) ) ; <nl> + } <nl> + <nl> + c - > set_output ( 0 , c - > MakeShape ( dims ) ) ; <nl> + return Status : : OK ( ) ; <nl> + } ) ; <nl> + <nl> } / / namespace <nl> <nl> + TEST ( ShapeRefinerTest , PropagateShapeAcrossTensorContent ) { <nl> + Scope root = Scope : : NewRootScope ( ) ; <nl> + <nl> + / / Create variable 2x4 tensor . <nl> + auto input = ops : : Variable ( root , { 2 , 4 } , DT_INT32 ) ; <nl> + <nl> + / / Shape is a vector of 2 elements ( 2 , 4 ) <nl> + auto shape = ops : : Shape ( root , input ) ; <nl> + <nl> + / / Ones for indices of the slice . ( get the 4 ) . <nl> + auto ones = ops : : Const ( root , { 1 } ) ; <nl> + <nl> + / / Slice an element of the shape ( 4 ) . <nl> + auto sliced = ops : : Slice ( root , shape , ones , ones ) ; <nl> + <nl> + Node * shape_data ; <nl> + TF_ASSERT_OK ( NodeBuilder ( " Test " , " ShapeData " ) <nl> + . Input ( sliced . node ( ) ) <nl> + . Finalize ( root . graph ( ) , & shape_data ) ) ; <nl> + <nl> + ShapeRefiner m ( TF_GRAPH_DEF_VERSION , OpRegistry : : Global ( ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( ones . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( input . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( shape . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( sliced . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( shape_data ) ) ; <nl> + <nl> + shape_inference : : InferenceContext * ctx = m . GetContext ( shape_data ) ; <nl> + EXPECT_EQ ( " [ 4 ] " , ctx - > DebugString ( ctx - > output ( 0 ) ) ) ; <nl> + } <nl> + <nl> + TEST ( ShapeRefinerTest , PropagateShapeAcrossTensorContentInt64 ) { <nl> + Scope root = Scope : : NewRootScope ( ) ; <nl> + <nl> + / / Create a 2x4xD variable , where D does not fit in int32 .
<nl> + auto input = ops : : Variable ( <nl> + root , { 2 , 4 , static_cast < int64 > ( std : : numeric_limits < int32 > : : max ( ) ) * 2 } , <nl> + DT_INT64 ) ; <nl> + <nl> + / / Shape is a vector of 3 elements ( 2 , 4 , D ) , with int64 output type . <nl> + auto attrs = ops : : Shape : : OutType ( DT_INT64 ) ; <nl> + auto shape = ops : : Shape ( root , input , attrs ) ; <nl> + <nl> + / / Ones for indices of the slice . ( get the 4 ) . <nl> + auto ones = ops : : Const ( root , { 1 } ) ; <nl> + <nl> + / / Slice an element of the shape ( 4 ) . <nl> + auto sliced = ops : : Slice ( root , shape , ones , ones ) ; <nl> + <nl> + Node * shape_data ; <nl> + TF_ASSERT_OK ( NodeBuilder ( " Test " , " ShapeDataInt64 " ) <nl> + . Input ( sliced . node ( ) ) <nl> + . Finalize ( root . graph ( ) , & shape_data ) ) ; <nl> + <nl> + ShapeRefiner m ( TF_GRAPH_DEF_VERSION , OpRegistry : : Global ( ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( ones . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( input . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( shape . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( sliced . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( shape_data ) ) ; <nl> + <nl> + shape_inference : : InferenceContext * ctx = m . GetContext ( shape_data ) ; <nl> + EXPECT_EQ ( " [ 4 ] " , ctx - > DebugString ( ctx - > output ( 0 ) ) ) ; <nl> + } <nl> + <nl> + TEST ( ShapeRefinerTest , PropagateShapeAcrossTensorContentInt32Overflow ) { <nl> + Scope root = Scope : : NewRootScope ( ) ; <nl> + <nl> + / / Create a 2x4xD variable , where D does not fit in int32 . <nl> + auto input = ops : : Variable ( <nl> + root , { 2 , 4 , static_cast < int64 > ( std : : numeric_limits < int32 > : : max ( ) ) * 2 } , <nl> + DT_INT32 ) ; <nl> + <nl> + / / Shape has int32 output type here , which cannot hold D . <nl> + auto shape = ops : : Shape ( root , input ) ; <nl> + <nl> + / / Ones for indices of the slice . ( get the 4 ) . <nl> + auto ones = ops : : Const ( root , { 1 } ) ; <nl> + <nl> + / / Slice an element of the shape ( 4 ) . <nl> + auto sliced = ops : : Slice ( root , shape , ones , ones ) ; <nl> + <nl> + Node * shape_data ; <nl> + TF_ASSERT_OK ( NodeBuilder ( " Test " , " ShapeData " ) <nl> + . Input ( sliced . node ( ) ) <nl> + . Finalize ( root . graph ( ) , & shape_data ) ) ; <nl> + <nl> + ShapeRefiner m ( TF_GRAPH_DEF_VERSION , OpRegistry : : Global ( ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( ones . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( input . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( shape . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( sliced . node ( ) ) ) ; <nl> + <nl> + / / Expect an error since there ' s an overflow . <nl> + EXPECT_FALSE ( m . AddNode ( shape_data ) . ok ( ) ) ; <nl> + } <nl> + <nl> + TEST ( ShapeRefinerTest , PropagateRankAcrossTensorContent ) { <nl> + Scope root = Scope : : NewRootScope ( ) ; <nl> + <nl> + / / Create variable 2x4x3 tensor . <nl> + auto input = ops : : Variable ( root , { 2 , 4 , 3 } , DT_INT32 ) ; <nl> + <nl> + / / Rank 3 . <nl> + auto rank = ops : : Rank ( root , input ) ; <nl> + <nl> + auto identity = ops : : Identity ( root , rank ) ; <nl> + <nl> + Node * shape_data ; <nl> + TF_ASSERT_OK ( NodeBuilder ( " Test " , " ShapeData " ) <nl> + . Input ( identity . node ( ) ) <nl> + . Finalize ( root . graph ( ) , & shape_data ) ) ; <nl> + <nl> + ShapeRefiner m ( TF_GRAPH_DEF_VERSION , OpRegistry : : Global ( ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( input . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( rank . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( identity .
node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( shape_data ) ) ; <nl> + <nl> + shape_inference : : InferenceContext * ctx = m . GetContext ( shape_data ) ; <nl> + EXPECT_EQ ( " [ 3 ] " , ctx - > DebugString ( ctx - > output ( 0 ) ) ) ; <nl> + } <nl> + <nl> + TEST ( ShapeRefinerTest , PropagateSizeAcrossTensorContent ) { <nl> + Scope root = Scope : : NewRootScope ( ) ; <nl> + <nl> + / / Create variable . <nl> + auto input = ops : : Variable ( root , { 1 , 2 , 3 , 4 , 5 } , DT_INT32 ) ; <nl> + <nl> + / / 5 ! . <nl> + auto size = ops : : Size ( root , input ) ; <nl> + <nl> + auto identity = ops : : Identity ( root , size ) ; <nl> + <nl> + Node * shape_data ; <nl> + TF_ASSERT_OK ( NodeBuilder ( " Test " , " ShapeData " ) <nl> + . Input ( identity . node ( ) ) <nl> + . Finalize ( root . graph ( ) , & shape_data ) ) ; <nl> + <nl> + ShapeRefiner m ( TF_GRAPH_DEF_VERSION , OpRegistry : : Global ( ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( input . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( size . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( identity . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( shape_data ) ) ; <nl> + <nl> + shape_inference : : InferenceContext * ctx = m . GetContext ( shape_data ) ; <nl> + EXPECT_EQ ( " [ 120 ] " , ctx - > DebugString ( ctx - > output ( 0 ) ) ) ; <nl> + } <nl> + <nl> + TEST ( ShapeRefinerTest , PropagateSizeAcrossTensorContentInt64 ) { <nl> + Scope root = Scope : : NewRootScope ( ) ; <nl> + <nl> + / / Create variable . <nl> + auto input = <nl> + ops : : Variable ( root , <nl> + { 1 , 2 , 3 , 4 , 5 , <nl> + static_cast < int64 > ( std : : numeric_limits < int32 > : : max ( ) ) * 2 } , <nl> + DT_INT64 ) ; <nl> + <nl> + / / 5 ! * int32_max_value * 2 . <nl> + auto attrs = ops : : Size : : OutType ( DT_INT64 ) ; <nl> + auto size = ops : : Size ( root , input , attrs ) ; <nl> + <nl> + auto identity = ops : : Identity ( root , size ) ; <nl> + <nl> + Node * shape_data ; <nl> + TF_ASSERT_OK ( NodeBuilder ( " Test " , " ShapeDataInt64 " ) <nl> + . Input ( identity . node ( ) ) <nl> + . Finalize ( root . graph ( ) , & shape_data ) ) ; <nl> + <nl> + ShapeRefiner m ( TF_GRAPH_DEF_VERSION , OpRegistry : : Global ( ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( input . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( size . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( identity . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( shape_data ) ) ; <nl> + <nl> + shape_inference : : InferenceContext * ctx = m . GetContext ( shape_data ) ; <nl> + EXPECT_EQ ( " [ 515396075280 ] " , ctx - > DebugString ( ctx - > output ( 0 ) ) ) ; <nl> + } <nl> + <nl> + TEST ( ShapeRefinerTest , PropagateSizeAcrossTensorContentInt32Overflow ) { <nl> + Scope root = Scope : : NewRootScope ( ) ; <nl> + <nl> + / / Create variable . <nl> + auto input = <nl> + ops : : Variable ( root , <nl> + { 1 , 2 , 3 , 4 , 5 , <nl> + static_cast < int64 > ( std : : numeric_limits < int32 > : : max ( ) ) * 2 } , <nl> + DT_INT32 ) ; <nl> + <nl> + / / Size is 5 ! * D , which overflows int32 . <nl> + auto size = ops : : Size ( root , input ) ; <nl> + <nl> + auto identity = ops : : Identity ( root , size ) ; <nl> + <nl> + Node * shape_data ; <nl> + TF_ASSERT_OK ( NodeBuilder ( " Test " , " ShapeData " ) <nl> + . Input ( identity . node ( ) ) <nl> + . Finalize ( root . graph ( ) , & shape_data ) ) ; <nl> + <nl> + ShapeRefiner m ( TF_GRAPH_DEF_VERSION , OpRegistry : : Global ( ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( input . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m . AddNode ( size . node ( ) ) ) ; <nl> + TF_ASSERT_OK ( m .
AddNode ( identity . node ( ) ) ) ; <nl> + EXPECT_FALSE ( m . AddNode ( shape_data ) . ok ( ) ) ; <nl> + } <nl> + <nl> TEST ( ShapeRefinerTest , PropagateShape ) { <nl> Scope root = Scope : : NewRootScope ( ) ; <nl> / / 3x2 input <nl> mmm a / tensorflow / core / common_runtime / simple_placer_test . cc <nl> ppp b / tensorflow / core / common_runtime / simple_placer_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> # include " tensorflow / core / common_runtime / device_set . h " <nl> # include " tensorflow / core / framework / device_attributes . pb . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / kernel_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / op_def_builder . h " <nl> mmm a / tensorflow / core / debug / debug_io_utils_test . cc <nl> ppp b / tensorflow / core / debug / debug_io_utils_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / debug / debug_io_utils . h " <nl> <nl> + # include " tensorflow / core / framework / summary . pb . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> # include " tensorflow / core / lib / core / notification . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> mmm a / tensorflow / core / debug / grpc_session_debug_test . cc <nl> ppp b / tensorflow / core / debug / grpc_session_debug_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / graph / default_device . h " <nl> # include " tensorflow / core / graph / graph . h " <nl> # include " tensorflow / core / graph / testlib . h " <nl> - # include " tensorflow / core / lib / core / error_codes . pb . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / lib / io / path . h " <nl> # include " tensorflow / core / lib / strings / strcat . h " <nl> limitations under the License . <nl> # include " tensorflow / core / platform / logging . h " <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / protobuf / debug . pb . h " <nl> - # include " tensorflow / core / protobuf / master . pb . h " <nl> # include " tensorflow / core / public / session . h " <nl> # include " tensorflow / core / util / port . h " <nl> <nl> mmm a / tensorflow / core / distributed_runtime / BUILD <nl> ppp b / tensorflow / core / distributed_runtime / BUILD <nl> cc_library ( <nl> " / / tensorflow / core : lib_internal " , <nl> " / / tensorflow / core : master_proto_cc " , <nl> " / / tensorflow / core : proto_text " , <nl> + " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core : worker_proto_cc " , <nl> ] , <nl> ) <nl> cc_test ( <nl> srcs = [ " message_wrappers_test . cc " ] , <nl> deps = [ <nl> " : message_wrappers " , <nl> + " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core : tensor_testutil " , <nl> " / / tensorflow / core : test " , <nl> " / / tensorflow / core : test_main " , <nl> mmm a / tensorflow / core / distributed_runtime / message_wrappers . cc <nl> ppp b / tensorflow / core / distributed_runtime / message_wrappers . cc <nl> limitations under the License . 
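In graph-level terms, the shape-refiner tests above check that Shape, Rank, and Size outputs are treated as constants whenever the input's shape (or rank) is statically known, with an explicit failure on int32 overflow. A rough Python-level sketch of the kind of graph this helps follows; the view is illustrative only, since the mechanism itself is the C++ TryToInferTensorOutputFromInputShapes above.

    # Illustrative TF 1.x graph only; the inference happens in the C++
    # shape refiner, not in this Python code.
    import tensorflow as tf

    x = tf.placeholder(tf.float32, shape=[2, 4])  # fully defined shape
    s = tf.shape(x)  # contents inferable as [2, 4] without running x
    n = tf.size(x)   # inferable as 8 = 2 * 4
    r = tf.rank(x)   # inferable as 2; only the rank must be known

    # A consumer whose shape function needs the value of `s` (such as the
    # Slice feeding ShapeData in the tests, or a Reshape) can now be
    # resolved statically instead of being treated as unknown.
    y = tf.reshape(tf.zeros([8]), s)
    print(y.get_shape())  # (2, 4)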
<nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> # include " tensorflow / core / distributed_runtime / message_wrappers . h " <nl> + # include " tensorflow / core / framework / cost_graph . pb . h " <nl> + # include " tensorflow / core / framework / step_stats . pb . h " <nl> + # include " tensorflow / core / protobuf / config . pb . h " <nl> + # include " tensorflow / core / protobuf / named_tensor . pb . h " <nl> <nl> namespace tensorflow { <nl> <nl> mmm a / tensorflow / core / distributed_runtime / message_wrappers . h <nl> ppp b / tensorflow / core / distributed_runtime / message_wrappers . h <nl> limitations under the License . <nl> # define THIRD_PARTY_TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_MESSAGE_WRAPPERS_H_ <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> + # include " tensorflow / core / framework / cost_graph . pb . h " <nl> + # include " tensorflow / core / framework / step_stats . pb . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / framework / tensor . pb_text . h " <nl> + # include " tensorflow / core / framework / versions . pb . h " <nl> + # include " tensorflow / core / protobuf / config . pb . h " <nl> # include " tensorflow / core / protobuf / master . pb . h " <nl> # include " tensorflow / core / protobuf / worker . pb . h " <nl> <nl> mmm a / tensorflow / core / distributed_runtime / message_wrappers_test . cc <nl> ppp b / tensorflow / core / distributed_runtime / message_wrappers_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / distributed_runtime / message_wrappers . h " <nl> <nl> + # include " tensorflow / core / framework / cost_graph . pb . h " <nl> + # include " tensorflow / core / framework / step_stats . pb . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / platform / test . h " <nl> + # include " tensorflow / core / protobuf / config . pb . h " <nl> <nl> namespace tensorflow { <nl> <nl> mmm a / tensorflow / core / distributed_runtime / rpc / BUILD <nl> ppp b / tensorflow / core / distributed_runtime / rpc / BUILD <nl> cc_binary ( <nl> " / / tensorflow / core : core_cpu " , <nl> " / / tensorflow / core : framework_internal " , <nl> " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core / distributed_runtime : server_lib " , <nl> " / / tensorflow / core / kernels : constant_op " , <nl> " / / tensorflow / core / kernels : cwise_op " , <nl> mmm a / tensorflow / core / distributed_runtime / rpc / grpc_session . cc <nl> ppp b / tensorflow / core / distributed_runtime / rpc / grpc_session . cc <nl> limitations under the License . <nl> # include " tensorflow / core / distributed_runtime / master_interface . h " <nl> # include " tensorflow / core / distributed_runtime / rpc / grpc_channel . h " <nl> # include " tensorflow / core / distributed_runtime / rpc / grpc_remote_master . h " <nl> + # include " tensorflow / core / framework / attr_value . pb . h " <nl> + # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / platform / mutex . h " <nl> # include " tensorflow / core / protobuf / master . pb . 
h " <nl> mmm a / tensorflow / core / distributed_runtime / rpc / grpc_session_test . cc <nl> ppp b / tensorflow / core / distributed_runtime / rpc / grpc_session_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / platform / init_main . h " <nl> # include " tensorflow / core / platform / logging . h " <nl> # include " tensorflow / core / platform / test . h " <nl> - # include " tensorflow / core / protobuf / master . pb . h " <nl> # include " tensorflow / core / public / session . h " <nl> # include " tensorflow / core / util / port . h " <nl> <nl> mmm a / tensorflow / core / distributed_runtime / rpc / grpc_tensorflow_server . cc <nl> ppp b / tensorflow / core / distributed_runtime / rpc / grpc_tensorflow_server . cc <nl> limitations under the License . <nl> # include " tensorflow / core / lib / strings / str_util . h " <nl> # include " tensorflow / core / lib / strings / strcat . h " <nl> # include " tensorflow / core / platform / init_main . h " <nl> + # include " tensorflow / core / protobuf / cluster . pb . h " <nl> # include " tensorflow / core / protobuf / tensorflow_server . pb . h " <nl> # include " tensorflow / core / public / session_options . h " <nl> # include " tensorflow / core / util / command_line_flags . h " <nl> mmm a / tensorflow / core / distributed_runtime / rpc / grpc_testlib_server . cc <nl> ppp b / tensorflow / core / distributed_runtime / rpc / grpc_testlib_server . cc <nl> limitations under the License . <nl> # include " tensorflow / core / lib / strings / strcat . h " <nl> # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / core / platform / init_main . h " <nl> + # include " tensorflow / core / protobuf / cluster . pb . h " <nl> # include " tensorflow / core / public / session_options . h " <nl> # include " tensorflow / core / util / command_line_flags . h " <nl> <nl> mmm a / tensorflow / core / distributed_runtime / rpcbench_test . cc <nl> ppp b / tensorflow / core / distributed_runtime / rpcbench_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / platform / test . h " <nl> # include " tensorflow / core / platform / test_benchmark . h " <nl> # include " tensorflow / core / platform / types . h " <nl> + # include " tensorflow / core / protobuf / cluster . pb . h " <nl> # include " tensorflow / core / protobuf / tensorflow_server . pb . h " <nl> # include " tensorflow / core / public / session . h " <nl> <nl> mmm a / tensorflow / core / distributed_runtime / tensor_coding . cc <nl> ppp b / tensorflow / core / distributed_runtime / tensor_coding . cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> # include " tensorflow / core / distributed_runtime / tensor_coding . h " <nl> + <nl> + # include " google / protobuf / any . pb . h " <nl> # include " tensorflow / core / common_runtime / device . h " <nl> <nl> namespace tensorflow { <nl> mmm a / tensorflow / core / distributed_runtime / worker_cache_logger . cc <nl> ppp b / tensorflow / core / distributed_runtime / worker_cache_logger . cc <nl> limitations under the License . <nl> # include " tensorflow / core / distributed_runtime / worker_cache_logger . h " <nl> <nl> # include " tensorflow / core / common_runtime / step_stats_collector . h " <nl> + # include " tensorflow / core / framework / allocation_description . pb . 
h " <nl> + # include " tensorflow / core / framework / tensor_description . pb . h " <nl> # include " tensorflow / core / lib / strings / strcat . h " <nl> # include " tensorflow / core / lib / strings / stringprintf . h " <nl> # include " tensorflow / core / platform / mutex . h " <nl> mmm a / tensorflow / core / example / example_parser_configuration . cc <nl> ppp b / tensorflow / core / example / example_parser_configuration . cc <nl> limitations under the License . <nl> <nl> # include < vector > <nl> <nl> - # include " tensorflow / core / example / example . pb . h " <nl> # include " tensorflow / core / example / feature . pb_text . h " <nl> # include " tensorflow / core / framework / numeric_op . h " <nl> # include " tensorflow / core / framework / register_types . h " <nl> mmm a / tensorflow / core / example / example_parser_configuration_test . cc <nl> ppp b / tensorflow / core / example / example_parser_configuration_test . cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> # include " tensorflow / core / example / example_parser_configuration . h " <nl> <nl> - # include " tensorflow / core / example / example . pb . h " <nl> + # include " tensorflow / core / framework / attr_value . pb . h " <nl> + # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / lib / io / path . h " <nl> mmm a / tensorflow / core / framework / function_test . cc <nl> ppp b / tensorflow / core / framework / function_test . cc <nl> limitations under the License . <nl> # include < vector > <nl> # include " tensorflow / core / framework / function . pb . h " <nl> # include " tensorflow / core / framework / function_testlib . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> # include " tensorflow / core / kernels / ops_util . h " <nl> mmm a / tensorflow / core / framework / function_testlib . cc <nl> ppp b / tensorflow / core / framework / function_testlib . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / function . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> + # include " tensorflow / core / framework / versions . pb . h " <nl> # include " tensorflow / core / public / version . h " <nl> <nl> namespace tensorflow { <nl> mmm a / tensorflow / core / framework / graph_def_util . cc <nl> ppp b / tensorflow / core / framework / graph_def_util . cc <nl> limitations under the License . <nl> # include < unordered_set > <nl> # include < vector > <nl> <nl> + # include " tensorflow / core / framework / function . pb . h " <nl> # include " tensorflow / core / framework / node_def_util . h " <nl> # include " tensorflow / core / framework / op_def_util . h " <nl> # include " tensorflow / core / framework / versions . pb_text . h " <nl> mmm a / tensorflow / core / framework / kernel_def_builder . cc <nl> ppp b / tensorflow / core / framework / kernel_def_builder . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . 
<nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / kernel_def_builder . h " <nl> + # include " tensorflow / core / framework / attr_value . pb . h " <nl> # include " tensorflow / core / framework / kernel_def . pb_text . h " <nl> + # include " tensorflow / core / framework / kernel_def_builder . h " <nl> <nl> namespace tensorflow { <nl> <nl> mmm a / tensorflow / core / framework / log_memory . cc <nl> ppp b / tensorflow / core / framework / log_memory . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / log_memory . h " <nl> <nl> - # include " tensorflow / core / framework / log_memory . pb . h " <nl> # include " tensorflow / core / framework / log_memory . pb_text . h " <nl> + # include " tensorflow / core / framework / log_memory . pb . h " <nl> <nl> namespace tensorflow { <nl> <nl> mmm a / tensorflow / core / framework / op_def_builder_test . cc <nl> ppp b / tensorflow / core / framework / op_def_builder_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / op_def_builder . h " <nl> <nl> + # include " tensorflow / core / framework / attr_value . pb . h " <nl> # include " tensorflow / core / framework / op_def . pb . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> mmm a / tensorflow / core / framework / op_gen_lib . cc <nl> ppp b / tensorflow / core / framework / op_gen_lib . cc <nl> limitations under the License . <nl> # include " tensorflow / core / framework / op_gen_lib . h " <nl> <nl> # include < vector > <nl> + # include " tensorflow / core / framework / attr_value . pb . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / lib / strings / str_util . h " <nl> # include " tensorflow / core / lib / strings / strcat . h " <nl> mmm a / tensorflow / core / framework / op_segment_test . cc <nl> ppp b / tensorflow / core / framework / op_segment_test . cc <nl> limitations under the License . <nl> <nl> # include < vector > <nl> # include " tensorflow / core / framework / allocator . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / kernels / ops_util . h " <nl> mmm a / tensorflow / core / graph / algorithm_test . cc <nl> ppp b / tensorflow / core / graph / algorithm_test . cc <nl> limitations under the License . <nl> # include < string > <nl> # include < vector > <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / graph / graph . h " <nl> # include " tensorflow / core / graph / graph_def_builder . h " <nl> # include " tensorflow / core / graph / subgraph . h " <nl> mmm a / tensorflow / core / graph / default_device . h <nl> ppp b / tensorflow / core / graph / default_device . h <nl> limitations under the License . <nl> # include < string > <nl> <nl> # include " tensorflow / core / framework / graph . pb . h " <nl> + # include " tensorflow / core / framework / node_def . pb . h " <nl> <nl> namespace tensorflow { <nl> namespace graph { <nl> mmm a / tensorflow / core / graph / graph_constructor . 
cc <nl> ppp b / tensorflow / core / graph / graph_constructor . cc <nl> bool IsValidNodeName ( StringPiece s , bool allow_internal_ops ) { <nl> class GraphConstructor { <nl> public : <nl> struct Options { <nl> - Options ( const GraphConstructorOptions & in ) <nl> + Options ( const GraphConstructorOptions & in ) / / NOLINT ( runtime / explicit ) <nl> : allow_internal_ops ( in . allow_internal_ops ) , <nl> expect_device_spec ( in . expect_device_spec ) , <nl> importing ( false ) { } <nl> - Options ( const ImportGraphDefOptions & in ) <nl> + Options ( const ImportGraphDefOptions & in ) / / NOLINT ( runtime / explicit ) <nl> : allow_internal_ops ( false ) , <nl> expect_device_spec ( false ) , <nl> prefix ( in . prefix . empty ( ) | | StringPiece ( in . prefix ) . ends_with ( " / " ) <nl> mmm a / tensorflow / core / graph / validate . cc <nl> ppp b / tensorflow / core / graph / validate . cc <nl> limitations under the License . <nl> # include " tensorflow / core / framework / graph_def_util . h " <nl> # include " tensorflow / core / framework / node_def_util . h " <nl> # include " tensorflow / core / framework / op_def_util . h " <nl> + # include " tensorflow / core / framework / versions . pb . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> mmm a / tensorflow / core / grappler / costs / BUILD <nl> ppp b / tensorflow / core / grappler / costs / BUILD <nl> cc_test ( <nl> " / / tensorflow / cc : scope " , <nl> " / / tensorflow / core : framework " , <nl> " / / tensorflow / core : lib_proto_parsing " , <nl> + " / / tensorflow / core : tensor_testutil " , <nl> " / / tensorflow / core : test " , <nl> " / / tensorflow / core : test_main " , <nl> " / / tensorflow / core / grappler : grappler_item " , <nl> cc_library ( <nl> " / / tensorflow / core : framework " , <nl> " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core / grappler : grappler_item " , <nl> + " / / tensorflow / core / grappler : op_types " , <nl> " / / tensorflow / core / grappler : utils " , <nl> " / / tensorflow / core / grappler / clusters : utils " , <nl> " / / tensorflow / core / grappler / costs : cost_estimator " , <nl> cc_test ( <nl> " / / tensorflow / core : tensorflow " , <nl> " / / tensorflow / core : test " , <nl> " / / tensorflow / core : test_main " , <nl> + " / / tensorflow / core / grappler : grappler_item " , <nl> + " / / tensorflow / core / grappler : op_types " , <nl> + " / / tensorflow / core / grappler : utils " , <nl> + " / / tensorflow / core / grappler / clusters : utils " , <nl> " / / tensorflow / core / grappler / clusters : virtual_cluster " , <nl> ] , <nl> ) <nl> mmm a / tensorflow / core / grappler / costs / analytical_cost_estimator . cc <nl> ppp b / tensorflow / core / grappler / costs / analytical_cost_estimator . cc <nl> limitations under the License . <nl> # include < limits > <nl> # include < unordered_map > <nl> <nl> - # include " tensorflow / core / framework / attr_value . pb . h " <nl> # include " tensorflow / core / graph / types . h " <nl> # include " tensorflow / core / grappler / costs / graph_properties . h " <nl> # include " tensorflow / core / grappler / costs / op_performance_data . pb . h " <nl> mmm a / tensorflow / core / grappler / costs / cost_estimator . h <nl> ppp b / tensorflow / core / grappler / costs / cost_estimator . h <nl> limitations under the License . 
<nl> # define TENSORFLOW_GRAPPLER_COSTS_COST_ESTIMATOR_H_ <nl> <nl> # include < chrono > <nl> + # include < unordered_map > <nl> # include " tensorflow / core / lib / core / status . h " <nl> <nl> namespace tensorflow { <nl> struct Costs { <nl> / / streams from main memory . <nl> / / If the time estimation is inaccurate . <nl> bool inaccurate = false ; <nl> + <nl> + / / Max possible memory usage per device . <nl> + std : : unordered_map < string , uint64 > estimated_max_memory_per_device ; <nl> } ; <nl> <nl> inline std : : ostream & operator < < ( std : : ostream & os , const Costs : : MicroSeconds d ) { <nl> mmm a / tensorflow / core / grappler / costs / graph_properties_test . cc <nl> ppp b / tensorflow / core / grappler / costs / graph_properties_test . cc <nl> limitations under the License . <nl> # include " tensorflow / cc / framework / scope . h " <nl> # include " tensorflow / cc / ops / standard_ops . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> + # include " tensorflow / core / framework / tensor_testutil . h " <nl> # include " tensorflow / core / grappler / clusters / single_machine . h " <nl> # include " tensorflow / core / grappler / grappler_item . h " <nl> # include " tensorflow / core / grappler / inputs / trivial_test_graph_input_yielder . h " <nl> TEST_F ( GraphPropertiesTest , Variables ) { <nl> item . fetch . push_back ( " Var " ) ; <nl> <nl> Tensor initial_val ( DT_FLOAT , TensorShape ( { 3 , 7 } ) ) ; <nl> + test : : FillIota < float > ( & initial_val , 0 ) ; <nl> TF_CHECK_OK ( NodeDefBuilder ( " InitialVal " , " Const " ) <nl> . Attr ( " dtype " , DT_FLOAT ) <nl> . Attr ( " value " , initial_val ) <nl> mmm a / tensorflow / core / grappler / costs / measuring_cost_estimator . cc <nl> ppp b / tensorflow / core / grappler / costs / measuring_cost_estimator . cc <nl> limitations under the License . <nl> <nl> # include < limits > <nl> <nl> + # include " tensorflow / core / framework / cost_graph . pb . h " <nl> # include " tensorflow / core / grappler / clusters / cluster . h " <nl> # include " tensorflow / core / grappler / costs / robust_stats . h " <nl> # include " tensorflow / core / grappler / grappler_item . h " <nl> mmm a / tensorflow / core / grappler / costs / op_level_cost_estimator . cc <nl> ppp b / tensorflow / core / grappler / costs / op_level_cost_estimator . cc <nl> constexpr char kNoOp [ ] = " NoOp " ; <nl> constexpr char kReshape [ ] = " Reshape " ; <nl> constexpr char kRecv [ ] = " _Recv " ; <nl> constexpr char kBatchMatMul [ ] = " BatchMatMul " ; <nl> + constexpr char kVariable [ ] = " Variable " ; <nl> + constexpr char kVariableV2 [ ] = " VariableV2 " ; <nl> <nl> OpLevelCostEstimator : : OpLevelCostEstimator ( ) { <nl> / / Syntactic sugar to build and return a lambda that takes an OpInfo and <nl> OpLevelCostEstimator : : OpLevelCostEstimator ( ) { <nl> { kNoOp , wrap ( & OpLevelCostEstimator : : PredictNoOp ) } , <nl> { kReshape , wrap ( & OpLevelCostEstimator : : PredictNoOp ) } , <nl> { kRecv , wrap ( & OpLevelCostEstimator : : PredictNoOp ) } , <nl> + { kVariable , wrap ( & OpLevelCostEstimator : : PredictNoOp ) } , <nl> + { kVariableV2 , wrap ( & OpLevelCostEstimator : : PredictNoOp ) } , <nl> { kBatchMatMul , wrap ( & OpLevelCostEstimator : : PredictBatchMatMul ) } } ; <nl> } <nl> <nl> int64 OpLevelCostEstimator : : CalculateSingleInputSize ( <nl> for ( const auto & dim : input_shape . dim ( ) ) { <nl> input_size * = dim . size ( ) ; <nl> } <nl> - return input_size * DataTypeSize ( input . 
dtype ( ) ) ; <nl> + return input_size * DataTypeSize ( BaseType ( input . dtype ( ) ) ) ; <nl> } <nl> <nl> int64 OpLevelCostEstimator : : CalculateInputSize ( <nl> int64 OpLevelCostEstimator : : CalculateOutputSize ( <nl> for ( const auto & output : op_features . outputs ( ) ) { <nl> DataType dt = output . dtype ( ) ; <nl> const auto & original_output_shape = output . shape ( ) ; <nl> - int64 output_size = DataTypeSize ( dt ) ; <nl> + int64 output_size = DataTypeSize ( BaseType ( dt ) ) ; <nl> int num_dims = std : : max ( 1 , original_output_shape . dim_size ( ) ) ; <nl> auto output_shape = MaybeGetMinimumShape ( original_output_shape , num_dims , <nl> found_unknown_shapes ) ; <nl> mmm a / tensorflow / core / grappler / costs / op_level_cost_estimator_test . cc <nl> ppp b / tensorflow / core / grappler / costs / op_level_cost_estimator_test . cc <nl> void DescribeMatrix ( int rows , int columns , OpInfo * op_features ) { <nl> input - > set_dtype ( DT_FLOAT ) ; <nl> } <nl> <nl> + void SetCpuDevice ( OpInfo * op_features ) { <nl> + auto device = op_features - > mutable_device ( ) ; <nl> + device - > set_type ( " CPU " ) ; <nl> + device - > set_num_cores ( 1 ) ; <nl> + device - > set_frequency ( 2000 ) ; / / Mhz <nl> + } <nl> + <nl> / / Returns an OpInfo for MatMul with the minimum set of fields set up . <nl> OpInfo DescribeMatMul ( int m , int n , int l , int k ) { <nl> OpInfo op_features ; <nl> - auto device = op_features . mutable_device ( ) ; <nl> - device - > set_type ( " CPU " ) ; <nl> + SetCpuDevice ( & op_features ) ; <nl> op_features . set_op ( " MatMul " ) ; <nl> <nl> DescribeMatrix ( m , l , & op_features ) ; <nl> OpInfo DescribeMatMul ( int m , int n , int l , int k ) { <nl> / / Returns an OpInfo for MatMul with unknown input shapes . <nl> OpInfo DescribeMatMulUnknownShape ( ) { <nl> OpInfo op_features ; <nl> - auto device = op_features . mutable_device ( ) ; <nl> - device - > set_type ( " CPU " ) ; <nl> + SetCpuDevice ( & op_features ) ; <nl> op_features . set_op ( " MatMul " ) ; <nl> <nl> auto input = op_features . add_inputs ( ) ; <nl> void DescribeArbitraryRankInput ( const std : : vector < int > & dims , DataType dtype , <nl> OpInfo DescribeBatchMatMul ( const std : : vector < int > & dims_a , <nl> const std : : vector < int > & dims_b ) { <nl> OpInfo op_features ; <nl> - auto device = op_features . mutable_device ( ) ; <nl> - device - > set_type ( " CPU " ) ; <nl> + SetCpuDevice ( & op_features ) ; <nl> op_features . set_op ( " BatchMatMul " ) ; <nl> <nl> DescribeArbitraryRankInput ( dims_a , DT_FLOAT , & op_features ) ; <nl> void DescribeTensor4D ( int dim0 , int dim1 , int dim2 , int dim3 , <nl> OpInfo DescribeConvolution ( int batch , int ix , int iy , int iz1 , int iz2 , int kx , <nl> int ky , int oz ) { <nl> OpInfo op_features ; <nl> - auto device = op_features . mutable_device ( ) ; <nl> - device - > set_type ( " CPU " ) ; <nl> + SetCpuDevice ( & op_features ) ; <nl> op_features . set_op ( " Conv2D " ) ; <nl> <nl> DescribeTensor4D ( batch , ix , iy , iz1 , & op_features ) ; <nl> mmm a / tensorflow / core / grappler / costs / utils . cc <nl> ppp b / tensorflow / core / grappler / costs / utils . cc <nl> DeviceProperties GetDeviceInfo ( const CostGraphDef : : Node & node ) { <nl> return GetDeviceInfo ( node . 
device ( ) ) ; <nl> } <nl> <nl> - OpInfo BuildOpInfo ( <nl> - const NodeDef & node , const string & device_str , <nl> + OpInfo BuildOpInfoWithoutDevice ( <nl> + const NodeDef & node , <nl> const std : : unordered_map < string , const NodeDef * > & name_to_node , <nl> const std : : vector < OpInfo : : TensorProperties > & inputs ) { <nl> OpInfo op_info ; <nl> op_info . set_op ( node . op ( ) ) ; <nl> * op_info . mutable_attr ( ) = node . attr ( ) ; <nl> - * op_info . mutable_device ( ) = GetDeviceInfo ( device_str ) ; <nl> for ( auto & input : inputs ) { <nl> * op_info . add_inputs ( ) = input ; <nl> } <nl> OpPerformanceList CostGraphToOpPerformanceData ( const CostGraphDef & cost_graph , <nl> <nl> std : : vector < OpInfo : : TensorProperties > inputs = <nl> FindInputFeatures ( node , name_to_cost , name_to_node ) ; <nl> - ( * perf - > mutable_op ( ) ) = <nl> - BuildOpInfo ( node , cost_node - > device ( ) , name_to_node , inputs ) ; <nl> + * perf - > mutable_op ( ) = BuildOpInfoWithoutDevice ( node , name_to_node , inputs ) ; <nl> + * perf - > mutable_op ( ) - > mutable_device ( ) = GetDeviceInfo ( cost_node - > device ( ) ) ; <nl> <nl> perf - > set_temporary_memory_size ( cost_node - > temporary_memory_size ( ) ) ; <nl> / / Note that CostGraphDef : : Node : : compute_cost is microseconds , while <nl> mmm a / tensorflow / core / grappler / costs / utils . h <nl> ppp b / tensorflow / core / grappler / costs / utils . h <nl> DeviceProperties GetDeviceInfo ( const string & device_str ) ; <nl> / / Return a string describing a node given a nodeinfo . <nl> string GetOpDescription ( const OpInfo & op_info ) ; <nl> <nl> - / / Builds the OpInfo proto for node , given all nodes in the graph , the node ' s <nl> - / / device and its input properties which are typically built by shape inference <nl> - / / or calling FindInputFeatures . <nl> - OpInfo BuildOpInfo ( <nl> - const NodeDef & node , const string & device_str , <nl> + / / Builds the OpInfo for node without filling its device information , given all <nl> + / / nodes in the graph and its input properties . <nl> + OpInfo BuildOpInfoWithoutDevice ( <nl> + const NodeDef & node , <nl> const std : : unordered_map < string , const NodeDef * > & name_to_node , <nl> const std : : vector < OpInfo : : TensorProperties > & inputs ) ; <nl> <nl> mmm a / tensorflow / core / grappler / costs / virtual_scheduler . cc <nl> ppp b / tensorflow / core / grappler / costs / virtual_scheduler . cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> # include " tensorflow / core / grappler / costs / virtual_scheduler . h " <nl> + <nl> + # include < math . h > <nl> + <nl> # include " tensorflow / core / framework / attr_value . pb . h " <nl> # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / grappler / clusters / utils . h " <nl> # include " tensorflow / core / grappler / costs / utils . h " <nl> + # include " tensorflow / core / grappler / op_types . h " <nl> # include " tensorflow / core / grappler / utils . h " <nl> # include " tensorflow / core / util / device_name_utils . 
h " <nl> <nl> VirtualScheduler : : VirtualScheduler ( const GrapplerItem * grappler_item , <nl> const bool use_static_shapes , <nl> const string & default_device_type , <nl> Cluster * cluster , VirtualPlacer * placer ) <nl> - : graph_properties_ ( * grappler_item ) , <nl> - graph_costs_ ( Costs : : ZeroCosts ( ) ) , <nl> - / / TODO ( dyoon ) : Use a better way than FIFO . <nl> + : / / TODO ( dyoon ) : Use a better way than FIFO . <nl> ready_nodes_ ( new FIFOManager ( ) ) , <nl> + graph_costs_ ( Costs : : ZeroCosts ( ) ) , <nl> + graph_properties_ ( * grappler_item ) , <nl> cluster_ ( cluster ) , <nl> grappler_item_ ( grappler_item ) , <nl> use_static_shapes_ ( use_static_shapes ) , <nl> VirtualScheduler : : VirtualScheduler ( const GrapplerItem * grappler_item , <nl> } <nl> <nl> Status VirtualScheduler : : Init ( ) { <nl> + / / Init ( ) preprocesses the input grappler_item and graph_properties to extract <nl> + / / necessary information for emulating tensorflow op scheduling and <nl> + / / construct internal data structures ( NodeState and DeviceState ) for virtual <nl> + / / scheduling . <nl> + <nl> / / Construct graph properties . <nl> Status status ; <nl> if ( use_static_shapes_ ) { <nl> Status VirtualScheduler : : Init ( ) { <nl> const auto & graph = grappler_item_ - > graph ; <nl> const auto & fetch_nodes = grappler_item_ - > fetch ; <nl> <nl> - / / First , get the nodes that would run to output fetch_nodes . <nl> + / / Get the nodes that would run to output fetch_nodes . <nl> std : : vector < const NodeDef * > nodes = <nl> ComputeTransitiveFanin ( graph , fetch_nodes ) ; <nl> <nl> / / TODO ( dyoon ) : this is a bit inefficient as name_to_node is already built in <nl> / / ComputeTransitiveFanin ( ) . <nl> - / / <nl> / / Once ComputeTransitiveFanin is complete , only the nodes that can be reached <nl> / / from the fetch nodes are scheduled . So the scheduled nodes should be <nl> / / exactly the same as those executed for real . One possible discrepancy could <nl> Status VirtualScheduler : : Init ( ) { <nl> name_to_node [ node - > name ( ) ] = node ; <nl> } <nl> <nl> - / / Build node_map . <nl> + / / Build node_map ; for each node , create its NodeState and connect its inputs <nl> + / / and outputs . <nl> for ( const auto * curr_node : nodes ) { <nl> auto & curr_node_state = GetNodeStateOrCreateIt ( curr_node ) ; <nl> const string curr_node_device = DeviceName ( curr_node ) ; <nl> for ( const string & input_node_name : curr_node - > input ( ) ) { <nl> - / / Note that input_node_name may be in < node_name > : < output_number > format , <nl> - / / where " : < output_number > " may be omitted . NodeName ( ) extracts only the <nl> - / / node_name ( prefeix " ^ " , if there was for control input , is also <nl> - / / deleted ) . <nl> + / / Note that input_node_name may be in < prefix > < node_name > : < port_num > <nl> + / / format , where < prefix > ( e . g . , " ^ " for control dependency ) and <nl> + / / " : < port_num > " may be omitted . NodeName ( ) extracts only the node_name . <nl> const NodeDef * input_node = name_to_node [ NodeName ( input_node_name ) ] ; <nl> + <nl> CHECK ( input_node ) ; <nl> - / / Add input_to_curr_node to curr_node ' s input , and <nl> - / / add output_to_input_node to input_source_node ' s output . <nl> - / / Default values for when input_node and curr_node on the same device . 
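The wiring being rewritten in this hunk is easiest to follow as pseudocode. Below is a loose Python sketch of the per-device _Recv caching rule that the added lines implement; device(), create_send_recv(), and node_state are stand-ins for illustration, not real APIs.

    # Loose pseudocode for the cross-device wiring rule; `device`,
    # `create_send_recv`, and `node_state` are stand-ins, not real APIs.
    cached_recv_nodes = {}  # input_node -> {device_name: recv_op}

    def connect_input(curr_node, input_node, port_num):
        if device(curr_node) == device(input_node):
            # Same device: connect directly, keeping the output port.
            node_state[curr_node].inputs.append((input_node, port_num))
            node_state[input_node].outputs[port_num].append(curr_node)
        elif device(curr_node) in cached_recv_nodes.get(input_node, {}):
            # Tensor already transferred to this device: reuse the cached
            # _Recv (its only output port is 0) so it is sent just once.
            recv = cached_recv_nodes[input_node][device(curr_node)]
            node_state[curr_node].inputs.append((recv, 0))
            node_state[recv].outputs[0].append(curr_node)
        else:
            # First consumer on this device: insert a _Send/_Recv pair and
            # cache the _Recv for later consumers on the same device.
            send, recv = create_send_recv(input_node, curr_node)
            node_state[curr_node].inputs.append((recv, 0))
            node_state[input_node].outputs[port_num].append(send)
            cached_recv_nodes.setdefault(input_node, {})[device(curr_node)] = recv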
<nl> - const NodeDef * input_to_curr_node = input_node ; <nl> - const NodeDef * input_source_node = input_node ; <nl> - const NodeDef * output_to_input_node = curr_node ; <nl> const string in_device = DeviceName ( input_node ) ; <nl> - if ( curr_node_device ! = in_device ) { <nl> - if ( cached_ops_ . count ( input_node ) > 0 & & <nl> - cached_ops_ [ input_node ] . count ( curr_node_device ) > 0 ) { <nl> - / / Different device , but found an already - transferred copy ; connect <nl> - / / the cached node to curr_node . <nl> - input_to_curr_node = cached_ops_ [ input_node ] [ curr_node_device ] ; <nl> - input_source_node = input_to_curr_node ; <nl> - output_to_input_node = curr_node ; <nl> + const auto input_node_port_num = NodePosition ( input_node_name ) ; <nl> + <nl> + if ( curr_node_device = = in_device ) { <nl> + / / Same device : connect input_node and curr_node directly . <nl> + curr_node_state . inputs . push_back ( <nl> + std : : make_pair ( input_node , input_node_port_num ) ) ; <nl> + auto & input_node_state = GetNodeStateOrCreateIt ( input_node ) ; <nl> + input_node_state . outputs [ input_node_port_num ] . push_back ( curr_node ) ; <nl> + } else { <nl> + if ( cached_recv_nodes_ . count ( input_node ) > 0 & & <nl> + cached_recv_nodes_ [ input_node ] . count ( curr_node_device ) > 0 ) { <nl> + / / Different device , but found an already - cached copy ( a _Recv op ) ; <nl> + / / connect the _Recv to curr_node . <nl> + const auto * recv_op = <nl> + cached_recv_nodes_ [ input_node ] [ curr_node_device ] ; <nl> + / / recv_op ' s output port is hard - coded to zero . <nl> + curr_node_state . inputs . push_back ( std : : make_pair ( recv_op , 0 ) ) ; <nl> + auto & input_node_state = node_map_ . at ( recv_op ) ; <nl> + input_node_state . outputs [ 0 ] . push_back ( curr_node ) ; <nl> } else { <nl> / / Different device , no cached copy ; transfer input_node to the <nl> / / curr_node ' s device . <nl> - auto sendrecv_and_identity = <nl> - TransferNode ( input_node , curr_node , input_node_name ) ; <nl> - const auto * sendrecv = sendrecv_and_identity . first ; <nl> - const auto * identity = sendrecv_and_identity . second ; <nl> - input_to_curr_node = identity ; <nl> - input_source_node = input_node ; <nl> - output_to_input_node = sendrecv ; <nl> - <nl> - / / Cache the identity op for future use . <nl> - cached_ops_ [ input_node ] [ curr_node_device ] = identity ; <nl> + auto send_and_recv = <nl> + CreateSendRecv ( input_node , curr_node , input_node_name ) ; <nl> + / / Note that CreateSendRecv ( ) already connected input / output between <nl> + / / _Send and _Recv ops . <nl> + const auto * send = send_and_recv . first ; <nl> + const auto * recv = send_and_recv . second ; <nl> + / / recv_op ' s output port is hard - coded to zero . <nl> + curr_node_state . inputs . push_back ( std : : make_pair ( recv , 0 ) ) ; <nl> + auto & input_node_state = GetNodeStateOrCreateIt ( input_node ) ; <nl> + input_node_state . outputs [ input_node_port_num ] . push_back ( send ) ; <nl> + <nl> + / / Cache the _Recv op for future use . <nl> + cached_recv_nodes_ [ input_node ] [ curr_node_device ] = recv ; <nl> } <nl> } <nl> - curr_node_state . inputs . push_back ( input_to_curr_node ) ; <nl> - <nl> - / / Note that we do not care output number ( in case a tf op has multiple <nl> - / / outputs ) , as VirtualScheduler only cares which nodes become ready as <nl> - / / a node is executed . <nl> - auto & input_node_state = GetNodeStateOrCreateIt ( input_source_node ) ; <nl> - input_node_state . outputs . 
push_back ( output_to_input_node ) ; <nl> } <nl> <nl> if ( curr_node - > input ( ) . empty ( ) ) { <nl> - curr_node_state . time_ready = <nl> - Costs : : Duration ( ) ; / / Node without input : ready at time 0 . <nl> + / / Node without input : ready at time 0 . <nl> + curr_node_state . time_ready = Costs : : Duration ( ) ; <nl> ready_nodes_ - > AddNode ( curr_node ) ; <nl> } <nl> + <nl> + if ( IsPersistentNode ( curr_node ) ) { <nl> + auto & device_state = device_ [ curr_node_device ] ; <nl> + for ( int port_num = 0 ; <nl> + port_num < curr_node_state . output_properties . size ( ) ; + + port_num ) { <nl> + device_state . persistent_nodes . insert ( <nl> + std : : make_pair ( curr_node , port_num ) ) ; <nl> + } <nl> + } <nl> } <nl> <nl> if ( ready_nodes_ - > Empty ( ) ) { <nl> Status VirtualScheduler : : Init ( ) { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - void VirtualScheduler : : MaybeUpdateInputProperties ( <nl> - const NodeDef * node , std : : vector < OpInfo : : TensorProperties > * inputs ) const { <nl> - if ( IsSendOp ( node ) | | IsRecvOp ( node ) ) { <nl> - / / _Send and _Recv ops are inserted from VirtualScheduler , so <nl> + void VirtualScheduler : : MaybeUpdateInputOutput ( const NodeDef * node ) { <nl> + CHECK ( ! initialized_ ) < < " MaybeUpdateInputOutput is called after Init ( ) . " ; <nl> + / / This method is called when NodeState is created and adds input and output <nl> + / / properties for a few exceptional cases where GraphProperties cannot provide <nl> + / / input / output properties . <nl> + if ( IsSend ( * node ) | | IsRecv ( * node ) ) { <nl> + auto & node_state = node_map_ [ node ] ; <nl> + auto & inputs = node_state . input_properties ; <nl> + auto & outputs = node_state . output_properties ; <nl> + <nl> + / / _Send and _Recv ops are created by VirtualScheduler , so <nl> / / there should be no inputs TensorProperties . <nl> - CHECK_EQ ( inputs - > size ( ) , 0 ) ; <nl> + CHECK ( inputs . empty ( ) ) ; <nl> + CHECK ( outputs . empty ( ) ) ; <nl> const auto & attr = node - > attr ( ) ; <nl> / / This is the original input source to the _Send and _Recv , and this <nl> / / string includes " ^ " if it was control dependency , and output port <nl> / / / ( e . g . , " : 2 " ) if the input source had multiple outputs . <nl> const auto & input_source_name = attr . at ( kAttrInputSrc ) . s ( ) ; <nl> - if ( input_source_name [ 0 ] = = ' ^ ' ) { <nl> + if ( IsControlInput ( input_source_name ) ) { <nl> / / Control dependency ; regardless of the input source tensor size , <nl> / / send 4B . <nl> OpInfo : : TensorProperties control_message ; <nl> void VirtualScheduler : : MaybeUpdateInputProperties ( <nl> control_message . mutable_shape ( ) - > add_dim ( ) - > set_size ( 1 ) ; <nl> auto * value = control_message . mutable_value ( ) ; <nl> value - > add_float_val ( 1 ) ; <nl> - inputs - > push_back ( control_message ) ; <nl> + inputs . push_back ( control_message ) ; <nl> + outputs . push_back ( control_message ) ; <nl> } else { <nl> + auto output_properties = <nl> + graph_properties_ . GetOutputProperties ( NodeName ( input_source_name ) ) ; <nl> / / Like with HasInputProperties , if a node does not have output <nl> / / properties , it ' s likely it was pruned during the shape inference run . <nl> - if ( graph_properties_ . HasOutputProperties ( NodeName ( input_source_name ) ) ) { <nl> - const auto input_position = NodePosition ( input_source_name ) ; <nl> + if ( ! output_properties . 
empty ( ) ) { <nl> + const auto input_node_port_num = NodePosition ( input_source_name ) ; <nl> / / Use the input source ' s output property as _Send and _Recv ' s input <nl> / / property . <nl> - auto outputs = <nl> - graph_properties_ . GetOutputProperties ( NodeName ( input_source_name ) ) ; <nl> - CHECK_GT ( outputs . size ( ) , input_position ) ; <nl> - inputs - > push_back ( outputs [ input_position ] ) ; <nl> + CHECK_GT ( output_properties . size ( ) , input_node_port_num ) ; <nl> + inputs . push_back ( output_properties [ input_node_port_num ] ) ; <nl> + outputs . push_back ( output_properties [ input_node_port_num ] ) ; <nl> } <nl> } <nl> } <nl> } <nl> <nl> - bool VirtualScheduler : : IsSendOp ( const NodeDef * node ) const { <nl> - return node - > op ( ) = = kSend ; <nl> + float VirtualScheduler : : Round2 ( const float x ) const { <nl> + / / Not using std : : round from < cmath > here because not all platforms seem to <nl> + / / support that ( specifically Android ) . <nl> + return : : round ( 100 . 0 * x ) / 100 . 0 ; <nl> } <nl> <nl> - bool VirtualScheduler : : IsRecvOp ( const NodeDef * node ) const { <nl> - return node - > op ( ) = = kRecv ; <nl> + bool VirtualScheduler : : IsPersistentNode ( const NodeDef * node ) const { <nl> + / / Variables are persistent nodes . <nl> + return IsVariable ( * node ) ; <nl> } <nl> <nl> string VirtualScheduler : : DeviceName ( const NodeDef * node ) const { <nl> + CHECK ( ! initialized_ ) < < " DeviceName is called after Init ( ) . " ; <nl> + <nl> / / TODO ( dyoon ) : integrate this part with VirtualPlacer . <nl> - if ( IsSendOp ( node ) ) { <nl> - const auto & node_state = node_map_ . at ( node ) ; <nl> - const auto * from = node_state . inputs [ 0 ] ; <nl> - const auto * to = node_state . outputs [ 0 ] ; <nl> - return ChannelDeviceName ( from , to ) ; <nl> - } else { <nl> - return node - > device ( ) . empty ( ) ? " / " + default_device_type_ + " : 0 " <nl> - : node - > device ( ) ; <nl> - } <nl> + return node - > device ( ) . empty ( ) ? " / device : " + default_device_type_ + " : 0 " <nl> + : node - > device ( ) ; <nl> } <nl> <nl> string VirtualScheduler : : ChannelDeviceName ( const NodeDef * from , <nl> const NodeDef * to ) const { <nl> + CHECK ( ! initialized_ ) < < " ChannelDeviceName is called after Init ( ) . " ; <nl> + <nl> return kChannelDevice + " : " + DeviceName ( from ) + " to " + DeviceName ( to ) ; <nl> } <nl> <nl> - std : : pair < const NodeDef * , const NodeDef * > VirtualScheduler : : TransferNode ( <nl> + std : : pair < const NodeDef * , const NodeDef * > VirtualScheduler : : CreateSendRecv ( <nl> const NodeDef * from , const NodeDef * to , const string & input_name ) { <nl> + CHECK ( ! initialized_ ) < < " CreateSendRecv is called after Init ( ) . " ; <nl> + <nl> / / Connect " from " node to " to " node with _Send and _Recv such that <nl> / / from - > _Send - > _Recv - > to . <nl> / / _Send is placed on " Channel " device , and _Recv is on the same device <nl> std : : pair < const NodeDef * , const NodeDef * > VirtualScheduler : : TransferNode ( <nl> / / NodeDefs created here need not be correct : in terms of name , <nl> / / input names , attrs , etc . <nl> <nl> + auto input_node_port_num = NodePosition ( input_name ) ; <nl> + <nl> / / _Send op . 
<nl> auto * send = new NodeDef ( ) ; <nl> send - > set_name ( " Send " + from - > name ( ) + " from " + DeviceName ( from ) + " to " + <nl> DeviceName ( to ) ) ; <nl> - send - > set_op ( kSend ) ; <nl> + send - > set_op ( " _Send " ) ; <nl> send - > add_input ( from - > name ( ) ) ; <nl> send - > set_device ( ChannelDeviceName ( from , to ) ) ; <nl> auto & send_attr = * ( send - > mutable_attr ( ) ) ; <nl> std : : pair < const NodeDef * , const NodeDef * > VirtualScheduler : : TransferNode ( <nl> / / _Recv op . <nl> auto * recv = new NodeDef ( ) ; <nl> recv - > set_name ( " Recv " + from - > name ( ) + " on " + DeviceName ( to ) ) ; <nl> - recv - > set_op ( kRecv ) ; <nl> + recv - > set_op ( " _Recv " ) ; <nl> recv - > add_input ( send - > name ( ) ) ; <nl> recv - > set_device ( DeviceName ( to ) ) ; <nl> auto & recv_attr = * ( recv - > mutable_attr ( ) ) ; <nl> recv_attr [ kAttrInputSrc ] . set_s ( input_name ) ; <nl> <nl> - / / Update NodeState for _Send and _Recv ops . <nl> + / / NodeState for _Send op . <nl> auto & send_node_state = GetNodeStateOrCreateIt ( send ) ; <nl> - send_node_state . inputs . push_back ( from ) ; <nl> - send_node_state . outputs . push_back ( recv ) ; <nl> + send_node_state . device_name = send - > device ( ) ; / / Set Channel device . <nl> + send_node_state . inputs . push_back ( std : : make_pair ( from , input_node_port_num ) ) ; <nl> + send_node_state . outputs [ 0 ] . push_back ( recv ) ; <nl> + <nl> + / / NodeState for _Recv op . <nl> auto & recv_node_state = GetNodeStateOrCreateIt ( recv ) ; <nl> - recv_node_state . inputs . push_back ( send ) ; <nl> - recv_node_state . outputs . push_back ( to ) ; <nl> + recv_node_state . inputs . push_back ( std : : make_pair ( send , 0 ) ) ; <nl> + recv_node_state . outputs [ 0 ] . push_back ( to ) ; <nl> <nl> / / Keep the created nodes . <nl> additional_nodes_ . emplace_back ( std : : unique_ptr < NodeDef > ( send ) ) ; <nl> std : : pair < const NodeDef * , const NodeDef * > VirtualScheduler : : TransferNode ( <nl> <nl> NodeInfo VirtualScheduler : : GetCurrNodeInfo ( ) const { <nl> const NodeDef * node = ready_nodes_ - > GetCurrNode ( ) ; <nl> - std : : vector < OpInfo : : TensorProperties > inputs = <nl> - graph_properties_ . GetInputProperties ( node - > name ( ) ) ; <nl> - / / Some ops created within VirtualScheduler may need further processing to <nl> - / / the input properties . <nl> - MaybeUpdateInputProperties ( node , & inputs ) ; <nl> <nl> - / / This is for compatibility ; we can just use palcer_ - > get_device ( ) for all <nl> + / / This is for compatibility ; we can just use placer_ - > get_device ( ) for all <nl> / / cases , once VirtualCluster is properly set up . <nl> DeviceProperties device ; <nl> if ( placer_ ) { <nl> NodeInfo VirtualScheduler : : GetCurrNodeInfo ( ) const { <nl> int device_id ; <nl> DeviceNameUtils : : ParsedName parsed ; <nl> if ( ! node - > device ( ) . empty ( ) & & <nl> - DeviceNameUtils : : ParseFullName ( DeviceName ( node ) , & parsed ) ) { <nl> + DeviceNameUtils : : ParseFullName ( node_map_ . at ( node ) . device_name , <nl> + & parsed ) ) { <nl> device_type = parsed . type ; <nl> device_id = parsed . id ; <nl> } else { <nl> NodeInfo VirtualScheduler : : GetCurrNodeInfo ( ) const { <nl> } <nl> <nl> / / Special case for _Send op . <nl> - if ( IsSendOp ( node ) ) { <nl> + if ( IsSend ( * node ) ) { <nl> device . set_type ( kChannelDevice ) ; <nl> } <nl> <nl> + / / Construct NodeInfo . <nl> + const auto & node_state = node_map_ . 
at ( node ) ; <nl> NodeInfo node_info ; <nl> node_info . name = node - > name ( ) ; <nl> - node_info . device_name = graph_properties_ . GetDeviceName ( node - > name ( ) ) ; <nl> - std : : vector < OpInfo : : TensorProperties > outputs = <nl> - graph_properties_ . GetOutputProperties ( node - > name ( ) ) ; <nl> + node_info . device_name = node_state . device_name ; <nl> auto & op_info = node_info . op_info ; <nl> op_info . set_op ( node - > op ( ) ) ; <nl> * op_info . mutable_attr ( ) = node - > attr ( ) ; <nl> - for ( auto & input : inputs ) { <nl> - op_info . add_inputs ( ) - > Swap ( & input ) ; <nl> + for ( auto & input : node_state . input_properties ) { <nl> + * op_info . add_inputs ( ) = input ; <nl> } <nl> - for ( auto & output : outputs ) { <nl> - op_info . add_outputs ( ) - > Swap ( & output ) ; <nl> + for ( auto & output : node_state . output_properties ) { <nl> + * op_info . add_outputs ( ) = output ; <nl> } <nl> op_info . mutable_device ( ) - > Swap ( & device ) ; <nl> - / / add some more to the node_info . <nl> return node_info ; <nl> } <nl> <nl> NodeState & VirtualScheduler : : GetNodeStateOrCreateIt ( const NodeDef * node ) { <nl> + CHECK ( ! initialized_ ) < < " GetNodeStateOrCreateIt is called after Init ( ) . " ; <nl> + <nl> auto it = node_map_ . find ( node ) ; <nl> if ( it = = node_map_ . end ( ) ) { <nl> + / / Not found ; create a NodeState for this node . <nl> it = node_map_ . emplace ( node , NodeState ( ) ) . first ; <nl> - } <nl> - return it - > second ; <nl> - } <nl> + auto & node_state = it - > second ; <nl> + node_state . input_properties = <nl> + graph_properties_ . GetInputProperties ( node - > name ( ) ) ; <nl> + node_state . output_properties = <nl> + graph_properties_ . GetOutputProperties ( node - > name ( ) ) ; <nl> + <nl> + / / Some ops may need further processing to the input / output properties : <nl> + / / _Send and _Recv . <nl> + MaybeUpdateInputOutput ( node ) ; <nl> + <nl> + if ( ! IsSend ( * node ) ) { <nl> + node_state . device_name = DeviceName ( node ) ; <nl> + / / For _Send op , device_name will be set to Channel in CreateSendRecv ( ) . <nl> + } <nl> <nl> - Costs & VirtualScheduler : : FindOrCreateZero ( const string & op_name , <nl> - std : : map < string , Costs > * op_cost ) { <nl> - auto it = op_cost - > find ( op_name ) ; <nl> - if ( it = = op_cost - > end ( ) ) { <nl> - it = op_cost - > emplace ( op_name , Costs : : ZeroCosts ( ) ) . first ; <nl> + / / Initialize output port related data : <nl> + / / Assume the size of OutputProperties represents the number of output ports <nl> + / / of this node . <nl> + for ( int i = 0 ; i < node_state . output_properties . size ( ) ; + + i ) { <nl> + node_state . time_no_references [ i ] = Costs : : Duration : : max ( ) ; <nl> + node_state . num_outputs_executed [ i ] = 0 ; <nl> + / / Populate an empty vector for each port . The caller will add nodes <nl> + / / that use this port as input . <nl> + node_state . outputs [ i ] = { } ; <nl> + } <nl> + / / Port_num - 1 is for control dependency . <nl> + node_state . time_no_references [ - 1 ] = Costs : : Duration : : max ( ) ; <nl> + node_state . num_outputs_executed [ - 1 ] = 0 ; <nl> + node_state . outputs [ - 1 ] = { } ; <nl> } <nl> return it - > second ; <nl> } <nl> <nl> - bool VirtualScheduler : : PopCurrNode ( ) { <nl> - const auto * node = ready_nodes_ - > GetCurrNode ( ) ; <nl> - auto & node_state = node_map_ [ node ] ; <nl> - auto & device = device_ [ DeviceName ( node ) ] ; <nl> - auto curr_time = device . 
GetCurrTime ( ) ; <nl> + int64 VirtualScheduler : : CalculateOutputSize ( <nl> + const std : : vector < OpInfo : : TensorProperties > & output_properties , <nl> + const int port_num ) const { <nl> + if ( port_num < 0 ) { <nl> + return 4 ; / / 4B for control dependency . <nl> + } <nl> <nl> - / / Increment num_inputs_ready of the output nodes . <nl> - for ( auto * output : node_state . outputs ) { <nl> - auto & output_state = node_map_ [ output ] ; <nl> - output_state . num_inputs_ready + + ; <nl> - if ( output_state . num_inputs_ready = = output_state . inputs . size ( ) ) { <nl> - / / This output node is now ready . <nl> - output_state . time_ready = curr_time ; <nl> - ready_nodes_ - > AddNode ( output ) ; <nl> - } <nl> + if ( port_num > = output_properties . size ( ) ) { <nl> + VLOG ( 3 ) < < " VirtualScheduler : : CalculateOutputSize ( ) - - " <nl> + < < " port_num : " < < port_num <nl> + < < " > = output_properties . size ( ) : " < < output_properties . size ( ) ; <nl> + return 0 ; <nl> } <nl> <nl> - / / Increment num_outputs_executed of the input nodes . <nl> - for ( auto * input : node_state . inputs ) { <nl> - auto & input_state = node_map_ [ input ] ; <nl> - input_state . num_outputs_executed + + ; <nl> - if ( input_state . num_outputs_executed = = input_state . outputs . size ( ) ) { <nl> - / / All the outputs are executed ; no reference to this input nodel <nl> - input_state . time_no_reference = curr_time ; <nl> - / / TODO ( dyoon ) : collect device memory usage ; note that this input node <nl> - / / use device memory between time_scheduled and time_no_reference . <nl> + const auto & output = output_properties [ port_num ] ; <nl> + int64 output_size = DataTypeSize ( BaseType ( output . dtype ( ) ) ) ; <nl> + <nl> + for ( const auto & dim : output . shape ( ) . dim ( ) ) { <nl> + auto dim_size = dim . size ( ) ; <nl> + if ( dim_size < 0 ) { <nl> + / / Zero output size if there ' s any unknown dim . <nl> + output_size = 0 ; <nl> + VLOG ( 3 ) < < " VirtualScheduler : : CalculateOutputSize ( ) - - " <nl> + < < " unknown dim : " < < output_size ; <nl> + break ; <nl> + } <nl> + output_size * = dim_size ; <nl> + } <nl> <nl> - / / Remove the current node ; assume FIFO . <nl> - ready_nodes_ - > RemoveCurrNode ( ) ; <nl> + return output_size ; <nl> + } <nl> <nl> - return ! ready_nodes_ - > Empty ( ) ; <nl> + Costs & VirtualScheduler : : FindOrCreateZero ( const string & op_name , <nl> + std : : map < string , Costs > * op_cost ) { <nl> + auto it = op_cost - > find ( op_name ) ; <nl> + if ( it = = op_cost - > end ( ) ) { <nl> + / / Note that the default constructor of Costs sets some memory - related fields <nl> + / / to unknown values so we should explicitly initialize it with ZeroCosts . <nl> + it = op_cost - > emplace ( op_name , Costs : : ZeroCosts ( ) ) . first ; <nl> + } <nl> + return it - > second ; <nl> } <nl> <nl> bool VirtualScheduler : : MarkCurrNodeExecuted ( const Costs & node_costs ) { <nl> bool VirtualScheduler : : MarkCurrNodeExecuted ( const Costs & node_costs ) { <nl> <nl> / / Update node and device states . <nl> auto & node_state = node_map_ [ node ] ; <nl> - auto & device = device_ [ DeviceName ( node ) ] ; <nl> + auto & device = device_ [ node_state . device_name ] ; <nl> device . nodes_executed . 
push_back ( node ) ; <nl> / / Node is scheduled when the device is available AND all the inputs are <nl> / / ready ; hence , time_scheduled is time_ready if time_ready > device curr <nl> bool VirtualScheduler : : MarkCurrNodeExecuted ( const Costs & node_costs ) { <nl> auto curr_time = device . GetCurrTime ( ) ; <nl> node_state . time_finished = curr_time ; <nl> <nl> + / / Update device memory usage . <nl> + if ( ! IsPersistentNode ( node ) ) { <nl> + for ( const auto & port_num_output_pair : node_state . outputs ) { <nl> + int port_num = port_num_output_pair . first ; <nl> + / / There ' s a chance that a specific output is not used at all . <nl> + if ( node_state . outputs [ port_num ] . empty ( ) ) { <nl> + node_state . time_no_references [ port_num ] = curr_time ; <nl> + } else { <nl> + device . memory_usage + = <nl> + CalculateOutputSize ( node_state . output_properties , port_num ) ; <nl> + device . nodes_in_memory . insert ( std : : make_pair ( node , port_num ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> / / Update device ' s per - op cost . <nl> auto & device_op_cost = FindOrCreateZero ( op_name , & device . op_to_cost ) ; <nl> device_op_cost = CombineCosts ( device_op_cost , node_costs ) ; <nl> bool VirtualScheduler : : MarkCurrNodeExecuted ( const Costs & node_costs ) { <nl> < < " , scheduled : " < < node_state . time_scheduled . count ( ) <nl> < < " , finished : " < < node_state . time_finished . count ( ) ; <nl> <nl> - return PopCurrNode ( ) ; <nl> + / / Increment num_inputs_ready of the output nodes . <nl> + for ( const auto & port_num_output_pair : node_state . outputs ) { <nl> + for ( auto * output_node : port_num_output_pair . second ) { <nl> + auto & output_state = node_map_ [ output_node ] ; <nl> + output_state . num_inputs_ready + + ; <nl> + if ( output_state . num_inputs_ready = = output_state . inputs . size ( ) ) { <nl> + / / This output node is now ready . <nl> + output_state . time_ready = curr_time ; <nl> + ready_nodes_ - > AddNode ( output_node ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Increment num_outputs_executed of the input nodes . <nl> + for ( const auto & input_port : node_state . inputs ) { <nl> + auto * input = input_port . first ; <nl> + auto port = input_port . second ; <nl> + auto & input_state = node_map_ [ input ] ; <nl> + input_state . num_outputs_executed [ port ] + + ; <nl> + if ( input_state . num_outputs_executed [ port ] = = <nl> + input_state . outputs [ port ] . size ( ) & & <nl> + ! IsPersistentNode ( input ) ) { <nl> + / / All the outputs are executed ; no reference to this output port of <nl> + / / the input node . <nl> + input_state . time_no_references [ port ] = curr_time ; <nl> + auto & input_device = device_ [ input_state . device_name ] ; <nl> + input_device . memory_usage - = <nl> + CalculateOutputSize ( input_state . output_properties , port ) ; <nl> + <nl> + input_device . nodes_in_memory . erase ( std : : make_pair ( input , port ) ) ; <nl> + } <nl> + } <nl> + <nl> + if ( ! IsPersistentNode ( node ) ) { <nl> + / / Now that output memory is added and used - up nodes are deallocated , <nl> + / / check max memory usage . <nl> + if ( device . memory_usage > device . max_memory_usage ) { <nl> + device . max_memory_usage = device . memory_usage ; <nl> + device . mem_usage_snapshot_at_peak = device . nodes_in_memory ; <nl> + } <nl> + } <nl> + <nl> + / / Remove the current node ; assume FIFO . <nl> + ready_nodes_ - > RemoveCurrNode ( ) ; <nl> + <nl> + return ! 
ready_nodes_ - > Empty ( ) ; <nl> } <nl> <nl> Costs VirtualScheduler : : Summary ( ) const { <nl> Costs VirtualScheduler : : Summary ( ) const { <nl> for ( const auto & device : device_ ) { <nl> const auto & name = device . first ; <nl> const auto & state = device . second ; <nl> + <nl> + std : : map < string , int64 > op_to_memory ; <nl> + / / First profile only persistent memory usage . <nl> + int64 persistent_memory_usage = 0 ; <nl> + std : : set < string > persistent_ops ; <nl> + for ( const auto & node_port : state . persistent_nodes ) { <nl> + const auto * node = node_port . first ; <nl> + const auto port = node_port . second ; <nl> + const auto output_size = <nl> + CalculateOutputSize ( node_map_ . at ( node ) . output_properties , port ) ; <nl> + persistent_memory_usage + = output_size ; <nl> + op_to_memory [ node - > op ( ) ] + = output_size ; <nl> + persistent_ops . insert ( node - > op ( ) ) ; <nl> + } <nl> + int64 max_memory_usage = persistent_memory_usage + state . max_memory_usage ; <nl> + <nl> VLOG ( 1 ) < < " Device = " < < name <nl> < < " , num_nodes = " < < state . nodes_executed . size ( ) <nl> - < < " , execution_time = " < < state . GetCurrTime ( ) . count ( ) ; <nl> - VLOG ( 1 ) < < " Per - op execution time : " ; <nl> + < < " , execution_time = " < < state . GetCurrTime ( ) . count ( ) <nl> + < < " , memory usage : " <nl> + < < " persistent = " <nl> + < < Round2 ( persistent_memory_usage / 1024 . 0 / 1024 . 0 / 1024 . 0 ) <nl> + < < " GB , peak = " <nl> + < < Round2 ( state . max_memory_usage / 1024 . 0 / 1024 . 0 / 1024 . 0 ) <nl> + < < " GB , total = " <nl> + < < Round2 ( max_memory_usage / 1024 . 0 / 1024 . 0 / 1024 . 0 ) <nl> + < < " GB , at the end : " < < state . memory_usage < < " B " ; <nl> + <nl> + VLOG ( 1 ) < < " Per - op execution time ( and memory usage at peak memory usage ) : " ; <nl> + / / Profile non - persistent op memory usage . <nl> + for ( const auto & node_port : state . mem_usage_snapshot_at_peak ) { <nl> + const auto * node = node_port . first ; <nl> + const auto port = node_port . second ; <nl> + op_to_memory [ node - > op ( ) ] + = <nl> + CalculateOutputSize ( node_map_ . at ( node ) . output_properties , port ) ; <nl> + } <nl> for ( const auto & op_cost_pair : state . op_to_cost ) { <nl> const auto & op = op_cost_pair . first ; <nl> const auto & cost = op_cost_pair . second . execution_time . count ( ) ; <nl> - if ( cost ) { / / Skip printing out zero - cost ops . <nl> - VLOG ( 1 ) < < " + " < < op < < " : " < < cost ; <nl> + const float mem_usage_gb = <nl> + Round2 ( op_to_memory [ op ] / 1024 . 0 / 1024 . 0 / 1024 . 0 ) ; <nl> + int64 op_mem_usage = op_to_memory . at ( op ) ; <nl> + const float mem_usage_percent = <nl> + max_memory_usage > 0 ? Round2 ( 100 . 0 * op_mem_usage / max_memory_usage ) <nl> + : 0 . 0 ; <nl> + if ( cost | | mem_usage_percent > 1 . 0 ) { <nl> + / / Print out only non - zero cost ops or ops with > 1 % memory usage . <nl> + VLOG ( 1 ) < < " + " < < op < < " : " < < cost < < " ( " < < mem_usage_gb <nl> + < < " GB [ " < < mem_usage_percent < < " % ] " <nl> + < < ( persistent_ops . count ( op ) > 0 ? " : persistent op ) " : " ) " ) ; <nl> } <nl> } <nl> if ( critical_path_costs . execution_time < = state . GetCurrTime ( ) ) { <nl> critical_path_costs = state . device_costs ; <nl> } <nl> mmm a / tensorflow / core / grappler / costs / virtual_scheduler . h <nl> ppp b / tensorflow / core / grappler / costs / virtual_scheduler . 
h <nl> namespace tensorflow { <nl> namespace grappler { <nl> <nl> struct NodeState { <nl> - std : : vector < const NodeDef * > inputs ; <nl> - std : : vector < const NodeDef * > outputs ; <nl> + / / A node ( i . e . , an op ) takes a set of input : port pairs and produces <nl> + / / a set of output ports . <nl> + <nl> + / / Cross references to input and output nodes from graphdef . <nl> + std : : vector < std : : pair < const NodeDef * , int > > inputs ; / / Input , port pairs . <nl> + / / List of output nodes ( a list of nodes that take this output port as input ) <nl> + / / keyed by port_num . Note that port_num - 1 is used for control dependency . <nl> + std : : unordered_map < int , std : : vector < const NodeDef * > > outputs ; <nl> + <nl> + / / Info from GraphProperties . <nl> + std : : vector < OpInfo : : TensorProperties > input_properties ; <nl> + std : : vector < OpInfo : : TensorProperties > output_properties ; <nl> + <nl> + / / Canonical device name used within VirtualScheduler . <nl> + string device_name ; <nl> + <nl> + / / States updated as nodes are scheduled . <nl> int num_inputs_ready ; <nl> - int num_outputs_executed ; <nl> + std : : unordered_map < int , int > num_outputs_executed ; <nl> Costs : : Duration time_ready ; <nl> Costs : : Duration time_scheduled ; <nl> Costs : : Duration time_finished ; <nl> - Costs : : Duration time_no_reference ; <nl> + / / Time when all the consumers are executed ( hence , no need to keep this <nl> + / / output in memory ) , keyed by port_num . <nl> + std : : unordered_map < int , Costs : : Duration > time_no_references ; <nl> + <nl> + / / Note that a node may have multiple output ports . The length of outputs , <nl> + / / num_outputs_executed , and time_no_references should be <nl> + / / identical when a NodeState is fully initialized . <nl> + / / They should be 1 + output_properties . size ( ) as we add [ - 1 ] for control <nl> + / / dependency . <nl> <nl> / / Node will be ready to be executed at time_ready , scheduled at <nl> / / time_scheduled , and finishes execution at time_finished . <nl> - / / Between time_scheduled and time_no_reference , the node ' s output tensor <nl> - / / needs to be on the device , using up device memory . <nl> + / / Each output port uses up memory space from time_scheduled to its <nl> + / / time_no_references . <nl> <nl> NodeState ( ) { <nl> num_inputs_ready = 0 ; <nl> - num_outputs_executed = 0 ; <nl> time_ready = Costs : : Duration : : max ( ) ; <nl> time_scheduled = Costs : : Duration : : max ( ) ; <nl> time_finished = Costs : : Duration : : max ( ) ; <nl> - time_no_reference = Costs : : Duration : : max ( ) ; <nl> + / / Note that num_outputs_executed and time_no_references are not initialized <nl> + / / here , since we don ' t know the size ( i . e . , # outputs for this node ) . <nl> } <nl> } ; <nl> <nl> struct DeviceState { <nl> + / / Nodes executed on this device in execution order . <nl> std : : vector < const NodeDef * > nodes_executed ; <nl> - Costs device_costs ; <nl> - std : : map < string , Costs > op_to_cost ; / / Per - op cost . <nl> <nl> - DeviceState ( ) { device_costs = Costs : : ZeroCosts ( ) ; } <nl> + / / Nodes currently allocated in memory : set of NodeDef * and port_num pairs <nl> + / / so that we can track which output of the node is in memory . <nl> + std : : set < std : : pair < const NodeDef * , int > > nodes_in_memory ; <nl> + <nl> + / / Nodes allocated in memory persistently : e . g . , Variables . 
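<nl> + / / ( Outputs of persistent nodes are excluded from memory_usage and <nl> + / / max_memory_usage below ; Summary ( ) profiles persistent memory separately <nl> + / / and adds it back when reporting the total . )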
<nl> + std : : set < std : : pair < const NodeDef * , int > > persistent_nodes ; <nl> + <nl> + / / Snapshot of nodes_in_memory , when memory usage is at peak . <nl> + / / Same as nodes_in_memory , it ' s a set of NodeDef * and port_num pairs . <nl> + std : : set < std : : pair < const NodeDef * , int > > mem_usage_snapshot_at_peak ; <nl> + <nl> Costs device_costs ; <nl> std : : map < string , Costs > op_to_cost ; / / Per - op cost . <nl> + std : : map < string , int64 > op_to_memory ; / / Per - op memory usage at peak usage . <nl> + int64 memory_usage ; <nl> + int64 max_memory_usage ; <nl> + <nl> + DeviceState ( ) { <nl> + device_costs = Costs : : ZeroCosts ( ) ; <nl> + memory_usage = 0 ; <nl> + max_memory_usage = 0 ; <nl> + } <nl> <nl> Costs : : Duration GetCurrTime ( ) const { return device_costs . execution_time ; } <nl> } ; <nl> class VirtualScheduler { <nl> const string & default_device_type , Cluster * cluster , <nl> VirtualPlacer * placer ) ; <nl> <nl> + / / Initializes NodeState and DeviceState from grappler_item_ and <nl> + / / graph_properties_ . <nl> Status Init ( ) ; <nl> <nl> NodeInfo GetCurrNodeInfo ( ) const ; <nl> + <nl> + / / Returns true if there is any node left to be scheduled . <nl> bool MarkCurrNodeExecuted ( const Costs & node_costs ) ; <nl> <nl> + / / Prints out summary of execution ( timing , memory usage , etc . ) . <nl> Costs Summary ( ) const ; <nl> <nl> + protected : <nl> + / / GetDeviceStates and GetNodeStates are currently for testing purposes only . <nl> + / / They retrieve detailed scheduling results . <nl> + const std : : unordered_map < string , DeviceState > & GetDeviceStates ( ) const { <nl> + return device_ ; <nl> + } <nl> + const std : : unordered_map < const NodeDef * , NodeState > & GetNodeStates ( ) const { <nl> + return node_map_ ; <nl> + } <nl> + <nl> + / / Returns the size of output at port_num ( unit : bytes ) . A special case is <nl> + / / port_num - 1 , which is for control dependency and assumed to be 4 bytes . <nl> + int64 CalculateOutputSize ( <nl> + const std : : vector < OpInfo : : TensorProperties > & output_properties , <nl> + const int port_num ) const ; <nl> + <nl> private : <nl> - const string kSend = " _Send " ; <nl> - const string kRecv = " _Recv " ; <nl> + / / Constants . <nl> const string kAttrInputSrc = " input_source_ " ; <nl> const string kAttrSrcDevice = " src_device_ " ; <nl> const string kAttrDstDevice = " dst_device_ " ; <nl> const string kChannelDevice = " Channel " ; <nl> <nl> - void MaybeUpdateInputProperties ( <nl> - const NodeDef * node , std : : vector < OpInfo : : TensorProperties > * inputs ) const ; <nl> + / / Methods called from Init ( ) . Fail if initialized_ is set . <nl> + void MaybeUpdateInputOutput ( const NodeDef * node ) ; <nl> NodeState & GetNodeStateOrCreateIt ( const NodeDef * node ) ; <nl> - std : : pair < const NodeDef * , const NodeDef * > TransferNode ( <nl> + std : : pair < const NodeDef * , const NodeDef * > CreateSendRecv ( <nl> const NodeDef * from , const NodeDef * to , const string & input_name ) ; <nl> string DeviceName ( const NodeDef * node ) const ; <nl> string ChannelDeviceName ( const NodeDef * from , const NodeDef * to ) const ; <nl> + <nl> + / / Helper methods . 
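<nl> + / / FindOrCreateZero returns the per - op Costs entry for op_name , inserting a <nl> + / / ZeroCosts entry on first use ; Round2 rounds to two decimal places for log <nl> + / / output ; IsPersistentNode currently treats only Variables as persistent .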
<nl> Costs & FindOrCreateZero ( const string & op_name , <nl> std : : map < string , Costs > * op_cost ) ; <nl> + float Round2 ( const float x ) const ; <nl> + bool IsPersistentNode ( const NodeDef * node ) const ; <nl> <nl> - bool PopCurrNode ( ) ; <nl> - bool IsSendOp ( const NodeDef * node ) const ; <nl> - bool IsRecvOp ( const NodeDef * node ) const ; <nl> - <nl> - GraphProperties graph_properties_ ; <nl> - std : : map < string , int > op_counts_ ; / / Op counts with key with input shape . <nl> - std : : map < string , int > op_costs_ ; / / Individual op costs ( with input shapes ) . <nl> - Costs graph_costs_ ; / / Graph cost . <nl> - std : : map < string , Costs > op_to_cost_ ; / / Per - op cost . <nl> + / / Scheduler states : <nl> std : : unique_ptr < ReadyNodeManager > ready_nodes_ ; <nl> std : : unordered_map < const NodeDef * , NodeState > node_map_ ; <nl> std : : unordered_map < string , DeviceState > device_ ; <nl> + <nl> / / Pool of NodeDefs for SendRecv and Identity ops created . <nl> std : : vector < std : : unique_ptr < NodeDef > > additional_nodes_ ; <nl> - / / Cache of ops transferred to another device . <nl> + / / Cache of nodes transferred to another device . <nl> std : : unordered_map < const NodeDef * , std : : unordered_map < string , const NodeDef * > > <nl> - cached_ops_ ; <nl> + cached_recv_nodes_ ; <nl> + <nl> + / / Stats : <nl> + std : : map < string , int > op_counts_ ; / / Op counts , keyed by op name with input shapes . <nl> + std : : map < string , int > op_costs_ ; / / Individual op costs ( with input shapes ) . <nl> + Costs graph_costs_ ; / / Graph cost . <nl> + std : : map < string , Costs > op_to_cost_ ; / / Per - op cost . <nl> + <nl> + / / Auxiliary data structures for constructing NodeState and DeviceState . <nl> GraphProperties graph_properties_ ; <nl> Cluster * cluster_ ; / / Not owned . <nl> const GrapplerItem * grappler_item_ ; / / Not owned . <nl> bool use_static_shapes_ ; <nl> mmm a / tensorflow / core / grappler / costs / virtual_scheduler_test . cc <nl> ppp b / tensorflow / core / grappler / costs / virtual_scheduler_test . cc <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> namespace grappler { <nl> + / / Class for testing virtual scheduler . <nl> + class TestVirtualScheduler : public VirtualScheduler { <nl> + public : <nl> + TestVirtualScheduler ( const GrapplerItem * grappler_item , <nl> + const bool use_static_shapes , <nl> + const string & default_device_type , Cluster * cluster , <nl> + VirtualPlacer * placer ) <nl> + : VirtualScheduler ( grappler_item , use_static_shapes , default_device_type , <nl> + cluster , placer ) { } <nl> + <nl> + FRIEND_TEST ( VirtualSchedulerTest , CalculateOutputSize ) ; <nl> + FRIEND_TEST ( VirtualSchedulerTest , MemoryUsage ) ; <nl> + FRIEND_TEST ( VirtualSchedulerTest , ControlDependency ) ; <nl> + FRIEND_TEST ( VirtualSchedulerTest , ComplexDependency ) ; <nl> + FRIEND_TEST ( VirtualSchedulerTest , Variable ) ; <nl> + } ; <nl> <nl> class VirtualSchedulerTest : public : : testing : : Test { <nl> protected : <nl> + const string kCPU0 = " / job : localhost / replica : 0 / task : 0 / cpu : 0 " ; <nl> + <nl> void SetUp ( ) override { <nl> / / Initializes cluster_ and placer_ . <nl> std : : unordered_map < string , DeviceProperties > devices ; <nl> DeviceProperties cpu_device ; <nl> cpu_device . set_type ( " CPU " ) ; <nl> - devices [ " / job : localhost / replica : 0 / task : 0 / cpu : 0 " ] = cpu_device ; <nl> - DeviceProperties gpu_device ; <nl> - gpu_device . 
set_type ( " GPU " ) ; <nl> - devices [ " / job : localhost / replica : 0 / task : 0 / gpu : 0 " ] = gpu_device ; <nl> + devices [ kCPU0 ] = cpu_device ; <nl> <nl> cluster_ . reset ( new VirtualCluster ( devices ) ) ; <nl> placer_ . reset ( new VirtualPlacer ( cluster_ . get ( ) ) ) ; <nl> } <nl> <nl> - void CreateSchedulerWithConv2Ds ( ) { <nl> - / / Create a scheduler with a simple graph : 3 Conv2Ds , where only 2 are in <nl> - / / fetch nodes . <nl> - const int bs = 4 ; <nl> - const int width = 10 ; <nl> - const int height = 10 ; <nl> - const int depth_in = 8 ; <nl> - const int kernel = 3 ; <nl> - const int depth_out = 16 ; <nl> - <nl> - tensorflow : : Scope s = tensorflow : : Scope : : NewRootScope ( ) ; <nl> + / / Three Conv2Ds with only two in fetch nodes . <nl> + void CreateGrapplerItemWithConv2Ds ( ) { <nl> + tensorflow : : Scope s = tensorflow : : Scope : : NewRootScope ( ) . WithDevice ( kCPU0 ) ; <nl> auto x = tensorflow : : ops : : RandomUniform ( <nl> - s . WithOpName ( " x " ) , { bs , width , height , depth_in } , DT_FLOAT ) ; <nl> + s . WithOpName ( " x " ) , { batch_size_ , width_ , height_ , depth_in_ } , DT_FLOAT ) ; <nl> auto y = tensorflow : : ops : : RandomUniform ( <nl> - s . WithOpName ( " y " ) , { bs , width , height , depth_in } , DT_FLOAT ) ; <nl> + s . WithOpName ( " y " ) , { batch_size_ , width_ , height_ , depth_in_ } , DT_FLOAT ) ; <nl> auto z = tensorflow : : ops : : RandomUniform ( <nl> - s . WithOpName ( " z " ) , { bs , width , height , depth_in } , DT_FLOAT ) ; <nl> + s . WithOpName ( " z " ) , { batch_size_ , width_ , height_ , depth_in_ } , DT_FLOAT ) ; <nl> auto f = tensorflow : : ops : : RandomUniform ( <nl> - s . WithOpName ( " f " ) , { kernel , kernel , depth_in , depth_out } , DT_FLOAT ) ; <nl> + s . WithOpName ( " f " ) , { kernel_ , kernel_ , depth_in_ , depth_out_ } , DT_FLOAT ) ; <nl> std : : vector < int > strides = { 1 , 1 , 1 , 1 } ; <nl> auto c0 = <nl> tensorflow : : ops : : Conv2D ( s . WithOpName ( " c0 " ) , x , f , strides , " SAME " ) ; <nl> class VirtualSchedulerTest : public : : testing : : Test { <nl> tensorflow : : ops : : Conv2D ( s . WithOpName ( " c2 " ) , z , f , strides , " SAME " ) ; <nl> GraphDef def ; <nl> TF_CHECK_OK ( s . ToGraphDef ( & def ) ) ; <nl> - LOG ( INFO ) < < def . DebugString ( ) ; <nl> <nl> grappler_item_ . reset ( new GrapplerItem ) ; <nl> grappler_item_ - > id = " test_conv2d_graph " ; <nl> grappler_item_ - > graph = def ; <nl> grappler_item_ - > fetch = { " c0 " , " c1 " } ; <nl> <nl> - scheduler_ . reset ( new VirtualScheduler ( <nl> + dependency_ [ " c0 " ] = { " x " , " f " } ; <nl> + dependency_ [ " c1 " ] = { " y " , " f " } ; <nl> + } <nl> + <nl> + / / A Conv2D with a variable . <nl> + void CreateGrapplerItemWithConv2DAndVariable ( ) { <nl> + tensorflow : : Scope s = tensorflow : : Scope : : NewRootScope ( ) . WithDevice ( kCPU0 ) ; <nl> + auto x = tensorflow : : ops : : RandomUniform ( <nl> + s . WithOpName ( " x " ) , { batch_size_ , width_ , height_ , depth_in_ } , DT_FLOAT ) ; <nl> + auto f = tensorflow : : ops : : Variable ( <nl> + s . WithOpName ( " f " ) , { kernel_ , kernel_ , depth_in_ , depth_out_ } , DT_FLOAT ) ; <nl> + std : : vector < int > strides = { 1 , 1 , 1 , 1 } ; <nl> + auto y = tensorflow : : ops : : Conv2D ( s . WithOpName ( " y " ) , x , f , strides , " SAME " ) ; <nl> + GraphDef def ; <nl> + TF_CHECK_OK ( s . ToGraphDef ( & def ) ) ; <nl> + <nl> + grappler_item_ . 
reset ( new GrapplerItem ) ; <nl> + grappler_item_ - > id = " test_conv2d_var_graph " ; <nl> + grappler_item_ - > graph = def ; <nl> + grappler_item_ - > fetch = { " y " } ; <nl> + <nl> + dependency_ [ " y " ] = { " x " , " f " } ; <nl> + } <nl> + <nl> + / / AddN that takes 4 tensors with 10x10x10x10 . <nl> + void CreateGrapplerItemWithAddN ( ) { <nl> + tensorflow : : Scope s = tensorflow : : Scope : : NewRootScope ( ) . WithDevice ( kCPU0 ) ; <nl> + auto x = tensorflow : : ops : : RandomUniform ( s . WithOpName ( " x " ) , { 10 , 10 , 10 , 10 } , <nl> + DT_FLOAT ) ; <nl> + auto y = tensorflow : : ops : : RandomUniform ( s . WithOpName ( " y " ) , { 10 , 10 , 10 , 10 } , <nl> + DT_FLOAT ) ; <nl> + auto z = tensorflow : : ops : : RandomUniform ( s . WithOpName ( " z " ) , { 10 , 10 , 10 , 10 } , <nl> + DT_FLOAT ) ; <nl> + auto w = tensorflow : : ops : : RandomUniform ( s . WithOpName ( " w " ) , { 10 , 10 , 10 , 10 } , <nl> + DT_FLOAT ) ; <nl> + tensorflow : : OutputList input_tensors = { x , y , z , w } ; <nl> + auto out = tensorflow : : ops : : AddN ( s . WithOpName ( " out " ) , input_tensors ) ; <nl> + GraphDef def ; <nl> + TF_CHECK_OK ( s . ToGraphDef ( & def ) ) ; <nl> + <nl> + grappler_item_ . reset ( new GrapplerItem ) ; <nl> + grappler_item_ - > id = " test_addn_graph " ; <nl> + grappler_item_ - > graph = def ; <nl> + grappler_item_ - > fetch = { " out " } ; <nl> + <nl> + dependency_ [ " out " ] = { " x " , " y " , " z " , " w " } ; <nl> + } <nl> + <nl> + / / NoOp that takes 7 NoOps as control dependency . <nl> + void CreateGrapplerItemWithControlDependency ( ) { <nl> + tensorflow : : Scope s = tensorflow : : Scope : : NewRootScope ( ) . WithDevice ( kCPU0 ) ; <nl> + std : : vector < string > input_noop_names = { " x " , " y " , " z " , " w " , " u " , " v " , " t " } ; <nl> + std : : vector < tensorflow : : Operation > input_tensors ; <nl> + for ( const auto & input : input_noop_names ) { <nl> + auto x = tensorflow : : ops : : NoOp ( s . WithOpName ( input ) ) ; <nl> + input_tensors . push_back ( x . operation ) ; <nl> + } <nl> + auto out = tensorflow : : ops : : NoOp ( <nl> + s . WithControlDependencies ( input_tensors ) . WithOpName ( " out " ) ) ; <nl> + GraphDef def ; <nl> + TF_CHECK_OK ( s . ToGraphDef ( & def ) ) ; <nl> + <nl> + grappler_item_ . reset ( new GrapplerItem ) ; <nl> + grappler_item_ - > id = " test_control_dependency_graph " ; <nl> + grappler_item_ - > graph = def ; <nl> + grappler_item_ - > fetch = { " out " } ; <nl> + <nl> + dependency_ [ " out " ] = input_noop_names ; <nl> + } <nl> + <nl> + / / FusedBN [ an op with multiple outputs ] with multiple consumers ( including <nl> + / / control dependency ) . <nl> + void CreateGrapplerItemWithBatchNorm ( ) { <nl> + tensorflow : : Scope s = tensorflow : : Scope : : NewRootScope ( ) . WithDevice ( kCPU0 ) ; <nl> + auto x = tensorflow : : ops : : RandomUniform ( <nl> + s . WithOpName ( " x " ) , { batch_size_ , width_ , height_ , depth_in_ } , DT_FLOAT ) ; <nl> + auto scale = tensorflow : : ops : : RandomUniform ( s . WithOpName ( " scale " ) , <nl> + { depth_in_ } , DT_FLOAT ) ; <nl> + auto offset = tensorflow : : ops : : RandomUniform ( s . WithOpName ( " offset " ) , <nl> + { depth_in_ } , DT_FLOAT ) ; <nl> + auto mean = <nl> + tensorflow : : ops : : RandomUniform ( s . WithOpName ( " mean " ) , { 0 } , DT_FLOAT ) ; <nl> + auto var = <nl> + tensorflow : : ops : : RandomUniform ( s . WithOpName ( " var " ) , { 0 } , DT_FLOAT ) ; <nl> + <nl> + auto batch_norm = tensorflow : : ops : : FusedBatchNorm ( <nl> + s . 
WithOpName ( " bn " ) , x , scale , offset , mean , var , <nl> + ops : : FusedBatchNorm : : IsTraining ( true ) . Epsilon ( 0 . 1f ) ) ; <nl> + auto y = batch_norm . y ; <nl> + auto batch_mean = batch_norm . batch_mean ; <nl> + auto batch_var = batch_norm . batch_variance ; <nl> + <nl> + auto z1 = tensorflow : : ops : : Add ( s . WithOpName ( " z1 " ) , x , y ) ; <nl> + auto z2 = tensorflow : : ops : : Add ( s . WithOpName ( " z2 " ) , batch_var , batch_var ) ; <nl> + auto z3 = tensorflow : : ops : : Add ( s . WithOpName ( " z3 " ) , batch_var , batch_var ) ; <nl> + std : : vector < tensorflow : : Operation > input_tensors = { <nl> + batch_mean . op ( ) , z1 . z . op ( ) , z2 . z . op ( ) , z3 . z . op ( ) , <nl> + } ; <nl> + auto z4 = tensorflow : : ops : : NoOp ( <nl> + s . WithControlDependencies ( batch_var ) . WithOpName ( " z4 " ) ) ; <nl> + <nl> + GraphDef def ; <nl> + TF_CHECK_OK ( s . ToGraphDef ( & def ) ) ; <nl> + <nl> + grappler_item_ . reset ( new GrapplerItem ) ; <nl> + grappler_item_ - > id = " test_complex_dependency_graph " ; <nl> + grappler_item_ - > graph = def ; <nl> + grappler_item_ - > fetch = { " z1 " , " z2 " , " z3 " , " z4 " } ; <nl> + <nl> + dependency_ [ " bn " ] = { " x " , " scale " , " offset " , " mean " , " var " } ; <nl> + dependency_ [ " z1 " ] = { " x " , " bn " } ; <nl> + dependency_ [ " z2 " ] = { " bn " } ; <nl> + dependency_ [ " z3 " ] = { " bn " } ; <nl> + dependency_ [ " z4 " ] = { " bn " } ; <nl> + } <nl> + <nl> + / / Call this after creating grappler_item_ and setting up dependency_ . <nl> + void InitScheduler ( ) { <nl> + scheduler_ . reset ( new TestVirtualScheduler ( <nl> grappler_item_ . get ( ) , true / * use_static_shapes * / , <nl> " CPU " / * default_device_type * / , cluster_ . get ( ) , placer_ . get ( ) ) ) ; <nl> TF_CHECK_OK ( scheduler_ - > Init ( ) ) ; <nl> } <nl> <nl> + / / Call this after init scheduler_ . Scheduler stops after executing <nl> + / / target_node . <nl> + std : : unordered_map < string , NodeInfo > RunScheduler ( const string & target_node ) { <nl> + Costs zero_costs = Costs : : ZeroCosts ( ) ; <nl> + std : : unordered_map < string , NodeInfo > ops_executed ; <nl> + bool more_nodes = true ; <nl> + do { <nl> + NodeInfo node_info = scheduler_ - > GetCurrNodeInfo ( ) ; <nl> + ops_executed [ node_info . name ] = node_info ; <nl> + <nl> + / / Check scheduling order . <nl> + auto it = dependency_ . find ( node_info . name ) ; <nl> + if ( it ! = dependency_ . end ( ) ) { <nl> + for ( const auto & preceding_node : it - > second ) { <nl> + EXPECT_GT ( ops_executed . count ( preceding_node ) , 0 ) ; <nl> + } <nl> + } <nl> + more_nodes = scheduler_ - > MarkCurrNodeExecuted ( zero_costs ) ; <nl> + <nl> + if ( node_info . name = = target_node ) { <nl> + / / Scheduler has the state after executing the target node . <nl> + break ; <nl> + } <nl> + } while ( more_nodes ) ; <nl> + return ops_executed ; <nl> + } <nl> + <nl> + / / Helper method for validating a vector . <nl> + template < typename T > <nl> + void ExpectVectorEq ( const std : : vector < T > & expected , <nl> + const std : : vector < T > & test_elements ) { <nl> + / / Set of expected elements for an easy comparison . <nl> + std : : set < T > expected_set ( expected . begin ( ) , expected . end ( ) ) ; <nl> + for ( const auto & element : test_elements ) { <nl> + EXPECT_GT ( expected_set . count ( element ) , 0 ) ; <nl> + } <nl> + EXPECT_EQ ( expected . size ( ) , test_elements . size ( ) ) ; <nl> + } <nl> + <nl> + / / Helper method that checks the name of nodes . 
<nl> + void ValidateNodeDefs ( const std : : vector < string > & expected , <nl> + const std : : vector < const NodeDef * > & node_defs ) { <nl> + std : : vector < string > node_names ; <nl> + std : : transform ( node_defs . begin ( ) , node_defs . end ( ) , <nl> + std : : back_inserter ( node_names ) , <nl> + [ ] ( const NodeDef * node ) { return node - > name ( ) ; } ) ; <nl> + ExpectVectorEq ( expected , node_names ) ; <nl> + } <nl> + <nl> + / / Helper method for validating a set . <nl> + template < typename T > <nl> + void ExpectSetEq ( const std : : set < T > & expected , <nl> + const std : : set < T > & test_elements ) { <nl> + for ( const auto & element : test_elements ) { <nl> + EXPECT_GT ( expected . count ( element ) , 0 ) ; <nl> + } <nl> + EXPECT_EQ ( expected . size ( ) , test_elements . size ( ) ) ; <nl> + } <nl> + <nl> + / / Helper method that checks name - port pairs . <nl> + void ValidateMemoryUsageSnapshot ( <nl> + const std : : vector < string > & expected_names , const int port_num_expected , <nl> + const std : : set < std : : pair < const NodeDef * , int > > & mem_usage_snapshot ) { <nl> + std : : set < std : : pair < string , int > > nodes_at_peak_mem_usage ; <nl> + std : : transform ( <nl> + mem_usage_snapshot . begin ( ) , mem_usage_snapshot . end ( ) , <nl> + std : : inserter ( nodes_at_peak_mem_usage , nodes_at_peak_mem_usage . begin ( ) ) , <nl> + [ ] ( const std : : pair < const NodeDef * , int > & node_port ) { <nl> + return std : : make_pair ( node_port . first - > name ( ) , node_port . second ) ; <nl> + } ) ; <nl> + std : : set < std : : pair < string , int > > expected ; <nl> + std : : transform ( expected_names . begin ( ) , expected_names . end ( ) , <nl> + std : : inserter ( expected , expected . begin ( ) ) , <nl> + [ port_num_expected ] ( const string & name ) { <nl> + return std : : make_pair ( name , port_num_expected ) ; <nl> + } ) ; <nl> + ExpectSetEq ( expected , nodes_at_peak_mem_usage ) ; <nl> + } <nl> + <nl> + / / Helper method for converting a shape vector to TensorProperty . <nl> + OpInfo : : TensorProperties ShapeToTensorProperty ( <nl> + const std : : vector < int > shape , const DataType & data_type ) const { <nl> + OpInfo : : TensorProperties tensor_property ; <nl> + tensor_property . set_dtype ( data_type ) ; <nl> + for ( const auto & x : shape ) { <nl> + tensor_property . mutable_shape ( ) - > add_dim ( ) - > set_size ( x ) ; <nl> + } <nl> + return tensor_property ; <nl> + } <nl> + <nl> / / SetUp ( ) inits cluster_ and placer_ . <nl> std : : unique_ptr < VirtualCluster > cluster_ ; <nl> std : : unique_ptr < VirtualPlacer > placer_ ; <nl> <nl> / / grappler_item_ and scheduler_ will be initialized differently for each test <nl> - / / case <nl> + / / case . <nl> std : : unique_ptr < GrapplerItem > grappler_item_ ; <nl> - std : : unique_ptr < VirtualScheduler > scheduler_ ; <nl> + std : : unique_ptr < TestVirtualScheduler > scheduler_ ; <nl> + / / Node name - > its preceding nodes map for testing scheduling order . <nl> + std : : unordered_map < string , std : : vector < string > > dependency_ ; <nl> + <nl> + / / Shared params for Conv2D related graphs : <nl> + const int batch_size_ = 4 ; <nl> + const int width_ = 10 ; <nl> + const int height_ = 10 ; <nl> + const int depth_in_ = 8 ; <nl> + const int kernel_ = 3 ; <nl> + const int depth_out_ = 16 ; <nl> } ; <nl> <nl> TEST_F ( VirtualSchedulerTest , InitAndBasicScheduling ) { <nl> - CreateSchedulerWithConv2Ds ( ) ; / / init scheduler_ . 
<nl> - <nl> - Costs zero_costs = Costs : : ZeroCosts ( ) ; <nl> - std : : unordered_map < string , NodeInfo > ops_executed ; <nl> - do { <nl> - NodeInfo node_info = scheduler_ - > GetCurrNodeInfo ( ) ; <nl> - ops_executed [ node_info . name ] = node_info ; <nl> - <nl> - / / Check scheduling order : x and f before c0 , and y and f before c1 . <nl> - if ( node_info . name = = " c0 " ) { <nl> - EXPECT_GT ( ops_executed . count ( " x " ) , 0 ) ; <nl> - EXPECT_GT ( ops_executed . count ( " f " ) , 0 ) ; <nl> - } else if ( node_info . name = = " c1 " ) { <nl> - EXPECT_GT ( ops_executed . count ( " y " ) , 0 ) ; <nl> - EXPECT_GT ( ops_executed . count ( " f " ) , 0 ) ; <nl> - } <nl> - } while ( scheduler_ - > MarkCurrNodeExecuted ( zero_costs ) ) ; <nl> + / / Init . <nl> + CreateGrapplerItemWithConv2Ds ( ) ; <nl> + InitScheduler ( ) ; <nl> + <nl> + / / Run the scheduler . <nl> + auto ops_executed = RunScheduler ( " " ) ; / / Run all the nodes . <nl> <nl> / / [ const and rand ] * ( x , y , f ) , and c0 and c1 . c2 and z shouldn ' t be <nl> / / executed . <nl> TEST_F ( VirtualSchedulerTest , InitAndBasicScheduling ) { <nl> EXPECT_EQ ( 2 , ops_executed [ " c0 " ] . op_info . inputs_size ( ) ) ; <nl> EXPECT_EQ ( 2 , ops_executed [ " c1 " ] . op_info . inputs_size ( ) ) ; <nl> } <nl> + <nl> + TEST_F ( VirtualSchedulerTest , CalculateOutputSize ) { <nl> + / / Init . <nl> + CreateGrapplerItemWithAddN ( ) ; <nl> + InitScheduler ( ) ; <nl> + <nl> + / / Create a set of tensor properties . <nl> + std : : vector < OpInfo : : TensorProperties > output ; <nl> + output . push_back ( ShapeToTensorProperty ( { 4 , 4 } , DT_FLOAT ) ) ; / / 0 <nl> + output . push_back ( ShapeToTensorProperty ( { 1 } , DT_FLOAT ) ) ; / / 1 <nl> + output . push_back ( ShapeToTensorProperty ( { 10 , 10 , 10 } , DT_HALF ) ) ; / / 2 <nl> + output . push_back ( ShapeToTensorProperty ( { 100 , 7 , 8 , 99 } , DT_FLOAT ) ) ; / / 3 <nl> + output . push_back ( ShapeToTensorProperty ( { - 1 , 7 , 8 , 99 } , DT_FLOAT ) ) ; / / 4 <nl> + output . push_back ( ShapeToTensorProperty ( { - 1 , 7 , - 1 , 99 } , DT_FLOAT ) ) ; / / 5 <nl> + <nl> + / / port_num - 1 is for control dependency : hard - coded 4B . <nl> + EXPECT_EQ ( 4 , scheduler_ - > CalculateOutputSize ( output , - 1 ) ) ; <nl> + <nl> + / / Test valid outputs . <nl> + EXPECT_EQ ( 4 * 4 * 4 , scheduler_ - > CalculateOutputSize ( output , 0 ) ) ; <nl> + EXPECT_EQ ( 4 * 1 , scheduler_ - > CalculateOutputSize ( output , 1 ) ) ; <nl> + EXPECT_EQ ( 2 * 10 * 10 * 10 , scheduler_ - > CalculateOutputSize ( output , 2 ) ) ; <nl> + EXPECT_EQ ( 4 * 100 * 7 * 8 * 99 , scheduler_ - > CalculateOutputSize ( output , 3 ) ) ; <nl> + <nl> + / / Any unknown shape ( - 1 ) shall yield zero output size . <nl> + EXPECT_EQ ( 0 , scheduler_ - > CalculateOutputSize ( output , 4 ) ) ; <nl> + EXPECT_EQ ( 0 , scheduler_ - > CalculateOutputSize ( output , 5 ) ) ; <nl> + <nl> + / / Invalid port_num ( though it may be an error ) shall yield zero <nl> + / / output size . <nl> + EXPECT_EQ ( 0 , scheduler_ - > CalculateOutputSize ( output , 6 ) ) ; <nl> + } <nl> + <nl> + TEST_F ( VirtualSchedulerTest , MemoryUsage ) { <nl> + / / Init . <nl> + CreateGrapplerItemWithAddN ( ) ; <nl> + InitScheduler ( ) ; <nl> + <nl> + / / Run the scheduler . <nl> + RunScheduler ( " " ) ; <nl> + <nl> + const auto & device_states = scheduler_ - > GetDeviceStates ( ) ; <nl> + const auto & cpu_state = device_states . 
at ( kCPU0 ) ; <nl> + <nl> + / / out node adds 4 tensors , each with 10x10x10x10 , so the peak memory usage <nl> + / / is 4 x the input tensor size while executing the out node . <nl> + int64 one_input_node_size = 4 * 10 * 10 * 10 * 10 ; <nl> + const std : : vector < string > expected_names = { " x " , " y " , " z " , " w " } ; <nl> + EXPECT_EQ ( expected_names . size ( ) * one_input_node_size , <nl> + cpu_state . max_memory_usage ) ; <nl> + ValidateMemoryUsageSnapshot ( expected_names , 0 / * port_num_expected * / , <nl> + cpu_state . mem_usage_snapshot_at_peak ) ; <nl> + } <nl> + <nl> + TEST_F ( VirtualSchedulerTest , ControlDependency ) { <nl> + / / Init . <nl> + CreateGrapplerItemWithControlDependency ( ) ; <nl> + InitScheduler ( ) ; <nl> + <nl> + / / Run the scheduler . <nl> + RunScheduler ( " " ) ; <nl> + <nl> + const auto & device_states = scheduler_ - > GetDeviceStates ( ) ; <nl> + const auto & cpu_state = device_states . at ( kCPU0 ) ; <nl> + <nl> + / / The graph has a NoOp that takes control dependencies from 7 NoOps . The peak <nl> + / / memory usage is when executing the final NoOp . <nl> + int64 one_input_node_size = 4 ; / / control dependency <nl> + const std : : vector < string > expected_names = { " x " , " y " , " z " , " w " , <nl> + " u " , " v " , " t " } ; <nl> + EXPECT_EQ ( expected_names . size ( ) * one_input_node_size , <nl> + cpu_state . max_memory_usage ) ; <nl> + ValidateMemoryUsageSnapshot ( expected_names , - 1 / * port_num_expected * / , <nl> + cpu_state . mem_usage_snapshot_at_peak ) ; <nl> + } <nl> + <nl> + TEST_F ( VirtualSchedulerTest , ComplexDependency ) { <nl> + / / Init . <nl> + CreateGrapplerItemWithBatchNorm ( ) ; <nl> + InitScheduler ( ) ; <nl> + <nl> + / / Run the scheduler . <nl> + RunScheduler ( " bn " ) ; <nl> + <nl> + const auto & device_states = scheduler_ - > GetDeviceStates ( ) ; <nl> + const auto & cpu_state = device_states . at ( kCPU0 ) ; <nl> + <nl> + / / The graph is <nl> + / / bn = FusedBatchNorm ( x , scale , offset , mean , var ) <nl> + / / z1 = bn . y + x <nl> + / / z2 = bn . var + bn . var <nl> + / / z3 = bn . var + bn . var <nl> + / / z4 = control dependency from bn . <nl> + / / Note that bn . mean doesn ' t have any consumer . <nl> + const int x_size = batch_size_ * width_ * height_ * depth_in_ ; <nl> + int64 expected_size = <nl> + 4 * ( 2 * x_size / * x and bn . y * / + depth_in_ / * bn . var * / + <nl> + 1 / * control dependency * / ) ; <nl> + EXPECT_EQ ( expected_size , cpu_state . memory_usage ) ; <nl> + <nl> + / / Nodes currently in memory : bn ' s port - 1 , 0 , and 2 , and x ' s port 0 . <nl> + std : : set < std : : pair < string , int > > nodes_in_memory ; <nl> + std : : transform ( <nl> + cpu_state . nodes_in_memory . begin ( ) , cpu_state . nodes_in_memory . end ( ) , <nl> + std : : inserter ( nodes_in_memory , nodes_in_memory . begin ( ) ) , <nl> + [ ] ( const std : : pair < const NodeDef * , int > & node_port ) { <nl> + return std : : make_pair ( node_port . first - > name ( ) , node_port . 
second ) ; <nl> + } ) ; <nl> + std : : set < std : : pair < string , int > > expected = { <nl> + std : : make_pair ( " bn " , - 1 ) , std : : make_pair ( " bn " , 0 ) , <nl> + std : : make_pair ( " bn " , 2 ) , std : : make_pair ( " x " , 0 ) , <nl> + } ; <nl> + ExpectSetEq ( expected , nodes_in_memory ) ; <nl> + <nl> + const auto & node_states = scheduler_ - > GetNodeStates ( ) ; <nl> + const NodeState * bn_node = nullptr ; <nl> + const NodeState * x_node = nullptr ; <nl> + for ( const auto & nodedef_node_state : node_states ) { <nl> + const NodeDef * node = nodedef_node_state . first ; <nl> + const NodeState & node_state = nodedef_node_state . second ; <nl> + if ( node - > name ( ) = = " bn " ) { <nl> + bn_node = & node_state ; <nl> + } <nl> + if ( node - > name ( ) = = " x " ) { <nl> + x_node = & node_state ; <nl> + } <nl> + } <nl> + CHECK_NOTNULL ( bn_node ) ; <nl> + CHECK_NOTNULL ( x_node ) ; <nl> + <nl> + ValidateNodeDefs ( { " bn " , " z1 " } , x_node - > outputs . at ( 0 ) ) ; <nl> + ValidateNodeDefs ( { " z4 " } , bn_node - > outputs . at ( - 1 ) ) ; <nl> + ValidateNodeDefs ( { " z1 " } , bn_node - > outputs . at ( 0 ) ) ; <nl> + / / z2 and z3 are bn . var + bn . var , so they appear twice in bn ' s output port 2 . <nl> + ValidateNodeDefs ( { " z2 " , " z3 " , " z2 " , " z3 " } , bn_node - > outputs . at ( 2 ) ) ; <nl> + } <nl> + <nl> + TEST_F ( VirtualSchedulerTest , Variable ) { <nl> + / / Init . <nl> + CreateGrapplerItemWithConv2DAndVariable ( ) ; <nl> + InitScheduler ( ) ; <nl> + <nl> + / / Run the scheduler . <nl> + RunScheduler ( " " ) ; <nl> + <nl> + const auto & device_states = scheduler_ - > GetDeviceStates ( ) ; <nl> + const auto & cpu_state = device_states . at ( kCPU0 ) ; <nl> + <nl> + / / There is one Conv2D that takes x and f , but f is variable , so it should be <nl> + / / in persistent nodes . <nl> + / / f is variable . <nl> + ValidateMemoryUsageSnapshot ( { " f " } , 0 / * port_num_expected * / , <nl> + cpu_state . persistent_nodes ) ; <nl> + / / Only x in peak memory usage snapshot . <nl> + ValidateMemoryUsageSnapshot ( { " x " } , 0 / * port_num_expected * / , <nl> + cpu_state . mem_usage_snapshot_at_peak ) ; <nl> + } <nl> } / / end namespace grappler <nl> } / / end namespace tensorflow <nl> mmm a / tensorflow / core / grappler / grappler_item_builder . cc <nl> ppp b / tensorflow / core / grappler / grappler_item_builder . cc <nl> limitations under the License . <nl> # include " tensorflow / core / framework / function . pb . h " <nl> # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / framework / op . h " <nl> - # include " tensorflow / core / framework / op_def . pb . h " <nl> # include " tensorflow / core / framework / types . pb . h " <nl> # include " tensorflow / core / framework / variable . pb . h " <nl> # include " tensorflow / core / graph / graph_constructor . h " <nl> mmm a / tensorflow / core / grappler / inputs / utils_test . cc <nl> ppp b / tensorflow / core / grappler / inputs / utils_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / lib / io / path . h " <nl> # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / core / platform / test . h " <nl> - # include " tensorflow / core / protobuf / meta_graph . pb . h " <nl> <nl> namespace tensorflow { <nl> namespace grappler { <nl> mmm a / tensorflow / core / grappler / op_types . cc <nl> ppp b / tensorflow / core / grappler / op_types . 
cc <nl> bool IsDequeueOp ( const NodeDef & node ) { <nl> op = = " QueueDequeueUpToV2 " | | op = = " QueueDequeueUpTo " ; <nl> } <nl> <nl> + bool IsIdentity ( const NodeDef & node ) { <nl> + const auto & op = node . op ( ) ; <nl> + return op = = " Identity " ; <nl> + } <nl> + <nl> bool IsMerge ( const NodeDef & node ) { <nl> const auto op = node . op ( ) ; <nl> return op = = " Merge " ; <nl> } <nl> <nl> + bool IsNoOp ( const NodeDef & node ) { <nl> + const auto op = node . op ( ) ; <nl> + return op = = " NoOp " ; <nl> + } <nl> + <nl> bool IsPlaceholder ( const NodeDef & node ) { <nl> const auto op = node . op ( ) ; <nl> return op = = " Placeholder " | | op = = " PlaceholderV2 " | | <nl> op = = " PlaceholderWithDefault " ; <nl> } <nl> <nl> + bool IsRecv ( const NodeDef & node ) { <nl> + const auto op = node . op ( ) ; <nl> + return op = = " _Recv " ; <nl> + } <nl> + <nl> bool IsReduction ( const NodeDef & node ) { <nl> const auto & op = node . op ( ) ; <nl> return op = = " Sum " | | op = = " Prod " | | op = = " Min " | | op = = " Max " | | <nl> op = = " Mean " | | op = = " Any " | | op = = " All " ; <nl> } <nl> <nl> + bool IsSend ( const NodeDef & node ) { <nl> + const auto op = node . op ( ) ; <nl> + return op = = " _Send " ; <nl> + } <nl> + <nl> + bool IsSwitch ( const NodeDef & node ) { <nl> + const auto & op = node . op ( ) ; <nl> + return op = = " Switch " ; <nl> + } <nl> + <nl> bool IsTranspose ( const NodeDef & node ) { <nl> const auto op = node . op ( ) ; <nl> return op = = " Transpose " ; <nl> mmm a / tensorflow / core / grappler / op_types . h <nl> ppp b / tensorflow / core / grappler / op_types . h <nl> namespace grappler { <nl> bool IsConcat ( const NodeDef & node ) ; <nl> bool IsConstant ( const NodeDef & node ) ; <nl> bool IsDequeueOp ( const NodeDef & node ) ; <nl> + bool IsIdentity ( const NodeDef & node ) ; <nl> bool IsMerge ( const NodeDef & node ) ; <nl> + bool IsNoOp ( const NodeDef & node ) ; <nl> bool IsPlaceholder ( const NodeDef & node ) ; <nl> + bool IsRecv ( const NodeDef & node ) ; <nl> bool IsReduction ( const NodeDef & node ) ; <nl> + bool IsSend ( const NodeDef & node ) ; <nl> + bool IsSwitch ( const NodeDef & node ) ; <nl> bool IsTranspose ( const NodeDef & node ) ; <nl> bool IsVariable ( const NodeDef & node ) ; <nl> <nl> mmm a / tensorflow / core / grappler / optimizers / constant_folding . cc <nl> ppp b / tensorflow / core / grappler / optimizers / constant_folding . cc <nl> Status NumOutputs ( const NodeDef & node , int * num_outputs ) { <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> + <nl> + string AsControlDependency ( const NodeDef & node ) { <nl> + return strings : : StrCat ( " ^ " , node . name ( ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> <nl> ConstantFolding : : ConstantFolding ( ) { <nl> - ops_to_preserve_ = <nl> - std : : regex ( " Placeholder . * | Const | . * Save . * | . * Restore . * | . * Reader " ) ; <nl> + ops_to_preserve_ = std : : regex ( <nl> + " Placeholder . * | Const | . * Save . * | . * Restore . * | . * Reader | Enter | Exit | " <nl> + " NextIteration " ) ; <nl> + } <nl> + <nl> + string ConstantFolding : : AddControlDependency ( const string & input_name ) { <nl> + const NodeDef * node = node_map_ - > GetNode ( input_name ) ; <nl> + if ( ! 
IsSwitch ( * node ) ) { <nl> + return AsControlDependency ( * node ) ; <nl> + } else { <nl> + / / We can ' t anchor control dependencies directly on the switch node : unlike <nl> + / / other nodes only one of the outputs of the switch node will be generated <nl> + / / when the switch node is executed , and we need to make sure the control <nl> + / / dependency is only triggered when the corresponding output is triggered . <nl> + / / We start by looking for an identity node connected to the output of the <nl> + / / switch node , and use it to anchor the control dependency . <nl> + auto outputs = node_map_ - > GetOutputs ( node - > name ( ) ) ; <nl> + for ( const NodeDef * node : outputs ) { <nl> + if ( IsIdentity ( * node ) ) { <nl> + CHECK_EQ ( 1 , node - > input_size ( ) ) ; <nl> + if ( IsSameInput ( node - > input ( 0 ) , input_name ) ) { <nl> + return AsControlDependency ( * node ) ; <nl> + } <nl> + } <nl> + } <nl> + / / We haven ' t found an existing node where we can anchor the control <nl> + / / dependency : add a new identity node . <nl> + int position = 0 ; <nl> + string ctrl_dep_name = ParseNodeName ( input_name , & position ) ; <nl> + strings : : StrAppend ( & ctrl_dep_name , " _ " , position ) ; <nl> + ctrl_dep_name = AddPrefixToNodeName ( ctrl_dep_name , kConstantFoldingCtrl ) ; <nl> + const DataType output_type = node - > attr ( ) . at ( " T " ) . type ( ) ; <nl> + <nl> + NodeDef * added_node = graph_ . add_node ( ) ; <nl> + added_node - > set_name ( ctrl_dep_name ) ; <nl> + added_node - > set_op ( " Identity " ) ; <nl> + ( * added_node - > mutable_attr ( ) ) [ " T " ] . set_type ( output_type ) ; <nl> + * added_node - > add_input ( ) = input_name ; <nl> + node_map_ - > AddNode ( added_node - > name ( ) , added_node ) ; <nl> + node_map_ - > AddOutput ( node - > name ( ) , added_node - > name ( ) ) ; <nl> + return AsControlDependency ( * added_node ) ; <nl> + } <nl> } <nl> <nl> Status ConstantFolding : : MaterializeShapes ( const GrapplerItem & item ) { <nl> GraphProperties properties ( item ) ; <nl> TF_RETURN_IF_ERROR ( properties . InferStatically ( ) ) ; <nl> - for ( NodeDef & node : * graph_ . mutable_node ( ) ) { <nl> + / / We may add some nodes to the graph to encode control dependencies : there is <nl> + / / no need to process these , so only iterate over the nodes of the input <nl> + / / graph . <nl> + const int node_count = graph_ . node_size ( ) ; <nl> + for ( int i = 0 ; i < node_count ; + + i ) { <nl> + NodeDef & node = * graph_ . mutable_node ( i ) ; <nl> const string op = node . op ( ) ; <nl> if ( op ! = " Shape " & & op ! = " Size " & & op ! = " Rank " ) { <nl> continue ; <nl> Status ConstantFolding : : MaterializeShapes ( const GrapplerItem & item ) { <nl> value . AsProtoTensorContent ( <nl> ( * node . mutable_attr ( ) ) [ " value " ] . mutable_tensor ( ) ) ; <nl> <nl> - / / Turn the inputs into control dependencies . <nl> - CHECK_EQ ( 1 , node . input_size ( ) ) ; <nl> - node . set_input ( 0 , strings : : StrCat ( " ^ " , NodeName ( node . input ( 0 ) ) ) ) ; <nl> + / / Turn the data input into a control dependency : this is needed to <nl> + / / ensure that the constant value will only be generated in the cases <nl> + / / where the shape / rank / size would have been generated in the original <nl> + / / graph . Additional inputs are extra control dependencies that we <nl> + / / preserve . <nl> + CHECK_LE ( 1 , node . input_size ( ) ) ; <nl> + string ctrl_dep = AddControlDependency ( node . input ( 0 ) ) ; <nl> + node . 
set_input ( 0 , ctrl_dep ) ; <nl> } <nl> } <nl> } <nl> Status ConstantFolding : : EvaluateOneFoldable ( const NodeDef & node , <nl> if ( output_tensors . size ( ) > 1 ) { <nl> node_name = strings : : StrCat ( node_name , " - " , i ) ; <nl> } <nl> - outputs - > push_back ( CreateNodeDef ( node_name , output_tensors [ i ] ) ) ; <nl> - delete output_tensors [ i ] . tensor ; <nl> + if ( output_tensors [ i ] . tensor ) { <nl> + outputs - > push_back ( CreateNodeDef ( node_name , output_tensors [ i ] ) ) ; <nl> + delete output_tensors [ i ] . tensor ; <nl> + } else { <nl> + / / Create an empty NodeDef to identify dead outputs ( e . g . the output of a <nl> + / / switch that ' s not selected by the switch predicate ) . <nl> + outputs - > push_back ( NodeDef ( ) ) ; <nl> + } <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> Status ConstantFolding : : FoldNode ( const NodeDef & node , GraphDef * output ) { <nl> std : : vector < NodeDef > const_nodes ; <nl> TF_RETURN_IF_ERROR ( EvaluateOneFoldable ( node , & const_nodes ) ) ; <nl> <nl> + NodeDef * constant_output = nullptr ; <nl> for ( const auto & const_node : const_nodes ) { <nl> + if ( const_node . name ( ) . empty ( ) ) { <nl> + / / Dead output : we can ' t create a constant to encode its value , so we ' ll <nl> + / / just skip it . We ' ll preserve the edges that originate from that output <nl> + / / below to preserve the overall behavior of the graph wrt dead edges . <nl> + continue ; <nl> + } <nl> NodeDef * added_node = output - > add_node ( ) ; <nl> * added_node = const_node ; <nl> node_map_ - > AddNode ( added_node - > name ( ) , added_node ) ; <nl> Status ConstantFolding : : FoldNode ( const NodeDef & node , GraphDef * output ) { <nl> } <nl> } <nl> } <nl> + <nl> + / / All the constant nodes encoding output values have the same control <nl> + / / dependencies ( since these are the control dependencies of the node we ' re <nl> + / / trying to fold ) . Record one such constant node . <nl> + constant_output = added_node ; <nl> } <nl> <nl> auto outputs = node_map_ - > GetOutputs ( node . name ( ) ) ; <nl> Status ConstantFolding : : FoldNode ( const NodeDef & node , GraphDef * output ) { <nl> string node_name = ParseNodeName ( output - > input ( i ) , & position ) ; <nl> if ( node_name = = node . name ( ) ) { <nl> if ( position < 0 ) { <nl> - * output - > mutable_input ( i ) = <nl> - strings : : StrCat ( " ^ " , const_nodes [ 0 ] . name ( ) ) ; <nl> - } else { <nl> + / / Propagate control dependencies if possible . If not , we ' ll just <nl> + / / preserve the existing control dependencies . <nl> + if ( constant_output ! = nullptr ) { <nl> + * output - > mutable_input ( i ) = AsControlDependency ( * constant_output ) ; <nl> + } <nl> + <nl> + } else if ( position < const_nodes . size ( ) & & <nl> + ! const_nodes [ position ] . name ( ) . empty ( ) ) { <nl> + / / Replace alive outputs with the corresponding constant . <nl> * output - > mutable_input ( i ) = const_nodes [ position ] . name ( ) ; <nl> + } else { <nl> + / / Leave this edge alone . <nl> + VLOG ( 1 ) < < " Preserving edge from " < < node . name ( ) < < " : " < < position <nl> + < < " [ " < < node . op ( ) < < " ] to " < < output - > name ( ) < < " : " < < i <nl> + < < " [ " < < output - > op ( ) < < " ] " ; <nl> } <nl> } <nl> } <nl> mmm a / tensorflow / core / grappler / optimizers / constant_folding . h <nl> ppp b / tensorflow / core / grappler / optimizers / constant_folding . 
h <nl> namespace tensorflow { <nl> namespace grappler { <nl> <nl> const char kConstantFoldingConst [ ] = " ConstantFolding " ; <nl> + const char kConstantFoldingCtrl [ ] = " ConstantFoldingCtrl " ; <nl> <nl> / / Constant folding optimization for a graph . <nl> class ConstantFolding : public GraphOptimizer { <nl> class ConstantFolding : public GraphOptimizer { <nl> const GraphDef & optimize_output , double result ) override ; <nl> <nl> private : <nl> + string AddControlDependency ( const string & input_name ) ; <nl> Status MaterializeShapes ( const GrapplerItem & item ) ; <nl> <nl> bool IsFoldable ( const NodeDef & node ) const ; <nl> mmm a / tensorflow / core / grappler / optimizers / constant_folding_test . cc <nl> ppp b / tensorflow / core / grappler / optimizers / constant_folding_test . cc <nl> TEST_F ( ConstantFoldingTest , ShapeMaterialization ) { <nl> EXPECT_EQ ( 3 , found ) ; <nl> } <nl> <nl> + TEST_F ( ConstantFoldingTest , SwitchNodes ) { <nl> + tensorflow : : Scope scope = tensorflow : : Scope : : NewRootScope ( ) ; <nl> + ops : : Variable v_in ( scope . WithOpName ( " v_in " ) , { 3 } , DT_FLOAT ) ; <nl> + ops : : Variable v_ctrl ( scope . WithOpName ( " v_ctrl " ) , { } , DT_BOOL ) ; <nl> + ops : : Switch s1 ( scope . WithOpName ( " switch " ) , v_in , v_ctrl ) ; <nl> + ops : : Rank rank ( scope . WithOpName ( " rank " ) , s1 . output_false ) ; <nl> + ops : : Identity i ( scope . WithOpName ( " i " ) , s1 . output_true ) ; <nl> + ops : : Size size ( scope . WithOpName ( " size " ) , i ) ; <nl> + ops : : Square p1 ( scope . WithOpName ( " p1 " ) , rank ) ; <nl> + ops : : Square p2 ( scope . WithOpName ( " p2 " ) , size ) ; <nl> + ops : : Merge m ( scope . WithOpName ( " m " ) , { p1 . y , p2 . y } ) ; <nl> + <nl> + Output predicate = <nl> + ops : : Const ( scope . WithOpName ( " false " ) , false , TensorShape ( { } ) ) ; <nl> + Output constant = <nl> + ops : : Const ( scope . WithOpName ( " constant " ) , 1 . 0f , TensorShape ( { 1 } ) ) ; <nl> + ops : : Switch s2 ( scope . WithOpName ( " switch2 " ) , constant , predicate ) ; <nl> + ops : : Identity statically_known ( scope . WithOpName ( " i2 " ) , s2 . output_false ) ; <nl> + ops : : Identity never_generated ( scope . WithOpName ( " i3 " ) , s2 . output_true ) ; <nl> + ops : : Merge m2 ( scope . WithOpName ( " m2 " ) , <nl> + { statically_known . output , never_generated . output } ) ; <nl> + <nl> + GrapplerItem item ; <nl> + item . fetch . push_back ( " m " ) ; <nl> + item . fetch . push_back ( " m2 " ) ; <nl> + <nl> + TF_CHECK_OK ( scope . ToGraphDef ( & item . graph ) ) ; <nl> + <nl> + ConstantFolding fold ; <nl> + GraphDef output ; <nl> + Status status = fold . Optimize ( nullptr , item , & output ) ; <nl> + TF_EXPECT_OK ( status ) ; <nl> + <nl> + for ( const auto & node : output . node ( ) ) { <nl> + if ( node . name ( ) = = " rank " ) { <nl> + EXPECT_EQ ( " Const " , node . op ( ) ) ; <nl> + EXPECT_EQ ( 1 , node . input_size ( ) ) ; <nl> + EXPECT_EQ ( " ^ ConstantFoldingCtrl / switch_0 " , node . input ( 0 ) ) ; <nl> + } <nl> + if ( node . name ( ) = = " size " ) { <nl> + EXPECT_EQ ( " Const " , node . op ( ) ) ; <nl> + EXPECT_EQ ( 1 , node . input_size ( ) ) ; <nl> + EXPECT_EQ ( " ^ i " , node . input ( 0 ) ) ; <nl> + } <nl> + if ( node . name ( ) = = " ConstantFolding / switch2 - 0 " ) { <nl> + EXPECT_EQ ( " Const " , node . op ( ) ) ; <nl> + EXPECT_EQ ( 0 , node . input_size ( ) ) ; <nl> + } <nl> + if ( node . name ( ) = = " ConstantFolding / i2 " ) { <nl> + EXPECT_EQ ( " Const " , node .
op ( ) ) ; <nl> + EXPECT_EQ ( 0 , node . input_size ( ) ) ; <nl> + } <nl> + if ( node . name ( ) = = " i3 " ) { <nl> + EXPECT_EQ ( " Identity " , node . op ( ) ) ; <nl> + EXPECT_EQ ( 1 , node . input_size ( ) ) ; <nl> + EXPECT_EQ ( " switch2 : 1 " , node . input ( 0 ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> TEST_F ( ConstantFoldingTest , NoOpReduction ) { <nl> / / Build a simple graph with a reduction that can be reduced to the identity . <nl> tensorflow : : Scope scope = tensorflow : : Scope : : NewRootScope ( ) ; <nl> mmm a / tensorflow / core / grappler / utils . cc <nl> ppp b / tensorflow / core / grappler / utils . cc <nl> NodeMap : : NodeMap ( GraphDef * graph ) : graph_ ( graph ) { <nl> } <nl> } <nl> <nl> - NodeDef * NodeMap : : GetNode ( const string & name ) { <nl> + NodeDef * NodeMap : : GetNode ( const string & name ) const { <nl> string node_name = NodeName ( name ) ; <nl> - return nodes_ [ node_name ] ; <nl> + auto it = nodes_ . find ( node_name ) ; <nl> + if ( it = = nodes_ . end ( ) ) { <nl> + return nullptr ; <nl> + } <nl> + return it - > second ; <nl> } <nl> <nl> - std : : set < NodeDef * > NodeMap : : GetOutputs ( const string & node_name ) { <nl> - return outputs_ [ node_name ] ; <nl> + const std : : set < NodeDef * > & NodeMap : : GetOutputs ( const string & node_name ) const { <nl> + auto it = outputs_ . find ( node_name ) ; <nl> + if ( it = = outputs_ . end ( ) ) { <nl> + return empty_set_ ; <nl> + } <nl> + return it - > second ; <nl> } <nl> <nl> void NodeMap : : AddNode ( const string & name , NodeDef * node ) { <nl> void NodeMap : : UpdateOutput ( const string & node , const string & old_output , <nl> outputs_ [ node ] . insert ( nodes_ [ new_output ] ) ; <nl> } <nl> <nl> + bool IsSameInput ( const string & name1 , const string & name2 ) { <nl> + if ( name1 = = name2 ) { <nl> + return true ; <nl> + } <nl> + int position1 ; <nl> + string node1 = ParseNodeName ( name1 , & position1 ) ; <nl> + int position2 ; <nl> + string node2 = ParseNodeName ( name2 , & position2 ) ; <nl> + return ( position1 = = position2 ) & & ( node1 = = node2 ) ; <nl> + } <nl> + <nl> string ParseNodeName ( const string & name , int * position ) { <nl> / / Strip the prefix ' ^ ' ( if any ) , and strip the trailing " : { digits } ( if any ) <nl> / / to get a node name . <nl> mmm a / tensorflow / core / grappler / utils . h <nl> ppp b / tensorflow / core / grappler / utils . h <nl> namespace grappler { <nl> class NodeMap { <nl> public : <nl> explicit NodeMap ( GraphDef * graph ) ; <nl> - NodeDef * GetNode ( const string & name ) ; <nl> - std : : set < NodeDef * > GetOutputs ( const string & node_name ) ; <nl> + NodeDef * GetNode ( const string & name ) const ; <nl> + const std : : set < NodeDef * > & GetOutputs ( const string & node_name ) const ; <nl> / / This method doesn ' t record the outputs of the added node ; the outputs need <nl> / / to be explicitly added by the AddOutput method . <nl> void AddNode ( const string & name , NodeDef * node ) ; <nl> class NodeMap { <nl> <nl> private : <nl> GraphDef * graph_ ; <nl> + std : : set < NodeDef * > empty_set_ ; <nl> std : : unordered_map < string , NodeDef * > nodes_ ; <nl> std : : unordered_map < string , std : : set < NodeDef * > > outputs_ ; <nl> } ; <nl> class NodeMap { <nl> / / the ^ character . <nl> bool IsControlInput ( const string & name ) ; <nl> <nl> + / / True iff ' name1 ' and ' name2 ' refer to the same input . 
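<nl> + / / For example , " foo " and " foo : 0 " denote the same input ( a name without a <nl> + / / port is parsed as position 0 ) , while " foo : 1 " and the control input <nl> + / / " ^ foo " ( position - 1 ) do not .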
<nl> + bool IsSameInput ( const string & name1 , const string & name2 ) ; <nl> + <nl> / / Return the node name corresponding to ' name ' if name is valid , or the empty <nl> / / string otherwise . <nl> string NodeName ( const string & name ) ; <nl> mmm a / tensorflow / core / kernels / BUILD <nl> ppp b / tensorflow / core / kernels / BUILD <nl> tf_kernel_library ( <nl> " / / tensorflow / core : framework " , <nl> " / / tensorflow / core : lib " , <nl> " / / tensorflow / core : lib_internal " , <nl> + " / / third_party / eigen3 " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / core / kernels / adjust_contrast_op_test . cc <nl> ppp b / tensorflow / core / kernels / adjust_contrast_op_test . cc <nl> limitations under the License . <nl> # include < vector > <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / batch_norm_op_test . cc <nl> ppp b / tensorflow / core / kernels / batch_norm_op_test . cc <nl> limitations under the License . <nl> # include < vector > <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / colorspace_op_test . cc <nl> ppp b / tensorflow / core / kernels / colorspace_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / control_flow_ops_test . cc <nl> ppp b / tensorflow / core / kernels / control_flow_ops_test . cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> mmm a / tensorflow / core / kernels / crop_and_resize_op_test . cc <nl> ppp b / tensorflow / core / kernels / crop_and_resize_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . 
h " <nl> # include " tensorflow / core / framework / register_types . h " <nl> mmm a / tensorflow / core / kernels / cross_op_test . cc <nl> ppp b / tensorflow / core / kernels / cross_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / debug_ops_test . cc <nl> ppp b / tensorflow / core / kernels / debug_ops_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / debug / debug_io_utils . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> + # include " tensorflow / core / framework / summary . pb . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> # include " tensorflow / core / framework / types . h " <nl> mmm a / tensorflow / core / kernels / depthwise_conv_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / depthwise_conv_op_gpu . cu . cc <nl> namespace tensorflow { <nl> <nl> using Eigen : : GpuDevice ; <nl> <nl> - / / Returns whether depthwise convolution forward pass can be performed using the <nl> - / / faster ( ' Small ' ) variant of the kernel . <nl> + / / Returns whether depthwise convolution forward or backward input pass can be <nl> + / / performed using the faster ( ' Small ' ) variant of the kernel . <nl> EIGEN_DEVICE_FUNC bool CanLaunchDepthwiseConv2dGPUSmall ( <nl> const DepthwiseArgs args ) { <nl> return args . depth_multiplier = = 1 & & args . stride = = 1 & & args . in_rows < = 16 & & <nl> EIGEN_DEVICE_FUNC bool CanLaunchDepthwiseConv2dGPUSmall ( <nl> ( args . in_rows + 1 ) / 2 * args . in_cols ; <nl> } <nl> <nl> + / / Returns whether depthwise convolution backward filter pass can be performed <nl> + / / using the faster ( ' Small ' ) variant of the kernel . <nl> + EIGEN_DEVICE_FUNC bool CanLaunchDepthwiseConv2dBackpropFilterGPUSmall ( <nl> + const DepthwiseArgs args , const int block_rows ) { <nl> + return args . depth_multiplier = = 1 & & args . stride = = 1 & & args . in_rows < = 16 & & <nl> + args . in_cols < = 16 & & args . in_rows = = args . out_rows & & <nl> + args . in_cols = = args . out_cols & & args . pad_rows > = 0 & & <nl> + args . pad_rows < args . filter_rows & & args . pad_cols > = 0 & & <nl> + args . pad_cols < args . filter_cols & & block_rows < = args . in_rows & & <nl> + args . filter_rows * args . filter_cols < = args . in_cols * block_rows ; <nl> + } <nl> + <nl> / / A Cuda kernel to compute the depthwise convolution forward pass <nl> / / in NHWC format . <nl> template < typename T , int kKnownFilterWidth , int kKnownFilterHeight , <nl> __global__ void __launch_bounds__ ( 1024 , 2 ) <nl> } <nl> } <nl> <nl> - / / CUDA kernel to compute the depthwise convolution forward pass in NCHW format , <nl> + / / CUDA kernel to compute the depthwise convolution forward pass in NHWC format , <nl> / / tailored for small images up to 16x16 . Stride and depth multiplier must be 1 . 
<nl> / / Padding must be ' SAME ' , which allows reusing the index computation . Only <nl> / / use this kernel if CanLaunchDepthwiseConv2dGPUSmall ( args ) returns true . <nl> void LaunchDepthwiseConv2dGPUSmall ( const GpuDevice & d , const DepthwiseArgs args , <nl> < < < config . block_count , block_dim , shared_memory_size , d . stream ( ) > > > ( <nl> args , input , filter , output ) ; <nl> } else { <nl> - assert ( false ) ; <nl> + assert ( false & & " Incorrect data format " ) ; <nl> } <nl> } <nl> <nl> void LaunchDepthwiseConv2dGPU ( const GpuDevice & d , const DepthwiseArgs args , <nl> config . thread_per_block , 0 , d . stream ( ) > > > ( args , input , filter , <nl> output , num_outputs ) ; <nl> } else { <nl> - assert ( false ) ; <nl> + assert ( false & & " Incorrect data format " ) ; <nl> } <nl> } <nl> <nl> __global__ void __launch_bounds__ ( 640 , 2 ) <nl> } <nl> <nl> / / CUDA kernel to compute the depthwise convolution backward w . r . t . input in <nl> - / / NCHW format , tailored for small images up to 16x16 . Stride and depth <nl> + / / NHWC format , tailored for small images up to 16x16 . Stride and depth <nl> / / multiplier must be 1 . Padding must be ' SAME ' , which allows reusing the index <nl> - / / computation . <nl> + / / computation . Only use this kernel if CanLaunchDepthwiseConv2dGPUSmall ( args ) <nl> + / / returns true . <nl> / / Implementation is the same as the forward pass , except that the filter is <nl> / / rotated by 180 ° , see filter_read_offset and filter_ptr . <nl> / / Tiles of the input and filter tensors are loaded into shared memory before <nl> template < typename T , int kKnownFilterWidth , int kKnownFilterHeight , <nl> __global__ <nl> __launch_bounds__ ( 1024 , 2 ) void DepthwiseConv2dBackpropInputGPUKernelNHWCSmall ( <nl> const DepthwiseArgs args , const T * input , const T * filter , T * output ) { <nl> + assert ( CanLaunchDepthwiseConv2dGPUSmall ( args ) ) ; <nl> / / Holds block plus halo and filter data for blockDim . x depths . <nl> extern __shared__ __align__ ( sizeof ( T ) ) unsigned char shared_memory [ ] ; <nl> T * const shared_data = reinterpret_cast < T * > ( shared_memory ) ; <nl> __global__ void __launch_bounds__ ( 640 , 2 ) <nl> } <nl> } <nl> <nl> - template < typename T , int kKnownFilterWidth , int kKnownFilterHeight > <nl> - bool TryLaunchDepthwiseConv2dBackpropInputGPUSmall ( <nl> - const GpuDevice & d , const DepthwiseArgs args , const T * out_backprop , <nl> - const T * filter , T * in_backprop , TensorFormat data_format ) { <nl> - if ( data_format ! = FORMAT_NHWC | | args . depth_multiplier ! = 1 | | <nl> - args . stride ! = 1 | | args . in_rows > 16 | | args . in_cols > 16 | | <nl> - args . in_rows ! = args . out_rows | | args . in_cols ! = args . out_cols | | <nl> - args . pad_rows < 0 | | args . pad_rows > = args . filter_rows | | <nl> - args . pad_cols < 0 | | args . pad_cols > = args . filter_cols ) { <nl> - return false ; <nl> + / / CUDA kernel to compute the depthwise convolution backward w . r . t . input in <nl> + / / NCHW format , tailored for small images up to 16x16 . Stride and depth <nl> + / / multiplier must be 1 . Padding must be ' SAME ' , which allows reusing the index <nl> + / / computation . Only use this kernel if CanLaunchDepthwiseConv2dGPUSmall ( args ) <nl> + / / returns true . <nl> + / / Implementation is the same as the forward pass , except that the filter is <nl> + / / rotated by 180 ° , see filter_read_offset and filter_ptr .
<nl> + / / Tiles of the input and filter tensors are loaded into shared memory before <nl> + / / performing the convolution . Each thread handles two elements per iteration , <nl> + / / one each in the lower and upper half of a tile . <nl> + template < typename T , int kKnownFilterWidth , int kKnownFilterHeight , <nl> + bool kKnownEvenRows > <nl> + __global__ <nl> + __launch_bounds__ ( 1024 , 2 ) void DepthwiseConv2dBackpropInputGPUKernelNCHWSmall ( <nl> + const DepthwiseArgs args , const T * input , const T * filter , T * output ) { <nl> + assert ( CanLaunchDepthwiseConv2dGPUSmall ( args ) ) ; <nl> + / / Holds block plus halo and filter data for blockDim . z depths . <nl> + extern __shared__ __align__ ( sizeof ( T ) ) unsigned char shared_memory [ ] ; <nl> + T * const shared_data = reinterpret_cast < T * > ( shared_memory ) ; <nl> + <nl> + const int batches = args . batch ; <nl> + const int in_rows = args . in_rows ; <nl> + const int in_cols = args . in_cols ; <nl> + const int in_depth = args . in_depth ; <nl> + const int filter_rows = <nl> + kKnownFilterHeight < 0 ? args . filter_rows : kKnownFilterHeight ; <nl> + const int filter_cols = <nl> + kKnownFilterWidth < 0 ? args . filter_cols : kKnownFilterWidth ; <nl> + const int pad_rows = args . pad_rows ; <nl> + const int pad_cols = args . pad_cols ; <nl> + <nl> + / / Fixed blockDim . z , tailored for maximum grid size for images of size 16x16 . <nl> + const int block_rows = blockDim . y ; <nl> + const int block_slices = 8 ; <nl> + <nl> + / / These values are the same for all threads and could <nl> + / / be precomputed on the CPU . <nl> + const int block_pixels = in_cols * block_rows ; <nl> + const int block_size = block_pixels * block_slices ; <nl> + const int in_pixels = in_cols * in_rows ; <nl> + const int in_increment = in_cols - 1 ; <nl> + const int filter_pixels = filter_rows * filter_cols ; <nl> + const int tile_cols = in_cols + filter_cols - 1 ; <nl> + const int even_rows = kKnownEvenRows | | ( 1 & ~ in_rows ) ; <nl> + const int tile_rows = in_rows + filter_rows - even_rows ; <nl> + const int tile_pixels = tile_cols * tile_rows ; <nl> + const int tile_size = tile_pixels * block_slices ; <nl> + const int tile_offset = block_rows * tile_cols ; <nl> + const int pad_offset = pad_rows * tile_cols + pad_cols ; <nl> + const int in_slices = in_depth * batches ; <nl> + const int in_blocks = ( in_slices + block_slices - 1 ) / block_slices ; <nl> + <nl> + const int thread_col = threadIdx . x ; <nl> + const int thread_row = threadIdx . y ; <nl> + const int thread_depth = threadIdx . z ; <nl> + <nl> + / / Position in block . <nl> + const int thread_pix = thread_row * in_cols + thread_col ; <nl> + const int thread_idx = thread_depth * block_pixels + thread_pix ; <nl> + <nl> + / / Initialize tile , in particular the padding . <nl> + for ( int i = thread_idx ; i < tile_size ; i + = block_size ) { <nl> + shared_data [ i ] = T ( 0 ) ; <nl> } <nl> + __syncthreads ( ) ; <nl> <nl> - const int block_rows = ( args . in_rows + 1 ) / 2 ; <nl> - if ( args . filter_rows * args . filter_cols > args . in_cols * block_rows ) { <nl> - return false ; <nl> + / / Position in tensors . <nl> + const int tensor_idx = thread_depth * in_pixels + thread_pix ; <nl> + <nl> + / / Position in ( padded ) shared memory . <nl> + const int data_pix = thread_row * tile_cols + thread_col ; <nl> + const int data_idx = thread_depth * tile_pixels + data_pix ; <nl> + <nl> + / / Position in shared memory , offset by pad_rows / pad_cols . 
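<nl> + / / Equivalently , tile_idx = thread_depth * tile_pixels + <nl> + / / ( thread_row + pad_rows ) * tile_cols + ( thread_col + pad_cols ) .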
<nl> + const int tile_idx = data_idx + pad_offset ; <nl> + <nl> + / / Filter is always in HWCK format , irrespective of the input / output format . <nl> + const int filter_pix = thread_idx / block_slices ; <nl> + const int filter_depth = thread_idx % block_slices ; <nl> + const int filter_idx = filter_pix * in_depth ; <nl> + <nl> + const int max_slice = in_slices - thread_depth ; <nl> + const int filter_write_offset = <nl> + filter_pix < filter_pixels ? tile_size + thread_idx : 0 ; <nl> + const int filter_read_offset = <nl> + tile_size + filter_pixels * block_slices + thread_depth ; <nl> + const bool skip_second = <nl> + ! kKnownEvenRows & & thread_row + ( in_rows & 1 ) = = block_rows ; <nl> + <nl> + for ( int b = blockIdx . x ; b < in_blocks ; b + = gridDim . x ) { <nl> + const int slice = b * block_slices ; <nl> + <nl> + const int inout_offset = slice * in_pixels + tensor_idx ; <nl> + const bool slice_in_range = slice < max_slice ; <nl> + <nl> + if ( slice_in_range ) { <nl> + const T * const in_ptr = inout_offset + input ; <nl> + T * const tile_ptr = tile_idx + shared_data ; <nl> + tile_ptr [ 0 ] = ldg ( in_ptr ) ; <nl> + if ( ! skip_second ) { <nl> + tile_ptr [ tile_offset ] = ldg ( block_pixels + in_ptr ) ; <nl> + } <nl> + } <nl> + <nl> + if ( filter_write_offset ! = 0 ) { <nl> + const int filter_offset = filter_idx + ( slice + filter_depth ) % in_depth ; <nl> + shared_data [ filter_write_offset ] = ldg ( filter_offset + filter ) ; <nl> + } <nl> + <nl> + / / Note : the condition to reach this is uniform across the entire block . <nl> + __syncthreads ( ) ; <nl> + <nl> + if ( slice_in_range ) { <nl> + T sum1 = 0 ; <nl> + T sum2 = 0 ; <nl> + int shared_offset = data_idx ; <nl> + const T * filter_ptr = filter_read_offset + shared_data ; <nl> + UNROLL for ( int r = 0 ; r < filter_rows ; + + r ) { <nl> + UNROLL for ( int c = 0 ; c < filter_cols ; + + c ) { <nl> + filter_ptr - = block_slices ; <nl> + const T filter_value = * filter_ptr ; <nl> + const T * const tile_ptr = shared_offset + shared_data ; <nl> + sum1 + = filter_value * tile_ptr [ 0 ] ; <nl> + sum2 + = filter_value * tile_ptr [ tile_offset ] ; <nl> + + + shared_offset ; <nl> + } <nl> + shared_offset + = in_increment ; <nl> + } <nl> + T * const out_ptr = inout_offset + output ; <nl> + out_ptr [ 0 ] = sum1 ; <nl> + if ( ! skip_second ) { <nl> + out_ptr [ block_pixels ] = sum2 ; <nl> + } <nl> + } <nl> + <nl> + / / Note : the condition to reach this is uniform across the entire block . <nl> + __syncthreads ( ) ; <nl> } <nl> + } <nl> <nl> + template < typename T , int kKnownFilterWidth , int kKnownFilterHeight , <nl> + bool kKnownEvenRows > <nl> + void LaunchDepthwiseConv2dBackpropInputGPUSmall ( const GpuDevice & d , <nl> + const DepthwiseArgs args , <nl> + const T * out_backprop , <nl> + const T * filter , T * in_backprop , <nl> + TensorFormat data_format ) { <nl> + const int block_rows = ( args . in_rows + 1 ) / 2 ; <nl> + const int block_slices = 8 ; <nl> const int tile_cols = args . in_cols + args . filter_cols - 1 ; <nl> const int tile_rows = block_rows * 2 + args . filter_rows - 1 ; <nl> const int tile_pixels = tile_rows * tile_cols ; <nl> const int filter_pixels = args . filter_rows * args . filter_cols ; <nl> - dim3 block_dim = dim3 ( 8 , args . in_cols , block_rows ) ; <nl> + <nl> const int shared_memory_size = <nl> - block_dim . x * ( tile_pixels + filter_pixels ) * sizeof ( T ) ; <nl> + block_slices * ( tile_pixels + filter_pixels ) * sizeof ( T ) ; <nl> + const int num_outputs = <nl> + args . batch * args . 
out_rows * args . out_cols * args . out_depth ; <nl> <nl> - const int num_in_backprop = <nl> - args . batch * args . in_rows * args . in_cols * args . in_depth ; <nl> - if ( args . in_rows & 1 ) { <nl> + if ( data_format = = FORMAT_NHWC ) { <nl> + dim3 block_dim = dim3 ( block_slices , args . in_cols , block_rows ) ; <nl> CudaLaunchConfig config = GetCudaLaunchConfig ( <nl> - num_in_backprop , d , <nl> + num_outputs , d , <nl> DepthwiseConv2dBackpropInputGPUKernelNHWCSmall < <nl> - T , kKnownFilterWidth , kKnownFilterHeight , false > , <nl> + T , kKnownFilterWidth , kKnownFilterHeight , kKnownEvenRows > , <nl> shared_memory_size , block_dim . x * block_dim . y * block_dim . z ) ; <nl> - DepthwiseConv2dBackpropInputGPUKernelNHWCSmall < T , kKnownFilterWidth , <nl> - kKnownFilterHeight , false > <nl> + DepthwiseConv2dBackpropInputGPUKernelNHWCSmall < <nl> + T , kKnownFilterWidth , kKnownFilterHeight , kKnownEvenRows > <nl> < < < config . block_count , block_dim , shared_memory_size , d . stream ( ) > > > ( <nl> args , out_backprop , filter , in_backprop ) ; <nl> - } else { <nl> + } else if ( data_format = = FORMAT_NCHW ) { <nl> + dim3 block_dim = dim3 ( args . in_cols , block_rows , block_slices ) ; <nl> CudaLaunchConfig config = GetCudaLaunchConfig ( <nl> - num_in_backprop , d , <nl> - DepthwiseConv2dBackpropInputGPUKernelNHWCSmall < <nl> - T , kKnownFilterWidth , kKnownFilterHeight , true > , <nl> + num_outputs , d , <nl> + DepthwiseConv2dBackpropInputGPUKernelNCHWSmall < <nl> + T , kKnownFilterWidth , kKnownFilterHeight , kKnownEvenRows > , <nl> shared_memory_size , block_dim . x * block_dim . y * block_dim . z ) ; <nl> - DepthwiseConv2dBackpropInputGPUKernelNHWCSmall < T , kKnownFilterWidth , <nl> - kKnownFilterHeight , true > <nl> + DepthwiseConv2dBackpropInputGPUKernelNCHWSmall < <nl> + T , kKnownFilterWidth , kKnownFilterHeight , kKnownEvenRows > <nl> < < < config . block_count , block_dim , shared_memory_size , d . stream ( ) > > > ( <nl> args , out_backprop , filter , in_backprop ) ; <nl> + } else { <nl> + assert ( false & & " Incorrect data format " ) ; <nl> } <nl> + } <nl> <nl> - return true ; <nl> + template < typename T , int kKnownFilterWidth , int kKnownFilterHeight > <nl> + void LaunchDepthwiseConv2dBackpropInputGPUSmall ( const GpuDevice & d , <nl> + const DepthwiseArgs args , <nl> + const T * out_backprop , <nl> + const T * filter , T * in_backprop , <nl> + TensorFormat data_format ) { <nl> + if ( args . 
in_rows & 1 ) { <nl> + LaunchDepthwiseConv2dBackpropInputGPUSmall < T , kKnownFilterWidth , <nl> + kKnownFilterHeight , <nl> + / * kKnownEvenRows = * / false > ( <nl> + d , args , out_backprop , filter , in_backprop , data_format ) ; <nl> + } else { <nl> + LaunchDepthwiseConv2dBackpropInputGPUSmall < T , kKnownFilterWidth , <nl> + kKnownFilterHeight , <nl> + / * kKnownEvenRows = * / true > ( <nl> + d , args , out_backprop , filter , in_backprop , data_format ) ; <nl> + } <nl> } <nl> <nl> template < typename T , int kKnownFilterWidth , int kKnownFilterHeight , <nl> void LaunchDepthwiseConv2dBackpropInputGPU ( const GpuDevice & d , <nl> const T * out_backprop , <nl> const T * filter , T * in_backprop , <nl> TensorFormat data_format ) { <nl> - if ( TryLaunchDepthwiseConv2dBackpropInputGPUSmall < T , kKnownFilterWidth , <nl> - kKnownFilterHeight > ( <nl> - d , args , out_backprop , filter , in_backprop , data_format ) ) { <nl> + if ( CanLaunchDepthwiseConv2dGPUSmall ( args ) ) { <nl> + LaunchDepthwiseConv2dBackpropInputGPUSmall < T , kKnownFilterWidth , <nl> + kKnownFilterHeight > ( <nl> + d , args , out_backprop , filter , in_backprop , data_format ) ; <nl> return ; <nl> } <nl> const int num_in_backprop = <nl> void LaunchDepthwiseConv2dBackpropInputGPU ( const GpuDevice & d , <nl> < < < config . block_count , config . thread_per_block , 0 , d . stream ( ) > > > ( <nl> args , out_backprop , filter , in_backprop , num_in_backprop ) ; <nl> } else { <nl> - assert ( false ) ; <nl> + assert ( false & & " Incorrect data format " ) ; <nl> } <nl> } <nl> <nl> __global__ void __launch_bounds__ ( 640 , 2 ) <nl> } <nl> <nl> / / CUDA kernel to compute the depthwise convolution backward w . r . t . filter in <nl> - / / NCHW format , tailored for small images up to 16x16 . Stride and depth <nl> - / / multiplier must be 1 . Padding must be ' SAME ' . <nl> + / / NHWC format , tailored for small images up to 16x16 . Stride and depth <nl> + / / multiplier must be 1 . Padding must be ' SAME ' . Only use this kernel if <nl> + / / CanLaunchDepthwiseConv2dGPUSmall ( args ) returns true . <nl> / / Tiles of the input tensor are loaded into shared memory before performing the <nl> / / convolution . Per iteration and filter element , each thread first performs <nl> / / a partial convolution for two elements , one each in the lower and upper half <nl> template < typename T , int kKnownFilterWidth , int kKnownFilterHeight > <nl> __global__ <nl> __launch_bounds__ ( 1024 , 2 ) void DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall ( <nl> const DepthwiseArgs args , const T * output , const T * input , T * filter ) { <nl> + assert ( CanLaunchDepthwiseConv2dBackpropFilterGPUSmall ( args , blockDim . z ) ) ; <nl> / / Holds block plus halo and filter data for blockDim . x depths . <nl> extern __shared__ __align__ ( sizeof ( T ) ) unsigned char shared_memory [ ] ; <nl> T * const shared_data = reinterpret_cast < T * > ( shared_memory ) ; <nl> __launch_bounds__ ( 1024 , 2 ) void DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall ( <nl> const int batch_blocks = ( in_depth + block_slices - 1 ) / block_slices ; <nl> const int in_blocks = batch_blocks * batches ; <nl> const int tensor_offset = block_rows * in_row_size ; <nl> + / / The accumulator has a fixed number of pixels that can be reduced by one <nl> + / / warp . Pixels beyond block_pixels / 4 are never written . 
<nl> const int accum_pixels = 32 ; <nl> const int accum_increment = accum_pixels * block_slices ; <nl> const int accum_size = filter_pixels * accum_increment ; <nl> __launch_bounds__ ( 1024 , 2 ) void DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall ( <nl> UNROLL for ( int c = 0 ; c < filter_cols ; + + c ) { <nl> const T * const tile_ptr = shared_offset + shared_data ; <nl> T val = out1 * tile_ptr [ 0 ] + out2 * tile_ptr [ tile_offset ] ; <nl> + / / Sum up 4 block_pixels of the same depth and write to accumulator . <nl> val + = CudaShuffleDown ( val , 16 ) ; <nl> val + = CudaShuffleDown ( val , 8 ) ; <nl> if ( ! ( thread_idx & 24 ) / * i . e . ' lane_idx < 8 ' * / ) { <nl> __launch_bounds__ ( 1024 , 2 ) void DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall ( <nl> <nl> const T * const accum_data = tile_size + shared_data ; <nl> for ( int i = thread_idx ; i < accum_size ; i + = block_size ) { <nl> - const int filter_idx = i / 32 ; <nl> + const int filter_idx = i / accum_pixels ; <nl> const int filter_pix = filter_idx / block_slices ; <nl> const int filter_depth = filter_idx % block_slices + start_depth ; <nl> const int filter_offset = filter_pix * in_depth + filter_depth ; <nl> if ( filter_depth < in_depth ) { <nl> T val = accum_data [ i ] ; <nl> + / / Sum up the 32 pixels of the same depth from the accumulator . <nl> val + = CudaShuffleDown ( val , 16 ) ; <nl> val + = CudaShuffleDown ( val , 8 ) ; <nl> val + = CudaShuffleDown ( val , 4 ) ; <nl> __global__ void __launch_bounds__ ( 640 , 2 ) <nl> } <nl> } <nl> <nl> + / / CUDA kernel to compute the depthwise convolution backward w . r . t . filter in <nl> + / / NCHW format , tailored for small images up to 16x16 . Stride and depth <nl> + / / multiplier must be 1 . Padding must be ' SAME ' . Only use this kernel if <nl> + / / CanLaunchDepthwiseConv2dGPUSmall ( args ) returns true . <nl> + / / Tiles of the input tensor are loaded into shared memory before performing the <nl> + / / convolution . Per iteration and filter element , each thread first performs <nl> + / / a partial convolution for two elements , one each in the lower and upper half <nl> + / / of a tile . The intermediate result of 4 consecutive columns are then <nl> + / / accumulated and written to shared memory . Finally , the values in shared <nl> + / / memory are warp - accumulated ( in chunks of 32 elements ) and summed up in <nl> + / / global memory using atomics . <nl> + template < typename T , int kKnownFilterWidth , int kKnownFilterHeight > <nl> + __global__ <nl> + __launch_bounds__ ( 1024 , 2 ) void DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall ( <nl> + const DepthwiseArgs args , const T * output , const T * input , T * filter ) { <nl> + assert ( CanLaunchDepthwiseConv2dBackpropFilterGPUSmall ( args , blockDim . x ) ) ; <nl> + / / Holds block plus halo and filter data for blockDim . z depths . <nl> + extern __shared__ __align__ ( sizeof ( T ) ) unsigned char shared_memory [ ] ; <nl> + T * const shared_data = reinterpret_cast < T * > ( shared_memory ) ; <nl> + <nl> + const int batches = args . batch ; <nl> + const int in_rows = args . in_rows ; <nl> + const int in_cols = args . in_cols ; <nl> + const int in_depth = args . in_depth ; <nl> + const int filter_rows = <nl> + kKnownFilterHeight < 0 ? args . filter_rows : kKnownFilterHeight ; <nl> + const int filter_cols = <nl> + kKnownFilterWidth < 0 ? args . filter_cols : kKnownFilterWidth ; <nl> + const int pad_rows = args . pad_rows ; <nl> + const int pad_cols = args . pad_cols ; <nl> + <nl> + / / Fixed blockDim . 
z , corresponding to Pascal ' s global load granularity of 32B . <nl> + const int block_rows = blockDim . y ; <nl> + const int block_slices = 8 ; <nl> + <nl> + / / These values are the same for all threads and could <nl> + / / be precomputed on the CPU . <nl> + const int block_pixels = in_cols * block_rows ; <nl> + const int block_size = block_pixels * block_slices ; <nl> + const int in_pixels = in_cols * in_rows ; <nl> + const int in_increment = in_cols - 1 ; <nl> + const int filter_pixels = filter_rows * filter_cols ; <nl> + const int tile_cols = in_cols + filter_cols - 1 ; <nl> + const int tile_rows = 2 * block_rows + filter_rows - 1 ; <nl> + const int tile_pixels = tile_cols * tile_rows ; <nl> + const int tile_size = tile_pixels * block_slices ; <nl> + const int tile_offset = block_rows * tile_cols ; <nl> + const int pad_offset = pad_rows * tile_cols + pad_cols ; <nl> + const int in_slices = in_depth * batches ; <nl> + const int in_blocks = ( in_slices + block_slices - 1 ) / block_slices ; <nl> + / / The accumulator has a fixed number of pixels that can be reduced by one <nl> + / / warp . Pixels beyond block_pixels / 4 are never written . <nl> + const int accum_pixels = 32 ; <nl> + const int accum_increment = accum_pixels * block_slices ; <nl> + const int accum_size = filter_pixels * accum_increment ; <nl> + <nl> + const int thread_col = threadIdx . x ; <nl> + const int thread_row = threadIdx . y ; <nl> + const int thread_depth = threadIdx . z ; <nl> + <nl> + / / Position in block . <nl> + const int thread_pix = thread_row * in_cols + thread_col ; <nl> + const int thread_idx = thread_depth * block_pixels + thread_pix ; <nl> + <nl> + / / Initialize tile , in particular the padding and accumulator . <nl> + for ( int i = thread_idx ; i < tile_size + accum_size ; i + = block_size ) { <nl> + shared_data [ i ] = T ( 0 ) ; <nl> + } <nl> + __syncthreads ( ) ; <nl> + <nl> + / / Position in tensors . <nl> + const int tensor_idx = thread_depth * in_pixels + thread_pix ; <nl> + <nl> + / / Position in ( padded ) shared memory . <nl> + const int data_pix = thread_row * tile_cols + thread_col ; <nl> + const int data_idx = thread_depth * tile_pixels + data_pix ; <nl> + <nl> + / / Position in shared memory , offset by pad_rows / pad_cols . <nl> + const int tile_idx = data_idx + pad_offset ; <nl> + <nl> + / / Position in accumulator ( 1 per 4 threads , depth major ) . <nl> + const int accum_pix = thread_pix / 4 ; <nl> + const int accum_idx = thread_depth * accum_pixels + accum_pix ; <nl> + <nl> + const int max_slice = in_slices - thread_depth ; <nl> + const int accum_offset = tile_size + accum_idx ; <nl> + const bool skip_second = block_rows + thread_row > = in_rows ; <nl> + <nl> + for ( int b = blockIdx . x ; b < in_blocks ; b + = gridDim . x ) { <nl> + const int slice = b * block_slices ; <nl> + <nl> + const int inout_offset = slice * in_pixels + tensor_idx ; <nl> + const bool slice_in_range = slice < max_slice ; <nl> + <nl> + if ( slice_in_range ) { <nl> + const T * const in_ptr = inout_offset + input ; <nl> + T * const tile_ptr = tile_idx + shared_data ; <nl> + tile_ptr [ 0 ] = ldg ( in_ptr ) ; <nl> + if ( ! skip_second ) { <nl> + tile_ptr [ tile_offset ] = ldg ( block_pixels + in_ptr ) ; <nl> + } <nl> + } <nl> + <nl> + / / Note : the condition to reach this is uniform across the entire block . <nl> + __syncthreads ( ) ; <nl> + <nl> + if ( slice_in_range ) { <nl> + const T * const out_ptr = inout_offset + output ; <nl> + const T out1 = ldg ( out_ptr ) ; <nl> + const T out2 = skip_second ? 
T ( 0 ) : ldg ( block_pixels + out_ptr ) ; <nl> + int shared_offset = data_idx ; <nl> + T * accum_ptr = accum_offset + shared_data ; <nl> + UNROLL for ( int r = 0 ; r < filter_rows ; + + r ) { <nl> + UNROLL for ( int c = 0 ; c < filter_cols ; + + c ) { <nl> + const T * const tile_ptr = shared_offset + shared_data ; <nl> + T val = out1 * tile_ptr [ 0 ] + out2 * tile_ptr [ tile_offset ] ; <nl> + / / Sum up 4 block_pixels of the same depth and write to accumulator . <nl> + val + = CudaShuffleDown ( val , 2 ) ; <nl> + val + = CudaShuffleDown ( val , 1 ) ; <nl> + if ( ! ( thread_idx & 3 ) ) { <nl> + * accum_ptr = val ; <nl> + } <nl> + + + shared_offset ; <nl> + accum_ptr + = accum_increment ; <nl> + } <nl> + shared_offset + = in_increment ; <nl> + } <nl> + } <nl> + <nl> + / / Note : the condition to reach this is uniform across the entire block . <nl> + __syncthreads ( ) ; <nl> + <nl> + const T * const accum_data = tile_size + shared_data ; <nl> + for ( int i = thread_idx ; i < accum_size ; i + = block_size ) { <nl> + const int filter_idx = i / accum_pixels ; <nl> + const int filter_pix = filter_idx / block_slices ; <nl> + const int filter_depth = ( slice + filter_idx % block_slices ) % in_depth ; <nl> + const int filter_offset = filter_pix * in_depth + filter_depth ; <nl> + if ( filter_depth < in_depth ) { <nl> + T val = accum_data [ i ] ; <nl> + / / Sum up 32 pixels of the same depth from the accumulator . <nl> + val + = CudaShuffleDown ( val , 16 ) ; <nl> + val + = CudaShuffleDown ( val , 8 ) ; <nl> + val + = CudaShuffleDown ( val , 4 ) ; <nl> + val + = CudaShuffleDown ( val , 2 ) ; <nl> + val + = CudaShuffleDown ( val , 1 ) ; <nl> + if ( ! ( thread_idx & 31 ) / * i . e . ' lane_idx = = 0 ' * / ) { <nl> + CudaAtomicAdd ( filter_offset + filter , val ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + template < typename T , int kKnownFilterWidth , int kKnownFilterHeight > <nl> + void LaunchDepthwiseConv2dBackpropFilterGPUSmall ( <nl> + const GpuDevice & d , const DepthwiseArgs args , int block_rows , <nl> + int shared_memory_size , const T * out_backprop , const T * input , <nl> + T * filter_backprop , TensorFormat data_format ) { <nl> + const int block_slices = 8 ; <nl> + const int num_out_backprop = <nl> + args . batch * args . out_rows * args . out_cols * args . out_depth ; <nl> + if ( data_format = = FORMAT_NHWC ) { <nl> + dim3 block_dim = dim3 ( block_slices , args . in_cols , block_rows ) ; <nl> + CudaLaunchConfig config = GetCudaLaunchConfig ( <nl> + num_out_backprop , d , <nl> + DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall < T , kKnownFilterWidth , <nl> + kKnownFilterHeight > , <nl> + shared_memory_size , block_dim . x * block_dim . y * block_dim . z ) ; <nl> + DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall < T , kKnownFilterWidth , <nl> + kKnownFilterHeight > <nl> + < < < config . block_count , block_dim , shared_memory_size , d . stream ( ) > > > ( <nl> + args , out_backprop , input , filter_backprop ) ; <nl> + } else if ( data_format = = FORMAT_NCHW ) { <nl> + dim3 block_dim = dim3 ( args . in_cols , block_rows , block_slices ) ; <nl> + CudaLaunchConfig config = GetCudaLaunchConfig ( <nl> + num_out_backprop , d , <nl> + DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall < T , kKnownFilterWidth , <nl> + kKnownFilterHeight > , <nl> + shared_memory_size , block_dim . x * block_dim . y * block_dim . z ) ; <nl> + DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall < T , kKnownFilterWidth , <nl> + kKnownFilterHeight > <nl> + < < < config . 
block_count , block_dim , shared_memory_size , d . stream ( ) > > > ( <nl> + args , out_backprop , input , filter_backprop ) ; <nl> + } else { <nl> + assert ( false & & " Incorrect data format " ) ; <nl> + } <nl> + } <nl> + <nl> template < typename T , int kKnownFilterWidth , int kKnownFilterHeight > <nl> bool TryLaunchDepthwiseConv2dBackpropFilterGPUSmall ( <nl> const GpuDevice & d , const DepthwiseArgs args , const T * out_backprop , <nl> const T * input , T * filter_backprop , TensorFormat data_format ) { <nl> - if ( data_format ! = FORMAT_NHWC | | args . depth_multiplier ! = 1 | | <nl> - args . stride ! = 1 | | args . in_rows > 16 | | args . in_cols > 16 | | <nl> - args . in_rows ! = args . out_rows | | args . in_cols ! = args . out_cols | | <nl> - args . pad_rows < 0 | | args . pad_rows > = args . filter_rows | | <nl> - args . pad_cols < 0 | | args . pad_cols > = args . filter_cols ) { <nl> - return false ; <nl> - } <nl> - <nl> + / / args . in_cols * block_rows ( block_pixels ) must be a multiple of 4 . <nl> const int lookup_table [ ] = { 0 , 3 , 1 , 3 } ; <nl> const int rows_mask = lookup_table [ args . in_cols & 3 ] ; <nl> const int block_rows = ( args . in_rows + 1 ) / 2 + rows_mask & ~ rows_mask ; <nl> + if ( ! CanLaunchDepthwiseConv2dBackpropFilterGPUSmall ( args , block_rows ) ) { <nl> + return false ; <nl> + } <nl> + <nl> + const int block_slices = 8 ; <nl> const int tile_cols = args . in_cols + args . filter_cols - 1 ; <nl> const int tile_rows = block_rows * 2 + args . filter_rows - 1 ; <nl> const int tile_pixels = tile_rows * tile_cols ; <nl> const int accum_size = args . filter_rows * args . filter_cols * 32 ; <nl> - dim3 block_dim = dim3 ( 8 , args . in_cols , block_rows ) ; <nl> const int shared_memory_size = <nl> - block_dim . x * ( tile_pixels + accum_size ) * sizeof ( T ) ; <nl> - <nl> - if ( block_rows > args . in_rows | | <nl> - args . filter_rows * args . filter_cols > args . in_cols * block_rows | | <nl> - shared_memory_size > d . sharedMemPerBlock ( ) ) { <nl> + block_slices * ( tile_pixels + accum_size ) * sizeof ( T ) ; <nl> + if ( shared_memory_size > d . sharedMemPerBlock ( ) ) { <nl> return false ; <nl> } <nl> <nl> - const int num_out_backprop = <nl> - args . batch * args . out_rows * args . out_cols * args . out_depth ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( <nl> - num_out_backprop , d , <nl> - DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall < T , kKnownFilterWidth , <nl> - kKnownFilterHeight > , <nl> - shared_memory_size , block_dim . x * block_dim . y * block_dim . z ) ; <nl> - DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall < T , kKnownFilterWidth , <nl> - kKnownFilterHeight > <nl> - < < < config . block_count , block_dim , shared_memory_size , d . stream ( ) > > > ( <nl> - args , out_backprop , input , filter_backprop ) ; <nl> + LaunchDepthwiseConv2dBackpropFilterGPUSmall < T , kKnownFilterWidth , <nl> + kKnownFilterHeight > ( <nl> + d , args , block_rows , shared_memory_size , out_backprop , input , <nl> + filter_backprop , data_format ) ; <nl> return true ; <nl> } <nl> <nl> void LaunchDepthwiseConv2dBackpropFilterGPU ( const GpuDevice & d , <nl> < < < config . block_count , config . thread_per_block , 0 , d . stream ( ) > > > ( <nl> args , out_backprop , input , filter_backprop , num_out_backprop ) ; <nl> } else { <nl> - assert ( false ) ; <nl> + assert ( false & & " Incorrect data format " ) ; <nl> } <nl> } <nl> <nl> mmm a / tensorflow / core / kernels / dynamic_partition_op_test .
cc <nl> ppp b / tensorflow / core / kernels / dynamic_partition_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / dynamic_stitch_op_test . cc <nl> ppp b / tensorflow / core / kernels / dynamic_stitch_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / gather_nd_op_test . cc <nl> ppp b / tensorflow / core / kernels / gather_nd_op_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / gather_op_test . cc <nl> ppp b / tensorflow / core / kernels / gather_op_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / hexagon / BUILD <nl> ppp b / tensorflow / core / kernels / hexagon / BUILD <nl> tf_kernel_library ( <nl> " / / tensorflow / core : core_cpu " , <nl> " / / tensorflow / core : framework " , <nl> " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core / kernels : remote_fused_graph_execute_utils " , <nl> " / / third_party / eigen3 " , <nl> ] , <nl> mmm a / tensorflow / core / kernels / hexagon / hexagon_ops_definitions . cc <nl> ppp b / tensorflow / core / kernels / hexagon / hexagon_ops_definitions . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / kernels / hexagon / hexagon_ops_definitions . h " <nl> <nl> + # include " tensorflow / core / framework / graph_transfer_info . pb . h " <nl> # include " tensorflow / core / framework / types . h " <nl> <nl> / / CAVEAT : Comment - out the following macro if you want to use experimental <nl> mmm a / tensorflow / core / kernels / hexagon / quantized_matmul_op_for_hexagon_test . 
cc <nl> ppp b / tensorflow / core / kernels / hexagon / quantized_matmul_op_for_hexagon_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / identity_op_test . cc <nl> ppp b / tensorflow / core / kernels / identity_op_test . cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> mmm a / tensorflow / core / kernels / logging_ops_test . cc <nl> ppp b / tensorflow / core / kernels / logging_ops_test . cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> mmm a / tensorflow / core / kernels / lrn_op_test . cc <nl> ppp b / tensorflow / core / kernels / lrn_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / multinomial_op . cc <nl> ppp b / tensorflow / core / kernels / multinomial_op . cc <nl> limitations under the License . <nl> # include < cmath > <nl> # include < memory > <nl> <nl> + # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / register_types . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> struct MultinomialFunctor < CPUDevice , T > { <nl> / / <nl> / / This takes O ( BatchSize * NumSamples * log ( NumClasses ) + NumClasses ) CPU <nl> / / time . <nl> - auto DoWork = [ num_samples , num_classes , & gen , & output , & logits ] ( <nl> - int64 start_row , int64 limit_row ) { <nl> + auto DoWork = [ ctx , num_samples , num_classes , & gen , & output , & logits ] ( <nl> + int64 start_row , int64 limit_row ) { <nl> / / Capturing " gen " by - value would only make a copy for the _shared_ <nl> / / lambda . 
Since we want to let each worker have its own copy , we pass <nl> / / " gen " by reference and explicitly do a copy assignment here . <nl> struct MultinomialFunctor < CPUDevice , T > { <nl> gen_copy . Skip ( start_row * ( num_samples + 3 ) / 4 ) ; <nl> random : : SimplePhilox simple_philox ( & gen_copy ) ; <nl> <nl> - std : : vector < double > cdf ( num_classes ) ; <nl> - <nl> + Tensor cdf_tensor ; <nl> + OP_REQUIRES_OK ( ctx , <nl> + ctx - > allocate_temp ( DT_DOUBLE , TensorShape ( { num_classes } ) , <nl> + & cdf_tensor ) ) ; <nl> + auto cdf = cdf_tensor . flat < double > ( ) ; <nl> for ( int64 b = start_row ; b < limit_row ; + + b ) { <nl> const auto * logits_row = & logits ( b , 0 ) ; <nl> <nl> / / Takes an along - class maximum ( for numerical stability ) . <nl> T max = std : : numeric_limits < T > : : lowest ( ) ; <nl> for ( int64 j = 0 ; j < num_classes ; + + j ) { <nl> - if ( std : : isfinite ( static_cast < double > ( logits_row [ j ] ) ) ) { <nl> + if ( Eigen : : numext : : isfinite ( logits_row [ j ] ) ) { <nl> max = std : : max ( max , logits_row [ j ] ) ; <nl> } <nl> } <nl> struct MultinomialFunctor < CPUDevice , T > { <nl> <nl> / / Precompute cumulative probability distribution across classes . <nl> / / Note : This isn ' t normalized . <nl> + cdf = ( logits . template chip < 0 > ( b ) . template cast < double > ( ) - max_logit ) <nl> + . exp ( ) ; <nl> double running_total = 0 ; <nl> for ( int64 j = 0 ; j < num_classes ; + + j ) { <nl> - if ( std : : isfinite ( static_cast < double > ( logits_row [ j ] ) ) ) { <nl> - running_total + = <nl> - std : : exp ( static_cast < double > ( logits_row [ j ] ) - max_logit ) ; <nl> + if ( Eigen : : numext : : isfinite ( logits_row [ j ] ) ) { <nl> + running_total + = cdf ( j ) ; <nl> } <nl> - cdf [ j ] = running_total ; <nl> + cdf ( j ) = running_total ; <nl> } <nl> / / Generate each sample . <nl> + const double * cdf_begin = cdf . data ( ) ; <nl> + const double * cdf_end = cdf . data ( ) + num_classes ; <nl> for ( int64 j = 0 ; j < num_samples ; + + j ) { <nl> - double to_find = simple_philox . RandDouble ( ) * running_total ; <nl> - auto found_iter = std : : upper_bound ( cdf . begin ( ) , cdf . end ( ) , to_find ) ; <nl> - output ( b , j ) = std : : distance ( cdf . begin ( ) , found_iter ) ; <nl> + const double to_find = simple_philox . RandDouble ( ) * running_total ; <nl> + auto found_iter = std : : upper_bound ( cdf_begin , cdf_end , to_find ) ; <nl> + output ( b , j ) = std : : distance ( cdf_begin , found_iter ) ; <nl> } <nl> } <nl> } ; <nl> class MultinomialOp : public OpKernel { <nl> for ( int i = 0 ; i < 2 ; i + + ) { <nl> const int64 dim = logits_t . dim_size ( i ) ; <nl> OP_REQUIRES ( ctx , static_cast < int > ( dim ) = = dim , <nl> - errors : : InvalidArgument ( " logits . shape = " , <nl> - logits_t . shape ( ) . DebugString ( ) , <nl> - " too large for int " ) ) ; <nl> + errors : : InvalidArgument ( <nl> + " logits . shape = " , logits_t . shape ( ) . DebugString ( ) , <nl> + " too large for int " ) ) ; <nl> } <nl> const int batch_size = static_cast < int > ( logits_t . dim_size ( 0 ) ) ; <nl> const int num_classes = static_cast < int > ( logits_t . dim_size ( 1 ) ) ; <nl> mmm a / tensorflow / core / kernels / non_max_suppression_op_test . cc <nl> ppp b / tensorflow / core / kernels / non_max_suppression_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . 
h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / quantize_and_dequantize_op_test . cc <nl> ppp b / tensorflow / core / kernels / quantize_and_dequantize_op_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / quantize_down_and_shrink_range_op_test . cc <nl> ppp b / tensorflow / core / kernels / quantize_down_and_shrink_range_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / quantized_activation_ops_test . cc <nl> ppp b / tensorflow / core / kernels / quantized_activation_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / kernels / quantization_utils . h " <nl> + # define EIGEN_USE_THREADS <nl> + <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> limitations under the License . <nl> # include " tensorflow / core / framework / types . pb . h " <nl> # include " tensorflow / core / kernels / ops_testutil . h " <nl> # include " tensorflow / core / kernels / ops_util . h " <nl> + # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / platform / test . h " <nl> <nl> mmm a / tensorflow / core / kernels / quantized_batch_norm_op_test . cc <nl> ppp b / tensorflow / core / kernels / quantized_batch_norm_op_test . cc <nl> limitations under the License . <nl> # define EIGEN_USE_THREADS <nl> <nl> # include " third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor " <nl> - # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / common_runtime / eigen_thread_pool . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> # include " tensorflow / core / framework / node_def_builder . 
h " <nl> limitations under the License . <nl> # include " tensorflow / core / framework / types . pb . h " <nl> # include " tensorflow / core / kernels / batch_norm_op . h " <nl> # include " tensorflow / core / kernels / ops_testutil . h " <nl> + # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / lib / core / threadpool . h " <nl> # include " tensorflow / core / platform / test . h " <nl> mmm a / tensorflow / core / kernels / quantized_bias_add_op_test . cc <nl> ppp b / tensorflow / core / kernels / quantized_bias_add_op_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> + # define EIGEN_USE_THREADS <nl> + <nl> # include < functional > <nl> <nl> - # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> limitations under the License . <nl> # include " tensorflow / core / framework / types . pb . h " <nl> # include " tensorflow / core / kernels / ops_testutil . h " <nl> # include " tensorflow / core / kernels / ops_util . h " <nl> + # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / platform / test . h " <nl> <nl> mmm a / tensorflow / core / kernels / quantized_concat_op_test . cc <nl> ppp b / tensorflow / core / kernels / quantized_concat_op_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> + # define EIGEN_USE_THREADS <nl> + <nl> # include < functional > <nl> # include < memory > <nl> # include < vector > <nl> <nl> - # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> limitations under the License . <nl> # include " tensorflow / core / graph / node_builder . h " <nl> # include " tensorflow / core / kernels / ops_testutil . h " <nl> # include " tensorflow / core / kernels / ops_util . h " <nl> + # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / platform / test . 
h " <nl> mmm a / tensorflow / core / kernels / quantized_conv_ops_test . cc <nl> ppp b / tensorflow / core / kernels / quantized_conv_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> + # define EIGEN_USE_THREADS <nl> + <nl> # include < functional > <nl> # include < memory > <nl> # include < vector > <nl> <nl> - # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> limitations under the License . <nl> # include " tensorflow / core / framework / types . pb . h " <nl> # include " tensorflow / core / kernels / ops_testutil . h " <nl> # include " tensorflow / core / kernels / ops_util . h " <nl> + # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / platform / test . h " <nl> <nl> mmm a / tensorflow / core / kernels / quantized_matmul_op_test . cc <nl> ppp b / tensorflow / core / kernels / quantized_matmul_op_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> + # define EIGEN_USE_THREADS <nl> + <nl> # include < functional > <nl> # include < memory > <nl> # include < vector > <nl> <nl> - # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> limitations under the License . <nl> # include " tensorflow / core / framework / types . pb . h " <nl> # include " tensorflow / core / kernels / ops_testutil . h " <nl> # include " tensorflow / core / kernels / ops_util . h " <nl> + # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / platform / test . h " <nl> <nl> mmm a / tensorflow / core / kernels / quantized_pooling_ops_test . cc <nl> ppp b / tensorflow / core / kernels / quantized_pooling_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / kernels / quantization_utils . h " <nl> + # define EIGEN_USE_THREADS <nl> + <nl> # include " tensorflow / core / framework / allocator . 
h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> limitations under the License . <nl> # include " tensorflow / core / framework / types . pb . h " <nl> # include " tensorflow / core / kernels / ops_testutil . h " <nl> # include " tensorflow / core / kernels / ops_util . h " <nl> + # include " tensorflow / core / kernels / quantization_utils . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / platform / test . h " <nl> <nl> mmm a / tensorflow / core / kernels / quantized_reshape_op_test . cc <nl> ppp b / tensorflow / core / kernels / quantized_reshape_op_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / requantization_range_op_test . cc <nl> ppp b / tensorflow / core / kernels / requantization_range_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / requantize_op_test . cc <nl> ppp b / tensorflow / core / kernels / requantize_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / resize_bilinear_op_test . cc <nl> ppp b / tensorflow / core / kernels / resize_bilinear_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / resize_nearest_neighbor_op_test . cc <nl> ppp b / tensorflow / core / kernels / resize_nearest_neighbor_op_test . cc <nl> limitations under the License . <nl> / / Image Library . <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . 
h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / restore_op_test . cc <nl> ppp b / tensorflow / core / kernels / restore_op_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / restore_v2_op_test . cc <nl> ppp b / tensorflow / core / kernels / restore_v2_op_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / reverse_op_test . cc <nl> ppp b / tensorflow / core / kernels / reverse_op_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / save_op_test . cc <nl> ppp b / tensorflow / core / kernels / save_op_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / scatter_nd_op_test . cc <nl> ppp b / tensorflow / core / kernels / scatter_nd_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / scatter_op_test . cc <nl> ppp b / tensorflow / core / kernels / scatter_op_test . 
cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / segment_reduction_ops_test . cc <nl> ppp b / tensorflow / core / kernels / segment_reduction_ops_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / slice_op_test . cc <nl> ppp b / tensorflow / core / kernels / slice_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / framework / types . h " <nl> mmm a / tensorflow / core / kernels / sparse_to_dense_op_test . cc <nl> ppp b / tensorflow / core / kernels / sparse_to_dense_op_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> mmm a / tensorflow / core / kernels / spectrogram_test_utils . cc <nl> ppp b / tensorflow / core / kernels / spectrogram_test_utils . cc <nl> limitations under the License . <nl> # include < math . h > <nl> # include < stddef . h > <nl> <nl> - # include " tensorflow / core / lib / core / error_codes . pb . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / lib / io / path . h " <nl> # include " tensorflow / core / lib / strings / str_util . h " <nl> mmm a / tensorflow / core / kernels / split_v_op . cc <nl> ppp b / tensorflow / core / kernels / split_v_op . cc <nl> class SplitVOpBase : public OpKernel { <nl> " specified . Got : " , <nl> determined_size ) ) ; <nl> <nl> - if ( neg_one_dim > = 0 ) <nl> + if ( neg_one_dim > = 0 ) { <nl> ( * split_sizes_vec ) [ neg_one_dim ] = input_size_split_dim - determined_size ; <nl> + } <nl> <nl> / / Special case 2 : split along the 1st dimension . We can share the <nl> / / underlying buffer . 
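The hunk above resolves SplitV's single -1 ("infer me") entry in size_splits: every explicit size is accumulated into determined_size, and the -1 slot receives whatever remains of the split dimension. A minimal standalone sketch of that inference, using the hypothetical name InferSplitSizes (not a TensorFlow API):

#include <cassert>
#include <vector>

// Returns sizes with a single -1 entry replaced by the remainder of
// split_dim_size after all explicit sizes are accounted for.
std::vector<long long> InferSplitSizes(std::vector<long long> sizes,
                                       long long split_dim_size) {
  long long determined_size = 0;
  int neg_one_dim = -1;
  for (int i = 0; i < static_cast<int>(sizes.size()); ++i) {
    if (sizes[i] == -1) {
      assert(neg_one_dim == -1 && "at most one -1 entry is allowed");
      neg_one_dim = i;
    } else {
      determined_size += sizes[i];
    }
  }
  assert(determined_size <= split_dim_size);
  if (neg_one_dim >= 0) {
    // The -1 slot takes the remainder, as in the hunk above.
    sizes[neg_one_dim] = split_dim_size - determined_size;
  }
  return sizes;
}

// Usage: InferSplitSizes({2, -1, 3}, 10) yields {2, 5, 3}.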
<nl> class SplitVOpBase : public OpKernel { <nl> * done = true ; <nl> return ; <nl> } <nl> - return ; <nl> } <nl> <nl> template < typename IndexType > <nl> mmm a / tensorflow / core / kernels / strided_slice_op_test . cc <nl> ppp b / tensorflow / core / kernels / strided_slice_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / common_runtime / kernel_benchmark_testlib . h " <nl> # include " tensorflow / core / framework / allocator . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / framework / tensor_testutil . h " <nl> mmm a / tensorflow / core / kernels / summary_audio_op_test . cc <nl> ppp b / tensorflow / core / kernels / summary_audio_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / summary . pb . h " <nl> mmm a / tensorflow / core / kernels / summary_image_op_test . cc <nl> ppp b / tensorflow / core / kernels / summary_image_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / summary . pb . h " <nl> mmm a / tensorflow / core / kernels / summary_op_test . cc <nl> ppp b / tensorflow / core / kernels / summary_op_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / allocator . h " <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op_kernel . h " <nl> # include " tensorflow / core / framework / summary . pb . h " <nl> mmm a / tensorflow / core / ops / array_ops_test . cc <nl> ppp b / tensorflow / core / ops / array_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / node_def_util . h " <nl> # include " tensorflow / core / framework / op . h " <nl> mmm a / tensorflow / core / ops / candidate_sampling_ops_test . cc <nl> ppp b / tensorflow / core / ops / candidate_sampling_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . 
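The graph.pb.h removals running through these test files are include hygiene: none of the tests touch GraphDef, so dropping the include removes a rebuild dependency on the generated proto headers. Where only the type name is needed, a forward declaration gives the same saving (the mirror_pad_mode.h hunk later in this patch does exactly that). A minimal self-contained sketch of the pattern, with the hypothetical type FakeProto standing in for a generated proto class:

#include <iostream>
#include <string>

// "Header" half: the type is only passed by reference, so a forward
// declaration suffices and no heavyweight generated header is pulled in.
class FakeProto;
void PrintName(const FakeProto& p);

// "Source" half: the full definition is needed only where members are
// accessed; a real .cc would #include the generated .pb.h here instead.
class FakeProto {
 public:
  std::string name() const { return "example"; }
};

void PrintName(const FakeProto& p) { std::cout << p.name() << "\n"; }

int main() { PrintName(FakeProto{}); }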
<nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / compat / ops_history . v1 . pbtxt <nl> ppp b / tensorflow / core / ops / compat / ops_history . v1 . pbtxt <nl> op { <nl> } <nl> } <nl> } <nl> + op { <nl> + name : " MergeV2Checkpoints " <nl> + input_arg { <nl> + name : " checkpoint_prefixes " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " destination_prefix " <nl> + type : DT_STRING <nl> + } <nl> + attr { <nl> + name : " delete_old_dirs " <nl> + type : " bool " <nl> + default_value { <nl> + b : true <nl> + } <nl> + } <nl> + is_stateful : true <nl> + } <nl> op { <nl> name : " Mfcc " <nl> input_arg { <nl> op { <nl> } <nl> } <nl> } <nl> + op { <nl> + name : " Restore " <nl> + input_arg { <nl> + name : " file_pattern " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " tensor_name " <nl> + type : DT_STRING <nl> + } <nl> + output_arg { <nl> + name : " tensor " <nl> + type_attr : " dt " <nl> + } <nl> + attr { <nl> + name : " dt " <nl> + type : " type " <nl> + } <nl> + attr { <nl> + name : " preferred_shard " <nl> + type : " int " <nl> + default_value { <nl> + i : - 1 <nl> + } <nl> + } <nl> + is_stateful : true <nl> + } <nl> + op { <nl> + name : " RestoreSlice " <nl> + input_arg { <nl> + name : " file_pattern " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " tensor_name " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " shape_and_slice " <nl> + type : DT_STRING <nl> + } <nl> + output_arg { <nl> + name : " tensor " <nl> + type_attr : " dt " <nl> + } <nl> + attr { <nl> + name : " dt " <nl> + type : " type " <nl> + } <nl> + attr { <nl> + name : " preferred_shard " <nl> + type : " int " <nl> + default_value { <nl> + i : - 1 <nl> + } <nl> + } <nl> + } <nl> op { <nl> name : " RestoreSlice " <nl> input_arg { <nl> op { <nl> i : - 1 <nl> } <nl> } <nl> + is_stateful : true <nl> } <nl> op { <nl> name : " RestoreV2 " <nl> op { <nl> minimum : 1 <nl> } <nl> } <nl> + op { <nl> + name : " RestoreV2 " <nl> + input_arg { <nl> + name : " prefix " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " tensor_names " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " shape_and_slices " <nl> + type : DT_STRING <nl> + } <nl> + output_arg { <nl> + name : " tensors " <nl> + type_list_attr : " dtypes " <nl> + } <nl> + attr { <nl> + name : " dtypes " <nl> + type : " list ( type ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + is_stateful : true <nl> + } <nl> op { <nl> name : " Reverse " <nl> input_arg { <nl> op { <nl> minimum : 1 <nl> } <nl> } <nl> + op { <nl> + name : " Save " <nl> + input_arg { <nl> + name : " filename " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " tensor_names " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " data " <nl> + type_list_attr : " T " <nl> + } <nl> + attr { <nl> + name : " T " <nl> + type : " list ( type ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + is_stateful : true <nl> + } <nl> + op { <nl> + name : " SaveSlices " <nl> + input_arg { <nl> + name : " filename " <nl> + type : 
DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " tensor_names " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " shapes_and_slices " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " data " <nl> + type_list_attr : " T " <nl> + } <nl> + attr { <nl> + name : " T " <nl> + type : " list ( type ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> + } <nl> op { <nl> name : " SaveSlices " <nl> input_arg { <nl> op { <nl> has_minimum : true <nl> minimum : 1 <nl> } <nl> + is_stateful : true <nl> + } <nl> + op { <nl> + name : " SaveV2 " <nl> + input_arg { <nl> + name : " prefix " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " tensor_names " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " shape_and_slices " <nl> + type : DT_STRING <nl> + } <nl> + input_arg { <nl> + name : " tensors " <nl> + type_list_attr : " dtypes " <nl> + } <nl> + attr { <nl> + name : " dtypes " <nl> + type : " list ( type ) " <nl> + has_minimum : true <nl> + minimum : 1 <nl> + } <nl> } <nl> op { <nl> name : " SaveV2 " <nl> op { <nl> has_minimum : true <nl> minimum : 1 <nl> } <nl> + is_stateful : true <nl> } <nl> op { <nl> name : " ScalarSummary " <nl> mmm a / tensorflow / core / ops / control_flow_ops_test . cc <nl> ppp b / tensorflow / core / ops / control_flow_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / ctc_ops_test . cc <nl> ppp b / tensorflow / core / ops / ctc_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / functional_ops_test . cc <nl> ppp b / tensorflow / core / ops / functional_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / image_ops_test . cc <nl> ppp b / tensorflow / core / ops / image_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . 
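The ops_history entries above record a signature change: the checkpoint ops (Save, SaveSlices, SaveV2, Restore, RestoreSlice, RestoreV2, MergeV2Checkpoints) now carry is_stateful: true. A stateful op tells the runtime and graph optimizers that it has side effects, so it must not be constant-folded, merged by common-subexpression elimination, or pruned as dead code, which is precisely what ops performing filesystem reads and writes require. A minimal registration sketch with a hypothetical op name, mirroring the io_ops.cc hunks that follow:

#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/errors.h"

REGISTER_OP("HypotheticalWriteFile")
    .Input("filename: string")
    .Input("contents: string")
    .SetIsStateful()  // side-effecting: must not be folded, merged, or pruned
    .SetShapeFn([](tensorflow::shape_inference::InferenceContext* c) {
      tensorflow::shape_inference::ShapeHandle unused;
      // Both inputs are scalars, as with the Save/Restore ops above.
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
      return tensorflow::Status::OK();
    });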
<nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / io_ops . cc <nl> ppp b / tensorflow / core / ops / io_ops . cc <nl> REGISTER_OP ( " SaveV2 " ) <nl> . Input ( " shape_and_slices : string " ) <nl> . Input ( " tensors : dtypes " ) <nl> . Attr ( " dtypes : list ( type ) " ) <nl> + . SetIsStateful ( ) <nl> . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> ShapeHandle unused ; <nl> ShapeHandle s ; <nl> REGISTER_OP ( " RestoreV2 " ) <nl> . Input ( " shape_and_slices : string " ) <nl> . Output ( " tensors : dtypes " ) <nl> . Attr ( " dtypes : list ( type ) " ) <nl> + . SetIsStateful ( ) <nl> . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> ShapeHandle shape0 , shape1 , shape2 ; <nl> TF_RETURN_IF_ERROR ( c - > WithRank ( c - > input ( 0 ) , 0 , & shape0 ) ) ; <nl> REGISTER_OP ( " MergeV2Checkpoints " ) <nl> . Input ( " checkpoint_prefixes : string " ) <nl> . Input ( " destination_prefix : string " ) <nl> . Attr ( " delete_old_dirs : bool = true " ) <nl> + . SetIsStateful ( ) <nl> . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> ShapeHandle unused ; <nl> TF_RETURN_IF_ERROR ( c - > WithRank ( c - > input ( 0 ) , 1 , & unused ) ) ; <nl> REGISTER_OP ( " Save " ) <nl> . Input ( " tensor_names : string " ) <nl> . Input ( " data : T " ) <nl> . Attr ( " T : list ( type ) " ) <nl> + . SetIsStateful ( ) <nl> . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> ShapeHandle unused ; <nl> ShapeHandle s ; <nl> REGISTER_OP ( " SaveSlices " ) <nl> . Input ( " shapes_and_slices : string " ) <nl> . Input ( " data : T " ) <nl> . Attr ( " T : list ( type ) " ) <nl> + . SetIsStateful ( ) <nl> . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> ShapeHandle unused ; <nl> ShapeHandle s ; <nl> REGISTER_OP ( " Restore " ) <nl> . Output ( " tensor : dt " ) <nl> . Attr ( " dt : type " ) <nl> . Attr ( " preferred_shard : int = - 1 " ) <nl> + . SetIsStateful ( ) <nl> . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> ShapeHandle unused ; <nl> TF_RETURN_IF_ERROR ( c - > WithRank ( c - > input ( 0 ) , 0 , & unused ) ) ; <nl> REGISTER_OP ( " RestoreSlice " ) <nl> . Output ( " tensor : dt " ) <nl> . Attr ( " dt : type " ) <nl> . Attr ( " preferred_shard : int = - 1 " ) <nl> + . SetIsStateful ( ) <nl> . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> ShapeHandle unused ; <nl> TF_RETURN_IF_ERROR ( c - > WithRank ( c - > input ( 0 ) , 0 , & unused ) ) ; <nl> mmm a / tensorflow / core / ops / io_ops_test . cc <nl> ppp b / tensorflow / core / ops / io_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / nn_ops_test . 
cc <nl> ppp b / tensorflow / core / ops / nn_ops_test . cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> # include " tensorflow / core / framework / fake_input . h " <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / ops . pbtxt <nl> ppp b / tensorflow / core / ops / ops . pbtxt <nl> op { <nl> } <nl> summary : " V2 format specific : merges the metadata files of sharded checkpoints . The " <nl> description : " result is one logical checkpoint , with one physical metadata file and renamed \ ndata files . \ n \ nIntended for \ " grouping \ " multiple checkpoints in a sharded checkpoint setup . \ n \ nIf delete_old_dirs is true , attempts to delete recursively the dirname of each \ npath in the input checkpoint_prefixes . This is useful when those paths are non \ nuser - facing temporary locations . " <nl> + is_stateful : true <nl> } <nl> op { <nl> name : " Mfcc " <nl> op { <nl> } <nl> summary : " Restores a tensor from checkpoint files . " <nl> description : " Reads a tensor stored in one or several files . If there are several files ( for \ ninstance because a tensor was saved as slices ) , ` file_pattern ` may contain \ nwildcard symbols ( ` * ` and ` ? ` ) in the filename portion only , not in the \ ndirectory portion . \ n \ nIf a ` file_pattern ` matches several files , ` preferred_shard ` can be used to hint \ nin which file the requested tensor is likely to be found . This op will first \ nopen the file at index ` preferred_shard ` in the list of matching files and try \ nto restore tensors from that file . Only if some tensors or tensor slices are \ nnot found in that first file , then the Op opens all the files . Setting \ n ` preferred_shard ` to match the value passed as the ` shard ` input \ nof a matching ` Save ` Op may speed up Restore . This attribute only affects \ nperformance , not correctness . The default value - 1 means files are processed in \ norder . \ n \ nSee also ` RestoreSlice ` . " <nl> + is_stateful : true <nl> } <nl> op { <nl> name : " RestoreSlice " <nl> op { <nl> } <nl> summary : " Restores a tensor from checkpoint files . " <nl> description : " This is like ` Restore ` except that restored tensor can be listed as filling \ nonly a slice of a larger tensor . ` shape_and_slice ` specifies the shape of the \ nlarger tensor and the slice that the restored tensor covers . \ n \ nThe ` shape_and_slice ` input has the same format as the \ nelements of the ` shapes_and_slices ` input of the ` SaveSlices ` op . " <nl> + is_stateful : true <nl> } <nl> op { <nl> name : " RestoreV2 " <nl> op { <nl> } <nl> summary : " Restores tensors from a V2 checkpoint . " <nl> description : " For backward compatibility with the V1 format , this Op currently allows \ nrestoring from a V1 checkpoint as well : \ n - This Op first attempts to find the V2 index file pointed to by \ " prefix \ " , and \ n if found proceed to read it as a V2 checkpoint ; \ n - Otherwise the V1 read path is invoked . \ nRelying on this behavior is not recommended , as the ability to fall back to read \ nV1 might be deprecated and eventually removed . 
\ n \ nBy default , restores the named tensors in full . If the caller wishes to restore \ nspecific slices of stored tensors , \ " shape_and_slices \ " should be non - empty \ nstrings and correspondingly well - formed . \ n \ nCallers must ensure all the named tensors are indeed stored in the checkpoint . " <nl> + is_stateful : true <nl> } <nl> op { <nl> name : " Reverse " <nl> op { <nl> } <nl> summary : " Saves the input tensors to disk . " <nl> description : " The size of ` tensor_names ` must match the number of tensors in ` data ` . ` data [ i ] ` \ nis written to ` filename ` with name ` tensor_names [ i ] ` . \ n \ nSee also ` SaveSlices ` . " <nl> + is_stateful : true <nl> } <nl> op { <nl> name : " SaveSlices " <nl> op { <nl> } <nl> summary : " Saves input tensors slices to disk . " <nl> description : " This is like ` Save ` except that tensors can be listed in the saved file as being \ na slice of a larger tensor . ` shapes_and_slices ` specifies the shape of the \ nlarger tensor and the slice that this tensor covers . ` shapes_and_slices ` must \ nhave as many elements as ` tensor_names ` . \ n \ nElements of the ` shapes_and_slices ` input must either be : \ n \ n * The empty string , in which case the corresponding tensor is \ n saved normally . \ n * A string of the form ` dim0 dim1 . . . dimN - 1 slice - spec ` where the \ n ` dimI ` are the dimensions of the larger tensor and ` slice - spec ` \ n specifies what part is covered by the tensor to save . \ n \ n ` slice - spec ` itself is a ` : ` - separated list : ` slice0 : slice1 : . . . : sliceN - 1 ` \ nwhere each ` sliceI ` is either : \ n \ n * The string ` - ` meaning that the slice covers all indices of this dimension \ n * ` start , length ` where ` start ` and ` length ` are integers . In that \ n case the slice covers ` length ` indices starting at ` start ` . \ n \ nSee also ` Save ` . " <nl> + is_stateful : true <nl> } <nl> op { <nl> name : " SaveV2 " <nl> op { <nl> } <nl> summary : " Saves tensors in V2 checkpoint format . " <nl> description : " By default , saves the named tensors in full . If the caller wishes to save \ nspecific slices of full tensors , \ " shape_and_slices \ " should be non - empty strings \ nand correspondingly well - formed . " <nl> + is_stateful : true <nl> } <nl> op { <nl> name : " ScalarSummary " <nl> mmm a / tensorflow / core / ops / parsing_ops_test . cc <nl> ppp b / tensorflow / core / ops / parsing_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / random_ops_test . cc <nl> ppp b / tensorflow / core / ops / random_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . 
h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / sparse_ops_test . cc <nl> ppp b / tensorflow / core / ops / sparse_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / string_ops_test . cc <nl> ppp b / tensorflow / core / ops / string_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / node_def_builder . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> mmm a / tensorflow / core / ops / training_ops_test . cc <nl> ppp b / tensorflow / core / ops / training_ops_test . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference_testutil . h " <nl> # include " tensorflow / core / platform / test . h " <nl> mmm a / tensorflow / core / platform / env_test . cc <nl> ppp b / tensorflow / core / platform / env_test . cc <nl> limitations under the License . <nl> # include < sys / stat . h > <nl> <nl> # include " tensorflow / core / framework / graph . pb . h " <nl> + # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / lib / io / path . h " <nl> # include " tensorflow / core / lib / strings / str_util . h " <nl> mmm a / tensorflow / core / platform / hadoop / hadoop_file_system . cc <nl> ppp b / tensorflow / core / platform / hadoop / hadoop_file_system . cc <nl> class LibHDFS { <nl> / / libhdfs . so is installed in non - standard location <nl> status_ = TryLoadAndBind ( kLibHdfsDso , & handle_ ) ; <nl> } <nl> - return ; <nl> } <nl> <nl> Status status_ ; <nl> mmm a / tensorflow / core / util / events_writer_test . cc <nl> ppp b / tensorflow / core / util / events_writer_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / util / events_writer . h " <nl> <nl> # include < math . h > <nl> + # include " tensorflow / core / framework / summary . pb . h " <nl> # include " tensorflow / core / lib / core / errors . 
h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> mmm a / tensorflow / core / util / example_proto_fast_parsing_test . cc <nl> ppp b / tensorflow / core / util / example_proto_fast_parsing_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / util / example_proto_fast_parsing . h " <nl> <nl> # include " tensorflow / core / example / example . pb . h " <nl> + # include " tensorflow / core / example / feature . pb . h " <nl> # include " tensorflow / core / lib / random / philox_random . h " <nl> # include " tensorflow / core / lib / random / simple_philox . h " <nl> # include " tensorflow / core / platform / protobuf . h " <nl> mmm a / tensorflow / core / util / example_proto_helper_test . cc <nl> ppp b / tensorflow / core / util / example_proto_helper_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / util / example_proto_helper . h " <nl> <nl> # include " tensorflow / core / example / example . pb . h " <nl> + # include " tensorflow / core / example / feature . pb . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / platform / test . h " <nl> <nl> mmm a / tensorflow / core / util / mirror_pad_mode . h <nl> ppp b / tensorflow / core / util / mirror_pad_mode . h <nl> enum class MirrorPadMode { <nl> / / used as an Attr ( ) in REGISTER_OP . <nl> string GetMirrorPadModeAttrString ( ) ; <nl> <nl> - / / Forward declaration to avoid including core / framework / graph . pb . h . <nl> + / / Forward declaration to avoid including core / framework / graph . proto . <nl> class NodeDef ; <nl> <nl> / / Specialization to parse an attribute directly into a MirrorPadMode enum . <nl> mmm a / tensorflow / core / util / tensor_bundle / tensor_bundle . cc <nl> ppp b / tensorflow / core / util / tensor_bundle / tensor_bundle . cc <nl> limitations under the License . <nl> # include < utility > <nl> <nl> # include " tensorflow / core / framework / register_types . h " <nl> - # include " tensorflow / core / framework / tensor_shape . pb . h " <nl> # include " tensorflow / core / framework / tensor_shape . pb_text . h " <nl> + # include " tensorflow / core / framework / tensor_shape . pb . h " <nl> # include " tensorflow / core / framework / types . h " <nl> # include " tensorflow / core / framework / types . pb_text . h " <nl> # include " tensorflow / core / framework / versions . h " <nl> mmm a / tensorflow / core / util / tensor_slice_reader_test . cc <nl> ppp b / tensorflow / core / util / tensor_slice_reader_test . cc <nl> limitations under the License . <nl> # include " tensorflow / core / util / tensor_slice_reader . h " <nl> <nl> # include " tensorflow / core / framework / types . h " <nl> + # include " tensorflow / core / framework / versions . pb . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / lib / core / stringpiece . h " <nl> # include " tensorflow / core / lib / io / path . h " <nl> mmm a / tensorflow / core / util / tensor_slice_writer . cc <nl> ppp b / tensorflow / core / util / tensor_slice_writer . cc <nl> limitations under the License . <nl> <nl> # include < utility > <nl> <nl> + # include " tensorflow / core / framework / versions . pb . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> # include " tensorflow / core / lib / io / table_builder . 
h " <nl> # include " tensorflow / core / lib / random / random . h " <nl> mmm a / tensorflow / core / util / tensor_slice_writer . h <nl> ppp b / tensorflow / core / util / tensor_slice_writer . h <nl> limitations under the License . <nl> # include " tensorflow / core / platform / logging . h " <nl> # include " tensorflow / core / platform / macros . h " <nl> # include " tensorflow / core / platform / types . h " <nl> - # include " tensorflow / core / util / saved_tensor_slice . pb . h " <nl> # include " tensorflow / core / util / saved_tensor_slice . pb_text . h " <nl> + # include " tensorflow / core / util / saved_tensor_slice . pb . h " <nl> # include " tensorflow / core / util / saved_tensor_slice_util . h " <nl> <nl> namespace tensorflow { <nl> mmm a / tensorflow / core / util / tensor_slice_writer_test . cc <nl> ppp b / tensorflow / core / util / tensor_slice_writer_test . cc <nl> limitations under the License . <nl> <nl> # include < array > <nl> <nl> + # include " tensorflow / core / framework / versions . pb . h " <nl> # include " tensorflow / core / lib / core / status_test_util . h " <nl> # include " tensorflow / core / lib / core / stringpiece . h " <nl> # include " tensorflow / core / lib / io / path . h " <nl> mmm a / tensorflow / java / maven / libtensorflow / pom . xml <nl> ppp b / tensorflow / java / maven / libtensorflow / pom . xml <nl> <nl> < parent > <nl> < groupId > org . tensorflow < / groupId > <nl> < artifactId > parentpom < / artifactId > <nl> - < version > 1 . 2 . 0 - rc1 < / version > <nl> + < version > 1 . 2 . 0 - rc2 < / version > <nl> < relativePath > . . / < / relativePath > <nl> < / parent > <nl> < artifactId > libtensorflow < / artifactId > <nl> mmm a / tensorflow / java / maven / libtensorflow_jni / pom . xml <nl> ppp b / tensorflow / java / maven / libtensorflow_jni / pom . xml <nl> <nl> < parent > <nl> < groupId > org . tensorflow < / groupId > <nl> < artifactId > parentpom < / artifactId > <nl> - < version > 1 . 2 . 0 - rc1 < / version > <nl> + < version > 1 . 2 . 0 - rc2 < / version > <nl> < relativePath > . . / < / relativePath > <nl> < / parent > <nl> < artifactId > libtensorflow_jni < / artifactId > <nl> mmm a / tensorflow / java / maven / pom . xml <nl> ppp b / tensorflow / java / maven / pom . xml <nl> <nl> < modelVersion > 4 . 0 . 0 < / modelVersion > <nl> < groupId > org . tensorflow < / groupId > <nl> < artifactId > parentpom < / artifactId > <nl> - < version > 1 . 2 . 0 - rc1 < / version > <nl> + < version > 1 . 2 . 0 - rc2 < / version > <nl> < packaging > pom < / packaging > <nl> <nl> < url > https : / / www . tensorflow . org < / url > <nl> mmm a / tensorflow / java / maven / proto / pom . xml <nl> ppp b / tensorflow / java / maven / proto / pom . xml <nl> <nl> < parent > <nl> < groupId > org . tensorflow < / groupId > <nl> < artifactId > parentpom < / artifactId > <nl> - < version > 1 . 2 . 0 - rc1 < / version > <nl> + < version > 1 . 2 . 0 - rc2 < / version > <nl> < relativePath > . . / < / relativePath > <nl> < / parent > <nl> < artifactId > proto < / artifactId > <nl> mmm a / tensorflow / java / maven / tensorflow / pom . xml <nl> ppp b / tensorflow / java / maven / tensorflow / pom . xml <nl> <nl> < parent > <nl> < groupId > org . tensorflow < / groupId > <nl> < artifactId > parentpom < / artifactId > <nl> - < version > 1 . 2 . 0 - rc1 < / version > <nl> + < version > 1 . 2 . 0 - rc2 < / version > <nl> < relativePath > . . 
/ < / relativePath > <nl> < / parent > <nl> < artifactId > tensorflow < / artifactId > <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : array_ops " , <nl> + " : constant_op " , <nl> " : framework " , <nl> " : framework_for_generated_wrappers " , <nl> " : lookup_ops_gen " , <nl> mmm a / tensorflow / python / estimator / BUILD <nl> ppp b / tensorflow / python / estimator / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> deps = [ <nl> " : dnn " , <nl> + " : dnn_linear_combined " , <nl> " : estimator " , <nl> " : export " , <nl> " : inputs " , <nl> py_library ( <nl> ] , <nl> ) <nl> <nl> + py_library ( <nl> + name = " dnn_testing_utils " , <nl> + testonly = 1 , <nl> + srcs = [ " canned / dnn_testing_utils . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : dnn " , <nl> + " : export_export " , <nl> + " : head " , <nl> + " : metric_keys " , <nl> + " : model_fn " , <nl> + " : numpy_io " , <nl> + " : pandas_io " , <nl> + " : prediction_keys " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : array_ops " , <nl> + " / / tensorflow / python : check_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : constant_op " , <nl> + " / / tensorflow / python : control_flow_ops " , <nl> + " / / tensorflow / python : data_flow_ops " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : parsing_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : training " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / feature_column " , <nl> + ] , <nl> + ) <nl> + <nl> py_test ( <nl> name = " dnn_test " , <nl> size = " medium " , <nl> srcs = [ " canned / dnn_test . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> + tags = [ " no_pip " ] , <nl> deps = [ <nl> " : dnn " , <nl> + " : dnn_testing_utils " , <nl> " : export_export " , <nl> " : head " , <nl> " : metric_keys " , <nl> py_test ( <nl> ] , <nl> ) <nl> <nl> + py_library ( <nl> + name = " dnn_linear_combined " , <nl> + srcs = [ " canned / dnn_linear_combined . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : estimator " , <nl> + " : head " , <nl> + " : model_fn " , <nl> + " : optimizers " , <nl> + " / / tensorflow / python : init_ops " , <nl> + " / / tensorflow / python : layers " , <nl> + " / / tensorflow / python : nn " , <nl> + " / / tensorflow / python : partitioned_variables " , <nl> + " / / tensorflow / python : summary " , <nl> + " / / tensorflow / python : training " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python / feature_column " , <nl> + ] , <nl> + ) <nl> + <nl> + py_test ( <nl> + name = " dnn_linear_combined_test " , <nl> + size = " medium " , <nl> + srcs = [ " canned / dnn_linear_combined_test . 
py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + tags = [ " no_pip " ] , <nl> + deps = [ <nl> + " : dnn_linear_combined " , <nl> + " : dnn_testing_utils " , <nl> + " : export_export " , <nl> + " : linear_testing_utils " , <nl> + " : numpy_io " , <nl> + " : pandas_io " , <nl> + " : prediction_keys " , <nl> + " / / tensorflow / core : protos_all_py " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : nn " , <nl> + " / / tensorflow / python : parsing_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : training " , <nl> + " / / tensorflow / python / feature_column " , <nl> + ] , <nl> + ) <nl> + <nl> py_library ( <nl> name = " estimator " , <nl> srcs = [ <nl> py_library ( <nl> ] , <nl> ) <nl> <nl> + py_library ( <nl> + name = " linear_testing_utils " , <nl> + testonly = 1 , <nl> + srcs = [ " canned / linear_testing_utils . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " : estimator " , <nl> + " : export_export " , <nl> + " : metric_keys " , <nl> + " : numpy_io " , <nl> + " : pandas_io " , <nl> + " : run_config " , <nl> + " / / tensorflow / python : check_ops " , <nl> + " / / tensorflow / python : client " , <nl> + " / / tensorflow / python : client_testlib " , <nl> + " / / tensorflow / python : dtypes " , <nl> + " / / tensorflow / python : framework_ops " , <nl> + " / / tensorflow / python : math_ops " , <nl> + " / / tensorflow / python : platform " , <nl> + " / / tensorflow / python : sparse_tensor " , <nl> + " / / tensorflow / python : state_ops " , <nl> + " / / tensorflow / python : training " , <nl> + " / / tensorflow / python : variable_scope " , <nl> + " / / tensorflow / python : variables " , <nl> + " / / tensorflow / python / feature_column " , <nl> + ] , <nl> + ) <nl> + <nl> py_test ( <nl> name = " linear_test " , <nl> size = " medium " , <nl> srcs = [ " canned / linear_test . py " ] , <nl> srcs_version = " PY2AND3 " , <nl> + tags = [ " no_pip " ] , <nl> deps = [ <nl> " : estimator " , <nl> " : export_export " , <nl> " : linear " , <nl> + " : linear_testing_utils " , <nl> " : metric_keys " , <nl> " : numpy_io " , <nl> " : pandas_io " , <nl> mmm a / tensorflow / python / estimator / canned / dnn_linear_combined . py <nl> ppp b / tensorflow / python / estimator / canned / dnn_linear_combined . py <nl> <nl> from tensorflow . python . training import training_util <nl> <nl> # The default learning rates are a historical artifact of the initial <nl> - # implementation , but seem a reasonable choice . <nl> - _DNN_LEARNING_RATE = 0 . 05 <nl> - _LINEAR_LEARNING_RATE = 0 . 2 <nl> + # implementation . <nl> + _DNN_LEARNING_RATE = 0 . 001 <nl> + _LINEAR_LEARNING_RATE = 0 . 005 <nl> <nl> <nl> def _check_no_sync_replicas_optimizer ( optimizer ) : <nl> def _dnn_linear_combined_model_fn ( <nl> max_partitions = num_ps_replicas , <nl> min_slice_size = 64 < < 20 ) ) <nl> <nl> - linear_optimizer = optimizers . get_optimizer_instance ( <nl> - linear_optimizer , <nl> - learning_rate = _linear_learning_rate ( len ( linear_feature_columns ) ) ) <nl> - _check_no_sync_replicas_optimizer ( linear_optimizer ) <nl> - <nl> - dnn_optimizer = optimizers . get_optimizer_instance ( <nl> - dnn_optimizer , <nl> - learning_rate = _DNN_LEARNING_RATE ) <nl> - _check_no_sync_replicas_optimizer ( dnn_optimizer ) <nl> - <nl> # Build DNN Logits . 
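The hunk above lowers the historical learning-rate defaults (DNN 0.05 → 0.001, linear 0.2 → 0.005) and moves optimizer construction into the per-tower branches, so an optimizer is only instantiated and checked against `SyncReplicasOptimizer` when its tower actually has feature columns. The linear default is additionally passed through `_linear_learning_rate ( len ( linear_feature_columns ) )`. The body of that helper is not shown in this hunk; the sketch below is an assumption about what it computes, inferred only from the call site:

```python
import math

_LINEAR_LEARNING_RATE = 0.005  # new default from the hunk above


def _linear_learning_rate(num_linear_feature_columns):
  # Assumed body: cap the default with a 1/sqrt(num_columns) decay so very
  # wide linear models (many sparse/crossed columns) take smaller steps.
  default_learning_rate = 1. / math.sqrt(num_linear_feature_columns)
  return min(_LINEAR_LEARNING_RATE, default_learning_rate)


# Example: with 4 linear columns, min(0.005, 0.5) == 0.005; the sqrt decay
# only binds once num_columns exceeds 1 / 0.005 ** 2 == 40000.
```

Under this assumed formula the cap rarely binds, which matches the intent of keeping the linear tower's default conservative.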
<nl> dnn_parent_scope = ' dnn ' <nl> <nl> if not dnn_feature_columns : <nl> dnn_logits = None <nl> else : <nl> + dnn_optimizer = optimizers . get_optimizer_instance ( <nl> + dnn_optimizer , learning_rate = _DNN_LEARNING_RATE ) <nl> + _check_no_sync_replicas_optimizer ( dnn_optimizer ) <nl> if not dnn_hidden_units : <nl> raise ValueError ( <nl> ' dnn_hidden_units must be defined when dnn_feature_columns is ' <nl> def _dnn_linear_combined_model_fn ( <nl> with variable_scope . variable_scope ( <nl> ' logits ' , <nl> values = ( net , ) ) as dnn_logits_scope : <nl> - logits = core_layers . dense ( <nl> + dnn_logits = core_layers . dense ( <nl> net , <nl> units = head . logits_dimension , <nl> activation = None , <nl> def _dnn_linear_combined_model_fn ( <nl> if not linear_feature_columns : <nl> linear_logits = None <nl> else : <nl> + linear_optimizer = optimizers . get_optimizer_instance ( <nl> + linear_optimizer , <nl> + learning_rate = _linear_learning_rate ( len ( linear_feature_columns ) ) ) <nl> + _check_no_sync_replicas_optimizer ( linear_optimizer ) <nl> with variable_scope . variable_scope ( <nl> linear_parent_scope , <nl> values = tuple ( six . itervalues ( features ) ) , <nl> def _train_op_fn ( loss ) : <nl> with ops . colocate_with ( global_step ) : <nl> return state_ops . assign_add ( global_step , 1 ) <nl> <nl> - return head . create_estimator_spec ( <nl> - features = features , <nl> - mode = mode , <nl> - labels = labels , <nl> - train_op_fn = _train_op_fn , <nl> - logits = logits ) <nl> + return head . create_estimator_spec ( <nl> + features = features , <nl> + mode = mode , <nl> + labels = labels , <nl> + train_op_fn = _train_op_fn , <nl> + logits = logits ) <nl> + <nl> + <nl> + class DNNLinearCombinedClassifier ( estimator . Estimator ) : <nl> + " " " An estimator for TensorFlow Linear and DNN joined classification models . <nl> + <nl> + Note : This estimator is also known as wide - n - deep . <nl> + <nl> + Example : <nl> + <nl> + ` ` ` python <nl> + numeric_feature = numeric_column ( . . . ) <nl> + sparse_column_a = categorical_column_with_hash_bucket ( . . . ) <nl> + sparse_column_b = categorical_column_with_hash_bucket ( . . . ) <nl> + <nl> + sparse_feature_a_x_sparse_feature_b = crossed_column ( . . . ) <nl> + sparse_feature_a_emb = embedding_column ( sparse_id_column = sparse_feature_a , <nl> + . . . ) <nl> + sparse_feature_b_emb = embedding_column ( sparse_id_column = sparse_feature_b , <nl> + . . . ) <nl> + <nl> + estimator = DNNLinearCombinedClassifier ( <nl> + # wide settings <nl> + linear_feature_columns = [ sparse_feature_a_x_sparse_feature_b ] , <nl> + linear_optimizer = tf . train . FtrlOptimizer ( . . . ) , <nl> + # deep settings <nl> + dnn_feature_columns = [ <nl> + sparse_feature_a_emb , sparse_feature_b_emb , numeric_feature ] , <nl> + dnn_hidden_units = [ 1000 , 500 , 100 ] , <nl> + dnn_optimizer = tf . train . ProximalAdagradOptimizer ( . . . ) ) <nl> + <nl> + # To apply L1 and L2 regularization , you can set optimizers as follows : <nl> + tf . train . ProximalAdagradOptimizer ( <nl> + learning_rate = 0 . 1 , <nl> + l1_regularization_strength = 0 . 001 , <nl> + l2_regularization_strength = 0 . 001 ) <nl> + # It is the same for FtrlOptimizer . <nl> + <nl> + # Input builders <nl> + def input_fn_train ( ) : # returns x , y <nl> + pass <nl> + estimator . train ( input_fn = input_fn_train , steps = 100 ) <nl> + <nl> + def input_fn_eval ( ) : # returns x , y <nl> + pass <nl> + metrics = estimator .
evaluate ( input_fn = input_fn_eval , steps = 10 ) <nl> + def input_fn_predict ( ) : # returns x , None <nl> + pass <nl> + predictions = estimator . predict ( input_fn = input_fn_predict ) <nl> + ` ` ` <nl> + <nl> + Input of ` train ` and ` evaluate ` should have the following features , <nl> + otherwise there will be a ` KeyError ` : <nl> + <nl> + * for each ` column ` in ` dnn_feature_columns ` + ` linear_feature_columns ` : <nl> + - if ` column ` is a ` _CategoricalColumn ` , a feature with ` key = column . name ` <nl> + whose ` value ` is a ` SparseTensor ` . <nl> + - if ` column ` is a ` _WeightedCategoricalColumn ` , two features : the first <nl> + with ` key ` the id column name , the second with ` key ` the weight column <nl> + name . Both features ' ` value ` must be a ` SparseTensor ` . <nl> + - if ` column ` is a ` _DenseColumn ` , a feature with ` key = column . name ` <nl> + whose ` value ` is a ` Tensor ` . <nl> + <nl> + " " " <nl> + <nl> + def __init__ ( self , <nl> + model_dir = None , <nl> + linear_feature_columns = None , <nl> + linear_optimizer = ' Ftrl ' , <nl> + dnn_feature_columns = None , <nl> + dnn_optimizer = ' Adagrad ' , <nl> + dnn_hidden_units = None , <nl> + dnn_activation_fn = nn . relu , <nl> + dnn_dropout = None , <nl> + n_classes = 2 , <nl> + input_layer_partitioner = None , <nl> + config = None ) : <nl> + " " " Initializes a DNNLinearCombinedClassifier instance . <nl> + <nl> + Args : <nl> + model_dir : Directory to save model parameters , graph , etc . This can <nl> + also be used to load checkpoints from the directory into an estimator <nl> + to continue training a previously saved model . <nl> + linear_feature_columns : An iterable containing all the feature columns <nl> + used by the linear part of the model . All items in the set must be <nl> + instances of classes derived from ` FeatureColumn ` . <nl> + linear_optimizer : An instance of ` tf . Optimizer ` used to apply gradients to <nl> + the linear part of the model . Defaults to the FTRL optimizer . <nl> + dnn_feature_columns : An iterable containing all the feature columns used <nl> + by the deep part of the model . All items in the set must be instances of <nl> + classes derived from ` FeatureColumn ` . <nl> + dnn_optimizer : An instance of ` tf . Optimizer ` used to apply gradients to <nl> + the deep part of the model . Defaults to the Adagrad optimizer . <nl> + dnn_hidden_units : List of hidden units per layer . All layers are fully <nl> + connected . <nl> + dnn_activation_fn : Activation function applied to each layer . If None , <nl> + will use ` tf . nn . relu ` . <nl> + dnn_dropout : When not None , the probability we will drop out <nl> + a given coordinate . <nl> + n_classes : Number of label classes . Defaults to 2 , namely binary <nl> + classification . Must be > 1 . <nl> + input_layer_partitioner : Partitioner for input layer . Defaults to <nl> + ` min_max_variable_partitioner ` with ` min_slice_size ` 64 < < 20 . <nl> + config : RunConfig object to configure the runtime settings . <nl> + <nl> + Raises : <nl> + ValueError : If both linear_feature_columns and dnn_feature_columns are <nl> + empty at the same time . <nl> + " " " <nl> + linear_feature_columns = linear_feature_columns or [ ] <nl> + dnn_feature_columns = dnn_feature_columns or [ ] <nl> + self . _feature_columns = linear_feature_columns + dnn_feature_columns <nl> + if not self . _feature_columns : <nl> + raise ValueError ( ' Either linear_feature_columns or dnn_feature_columns ' <nl> + ' must be defined .
' ) <nl> + if n_classes = = 2 : <nl> + head = head_lib . _binary_logistic_head_with_sigmoid_cross_entropy_loss ( ) # pylint : disable = protected - access <nl> + else : <nl> + head = head_lib . _multi_class_head_with_softmax_cross_entropy_loss ( # pylint : disable = protected - access <nl> + n_classes ) <nl> + <nl> + def _model_fn ( features , labels , mode , config ) : <nl> + return _dnn_linear_combined_model_fn ( <nl> + features = features , <nl> + labels = labels , <nl> + mode = mode , <nl> + head = head , <nl> + linear_feature_columns = linear_feature_columns , <nl> + linear_optimizer = linear_optimizer , <nl> + dnn_feature_columns = dnn_feature_columns , <nl> + dnn_optimizer = dnn_optimizer , <nl> + dnn_hidden_units = dnn_hidden_units , <nl> + dnn_activation_fn = dnn_activation_fn , <nl> + dnn_dropout = dnn_dropout , <nl> + input_layer_partitioner = input_layer_partitioner , <nl> + config = config ) <nl> + <nl> + super ( DNNLinearCombinedClassifier , self ) . __init__ ( <nl> + model_fn = _model_fn , model_dir = model_dir , config = config ) <nl> <nl> <nl> class DNNLinearCombinedRegressor ( estimator . Estimator ) : <nl> def input_fn_predict : # returns x , None <nl> def __init__ ( self , <nl> model_dir = None , <nl> linear_feature_columns = None , <nl> - linear_optimizer = None , <nl> + linear_optimizer = ' Ftrl ' , <nl> dnn_feature_columns = None , <nl> - dnn_optimizer = None , <nl> + dnn_optimizer = ' Adagrad ' , <nl> dnn_hidden_units = None , <nl> dnn_activation_fn = nn . relu , <nl> dnn_dropout = None , <nl> label_dimension = 1 , <nl> + weight_feature_key = None , <nl> input_layer_partitioner = None , <nl> config = None ) : <nl> " " " Initializes a DNNLinearCombinedRegressor instance . <nl> def __init__ ( self , <nl> label_dimension : Number of regression targets per example . This is the <nl> size of the last dimension of the labels and logits ` Tensor ` objects <nl> ( typically , these have shape ` [ batch_size , label_dimension ] ` ) . <nl> + weight_feature_key : A string defining the feature column name representing <nl> + weights . It is used to down - weight or boost examples during training . It <nl> + will be multiplied by the loss of the example . <nl> input_layer_partitioner : Partitioner for input layer . Defaults to <nl> ` min_max_variable_partitioner ` with ` min_slice_size ` 64 < < 20 . <nl> config : RunConfig object to configure the runtime settings . <nl> def __init__ ( self , <nl> " " " <nl> linear_feature_columns = linear_feature_columns or [ ] <nl> dnn_feature_columns = dnn_feature_columns or [ ] <nl> - self . _feature_columns = linear_feature_columns + dnn_feature_columns <nl> + self . _feature_columns = ( <nl> + list ( linear_feature_columns ) + list ( dnn_feature_columns ) ) <nl> if not self . _feature_columns : <nl> raise ValueError ( ' Either linear_feature_columns or dnn_feature_columns ' <nl> ' must be defined . ' ) <nl> def _model_fn ( features , labels , mode , config ) : <nl> features = features , <nl> labels = labels , <nl> mode = mode , <nl> - head = head_lib . _regression_head_with_mean_squared_error_loss ( # pylint : disable = protected - access <nl> - label_dimension = label_dimension ) , <nl> + head = head_lib .
# pylint : disable = protected - access <nl> + _regression_head_with_mean_squared_error_loss ( <nl> + label_dimension = label_dimension , <nl> + weight_feature_key = weight_feature_key ) , <nl> linear_feature_columns = linear_feature_columns , <nl> linear_optimizer = linear_optimizer , <nl> dnn_feature_columns = dnn_feature_columns , <nl> new file mode 100644 <nl> index 0000000000000 . . 16b4be7b24ad7 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / estimator / canned / dnn_linear_combined_test . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Tests for dnn_linear_combined . py . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + import shutil <nl> + import tempfile <nl> + <nl> + import numpy as np <nl> + import six <nl> + <nl> + from tensorflow . core . example import example_pb2 <nl> + from tensorflow . core . example import feature_pb2 <nl> + from tensorflow . python . estimator . canned import dnn_linear_combined <nl> + from tensorflow . python . estimator . canned import dnn_testing_utils <nl> + from tensorflow . python . estimator . canned import linear_testing_utils <nl> + from tensorflow . python . estimator . canned import prediction_keys <nl> + from tensorflow . python . estimator . export import export <nl> + from tensorflow . python . estimator . inputs import numpy_io <nl> + from tensorflow . python . estimator . inputs import pandas_io <nl> + from tensorflow . python . feature_column import feature_column <nl> + from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . ops import nn <nl> + from tensorflow . python . ops import parsing_ops <nl> + from tensorflow . python . platform import gfile <nl> + from tensorflow . python . platform import test <nl> + from tensorflow . python . training import input as input_lib <nl> + <nl> + try : <nl> + # pylint : disable = g - import - not - at - top <nl> + import pandas as pd <nl> + HAS_PANDAS = True <nl> + except IOError : <nl> + # Pandas writes a temporary file during import . If it fails , don ' t use pandas . <nl> + HAS_PANDAS = False <nl> + except ImportError : <nl> + HAS_PANDAS = False <nl> + <nl> + <nl> + class DNNOnlyModelFnTest ( dnn_testing_utils . BaseDNNModelFnTest , test . TestCase ) : <nl> + <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + dnn_testing_utils . BaseDNNModelFnTest . __init__ ( self , self . 
_dnn_only_model_fn ) <nl> + <nl> + def _dnn_only_model_fn ( <nl> + self , <nl> + features , <nl> + labels , <nl> + mode , <nl> + head , <nl> + hidden_units , <nl> + feature_columns , <nl> + optimizer = ' Adagrad ' , <nl> + activation_fn = nn . relu , <nl> + dropout = None , # pylint : disable = redefined - outer - name <nl> + input_layer_partitioner = None , <nl> + config = None ) : <nl> + return dnn_linear_combined . _dnn_linear_combined_model_fn ( <nl> + features = features , <nl> + labels = labels , <nl> + mode = mode , <nl> + head = head , <nl> + linear_feature_columns = [ ] , <nl> + dnn_hidden_units = hidden_units , <nl> + dnn_feature_columns = feature_columns , <nl> + dnn_optimizer = optimizer , <nl> + dnn_activation_fn = activation_fn , <nl> + dnn_dropout = dropout , <nl> + input_layer_partitioner = input_layer_partitioner , <nl> + config = config ) <nl> + <nl> + <nl> + # A function to mimic the linear - regressor init and reuse the same tests . <nl> + def _linear_regressor_fn ( feature_columns , <nl> + model_dir = None , <nl> + label_dimension = 1 , <nl> + weight_feature_key = None , <nl> + optimizer = ' Ftrl ' , <nl> + config = None , <nl> + partitioner = None ) : <nl> + return dnn_linear_combined . DNNLinearCombinedRegressor ( <nl> + model_dir = model_dir , <nl> + linear_feature_columns = feature_columns , <nl> + linear_optimizer = optimizer , <nl> + label_dimension = label_dimension , <nl> + weight_feature_key = weight_feature_key , <nl> + input_layer_partitioner = partitioner , <nl> + config = config ) <nl> + <nl> + <nl> + class LinearOnlyRegressorPartitionerTest ( <nl> + linear_testing_utils . BaseLinearRegressorPartitionerTest , test . TestCase ) : <nl> + <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + linear_testing_utils . BaseLinearRegressorPartitionerTest . __init__ ( <nl> + self , _linear_regressor_fn ) <nl> + <nl> + <nl> + class LinearOnlyRegressorEvaluationTest ( <nl> + linear_testing_utils . BaseLinearRegressorEvaluationTest , test . TestCase ) : <nl> + <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + linear_testing_utils . BaseLinearRegressorEvaluationTest . __init__ ( <nl> + self , _linear_regressor_fn ) <nl> + <nl> + <nl> + class LinearOnlyRegressorPredictTest ( <nl> + linear_testing_utils . BaseLinearRegressorPredictTest , test . TestCase ) : <nl> + <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + linear_testing_utils . BaseLinearRegressorPredictTest . __init__ ( <nl> + self , _linear_regressor_fn ) <nl> + <nl> + <nl> + class LinearOnlyRegressorIntegrationTest ( <nl> + linear_testing_utils . BaseLinearRegressorIntegrationTest , test . TestCase ) : <nl> + <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + linear_testing_utils . BaseLinearRegressorIntegrationTest . __init__ ( <nl> + self , _linear_regressor_fn ) <nl> + <nl> + <nl> + class LinearOnlyRegressorTrainingTest ( <nl> + linear_testing_utils . BaseLinearRegressorTrainingTest , test . TestCase ) : <nl> + <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + linear_testing_utils .
BaseLinearRegressorTrainingTest . __init__ ( <nl> + self , _linear_regressor_fn ) <nl> + <nl> + <nl> + class DNNLinearCombinedRegressorIntegrationTest ( test . TestCase ) : <nl> + <nl> + def setUp ( self ) : <nl> + self . _model_dir = tempfile . mkdtemp ( ) <nl> + <nl> + def tearDown ( self ) : <nl> + if self . _model_dir : <nl> + shutil . rmtree ( self . _model_dir ) <nl> + <nl> + def _test_complete_flow ( <nl> + self , train_input_fn , eval_input_fn , predict_input_fn , input_dimension , <nl> + label_dimension , batch_size ) : <nl> + linear_feature_columns = [ <nl> + feature_column . numeric_column ( ' x ' , shape = ( input_dimension , ) ) ] <nl> + dnn_feature_columns = [ <nl> + feature_column . numeric_column ( ' x ' , shape = ( input_dimension , ) ) ] <nl> + feature_columns = linear_feature_columns + dnn_feature_columns <nl> + est = dnn_linear_combined . DNNLinearCombinedRegressor ( <nl> + linear_feature_columns = linear_feature_columns , <nl> + dnn_hidden_units = ( 2 , 2 ) , <nl> + dnn_feature_columns = dnn_feature_columns , <nl> + label_dimension = label_dimension , <nl> + model_dir = self . _model_dir ) <nl> + <nl> + # TRAIN <nl> + num_steps = 10 <nl> + est . train ( train_input_fn , steps = num_steps ) <nl> + <nl> + # EVALUATE <nl> + scores = est . evaluate ( eval_input_fn ) <nl> + self . assertEqual ( num_steps , scores [ ops . GraphKeys . GLOBAL_STEP ] ) <nl> + self . assertIn ( ' loss ' , six . iterkeys ( scores ) ) <nl> + <nl> + # PREDICT <nl> + predictions = np . array ( [ <nl> + x [ prediction_keys . PredictionKeys . PREDICTIONS ] <nl> + for x in est . predict ( predict_input_fn ) <nl> + ] ) <nl> + self . assertAllEqual ( ( batch_size , label_dimension ) , predictions . shape ) <nl> + <nl> + # EXPORT <nl> + feature_spec = feature_column . make_parse_example_spec ( feature_columns ) <nl> + serving_input_receiver_fn = export . build_parsing_serving_input_receiver_fn ( <nl> + feature_spec ) <nl> + export_dir = est . export_savedmodel ( tempfile . mkdtemp ( ) , <nl> + serving_input_receiver_fn ) <nl> + self . assertTrue ( gfile . Exists ( export_dir ) ) <nl> + <nl> + def test_numpy_input_fn ( self ) : <nl> + " " " Tests complete flow with numpy_input_fn . " " " <nl> + label_dimension = 2 <nl> + batch_size = 10 <nl> + data = np . linspace ( 0 . , 2 . , batch_size * label_dimension , dtype = np . float32 ) <nl> + data = data . reshape ( batch_size , label_dimension ) <nl> + # learn y = x <nl> + train_input_fn = numpy_io . numpy_input_fn ( <nl> + x = { ' x ' : data } , <nl> + y = data , <nl> + batch_size = batch_size , <nl> + num_epochs = None , <nl> + shuffle = True ) <nl> + eval_input_fn = numpy_io . numpy_input_fn ( <nl> + x = { ' x ' : data } , <nl> + y = data , <nl> + batch_size = batch_size , <nl> + shuffle = False ) <nl> + predict_input_fn = numpy_io . numpy_input_fn ( <nl> + x = { ' x ' : data } , <nl> + batch_size = batch_size , <nl> + shuffle = False ) <nl> + <nl> + self . _test_complete_flow ( <nl> + train_input_fn = train_input_fn , <nl> + eval_input_fn = eval_input_fn , <nl> + predict_input_fn = predict_input_fn , <nl> + input_dimension = label_dimension , <nl> + label_dimension = label_dimension , <nl> + batch_size = batch_size ) <nl> + <nl> + def test_pandas_input_fn ( self ) : <nl> + " " " Tests complete flow with pandas_input_fn . " " " <nl> + if not HAS_PANDAS : <nl> + return <nl> + label_dimension = 1 <nl> + batch_size = 10 <nl> + data = np . linspace ( 0 . , 2 . , batch_size , dtype = np . float32 ) <nl> + x = pd . DataFrame ( { ' x ' : data } ) <nl> + y = pd .
Series ( data ) <nl> + train_input_fn = pandas_io . pandas_input_fn ( <nl> + x = x , <nl> + y = y , <nl> + batch_size = batch_size , <nl> + num_epochs = None , <nl> + shuffle = True ) <nl> + eval_input_fn = pandas_io . pandas_input_fn ( <nl> + x = x , <nl> + y = y , <nl> + batch_size = batch_size , <nl> + shuffle = False ) <nl> + predict_input_fn = pandas_io . pandas_input_fn ( <nl> + x = x , <nl> + batch_size = batch_size , <nl> + shuffle = False ) <nl> + <nl> + self . _test_complete_flow ( <nl> + train_input_fn = train_input_fn , <nl> + eval_input_fn = eval_input_fn , <nl> + predict_input_fn = predict_input_fn , <nl> + input_dimension = label_dimension , <nl> + label_dimension = label_dimension , <nl> + batch_size = batch_size ) <nl> + <nl> + def test_input_fn_from_parse_example ( self ) : <nl> + " " " Tests complete flow with input_fn constructed from parse_example . " " " <nl> + label_dimension = 2 <nl> + batch_size = 10 <nl> + data = np . linspace ( 0 . , 2 . , batch_size * label_dimension , dtype = np . float32 ) <nl> + data = data . reshape ( batch_size , label_dimension ) <nl> + <nl> + serialized_examples = [ ] <nl> + for datum in data : <nl> + example = example_pb2 . Example ( features = feature_pb2 . Features ( <nl> + feature = { <nl> + ' x ' : feature_pb2 . Feature ( <nl> + float_list = feature_pb2 . FloatList ( value = datum ) ) , <nl> + ' y ' : feature_pb2 . Feature ( <nl> + float_list = feature_pb2 . FloatList ( value = datum ) ) , <nl> + } ) ) <nl> + serialized_examples . append ( example . SerializeToString ( ) ) <nl> + <nl> + feature_spec = { <nl> + ' x ' : parsing_ops . FixedLenFeature ( [ label_dimension ] , dtypes . float32 ) , <nl> + ' y ' : parsing_ops . FixedLenFeature ( [ label_dimension ] , dtypes . float32 ) , <nl> + } <nl> + def _train_input_fn ( ) : <nl> + feature_map = parsing_ops . parse_example ( serialized_examples , feature_spec ) <nl> + features = linear_testing_utils . queue_parsed_features ( feature_map ) <nl> + labels = features . pop ( ' y ' ) <nl> + return features , labels <nl> + def _eval_input_fn ( ) : <nl> + feature_map = parsing_ops . parse_example ( <nl> + input_lib . limit_epochs ( serialized_examples , num_epochs = 1 ) , <nl> + feature_spec ) <nl> + features = linear_testing_utils . queue_parsed_features ( feature_map ) <nl> + labels = features . pop ( ' y ' ) <nl> + return features , labels <nl> + def _predict_input_fn ( ) : <nl> + feature_map = parsing_ops . parse_example ( <nl> + input_lib . limit_epochs ( serialized_examples , num_epochs = 1 ) , <nl> + feature_spec ) <nl> + features = linear_testing_utils . queue_parsed_features ( feature_map ) <nl> + features . pop ( ' y ' ) <nl> + return features , None <nl> + <nl> + self . _test_complete_flow ( <nl> + train_input_fn = _train_input_fn , <nl> + eval_input_fn = _eval_input_fn , <nl> + predict_input_fn = _predict_input_fn , <nl> + input_dimension = label_dimension , <nl> + label_dimension = label_dimension , <nl> + batch_size = batch_size ) <nl> + <nl> + <nl> + class DNNLinearCombinedClassifierIntegrationTest ( test . TestCase ) : <nl> + <nl> + def setUp ( self ) : <nl> + self . _model_dir = tempfile . mkdtemp ( ) <nl> + <nl> + def tearDown ( self ) : <nl> + if self . _model_dir : <nl> + shutil . rmtree ( self . _model_dir ) <nl> + <nl> + def _test_complete_flow ( <nl> + self , train_input_fn , eval_input_fn , predict_input_fn , input_dimension , <nl> + n_classes , batch_size ) : <nl> + linear_feature_columns = [ <nl> + feature_column . 
numeric_column ( ' x ' , shape = ( input_dimension , ) ) ] <nl> + dnn_feature_columns = [ <nl> + feature_column . numeric_column ( ' x ' , shape = ( input_dimension , ) ) ] <nl> + feature_columns = linear_feature_columns + dnn_feature_columns <nl> + est = dnn_linear_combined . DNNLinearCombinedClassifier ( <nl> + linear_feature_columns = linear_feature_columns , <nl> + dnn_hidden_units = ( 2 , 2 ) , <nl> + dnn_feature_columns = dnn_feature_columns , <nl> + n_classes = n_classes , <nl> + model_dir = self . _model_dir ) <nl> + <nl> + # TRAIN <nl> + num_steps = 10 <nl> + est . train ( train_input_fn , steps = num_steps ) <nl> + <nl> + # EVALUATE <nl> + scores = est . evaluate ( eval_input_fn ) <nl> + self . assertEqual ( num_steps , scores [ ops . GraphKeys . GLOBAL_STEP ] ) <nl> + self . assertIn ( ' loss ' , six . iterkeys ( scores ) ) <nl> + <nl> + # PREDICT <nl> + predicted_proba = np . array ( [ <nl> + x [ prediction_keys . PredictionKeys . PROBABILITIES ] <nl> + for x in est . predict ( predict_input_fn ) <nl> + ] ) <nl> + self . assertAllEqual ( ( batch_size , n_classes ) , predicted_proba . shape ) <nl> + <nl> + # EXPORT <nl> + feature_spec = feature_column . make_parse_example_spec ( feature_columns ) <nl> + serving_input_receiver_fn = export . build_parsing_serving_input_receiver_fn ( <nl> + feature_spec ) <nl> + export_dir = est . export_savedmodel ( tempfile . mkdtemp ( ) , <nl> + serving_input_receiver_fn ) <nl> + self . assertTrue ( gfile . Exists ( export_dir ) ) <nl> + <nl> + def test_numpy_input_fn ( self ) : <nl> + " " " Tests complete flow with numpy_input_fn . " " " <nl> + n_classes = 2 <nl> + input_dimension = 2 <nl> + batch_size = 10 <nl> + data = np . linspace ( 0 . , 2 . , batch_size * input_dimension , dtype = np . float32 ) <nl> + x_data = data . reshape ( batch_size , input_dimension ) <nl> + y_data = np . reshape ( data [ : batch_size ] , ( batch_size , 1 ) ) <nl> + # learn y = x <nl> + train_input_fn = numpy_io . numpy_input_fn ( <nl> + x = { ' x ' : x_data } , <nl> + y = y_data , <nl> + batch_size = batch_size , <nl> + num_epochs = None , <nl> + shuffle = True ) <nl> + eval_input_fn = numpy_io . numpy_input_fn ( <nl> + x = { ' x ' : x_data } , <nl> + y = y_data , <nl> + batch_size = batch_size , <nl> + shuffle = False ) <nl> + predict_input_fn = numpy_io . numpy_input_fn ( <nl> + x = { ' x ' : x_data } , <nl> + batch_size = batch_size , <nl> + shuffle = False ) <nl> + <nl> + self . _test_complete_flow ( <nl> + train_input_fn = train_input_fn , <nl> + eval_input_fn = eval_input_fn , <nl> + predict_input_fn = predict_input_fn , <nl> + input_dimension = input_dimension , <nl> + n_classes = n_classes , <nl> + batch_size = batch_size ) <nl> + <nl> + def test_pandas_input_fn ( self ) : <nl> + " " " Tests complete flow with pandas_input_fn . " " " <nl> + if not HAS_PANDAS : <nl> + return <nl> + input_dimension = 1 <nl> + n_classes = 2 <nl> + batch_size = 10 <nl> + data = np . linspace ( 0 . , 2 . , batch_size , dtype = np . float32 ) <nl> + x = pd . DataFrame ( { ' x ' : data } ) <nl> + y = pd . Series ( data ) <nl> + train_input_fn = pandas_io . pandas_input_fn ( <nl> + x = x , <nl> + y = y , <nl> + batch_size = batch_size , <nl> + num_epochs = None , <nl> + shuffle = True ) <nl> + eval_input_fn = pandas_io . pandas_input_fn ( <nl> + x = x , <nl> + y = y , <nl> + batch_size = batch_size , <nl> + shuffle = False ) <nl> + predict_input_fn = pandas_io . pandas_input_fn ( <nl> + x = x , <nl> + batch_size = batch_size , <nl> + shuffle = False ) <nl> + <nl> + self .
_test_complete_flow ( <nl> + train_input_fn = train_input_fn , <nl> + eval_input_fn = eval_input_fn , <nl> + predict_input_fn = predict_input_fn , <nl> + input_dimension = input_dimension , <nl> + n_classes = n_classes , <nl> + batch_size = batch_size ) <nl> + <nl> + def test_input_fn_from_parse_example ( self ) : <nl> + " " " Tests complete flow with input_fn constructed from parse_example . " " " <nl> + input_dimension = 2 <nl> + n_classes = 2 <nl> + batch_size = 10 <nl> + data = np . linspace ( 0 . , 2 . , batch_size * input_dimension , dtype = np . float32 ) <nl> + data = data . reshape ( batch_size , input_dimension ) <nl> + <nl> + serialized_examples = [ ] <nl> + for datum in data : <nl> + example = example_pb2 . Example ( features = feature_pb2 . Features ( <nl> + feature = { <nl> + ' x ' : feature_pb2 . Feature ( <nl> + float_list = feature_pb2 . FloatList ( value = datum ) ) , <nl> + ' y ' : feature_pb2 . Feature ( <nl> + float_list = feature_pb2 . FloatList ( value = datum [ : 1 ] ) ) , <nl> + } ) ) <nl> + serialized_examples . append ( example . SerializeToString ( ) ) <nl> + <nl> + feature_spec = { <nl> + ' x ' : parsing_ops . FixedLenFeature ( [ input_dimension ] , dtypes . float32 ) , <nl> + ' y ' : parsing_ops . FixedLenFeature ( [ 1 ] , dtypes . float32 ) , <nl> + } <nl> + def _train_input_fn ( ) : <nl> + feature_map = parsing_ops . parse_example ( serialized_examples , feature_spec ) <nl> + features = linear_testing_utils . queue_parsed_features ( feature_map ) <nl> + labels = features . pop ( ' y ' ) <nl> + return features , labels <nl> + def _eval_input_fn ( ) : <nl> + feature_map = parsing_ops . parse_example ( <nl> + input_lib . limit_epochs ( serialized_examples , num_epochs = 1 ) , <nl> + feature_spec ) <nl> + features = linear_testing_utils . queue_parsed_features ( feature_map ) <nl> + labels = features . pop ( ' y ' ) <nl> + return features , labels <nl> + def _predict_input_fn ( ) : <nl> + feature_map = parsing_ops . parse_example ( <nl> + input_lib . limit_epochs ( serialized_examples , num_epochs = 1 ) , <nl> + feature_spec ) <nl> + features = linear_testing_utils . queue_parsed_features ( feature_map ) <nl> + features . pop ( ' y ' ) <nl> + return features , None <nl> + <nl> + self . _test_complete_flow ( <nl> + train_input_fn = _train_input_fn , <nl> + eval_input_fn = _eval_input_fn , <nl> + predict_input_fn = _predict_input_fn , <nl> + input_dimension = input_dimension , <nl> + n_classes = n_classes , <nl> + batch_size = batch_size ) <nl> + <nl> + <nl> + if __name__ = = ' __main__ ' : <nl> + test . main ( ) <nl> mmm a / tensorflow / python / estimator / canned / dnn_test . py <nl> ppp b / tensorflow / python / estimator / canned / dnn_test . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> - import os <nl> import shutil <nl> import tempfile <nl> <nl> <nl> from tensorflow . core . example import example_pb2 <nl> from tensorflow . core . example import feature_pb2 <nl> from tensorflow . core . framework import summary_pb2 <nl> - from tensorflow . python . client import session as tf_session <nl> - from tensorflow . python . estimator import model_fn <nl> from tensorflow . python . estimator . canned import dnn <nl> - from tensorflow . python . estimator . canned import head as head_lib <nl> + from tensorflow . python . estimator . canned import dnn_testing_utils <nl> from tensorflow . python . estimator . canned import metric_keys <nl> from tensorflow . python . estimator . 
canned import prediction_keys <nl> from tensorflow . python . estimator . export import export <nl> from tensorflow . python . estimator . inputs import numpy_io <nl> from tensorflow . python . estimator . inputs import pandas_io <nl> from tensorflow . python . feature_column import feature_column <nl> - from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> - from tensorflow . python . ops import array_ops <nl> - from tensorflow . python . ops import check_ops <nl> from tensorflow . python . ops import data_flow_ops <nl> - from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import parsing_ops <nl> - from tensorflow . python . ops import state_ops <nl> - from tensorflow . python . ops import variables as variables_lib <nl> from tensorflow . python . platform import gfile <nl> from tensorflow . python . platform import test <nl> from tensorflow . python . summary import summary as summary_lib <nl> from tensorflow . python . summary . writer import writer_cache <nl> from tensorflow . python . training import checkpoint_utils <nl> from tensorflow . python . training import input as input_lib <nl> - from tensorflow . python . training import monitored_session <nl> - from tensorflow . python . training import optimizer <nl> from tensorflow . python . training import queue_runner <nl> - from tensorflow . python . training import saver <nl> from tensorflow . python . training import session_run_hook <nl> - from tensorflow . python . training import training_util <nl> <nl> try : <nl> # pylint : disable = g - import - not - at - top <nl> <nl> except ImportError : <nl> HAS_PANDAS = False <nl> <nl> - # Names of variables created by model . <nl> - _LEARNING_RATE_NAME = ' dnn / regression_head / dnn / learning_rate ' <nl> - _HIDDEN_WEIGHTS_NAME_PATTERN = ' dnn / hiddenlayer_ % d / kernel ' <nl> - _HIDDEN_BIASES_NAME_PATTERN = ' dnn / hiddenlayer_ % d / bias ' <nl> - _LOGITS_WEIGHTS_NAME = ' dnn / logits / kernel ' <nl> - _LOGITS_BIASES_NAME = ' dnn / logits / bias ' <nl> <nl> + class DNNModelFnTest ( dnn_testing_utils . BaseDNNModelFnTest , test . TestCase ) : <nl> <nl> - def _create_checkpoint ( weights_and_biases , global_step , model_dir ) : <nl> - " " " Create checkpoint file with provided model weights . <nl> - <nl> - Args : <nl> - weights_and_biases : Iterable of tuples of weight and bias values . <nl> - global_step : Initial global step to save in checkpoint . <nl> - model_dir : Directory into which checkpoint is saved . <nl> - " " " <nl> - weights , biases = zip ( * weights_and_biases ) <nl> - model_weights = { } <nl> - <nl> - # Hidden layer weights . <nl> - for i in range ( 0 , len ( weights ) - 1 ) : <nl> - model_weights [ _HIDDEN_WEIGHTS_NAME_PATTERN % i ] = weights [ i ] <nl> - model_weights [ _HIDDEN_BIASES_NAME_PATTERN % i ] = biases [ i ] <nl> - <nl> - # Output layer weights . <nl> - model_weights [ _LOGITS_WEIGHTS_NAME ] = weights [ - 1 ] <nl> - model_weights [ _LOGITS_BIASES_NAME ] = biases [ - 1 ] <nl> - <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - # Create model variables . <nl> - for k , v in six . iteritems ( model_weights ) : <nl> - variables_lib . Variable ( v , name = k , dtype = dtypes . float32 ) <nl> - <nl> - # Create non - model variables . <nl> - global_step_var = training_util . create_global_step ( ) <nl> - <nl> - # Initialize vars and save checkpoint . <nl> - with tf_session . 
Session ( ) as sess : <nl> - variables_lib . global_variables_initializer ( ) . run ( ) <nl> - global_step_var . assign ( global_step ) . eval ( ) <nl> - saver . Saver ( ) . save ( sess , os . path . join ( model_dir , ' model . ckpt ' ) ) <nl> - <nl> - <nl> - def _mock_head ( <nl> - testcase , hidden_units , logits_dimension , expected_logits ) : <nl> - " " " Returns a mock head that validates logits values and variable names . " " " <nl> - hidden_weights_names = [ <nl> - ( _HIDDEN_WEIGHTS_NAME_PATTERN + ' / part_0 : 0 ' ) % i <nl> - for i in range ( len ( hidden_units ) ) ] <nl> - hidden_biases_names = [ <nl> - ( _HIDDEN_BIASES_NAME_PATTERN + ' / part_0 : 0 ' ) % i <nl> - for i in range ( len ( hidden_units ) ) ] <nl> - expected_var_names = ( <nl> - hidden_weights_names + hidden_biases_names + <nl> - [ _LOGITS_WEIGHTS_NAME + ' / part_0 : 0 ' , _LOGITS_BIASES_NAME + ' / part_0 : 0 ' ] ) <nl> - <nl> - def _create_estimator_spec ( features , mode , logits , labels , train_op_fn ) : <nl> - del features , labels # Not used . <nl> - trainable_vars = ops . get_collection ( ops . GraphKeys . TRAINABLE_VARIABLES ) <nl> - testcase . assertItemsEqual ( <nl> - expected_var_names , <nl> - [ var . name for var in trainable_vars ] ) <nl> - loss = constant_op . constant ( 1 . ) <nl> - assert_logits = _assert_close ( <nl> - expected_logits , logits , message = ' Failed for mode = { } . ' . format ( mode ) ) <nl> - with ops . control_dependencies ( [ assert_logits ] ) : <nl> - if mode = = model_fn . ModeKeys . TRAIN : <nl> - return model_fn . EstimatorSpec ( <nl> - mode = mode , <nl> - loss = loss , <nl> - train_op = train_op_fn ( loss ) ) <nl> - elif mode = = model_fn . ModeKeys . EVAL : <nl> - return model_fn . EstimatorSpec ( <nl> - mode = mode , <nl> - loss = array_ops . identity ( loss ) ) <nl> - elif mode = = model_fn . ModeKeys . PREDICT : <nl> - return model_fn . EstimatorSpec ( <nl> - mode = mode , <nl> - predictions = { ' logits ' : array_ops . identity ( logits ) } ) <nl> - else : <nl> - testcase . fail ( ' Invalid mode : { } ' . format ( mode ) ) <nl> - <nl> - mock_head = test . mock . NonCallableMagicMock ( spec = head_lib . _Head ) <nl> - mock_head . logits_dimension = logits_dimension <nl> - mock_head . create_estimator_spec = test . mock . MagicMock ( <nl> - wraps = _create_estimator_spec ) <nl> - <nl> - return mock_head <nl> - <nl> - <nl> - class DNNModelFnTest ( test . TestCase ) : <nl> - " " " Tests that _dnn_model_fn passes expected logits to mock head . " " " <nl> - <nl> - def setUp ( self ) : <nl> - self . _model_dir = tempfile . mkdtemp ( ) <nl> - <nl> - def tearDown ( self ) : <nl> - if self . _model_dir : <nl> - writer_cache . FileWriterCache . clear ( ) <nl> - shutil . rmtree ( self . _model_dir ) <nl> - <nl> - def _test_logits ( <nl> - self , mode , hidden_units , logits_dimension , inputs , expected_logits ) : <nl> - " " " Tests that the expected logits are passed to mock head . " " " <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - training_util . create_global_step ( ) <nl> - head = _mock_head ( <nl> - self , <nl> - hidden_units = hidden_units , <nl> - logits_dimension = logits_dimension , <nl> - expected_logits = expected_logits ) <nl> - estimator_spec = dnn . _dnn_model_fn ( <nl> - features = { ' age ' : constant_op . constant ( inputs ) } , <nl> - labels = constant_op . constant ( [ [ 1 ] ] ) , <nl> - mode = mode , <nl> - head = head , <nl> - hidden_units = hidden_units , <nl> - feature_columns = [ <nl> - feature_column . numeric_column ( ' age ' , <nl> - shape = np . 
array ( inputs ) . shape [ 1 : ] ) ] , <nl> - optimizer = _mock_optimizer ( self , hidden_units ) ) <nl> - with monitored_session . MonitoredTrainingSession ( <nl> - checkpoint_dir = self . _model_dir ) as sess : <nl> - if mode = = model_fn . ModeKeys . TRAIN : <nl> - sess . run ( estimator_spec . train_op ) <nl> - elif mode = = model_fn . ModeKeys . EVAL : <nl> - sess . run ( estimator_spec . loss ) <nl> - elif mode = = model_fn . ModeKeys . PREDICT : <nl> - sess . run ( estimator_spec . predictions ) <nl> - else : <nl> - self . fail ( ' Invalid mode : { } ' . format ( mode ) ) <nl> - <nl> - def test_one_dim_logits ( self ) : <nl> - " " " Tests one - dimensional logits . <nl> - <nl> - input_layer = [ [ 10 ] ] <nl> - hidden_layer_0 = [ [ relu ( 0 . 6 * 10 + 0 . 1 ) , relu ( 0 . 5 * 10 - 0 . 1 ) ] ] = [ [ 6 . 1 , 4 . 9 ] ] <nl> - hidden_layer_1 = [ [ relu ( 1 * 6 . 1 - 0 . 8 * 4 . 9 + 0 . 2 ) , relu ( 0 . 8 * 6 . 1 - 1 * 4 . 9 - 0 . 1 ) ] ] <nl> - = [ [ relu ( 2 . 38 ) , relu ( - 0 . 12 ) ] ] = [ [ 2 . 38 , 0 ] ] <nl> - logits = [ [ - 1 * 2 . 38 + 1 * 0 + 0 . 3 ] ] = [ [ - 2 . 08 ] ] <nl> - " " " <nl> - base_global_step = 100 <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , <nl> - ) , base_global_step , self . _model_dir ) <nl> - <nl> - for mode in [ model_fn . ModeKeys . TRAIN , <nl> - model_fn . ModeKeys . EVAL , <nl> - model_fn . ModeKeys . PREDICT ] : <nl> - self . _test_logits ( <nl> - mode , <nl> - hidden_units = ( 2 , 2 ) , <nl> - logits_dimension = 1 , <nl> - inputs = [ [ 10 . ] ] , <nl> - expected_logits = [ [ - 2 . 08 ] ] ) <nl> - <nl> - def test_multi_dim_logits ( self ) : <nl> - " " " Tests multi - dimensional logits . <nl> - <nl> - input_layer = [ [ 10 ] ] <nl> - hidden_layer_0 = [ [ relu ( 0 . 6 * 10 + 0 . 1 ) , relu ( 0 . 5 * 10 - 0 . 1 ) ] ] = [ [ 6 . 1 , 4 . 9 ] ] <nl> - hidden_layer_1 = [ [ relu ( 1 * 6 . 1 - 0 . 8 * 4 . 9 + 0 . 2 ) , relu ( 0 . 8 * 6 . 1 - 1 * 4 . 9 - 0 . 1 ) ] ] <nl> - = [ [ relu ( 2 . 38 ) , relu ( - 0 . 12 ) ] ] = [ [ 2 . 38 , 0 ] ] <nl> - logits = [ [ - 1 * 2 . 38 + 0 . 3 , 1 * 2 . 38 - 0 . 3 , 0 . 5 * 2 . 38 ] ] <nl> - = [ [ - 2 . 08 , 2 . 08 , 1 . 19 ] ] <nl> - " " " <nl> - base_global_step = 100 <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , <nl> - ) , base_global_step , self . _model_dir ) <nl> - <nl> - for mode in [ model_fn . ModeKeys . TRAIN , <nl> - model_fn . ModeKeys . EVAL , <nl> - model_fn . ModeKeys . PREDICT ] : <nl> - self . _test_logits ( <nl> - mode , <nl> - hidden_units = ( 2 , 2 ) , <nl> - logits_dimension = 3 , <nl> - inputs = [ [ 10 . ] ] , <nl> - expected_logits = [ [ - 2 . 08 , 2 . 08 , 1 . 19 ] ] ) <nl> - <nl> - def test_multi_example_multi_dim_logits ( self ) : <nl> - " " " Tests multiple examples and multi - dimensional logits . <nl> - <nl> - input_layer = [ [ 10 ] , [ 5 ] ] <nl> - hidden_layer_0 = [ [ relu ( 0 . 6 * 10 + 0 . 1 ) , relu ( 0 . 5 * 10 - 0 . 1 ) ] , <nl> - [ relu ( 0 . 6 * 5 + 0 . 1 ) , relu ( 0 . 5 * 5 - 0 . 1 ) ] ] <nl> - = [ [ 6 . 1 , 4 . 9 ] , [ 3 . 1 , 2 . 4 ] ] <nl> - hidden_layer_1 = [ [ relu ( 1 * 6 . 1 - 0 . 8 * 4 . 9 + 0 . 2 ) , relu ( 0 . 8 * 6 . 1 - 1 * 4 . 9 - 0 . 1 ) ] , <nl> - [ relu ( 1 * 3 . 1 - 0 . 8 * 2 . 4 + 0 . 2 ) , relu ( 0 . 8 * 3 . 1 - 1 * 2 . 4 - 0 . 
1 ) ] ] <nl> - = [ [ 2 . 38 , 0 ] , [ 1 . 38 , 0 ] ] <nl> - logits = [ [ - 1 * 2 . 38 + 0 . 3 , 1 * 2 . 38 - 0 . 3 , 0 . 5 * 2 . 38 ] , <nl> - [ - 1 * 1 . 38 + 0 . 3 , 1 * 1 . 38 - 0 . 3 , 0 . 5 * 1 . 38 ] ] <nl> - = [ [ - 2 . 08 , 2 . 08 , 1 . 19 ] , [ - 1 . 08 , 1 . 08 , 0 . 69 ] ] <nl> - " " " <nl> - base_global_step = 100 <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , <nl> - ) , base_global_step , self . _model_dir ) <nl> - <nl> - for mode in [ model_fn . ModeKeys . TRAIN , <nl> - model_fn . ModeKeys . EVAL , <nl> - model_fn . ModeKeys . PREDICT ] : <nl> - self . _test_logits ( <nl> - mode , <nl> - hidden_units = ( 2 , 2 ) , <nl> - logits_dimension = 3 , <nl> - inputs = [ [ 10 . ] , [ 5 . ] ] , <nl> - expected_logits = [ [ - 2 . 08 , 2 . 08 , 1 . 19 ] , [ - 1 . 08 , 1 . 08 , . 69 ] ] ) <nl> - <nl> - def test_multi_dim_input_one_dim_logits ( self ) : <nl> - " " " Tests multi - dimensional inputs and one - dimensional logits . <nl> - <nl> - input_layer = [ [ 10 , 8 ] ] <nl> - hidden_layer_0 = [ [ relu ( 0 . 6 * 10 - 0 . 6 * 8 + 0 . 1 ) , relu ( 0 . 5 * 10 - 0 . 5 * 8 - 0 . 1 ) ] ] <nl> - = [ [ 1 . 3 , 0 . 9 ] ] <nl> - hidden_layer_1 = [ [ relu ( 1 * 1 . 3 - 0 . 8 * 0 . 9 + 0 . 2 ) , relu ( 0 . 8 * 1 . 3 - 1 * 0 . 9 - 0 . 2 ) ] ] <nl> - = [ [ 0 . 78 , relu ( - 0 . 06 ) ] ] = [ [ 0 . 78 , 0 ] ] <nl> - logits = [ [ - 1 * 0 . 78 + 1 * 0 + 0 . 3 ] ] = [ [ - 0 . 48 ] ] <nl> - " " " <nl> - base_global_step = 100 <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , <nl> - ) , base_global_step , self . _model_dir ) <nl> - <nl> - for mode in [ model_fn . ModeKeys . TRAIN , <nl> - model_fn . ModeKeys . EVAL , <nl> - model_fn . ModeKeys . PREDICT ] : <nl> - self . _test_logits ( <nl> - mode , <nl> - hidden_units = ( 2 , 2 ) , <nl> - logits_dimension = 1 , <nl> - inputs = [ [ 10 . , 8 . ] ] , <nl> - expected_logits = [ [ - 0 . 48 ] ] ) <nl> - <nl> - def test_multi_dim_input_multi_dim_logits ( self ) : <nl> - " " " Tests multi - dimensional inputs and multi - dimensional logits . <nl> - <nl> - input_layer = [ [ 10 , 8 ] ] <nl> - hidden_layer_0 = [ [ relu ( 0 . 6 * 10 - 0 . 6 * 8 + 0 . 1 ) , relu ( 0 . 5 * 10 - 0 . 5 * 8 - 0 . 1 ) ] ] <nl> - = [ [ 1 . 3 , 0 . 9 ] ] <nl> - hidden_layer_1 = [ [ relu ( 1 * 1 . 3 - 0 . 8 * 0 . 9 + 0 . 2 ) , relu ( 0 . 8 * 1 . 3 - 1 * 0 . 9 - 0 . 2 ) ] ] <nl> - = [ [ 0 . 78 , relu ( - 0 . 06 ) ] ] = [ [ 0 . 78 , 0 ] ] <nl> - logits = [ [ - 1 * 0 . 78 + 0 . 3 , 1 * 0 . 78 - 0 . 3 , 0 . 5 * 0 . 78 ] ] = [ [ - 0 . 48 , 0 . 48 , 0 . 39 ] ] <nl> - " " " <nl> - base_global_step = 100 <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , <nl> - ) , base_global_step , self . _model_dir ) <nl> - <nl> - for mode in [ model_fn . ModeKeys . TRAIN , <nl> - model_fn . ModeKeys . EVAL , <nl> - model_fn . ModeKeys . PREDICT ] : <nl> - self . _test_logits ( <nl> - mode , <nl> - hidden_units = ( 2 , 2 ) , <nl> - logits_dimension = 3 , <nl> - inputs = [ [ 10 . , 8 . ] ] , <nl> - expected_logits = [ [ - 0 . 48 , 0 . 48 , 0 . 
39 ] ] ) <nl> - <nl> - def test_multi_feature_column_multi_dim_logits ( self ) : <nl> - " " " Tests multiple feature columns and multi - dimensional logits . <nl> - <nl> - All numbers are the same as test_multi_dim_input_multi_dim_logits . The only <nl> - difference is that the input consists of two 1D feature columns , instead of <nl> - one 2D feature column . <nl> - " " " <nl> - base_global_step = 100 <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , <nl> - ) , base_global_step , self . _model_dir ) <nl> - hidden_units = ( 2 , 2 ) <nl> - logits_dimension = 3 <nl> - inputs = ( [ [ 10 . ] ] , [ [ 8 . ] ] ) <nl> - expected_logits = [ [ - 0 . 48 , 0 . 48 , 0 . 39 ] ] <nl> - <nl> - for mode in [ model_fn . ModeKeys . TRAIN , <nl> - model_fn . ModeKeys . EVAL , <nl> - model_fn . ModeKeys . PREDICT ] : <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - training_util . create_global_step ( ) <nl> - head = _mock_head ( <nl> - self , <nl> - hidden_units = hidden_units , <nl> - logits_dimension = logits_dimension , <nl> - expected_logits = expected_logits ) <nl> - estimator_spec = dnn . _dnn_model_fn ( <nl> - features = { ' age ' : constant_op . constant ( inputs [ 0 ] ) , <nl> - ' height ' : constant_op . constant ( inputs [ 1 ] ) } , <nl> - labels = constant_op . constant ( [ [ 1 ] ] ) , <nl> - mode = mode , <nl> - head = head , <nl> - hidden_units = hidden_units , <nl> - feature_columns = [ <nl> - feature_column . numeric_column ( ' age ' ) , <nl> - feature_column . numeric_column ( ' height ' ) ] , <nl> - optimizer = _mock_optimizer ( self , hidden_units ) ) <nl> - with monitored_session . MonitoredTrainingSession ( <nl> - checkpoint_dir = self . _model_dir ) as sess : <nl> - if mode = = model_fn . ModeKeys . TRAIN : <nl> - sess . run ( estimator_spec . train_op ) <nl> - elif mode = = model_fn . ModeKeys . EVAL : <nl> - sess . run ( estimator_spec . loss ) <nl> - elif mode = = model_fn . ModeKeys . PREDICT : <nl> - sess . run ( estimator_spec . predictions ) <nl> - else : <nl> - self . fail ( ' Invalid mode : { } ' . format ( mode ) ) <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + dnn_testing_utils . BaseDNNModelFnTest . __init__ ( self , dnn . _dnn_model_fn ) <nl> <nl> <nl> class DNNRegressorEvaluateTest ( test . TestCase ) : <nl> def test_one_dim ( self ) : <nl> " " " Asserts evaluation metrics for one - dimensional input and logits . " " " <nl> # Create checkpoint : num_inputs = 1 , hidden_units = ( 2 , 2 ) , num_outputs = 1 . <nl> global_step = 100 <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , <nl> - ) , global_step , self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , ) , global_step , self . _model_dir ) <nl> <nl> # Create DNNRegressor and evaluate . <nl> dnn_regressor = dnn . DNNRegressor ( <nl> def test_multi_dim ( self ) : <nl> " " " Asserts evaluation metrics for multi - dimensional input and logits . 
" " " <nl> # Create checkpoint : num_inputs = 2 , hidden_units = ( 2 , 2 ) , num_outputs = 3 . <nl> global_step = 100 <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , <nl> - ) , global_step , self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , <nl> + [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , <nl> + . 0 ] ) , ) , global_step , self . _model_dir ) <nl> label_dimension = 3 <nl> <nl> # Create DNNRegressor and evaluate . <nl> def tearDown ( self ) : <nl> def test_one_dim ( self ) : <nl> " " " Asserts evaluation metrics for one - dimensional input and logits . " " " <nl> global_step = 100 <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , <nl> - ) , global_step , self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , ) , global_step , self . _model_dir ) <nl> <nl> dnn_classifier = dnn . DNNClassifier ( <nl> hidden_units = ( 2 , 2 ) , <nl> def _input_fn ( ) : <nl> def test_multi_dim ( self ) : <nl> " " " Asserts evaluation metrics for multi - dimensional input and logits . " " " <nl> global_step = 100 <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , <nl> - ) , global_step , self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , <nl> + [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , <nl> + . 0 ] ) , ) , global_step , self . _model_dir ) <nl> n_classes = 3 <nl> <nl> dnn_classifier = dnn . DNNClassifier ( <nl> def tearDown ( self ) : <nl> def test_one_dim ( self ) : <nl> " " " Asserts predictions for one - dimensional input and logits . " " " <nl> # Create checkpoint : num_inputs = 1 , hidden_units = ( 2 , 2 ) , num_outputs = 1 . <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , <nl> - ) , global_step = 0 , model_dir = self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , ) , <nl> + global_step = 0 , <nl> + model_dir = self . _model_dir ) <nl> <nl> # Create DNNRegressor and predict . <nl> dnn_regressor = dnn . DNNRegressor ( <nl> def test_one_dim ( self ) : <nl> def test_multi_dim ( self ) : <nl> " " " Asserts predictions for multi - dimensional input and logits . " " " <nl> # Create checkpoint : num_inputs = 2 , hidden_units = ( 2 , 2 ) , num_outputs = 3 . <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] , [ - . 
6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , <nl> - ) , 100 , self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> + ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , ) , <nl> + 100 , self . _model_dir ) <nl> <nl> # Create DNNRegressor and predict . <nl> dnn_regressor = dnn . DNNRegressor ( <nl> def tearDown ( self ) : <nl> <nl> def test_one_dim ( self ) : <nl> " " " Asserts predictions for one - dimensional input and logits . " " " <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , <nl> - ) , global_step = 0 , model_dir = self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , ) , <nl> + global_step = 0 , <nl> + model_dir = self . _model_dir ) <nl> <nl> dnn_classifier = dnn . DNNClassifier ( <nl> hidden_units = ( 2 , 2 ) , <nl> def test_one_dim ( self ) : <nl> <nl> def test_multi_dim ( self ) : <nl> " " " Asserts predictions for multi - dimensional input and logits . " " " <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , <nl> - ) , global_step = 0 , model_dir = self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> + ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , ) , <nl> + global_step = 0 , <nl> + model_dir = self . _model_dir ) <nl> <nl> dnn_classifier = dnn . DNNClassifier ( <nl> hidden_units = ( 2 , 2 ) , <nl> def _predict_input_fn ( ) : <nl> batch_size = batch_size ) <nl> <nl> <nl> - def _full_var_name ( var_name ) : <nl> - return ' % s / part_0 : 0 ' % var_name <nl> - <nl> - <nl> - def _assert_close ( <nl> - expected , actual , rtol = 1e - 04 , message = ' ' , name = ' assert_close ' ) : <nl> - with ops . name_scope ( name , ' assert_close ' , ( expected , actual , rtol ) ) as scope : <nl> - expected = ops . convert_to_tensor ( expected , name = ' expected ' ) <nl> - actual = ops . convert_to_tensor ( actual , name = ' actual ' ) <nl> - rdiff = math_ops . abs ( ( expected - actual ) / expected , ' diff ' ) <nl> - rtol = ops . convert_to_tensor ( rtol , name = ' rtol ' ) <nl> - return check_ops . assert_less ( <nl> - rdiff , <nl> - rtol , <nl> - data = ( <nl> - message , <nl> - ' Condition expected = ~ actual did not hold element - wise : ' <nl> - ' expected = ' , expected , <nl> - ' actual = ' , actual , <nl> - ' rdiff = ' , rdiff , <nl> - ' rtol = ' , rtol , <nl> - ) , <nl> - summarize = expected . get_shape ( ) . num_elements ( ) , <nl> - name = scope ) <nl> - <nl> - <nl> class _SummaryHook ( session_run_hook . SessionRunHook ) : <nl> " " " Saves summaries every N steps .
" " " <nl> <nl> def _assert_checkpoint ( <nl> prev_layer_units = input_units <nl> for i in range ( len ( hidden_units ) ) : <nl> layer_units = hidden_units [ i ] <nl> - testcase . assertAllEqual ( ( prev_layer_units , layer_units ) , <nl> - shapes [ _HIDDEN_WEIGHTS_NAME_PATTERN % i ] ) <nl> - testcase . assertAllEqual ( ( layer_units , ) , <nl> - shapes [ _HIDDEN_BIASES_NAME_PATTERN % i ] ) <nl> + testcase . assertAllEqual ( <nl> + ( prev_layer_units , layer_units ) , <nl> + shapes [ dnn_testing_utils . HIDDEN_WEIGHTS_NAME_PATTERN % i ] ) <nl> + testcase . assertAllEqual ( <nl> + ( layer_units , ) , <nl> + shapes [ dnn_testing_utils . HIDDEN_BIASES_NAME_PATTERN % i ] ) <nl> prev_layer_units = layer_units <nl> <nl> # Output layer weights . <nl> testcase . assertAllEqual ( ( prev_layer_units , output_units ) , <nl> - shapes [ _LOGITS_WEIGHTS_NAME ] ) <nl> - testcase . assertAllEqual ( ( output_units , ) , shapes [ _LOGITS_BIASES_NAME ] ) <nl> - <nl> - <nl> - def _mock_optimizer ( testcase , hidden_units , expected_loss = None ) : <nl> - " " " Creates a mock optimizer to test the train method . <nl> - <nl> - Args : <nl> - testcase : A TestCase instance . <nl> - hidden_units : Iterable of integer sizes for the hidden layers . <nl> - expected_loss : If given , will assert the loss value . <nl> - <nl> - Returns : <nl> - A mock Optimizer . <nl> - " " " <nl> - hidden_weights_names = [ <nl> - ( _HIDDEN_WEIGHTS_NAME_PATTERN + ' / part_0 : 0 ' ) % i <nl> - for i in range ( len ( hidden_units ) ) ] <nl> - hidden_biases_names = [ <nl> - ( _HIDDEN_BIASES_NAME_PATTERN + ' / part_0 : 0 ' ) % i <nl> - for i in range ( len ( hidden_units ) ) ] <nl> - expected_var_names = ( <nl> - hidden_weights_names + hidden_biases_names + <nl> - [ _LOGITS_WEIGHTS_NAME + ' / part_0 : 0 ' , _LOGITS_BIASES_NAME + ' / part_0 : 0 ' ] ) <nl> - <nl> - def _minimize ( loss , global_step ) : <nl> - trainable_vars = ops . get_collection ( ops . GraphKeys . TRAINABLE_VARIABLES ) <nl> - testcase . assertItemsEqual ( <nl> - expected_var_names , <nl> - [ var . name for var in trainable_vars ] ) <nl> - <nl> - # Verify loss . We can ' t check the value directly , so we add an assert op . <nl> - testcase . assertEquals ( 0 , loss . shape . ndims ) <nl> - if expected_loss is None : <nl> - return state_ops . assign_add ( global_step , 1 ) . op <nl> - assert_loss = _assert_close ( <nl> - math_ops . to_float ( expected_loss , name = ' expected ' ) , loss , <nl> - name = ' assert_loss ' ) <nl> - with ops . control_dependencies ( ( assert_loss , ) ) : <nl> - return state_ops . assign_add ( global_step , 1 ) . op <nl> - <nl> - mock_optimizer = test . mock . NonCallableMagicMock ( <nl> - spec = optimizer . Optimizer , <nl> - wraps = optimizer . Optimizer ( use_locking = False , name = ' my_optimizer ' ) ) <nl> - mock_optimizer . minimize = test . mock . MagicMock ( wraps = _minimize ) <nl> - <nl> - return mock_optimizer <nl> + shapes [ dnn_testing_utils . LOGITS_WEIGHTS_NAME ] ) <nl> + testcase . assertAllEqual ( ( output_units , ) , <nl> + shapes [ dnn_testing_utils . LOGITS_BIASES_NAME ] ) <nl> <nl> <nl> def _assert_simple_summary ( testcase , expected_values , actual_summary ) : <nl> def test_from_scratch_with_default_optimizer ( self ) : <nl> <nl> def test_from_scratch ( self ) : <nl> hidden_units = ( 2 , 2 ) <nl> - mock_optimizer = _mock_optimizer ( self , hidden_units = hidden_units ) <nl> + mock_optimizer = dnn_testing_utils . mock_optimizer ( <nl> + self , hidden_units = hidden_units ) <nl> dnn_regressor = dnn . 
DNNRegressor ( <nl> hidden_units = hidden_units , <nl> feature_columns = ( feature_column . numeric_column ( ' age ' ) , ) , <nl> def test_one_dim ( self ) : <nl> " " " Asserts train loss for one - dimensional input and logits . " " " <nl> base_global_step = 100 <nl> hidden_units = ( 2 , 2 ) <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , <nl> - ) , base_global_step , self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , ) , base_global_step , self . _model_dir ) <nl> <nl> # Uses identical numbers as DNNModelFnTest . test_one_dim_logits . <nl> # See that test for calculation of logits . <nl> # logits = [ - 2 . 08 ] = > predictions = [ - 2 . 08 ] <nl> # loss = ( 1 + 2 . 08 ) ^ 2 = 9 . 4864 <nl> expected_loss = 9 . 4864 <nl> - mock_optimizer = _mock_optimizer ( <nl> + mock_optimizer = dnn_testing_utils . mock_optimizer ( <nl> self , hidden_units = hidden_units , expected_loss = expected_loss ) <nl> dnn_regressor = dnn . DNNRegressor ( <nl> hidden_units = hidden_units , <nl> def test_multi_dim ( self ) : <nl> " " " Asserts train loss for multi - dimensional input and logits . " " " <nl> base_global_step = 100 <nl> hidden_units = ( 2 , 2 ) <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , <nl> - ) , base_global_step , self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> + ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , ) , <nl> + base_global_step , self . _model_dir ) <nl> input_dimension = 2 <nl> label_dimension = 3 <nl> <nl> def test_multi_dim ( self ) : <nl> # logits = [ [ - 0 . 48 , 0 . 48 , 0 . 39 ] ] <nl> # loss = ( 1 + 0 . 48 ) ^ 2 + ( - 1 - 0 . 48 ) ^ 2 + ( 0 . 5 - 0 . 39 ) ^ 2 = 4 . 3929 <nl> expected_loss = 4 . 3929 <nl> - mock_optimizer = _mock_optimizer ( <nl> + mock_optimizer = dnn_testing_utils . mock_optimizer ( <nl> self , hidden_units = hidden_units , expected_loss = expected_loss ) <nl> dnn_regressor = dnn . DNNRegressor ( <nl> hidden_units = hidden_units , <nl> def test_from_scratch_with_default_optimizer_multi_class ( self ) : <nl> <nl> def test_from_scratch_validate_summary ( self ) : <nl> hidden_units = ( 2 , 2 ) <nl> - mock_optimizer = _mock_optimizer ( self , hidden_units = hidden_units ) <nl> + mock_optimizer = dnn_testing_utils . mock_optimizer ( <nl> + self , hidden_units = hidden_units ) <nl> dnn_classifier = dnn . DNNClassifier ( <nl> hidden_units = hidden_units , <nl> feature_columns = ( feature_column . numeric_column ( ' age ' ) , ) , <nl> def test_from_scratch_validate_summary ( self ) : <nl> def test_binary_classification ( self ) : <nl> base_global_step = 100 <nl> hidden_units = ( 2 , 2 ) <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , <nl> - ) , base_global_step , self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , ) , base_global_step , self . _model_dir ) <nl> <nl> # Uses identical numbers as DNNModelFnTest . test_one_dim_logits . <nl> # See that test for calculation of logits . <nl> # logits = [ - 2 . 08 ] = > probabilities = [ 0 . 889 , 0 . 111 ] <nl> # loss = - 1 . * log ( 0 . 111 ) = 2 . 19772100
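+ # (Editor's note) Informal check of the numbers above: sigmoid(-2.08) <nl> + # = 1 / (1 + exp(2.08)) ~= 0.111, and the label is 1, so the cross-entropy <nl> + # loss is -log(0.111) ~= 2.1977, matching expected_loss below.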
expected_loss = 2 . 19772100 <nl> - mock_optimizer = _mock_optimizer ( <nl> + mock_optimizer = dnn_testing_utils . mock_optimizer ( <nl> self , hidden_units = hidden_units , expected_loss = expected_loss ) <nl> dnn_classifier = dnn . DNNClassifier ( <nl> hidden_units = hidden_units , <nl> def test_multi_class ( self ) : <nl> n_classes = 3 <nl> base_global_step = 100 <nl> hidden_units = ( 2 , 2 ) <nl> - _create_checkpoint ( ( <nl> - ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> - ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> - ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , <nl> - ) , base_global_step , self . _model_dir ) <nl> + dnn_testing_utils . create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , ) , <nl> + base_global_step , self . _model_dir ) <nl> <nl> # Uses identical numbers as DNNModelFnTest . test_multi_dim_logits . <nl> # See that test for calculation of logits . <nl> # logits = [ - 2 . 08 , 2 . 08 , 1 . 19 ] = > probabilities = [ 0 . 0109 , 0 . 7011 , 0 . 2879 ] <nl> # loss = - 1 . * log ( 0 . 7011 ) = 0 . 35505795 <nl> expected_loss = 0 . 35505795 <nl> - mock_optimizer = _mock_optimizer ( <nl> + mock_optimizer = dnn_testing_utils . mock_optimizer ( <nl> self , hidden_units = hidden_units , expected_loss = expected_loss ) <nl> dnn_classifier = dnn . DNNClassifier ( <nl> n_classes = n_classes , <nl> new file mode 100644 <nl> index 0000000000000 . . bc9c4348c134b <nl> mmm / dev / null <nl> ppp b / tensorflow / python / estimator / canned / dnn_testing_utils . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Utils to be used in testing DNN estimators . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + import os <nl> + import shutil <nl> + import tempfile <nl> + <nl> + import numpy as np <nl> + import six <nl> + <nl> + from tensorflow . python . client import session as tf_session <nl> + from tensorflow . python .
estimator import model_fn <nl> + from tensorflow . python . estimator . canned import head as head_lib <nl> + from tensorflow . python . feature_column import feature_column <nl> + from tensorflow . python . framework import constant_op <nl> + from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import check_ops <nl> + from tensorflow . python . ops import control_flow_ops <nl> + from tensorflow . python . ops import math_ops <nl> + from tensorflow . python . ops import state_ops <nl> + from tensorflow . python . ops import variables as variables_lib <nl> + from tensorflow . python . platform import test <nl> + from tensorflow . python . summary . writer import writer_cache <nl> + from tensorflow . python . training import monitored_session <nl> + from tensorflow . python . training import optimizer <nl> + from tensorflow . python . training import saver <nl> + from tensorflow . python . training import training_util <nl> + <nl> + # pylint rules which are disabled by default for test files . <nl> + # pylint : disable = invalid - name , protected - access , missing - docstring <nl> + <nl> + # Names of variables created by model . <nl> + LEARNING_RATE_NAME = ' dnn / regression_head / dnn / learning_rate ' <nl> + HIDDEN_WEIGHTS_NAME_PATTERN = ' dnn / hiddenlayer_ % d / kernel ' <nl> + HIDDEN_BIASES_NAME_PATTERN = ' dnn / hiddenlayer_ % d / bias ' <nl> + LOGITS_WEIGHTS_NAME = ' dnn / logits / kernel ' <nl> + LOGITS_BIASES_NAME = ' dnn / logits / bias ' <nl> + <nl> + <nl> + def assert_close ( expected , actual , rtol = 1e - 04 , message = ' ' , name = ' assert_close ' ) : <nl> + with ops . name_scope ( name , ' assert_close ' , ( expected , actual , rtol ) ) as scope : <nl> + expected = ops . convert_to_tensor ( expected , name = ' expected ' ) <nl> + actual = ops . convert_to_tensor ( actual , name = ' actual ' ) <nl> + rdiff = math_ops . abs ( ( expected - actual ) / expected , ' diff ' ) <nl> + rtol = ops . convert_to_tensor ( rtol , name = ' rtol ' ) <nl> + return check_ops . assert_less ( <nl> + rdiff , <nl> + rtol , <nl> + data = ( message , ' Condition expected = ~ actual did not hold element - wise : ' <nl> + ' expected = ' , expected , ' actual = ' , actual , ' rdiff = ' , rdiff , <nl> + ' rtol = ' , rtol , ) , <nl> + summarize = expected . get_shape ( ) . num_elements ( ) , <nl> + name = scope ) <nl> + <nl> + <nl> + def create_checkpoint ( weights_and_biases , global_step , model_dir ) : <nl> + " " " Create checkpoint file with provided model weights . <nl> + <nl> + Args : <nl> + weights_and_biases : Iterable of tuples of weight and bias values . <nl> + global_step : Initial global step to save in checkpoint . <nl> + model_dir : Directory into which checkpoint is saved . <nl> + " " " <nl> + weights , biases = zip ( * weights_and_biases ) <nl> + model_weights = { } <nl> + <nl> + # Hidden layer weights . <nl> + for i in range ( 0 , len ( weights ) - 1 ) : <nl> + model_weights [ HIDDEN_WEIGHTS_NAME_PATTERN % i ] = weights [ i ] <nl> + model_weights [ HIDDEN_BIASES_NAME_PATTERN % i ] = biases [ i ] <nl> + <nl> + # Output layer weights . <nl> + model_weights [ LOGITS_WEIGHTS_NAME ] = weights [ - 1 ] <nl> + model_weights [ LOGITS_BIASES_NAME ] = biases [ - 1 ] <nl> + <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + # Create model variables . <nl> + for k , v in six . iteritems ( model_weights ) : <nl> + variables_lib . 
Variable ( v , name = k , dtype = dtypes . float32 ) <nl> + <nl> + # Create non - model variables . <nl> + global_step_var = training_util . create_global_step ( ) <nl> + <nl> + # Initialize vars and save checkpoint . <nl> + with tf_session . Session ( ) as sess : <nl> + variables_lib . global_variables_initializer ( ) . run ( ) <nl> + global_step_var . assign ( global_step ) . eval ( ) <nl> + saver . Saver ( ) . save ( sess , os . path . join ( model_dir , ' model . ckpt ' ) ) <nl> + <nl> + <nl> + def mock_head ( testcase , hidden_units , logits_dimension , expected_logits ) : <nl> + " " " Returns a mock head that validates logits values and variable names . " " " <nl> + hidden_weights_names = [ ( HIDDEN_WEIGHTS_NAME_PATTERN + ' / part_0 : 0 ' ) % i <nl> + for i in range ( len ( hidden_units ) ) ] <nl> + hidden_biases_names = [ ( HIDDEN_BIASES_NAME_PATTERN + ' / part_0 : 0 ' ) % i <nl> + for i in range ( len ( hidden_units ) ) ] <nl> + expected_var_names = ( <nl> + hidden_weights_names + hidden_biases_names + <nl> + [ LOGITS_WEIGHTS_NAME + ' / part_0 : 0 ' , LOGITS_BIASES_NAME + ' / part_0 : 0 ' ] ) <nl> + <nl> + def _create_estimator_spec ( features , mode , logits , labels , train_op_fn ) : <nl> + del features , labels # Not used . <nl> + trainable_vars = ops . get_collection ( ops . GraphKeys . TRAINABLE_VARIABLES ) <nl> + testcase . assertItemsEqual ( expected_var_names , <nl> + [ var . name for var in trainable_vars ] ) <nl> + loss = constant_op . constant ( 1 . ) <nl> + assert_logits = assert_close ( <nl> + expected_logits , logits , message = ' Failed for mode = { } . ' . format ( mode ) ) <nl> + with ops . control_dependencies ( [ assert_logits ] ) : <nl> + if mode = = model_fn . ModeKeys . TRAIN : <nl> + return model_fn . EstimatorSpec ( <nl> + mode = mode , loss = loss , train_op = train_op_fn ( loss ) ) <nl> + elif mode = = model_fn . ModeKeys . EVAL : <nl> + return model_fn . EstimatorSpec ( mode = mode , loss = array_ops . identity ( loss ) ) <nl> + elif mode = = model_fn . ModeKeys . PREDICT : <nl> + return model_fn . EstimatorSpec ( <nl> + mode = mode , predictions = { ' logits ' : array_ops . identity ( logits ) } ) <nl> + else : <nl> + testcase . fail ( ' Invalid mode : { } ' . format ( mode ) ) <nl> + <nl> + head = test . mock . NonCallableMagicMock ( spec = head_lib . _Head ) <nl> + head . logits_dimension = logits_dimension <nl> + head . create_estimator_spec = test . mock . MagicMock ( wraps = _create_estimator_spec ) <nl> + <nl> + return head <nl> + <nl> +
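+ # (Editor's note) Sketch of intended use, mirroring BaseDNNModelFnTest below: <nl> + #   head = mock_head(testcase, hidden_units=(2, 2), logits_dimension=1, <nl> + #                    expected_logits=[[-2.08]]) <nl> + # head.create_estimator_spec(...) then fails the owning test whenever the <nl> + # logits it receives drift from expected_logits beyond assert_close's <nl> + # default rtol of 1e-04. <nl> + <nl> +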
def mock_optimizer ( testcase , hidden_units , expected_loss = None ) : <nl> + " " " Creates a mock optimizer to test the train method . <nl> + <nl> + Args : <nl> + testcase : A TestCase instance . <nl> + hidden_units : Iterable of integer sizes for the hidden layers . <nl> + expected_loss : If given , will assert the loss value . <nl> + <nl> + Returns : <nl> + A mock Optimizer . <nl> + " " " <nl> + hidden_weights_names = [ ( HIDDEN_WEIGHTS_NAME_PATTERN + ' / part_0 : 0 ' ) % i <nl> + for i in range ( len ( hidden_units ) ) ] <nl> + hidden_biases_names = [ ( HIDDEN_BIASES_NAME_PATTERN + ' / part_0 : 0 ' ) % i <nl> + for i in range ( len ( hidden_units ) ) ] <nl> + expected_var_names = ( <nl> + hidden_weights_names + hidden_biases_names + <nl> + [ LOGITS_WEIGHTS_NAME + ' / part_0 : 0 ' , LOGITS_BIASES_NAME + ' / part_0 : 0 ' ] ) <nl> + <nl> + def _minimize ( loss , global_step = None , var_list = None ) : <nl> + " " " Mock of optimizer . minimize . " " " <nl> + trainable_vars = var_list or ops . get_collection ( <nl> + ops . GraphKeys . TRAINABLE_VARIABLES ) <nl> + testcase . assertItemsEqual ( expected_var_names , <nl> + [ var . name for var in trainable_vars ] ) <nl> + <nl> + # Verify loss . We can ' t check the value directly , so we add an assert op . <nl> + testcase . assertEquals ( 0 , loss . shape . ndims ) <nl> + if expected_loss is None : <nl> + if global_step is not None : <nl> + return state_ops . assign_add ( global_step , 1 ) . op <nl> + return control_flow_ops . no_op ( ) <nl> + assert_loss = assert_close ( <nl> + math_ops . to_float ( expected_loss , name = ' expected ' ) , <nl> + loss , <nl> + name = ' assert_loss ' ) <nl> + with ops . control_dependencies ( ( assert_loss , ) ) : <nl> + if global_step is not None : <nl> + return state_ops . assign_add ( global_step , 1 ) . op <nl> + return control_flow_ops . no_op ( ) <nl> + <nl> + optimizer_mock = test . mock . NonCallableMagicMock ( <nl> + spec = optimizer . Optimizer , <nl> + wraps = optimizer . Optimizer ( use_locking = False , name = ' my_optimizer ' ) ) <nl> + optimizer_mock . minimize = test . mock . MagicMock ( wraps = _minimize ) <nl> + <nl> + return optimizer_mock <nl> + <nl> + <nl> + class BaseDNNModelFnTest ( object ) : <nl> + " " " Tests that _dnn_model_fn passes expected logits to mock head . " " " <nl> + <nl> + def __init__ ( self , dnn_model_fn ) : <nl> + self . _dnn_model_fn = dnn_model_fn <nl> + <nl> + def setUp ( self ) : <nl> + self . _model_dir = tempfile . mkdtemp ( ) <nl> + <nl> + def tearDown ( self ) : <nl> + if self . _model_dir : <nl> + writer_cache . FileWriterCache . clear ( ) <nl> + shutil . rmtree ( self . _model_dir ) <nl> + <nl> + def _test_logits ( self , mode , hidden_units , logits_dimension , inputs , <nl> + expected_logits ) : <nl> + " " " Tests that the expected logits are passed to mock head . " " " <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + training_util . create_global_step ( ) <nl> + head = mock_head ( <nl> + self , <nl> + hidden_units = hidden_units , <nl> + logits_dimension = logits_dimension , <nl> + expected_logits = expected_logits ) <nl> + estimator_spec = self . _dnn_model_fn ( <nl> + features = { ' age ' : constant_op . constant ( inputs ) } , <nl> + labels = constant_op . constant ( [ [ 1 ] ] ) , <nl> + mode = mode , <nl> + head = head , <nl> + hidden_units = hidden_units , <nl> + feature_columns = [ <nl> + feature_column . numeric_column ( <nl> + ' age ' , shape = np . array ( inputs ) . shape [ 1 : ] ) <nl> + ] , <nl> + optimizer = mock_optimizer ( self , hidden_units ) ) <nl> + with monitored_session . MonitoredTrainingSession ( <nl> + checkpoint_dir = self . _model_dir ) as sess : <nl> + if mode = = model_fn . ModeKeys . TRAIN : <nl> + sess . run ( estimator_spec . train_op ) <nl> + elif mode = = model_fn . ModeKeys . EVAL : <nl> + sess . run ( estimator_spec . loss ) <nl> + elif mode = = model_fn . ModeKeys . PREDICT : <nl> + sess . run ( estimator_spec . predictions ) <nl> + else : <nl> + self . fail ( ' Invalid mode : { } ' . format ( mode ) ) <nl> +
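+ <nl> + # (Editor's note) Concrete test classes are expected to mix this base into <nl> + # a real TestCase; the dnn_test.py hunk earlier in this diff adds exactly <nl> + # such an __init__ (the class header itself is outside that hunk), roughly: <nl> + #   class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase): <nl> + #     def __init__(self, methodName='runTest'): <nl> + #       test.TestCase.__init__(self, methodName) <nl> + #       dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn) <nl> + # so every test_* below runs against whichever dnn_model_fn is supplied.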
def test_one_dim_logits ( self ) : <nl> + " " " Tests one - dimensional logits . <nl> + <nl> + input_layer = [ [ 10 ] ] <nl> + hidden_layer_0 = [ [ relu ( 0 . 6 * 10 + 0 . 1 ) , relu ( 0 . 5 * 10 - 0 . 1 ) ] ] = [ [ 6 . 1 , 4 . 9 ] ] <nl> + hidden_layer_1 = [ [ relu ( 1 * 6 . 1 - 0 . 8 * 4 . 9 + 0 . 2 ) , relu ( 0 . 8 * 6 . 1 - 1 * 4 . 9 - 0 . 2 ) ] ] <nl> + = [ [ relu ( 2 . 38 ) , relu ( - 0 . 22 ) ] ] = [ [ 2 . 38 , 0 ] ] <nl> + logits = [ [ - 1 * 2 . 38 + 1 * 0 + 0 . 3 ] ] = [ [ - 2 . 08 ] ] <nl> + " " " <nl> + base_global_step = 100 <nl> + create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , ) , base_global_step , self . _model_dir ) <nl> + <nl> + for mode in [ <nl> + model_fn . ModeKeys . TRAIN , model_fn . ModeKeys . EVAL , <nl> + model_fn . ModeKeys . PREDICT <nl> + ] : <nl> + self . _test_logits ( <nl> + mode , <nl> + hidden_units = ( 2 , 2 ) , <nl> + logits_dimension = 1 , <nl> + inputs = [ [ 10 . ] ] , <nl> + expected_logits = [ [ - 2 . 08 ] ] ) <nl> + <nl> + def test_multi_dim_logits ( self ) : <nl> + " " " Tests multi - dimensional logits . <nl> + <nl> + input_layer = [ [ 10 ] ] <nl> + hidden_layer_0 = [ [ relu ( 0 . 6 * 10 + 0 . 1 ) , relu ( 0 . 5 * 10 - 0 . 1 ) ] ] = [ [ 6 . 1 , 4 . 9 ] ] <nl> + hidden_layer_1 = [ [ relu ( 1 * 6 . 1 - 0 . 8 * 4 . 9 + 0 . 2 ) , relu ( 0 . 8 * 6 . 1 - 1 * 4 . 9 - 0 . 2 ) ] ] <nl> + = [ [ relu ( 2 . 38 ) , relu ( - 0 . 22 ) ] ] = [ [ 2 . 38 , 0 ] ] <nl> + logits = [ [ - 1 * 2 . 38 + 0 . 3 , 1 * 2 . 38 - 0 . 3 , 0 . 5 * 2 . 38 ] ] <nl> + = [ [ - 2 . 08 , 2 . 08 , 1 . 19 ] ] <nl> + " " " <nl> + base_global_step = 100 <nl> + create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> + ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , ) , <nl> + base_global_step , self . _model_dir ) <nl> + <nl> + for mode in [ <nl> + model_fn . ModeKeys . TRAIN , model_fn . ModeKeys . EVAL , <nl> + model_fn . ModeKeys . PREDICT <nl> + ] : <nl> + self . _test_logits ( <nl> + mode , <nl> + hidden_units = ( 2 , 2 ) , <nl> + logits_dimension = 3 , <nl> + inputs = [ [ 10 . ] ] , <nl> + expected_logits = [ [ - 2 . 08 , 2 . 08 , 1 . 19 ] ] ) <nl> + <nl> + def test_multi_example_multi_dim_logits ( self ) : <nl> + " " " Tests multiple examples and multi - dimensional logits . <nl> + <nl> + input_layer = [ [ 10 ] , [ 5 ] ] <nl> + hidden_layer_0 = [ [ relu ( 0 . 6 * 10 + 0 . 1 ) , relu ( 0 . 5 * 10 - 0 . 1 ) ] , <nl> + [ relu ( 0 . 6 * 5 + 0 . 1 ) , relu ( 0 . 5 * 5 - 0 . 1 ) ] ] <nl> + = [ [ 6 . 1 , 4 . 9 ] , [ 3 . 1 , 2 . 4 ] ] <nl> + hidden_layer_1 = [ [ relu ( 1 * 6 . 1 - 0 . 8 * 4 . 9 + 0 . 2 ) , relu ( 0 . 8 * 6 . 1 - 1 * 4 . 9 - 0 . 2 ) ] , <nl> + [ relu ( 1 * 3 . 1 - 0 . 8 * 2 . 4 + 0 . 2 ) , relu ( 0 . 8 * 3 . 1 - 1 * 2 . 4 - 0 . 2 ) ] ] <nl> + = [ [ 2 . 38 , 0 ] , [ 1 . 38 , 0 ] ] <nl> + logits = [ [ - 1 * 2 . 38 + 0 . 3 , 1 * 2 . 38 - 0 . 3 , 0 . 5 * 2 . 38 ] , <nl> + [ - 1 * 1 . 38 + 0 . 3 , 1 * 1 . 38 - 0 . 3 , 0 . 5 * 1 . 38 ] ] <nl> + = [ [ - 2 . 08 , 2 . 08 , 1 . 19 ] , [ - 1 . 08 , 1 . 08 , 0 . 69 ] ] <nl> + " " " <nl> + base_global_step = 100 <nl> + create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> + ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , ) , <nl> + base_global_step , self . _model_dir ) <nl> + <nl> + for mode in [ <nl> + model_fn . ModeKeys . TRAIN , model_fn . ModeKeys . EVAL , <nl> + model_fn . ModeKeys . PREDICT <nl> + ] : <nl> + self . _test_logits ( <nl> + mode , <nl> + hidden_units = ( 2 , 2 ) , <nl> + logits_dimension = 3 , <nl> + inputs = [ [ 10 . ] , [ 5 . ] ] , <nl> + expected_logits = [ [ - 2 . 08 , 2 . 08 , 1 . 19 ] , [ - 1 . 08 , 1 . 08 , . 69 ] ] ) <nl> + <nl> + def test_multi_dim_input_one_dim_logits ( self ) : <nl> + " " " Tests multi - dimensional inputs and one - dimensional logits . <nl> + <nl> + input_layer = [ [ 10 , 8 ] ] <nl> + hidden_layer_0 = [ [ relu ( 0 . 6 * 10 - 0 . 6 * 8 + 0 . 1 ) , relu ( 0 . 5 * 10 - 0 . 5 * 8 - 0 . 1 ) ] ] <nl> + = [ [ 1 . 3 , 0 . 9 ] ] <nl> + hidden_layer_1 = [ [ relu ( 1 * 1 . 3 - 0 . 8 * 0 . 9 + 0 . 2 ) , relu ( 0 . 8 * 1 . 3 - 1 * 0 . 9 - 0 . 2 ) ] ] <nl> + = [ [ 0 . 78 , relu ( - 0 . 06 ) ] ] = [ [ 0 . 78 , 0 ] ] <nl> + logits = [ [ - 1 * 0 . 78 + 1 * 0 + 0 . 3 ] ] = [ [ - 0 . 48 ] ] <nl> + " " " <nl> + base_global_step = 100 <nl> + create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> + ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . ] , [ 1 . ] ] , [ . 3 ] ) , ) , <nl> + base_global_step , self . _model_dir ) <nl> + <nl> + for mode in [ <nl> + model_fn . ModeKeys . TRAIN , model_fn . ModeKeys . EVAL , <nl> + model_fn . ModeKeys . PREDICT <nl> + ] : <nl> + self . _test_logits ( <nl> + mode , <nl> + hidden_units = ( 2 , 2 ) , <nl> + logits_dimension = 1 , <nl> + inputs = [ [ 10 . , 8 . ] ] , <nl> + expected_logits = [ [ - 0 . 48 ] ] ) <nl> + <nl> + def test_multi_dim_input_multi_dim_logits ( self ) : <nl> + " " " Tests multi - dimensional inputs and multi - dimensional logits . <nl> + <nl> + input_layer = [ [ 10 , 8 ] ] <nl> + hidden_layer_0 = [ [ relu ( 0 . 6 * 10 - 0 . 6 * 8 + 0 . 1 ) , relu ( 0 . 5 * 10 - 0 . 5 * 8 - 0 . 1 ) ] ] <nl> + = [ [ 1 . 3 , 0 . 9 ] ] <nl> + hidden_layer_1 = [ [ relu ( 1 * 1 . 3 - 0 . 8 * 0 . 9 + 0 . 2 ) , relu ( 0 . 8 * 1 . 3 - 1 * 0 . 9 - 0 . 2 ) ] ] <nl> + = [ [ 0 . 78 , relu ( - 0 . 06 ) ] ] = [ [ 0 . 78 , 0 ] ] <nl> + logits = [ [ - 1 * 0 . 78 + 0 . 3 , 1 * 0 . 78 - 0 . 3 , 0 . 5 * 0 . 78 ] ] = [ [ - 0 . 48 , 0 . 48 , 0 . 39 ] ] <nl> + " " " <nl> + base_global_step = 100 <nl> + create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> + ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , ) , <nl> + base_global_step , self . _model_dir ) <nl> + <nl> + for mode in [ <nl> + model_fn . ModeKeys . TRAIN , model_fn . ModeKeys . EVAL , <nl> + model_fn . ModeKeys . PREDICT <nl> + ] : <nl> + self . _test_logits ( <nl> + mode , <nl> + hidden_units = ( 2 , 2 ) , <nl> + logits_dimension = 3 , <nl> + inputs = [ [ 10 . , 8 . ] ] , <nl> + expected_logits = [ [ - 0 . 48 , 0 . 48 , 0 . 39 ] ] ) <nl> + <nl> + def test_multi_feature_column_multi_dim_logits ( self ) : <nl> + " " " Tests multiple feature columns and multi - dimensional logits . <nl> + <nl> + All numbers are the same as test_multi_dim_input_multi_dim_logits . The only <nl> + difference is that the input consists of two 1D feature columns , instead of <nl> + one 2D feature column . <nl> + " " " <nl> + base_global_step = 100 <nl> + create_checkpoint ( <nl> + ( ( [ [ . 6 , . 5 ] , [ - . 6 , - . 5 ] ] , [ . 1 , - . 1 ] ) , <nl> + ( [ [ 1 . , . 8 ] , [ - . 8 , - 1 . ] ] , [ . 2 , - . 2 ] ) , <nl> + ( [ [ - 1 . , 1 . , . 5 ] , [ - 1 . , 1 . , . 5 ] ] , [ . 3 , - . 3 , . 0 ] ) , ) , <nl> + base_global_step , self . _model_dir ) <nl> + hidden_units = ( 2 , 2 ) <nl> + logits_dimension = 3 <nl> + inputs = ( [ [ 10 . ] ] , [ [ 8 . ] ] ) <nl> + expected_logits = [ [ - 0 . 48 , 0 . 48 , 0 . 39 ] ] <nl> + <nl> + for mode in [ <nl> + model_fn . ModeKeys . TRAIN , model_fn . ModeKeys . EVAL , <nl> + model_fn . ModeKeys .
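+ <nl> + # (Editor's note) The docstring arithmetic above is easy to reproduce with <nl> + # numpy; for the two-example case: <nl> + #   x = np.array([[10.], [5.]]) <nl> + #   h0 = np.maximum(0., x.dot([[.6, .5]]) + [.1, -.1]) <nl> + #   h1 = np.maximum(0., h0.dot([[1., .8], [-.8, -1.]]) + [.2, -.2]) <nl> + #   h1.dot([[-1., 1., .5], [-1., 1., .5]]) + [.3, -.3, .0] <nl> + #   # => [[-2.08, 2.08, 1.19], [-1.08, 1.08, 0.69]]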
PREDICT <nl> + ] : <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + training_util . create_global_step ( ) <nl> + head = mock_head ( <nl> + self , <nl> + hidden_units = hidden_units , <nl> + logits_dimension = logits_dimension , <nl> + expected_logits = expected_logits ) <nl> + estimator_spec = self . _dnn_model_fn ( <nl> + features = { <nl> + ' age ' : constant_op . constant ( inputs [ 0 ] ) , <nl> + ' height ' : constant_op . constant ( inputs [ 1 ] ) <nl> + } , <nl> + labels = constant_op . constant ( [ [ 1 ] ] ) , <nl> + mode = mode , <nl> + head = head , <nl> + hidden_units = hidden_units , <nl> + feature_columns = [ <nl> + feature_column . numeric_column ( ' age ' ) , <nl> + feature_column . numeric_column ( ' height ' ) <nl> + ] , <nl> + optimizer = mock_optimizer ( self , hidden_units ) ) <nl> + with monitored_session . MonitoredTrainingSession ( <nl> + checkpoint_dir = self . _model_dir ) as sess : <nl> + if mode = = model_fn . ModeKeys . TRAIN : <nl> + sess . run ( estimator_spec . train_op ) <nl> + elif mode = = model_fn . ModeKeys . EVAL : <nl> + sess . run ( estimator_spec . loss ) <nl> + elif mode = = model_fn . ModeKeys . PREDICT : <nl> + sess . run ( estimator_spec . predictions ) <nl> + else : <nl> + self . fail ( ' Invalid mode : { } ' . format ( mode ) ) <nl> mmm a / tensorflow / python / estimator / canned / linear_test . py <nl> ppp b / tensorflow / python / estimator / canned / linear_test . py <nl> <nl> from __future__ import print_function <nl> <nl> import math <nl> - import os <nl> import shutil <nl> import tempfile <nl> <nl> import numpy as np <nl> - import six <nl> <nl> - from tensorflow . core . example import example_pb2 <nl> - from tensorflow . core . example import feature_pb2 <nl> - from tensorflow . python . client import session as tf_session <nl> - from tensorflow . python . estimator import estimator <nl> - from tensorflow . python . estimator import run_config <nl> from tensorflow . python . estimator . canned import linear <nl> - from tensorflow . python . estimator . canned import metric_keys <nl> - from tensorflow . python . estimator . export import export <nl> + from tensorflow . python . estimator . canned import linear_testing_utils <nl> from tensorflow . python . estimator . inputs import numpy_io <nl> - from tensorflow . python . estimator . inputs import pandas_io <nl> from tensorflow . python . feature_column import feature_column as feature_column_lib <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> - from tensorflow . python . framework import sparse_tensor <nl> - from tensorflow . python . ops import check_ops <nl> - from tensorflow . python . ops import data_flow_ops <nl> from tensorflow . python . ops import math_ops <nl> - from tensorflow . python . ops import parsing_ops <nl> from tensorflow . python . ops import state_ops <nl> - from tensorflow . python . ops import variable_scope <nl> from tensorflow . python . ops import variables <nl> - from tensorflow . python . platform import gfile <nl> from tensorflow . python . platform import test <nl> from tensorflow . python . summary . writer import writer_cache <nl> from tensorflow . python . training import checkpoint_utils <nl> - from tensorflow . python . training import input as input_lib <nl> from tensorflow . python . training import optimizer <nl> - from tensorflow . python . training import queue_runner <nl> - from tensorflow . python . training import saver <nl> - from tensorflow . python . 
training import session_run_hook <nl> - <nl> - <nl> - try : <nl> - # pylint : disable = g - import - not - at - top <nl> - import pandas as pd <nl> - HAS_PANDAS = True <nl> - except IOError : <nl> - # Pandas writes a temporary file during import . If it fails , don ' t use pandas . <nl> - HAS_PANDAS = False <nl> - except ImportError : <nl> - HAS_PANDAS = False <nl> - <nl> - <nl> - # Names of variables created by model . <nl> - _AGE_WEIGHT_NAME = ' linear / linear_model / age / weights ' <nl> - _HEIGHT_WEIGHT_NAME = ' linear / linear_model / height / weights ' <nl> - _BIAS_NAME = ' linear / linear_model / bias_weights ' <nl> - _LANGUAGE_WEIGHT_NAME = ' linear / linear_model / language / weights ' <nl> - <nl> - <nl> - def _save_variables_to_ckpt ( model_dir ) : <nl> - init_all_op = [ variables . global_variables_initializer ( ) ] <nl> - with tf_session . Session ( ) as sess : <nl> - sess . run ( init_all_op ) <nl> - saver . Saver ( ) . save ( sess , os . path . join ( model_dir , ' model . ckpt ' ) ) <nl> - <nl> - <nl> - def _queue_parsed_features ( feature_map ) : <nl> - tensors_to_enqueue = [ ] <nl> - keys = [ ] <nl> - for key , tensor in six . iteritems ( feature_map ) : <nl> - keys . append ( key ) <nl> - tensors_to_enqueue . append ( tensor ) <nl> - queue_dtypes = [ x . dtype for x in tensors_to_enqueue ] <nl> - input_queue = data_flow_ops . FIFOQueue ( capacity = 100 , dtypes = queue_dtypes ) <nl> - queue_runner . add_queue_runner ( <nl> - queue_runner . QueueRunner ( <nl> - input_queue , <nl> - [ input_queue . enqueue ( tensors_to_enqueue ) ] ) ) <nl> - dequeued_tensors = input_queue . dequeue ( ) <nl> - return { keys [ i ] : dequeued_tensors [ i ] for i in range ( len ( dequeued_tensors ) ) } <nl> - <nl> - <nl> - class _CheckPartitionerVarHook ( session_run_hook . SessionRunHook ) : <nl> - " " " A ` SessionRunHook ` to check a paritioned variable . " " " <nl> - <nl> - def __init__ ( self , test_case , var_name , var_dim , partitions ) : <nl> - self . _test_case = test_case <nl> - self . _var_name = var_name <nl> - self . _var_dim = var_dim <nl> - self . _partitions = partitions <nl> - <nl> - def begin ( self ) : <nl> - with variable_scope . variable_scope ( <nl> - variable_scope . get_variable_scope ( ) ) as scope : <nl> - scope . reuse_variables ( ) <nl> - partitioned_weight = variable_scope . get_variable ( <nl> - self . _var_name , shape = ( self . _var_dim , 1 ) ) <nl> - self . _test_case . assertTrue ( <nl> - isinstance ( partitioned_weight , variables . PartitionedVariable ) ) <nl> - for part in partitioned_weight : <nl> - self . _test_case . assertEqual ( self . _var_dim / / self . _partitions , <nl> - part . get_shape ( ) [ 0 ] ) <nl> - <nl> - <nl> - class LinearRegressorPartitionerTest ( test . TestCase ) : <nl> <nl> - def setUp ( self ) : <nl> - self . _model_dir = tempfile . mkdtemp ( ) <nl> - <nl> - def tearDown ( self ) : <nl> - if self . _model_dir : <nl> - writer_cache . FileWriterCache . clear ( ) <nl> - shutil . rmtree ( self . _model_dir ) <nl> - <nl> - def testPartitioner ( self ) : <nl> - x_dim = 64 <nl> - partitions = 4 <nl> - <nl> - def _partitioner ( shape , dtype ) : <nl> - del dtype # unused ; required by Fn signature . <nl> - # Only partition the embedding tensor . <nl> - return [ partitions , 1 ] if shape [ 0 ] = = x_dim else [ 1 ] <nl> - <nl> - regressor = linear . LinearRegressor ( <nl> - feature_columns = ( <nl> - feature_column_lib . 
categorical_column_with_hash_bucket ( <nl> - ' language ' , hash_bucket_size = x_dim ) , ) , <nl> - partitioner = _partitioner , <nl> - model_dir = self . _model_dir ) <nl> - <nl> - def _input_fn ( ) : <nl> - return { <nl> - ' language ' : sparse_tensor . SparseTensor ( <nl> - values = [ ' english ' , ' spanish ' ] , <nl> - indices = [ [ 0 , 0 ] , [ 0 , 1 ] ] , <nl> - dense_shape = [ 1 , 2 ] ) <nl> - } , [ [ 10 . ] ] <nl> - <nl> - hook = _CheckPartitionerVarHook ( <nl> - self , _LANGUAGE_WEIGHT_NAME , x_dim , partitions ) <nl> - regressor . train ( <nl> - input_fn = _input_fn , steps = 1 , hooks = [ hook ] ) <nl> - <nl> - def testDefaultPartitionerWithMultiplePsReplicas ( self ) : <nl> - partitions = 2 <nl> - # This results in weights larger than the default partition size of 64M , <nl> - # so partitioned weights are created ( each weight uses 4 bytes ) . <nl> - x_dim = 32 < < 20 <nl> - <nl> - class FakeRunConfig ( run_config . RunConfig ) : <nl> - <nl> - @ property <nl> - def num_ps_replicas ( self ) : <nl> - return partitions <nl> - <nl> - # Mock the device setter as ps is not available on test machines . <nl> - with test . mock . patch . object ( estimator , <nl> - ' _get_replica_device_setter ' , <nl> - return_value = lambda _ : ' / cpu : 0 ' ) : <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = ( <nl> - feature_column_lib . categorical_column_with_hash_bucket ( <nl> - ' language ' , hash_bucket_size = x_dim ) , ) , <nl> - config = FakeRunConfig ( ) , <nl> - model_dir = self . _model_dir ) <nl> - <nl> - def _input_fn ( ) : <nl> - return { <nl> - ' language ' : sparse_tensor . SparseTensor ( <nl> - values = [ ' english ' , ' spanish ' ] , <nl> - indices = [ [ 0 , 0 ] , [ 0 , 1 ] ] , <nl> - dense_shape = [ 1 , 2 ] ) <nl> - } , [ [ 10 . ] ] <nl> - <nl> - hook = _CheckPartitionerVarHook ( <nl> - self , _LANGUAGE_WEIGHT_NAME , x_dim , partitions ) <nl> - linear_regressor . train ( <nl> - input_fn = _input_fn , steps = 1 , hooks = [ hook ] ) <nl> - <nl> - <nl> - # TODO ( b / 36813849 ) : Add tests with dynamic shape inputs using placeholders . <nl> - class LinearRegressorEvaluationTest ( test . TestCase ) : <nl> - <nl> - def setUp ( self ) : <nl> - self . _model_dir = tempfile . mkdtemp ( ) <nl> - <nl> - def tearDown ( self ) : <nl> - if self . _model_dir : <nl> - writer_cache . FileWriterCache . clear ( ) <nl> - shutil . rmtree ( self . _model_dir ) <nl> - <nl> - def test_evaluation_for_simple_data ( self ) : <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( [ [ 11 . 0 ] ] , name = _AGE_WEIGHT_NAME ) <nl> - variables . Variable ( [ 2 . 0 ] , name = _BIAS_NAME ) <nl> - variables . Variable ( <nl> - 100 , name = ops . GraphKeys . GLOBAL_STEP , dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> - <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> - model_dir = self . _model_dir ) <nl> - eval_metrics = linear_regressor . evaluate ( <nl> - input_fn = lambda : ( { ' age ' : ( ( 1 , ) , ) } , ( ( 10 . , ) , ) ) , steps = 1 ) <nl> - <nl> - # Logit is ( 1 . * 11 . 0 + 2 . 0 ) = 13 , while label is 10 . Loss is 3 * * 2 = 9 . <nl> - self . assertDictEqual ( { <nl> - metric_keys . MetricKeys . LOSS : 9 . , <nl> - metric_keys . MetricKeys . LOSS_MEAN : 9 . , <nl> - ops . GraphKeys . GLOBAL_STEP : 100 <nl> - } , eval_metrics ) <nl> - <nl> - def test_evaluation_batch ( self ) : <nl> - " " " Tests evaluation for batch_size = = 2 . 
" " " <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( [ [ 11 . 0 ] ] , name = _AGE_WEIGHT_NAME ) <nl> - variables . Variable ( [ 2 . 0 ] , name = _BIAS_NAME ) <nl> - variables . Variable ( <nl> - 100 , name = ops . GraphKeys . GLOBAL_STEP , dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> - <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> - model_dir = self . _model_dir ) <nl> - eval_metrics = linear_regressor . evaluate ( <nl> - input_fn = lambda : ( { ' age ' : ( ( 1 , ) , ( 1 , ) ) } , ( ( 10 . , ) , ( 10 . , ) ) ) , steps = 1 ) <nl> - <nl> - # Logit is ( 1 . * 11 . 0 + 2 . 0 ) = 13 , while label is 10 . <nl> - # Loss per example is 3 * * 2 = 9 . <nl> - # Training loss is the sum over batch = 9 + 9 = 18 <nl> - # Average loss is the average over batch = 9 <nl> - self . assertDictEqual ( { <nl> - metric_keys . MetricKeys . LOSS : 18 . , <nl> - metric_keys . MetricKeys . LOSS_MEAN : 9 . , <nl> - ops . GraphKeys . GLOBAL_STEP : 100 <nl> - } , eval_metrics ) <nl> - <nl> - def test_evaluation_weights ( self ) : <nl> - " " " Tests evaluation with weights . " " " <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( [ [ 11 . 0 ] ] , name = _AGE_WEIGHT_NAME ) <nl> - variables . Variable ( [ 2 . 0 ] , name = _BIAS_NAME ) <nl> - variables . Variable ( <nl> - 100 , name = ops . GraphKeys . GLOBAL_STEP , dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> - <nl> - def _input_fn ( ) : <nl> - features = { <nl> - ' age ' : ( ( 1 , ) , ( 1 , ) ) , <nl> - ' weights ' : ( ( 1 . , ) , ( 2 . , ) ) <nl> - } <nl> - labels = ( ( 10 . , ) , ( 10 . , ) ) <nl> - return features , labels <nl> - <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> - weight_feature_key = ' weights ' , <nl> - model_dir = self . _model_dir ) <nl> - eval_metrics = linear_regressor . evaluate ( input_fn = _input_fn , steps = 1 ) <nl> - <nl> - # Logit is ( 1 . * 11 . 0 + 2 . 0 ) = 13 , while label is 10 . <nl> - # Loss per example is 3 * * 2 = 9 . <nl> - # Training loss is the weighted sum over batch = 9 + 2 * 9 = 27 <nl> - # average loss is the weighted average = 9 + 2 * 9 / ( 1 + 2 ) = 9 <nl> - self . assertDictEqual ( { <nl> - metric_keys . MetricKeys . LOSS : 27 . , <nl> - metric_keys . MetricKeys . LOSS_MEAN : 9 . , <nl> - ops . GraphKeys . GLOBAL_STEP : 100 <nl> - } , eval_metrics ) <nl> - <nl> - def test_evaluation_for_multi_dimensions ( self ) : <nl> - x_dim = 3 <nl> - label_dim = 2 <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( <nl> - [ [ 1 . 0 , 2 . 0 ] , <nl> - [ 3 . 0 , 4 . 0 ] , <nl> - [ 5 . 0 , 6 . 0 ] ] , <nl> - name = _AGE_WEIGHT_NAME ) <nl> - variables . Variable ( [ 7 . 0 , 8 . 0 ] , name = _BIAS_NAME ) <nl> - variables . Variable ( 100 , name = ' global_step ' , dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> - <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = ( <nl> - feature_column_lib . numeric_column ( ' age ' , shape = ( x_dim , ) ) , ) , <nl> - label_dimension = label_dim , <nl> - model_dir = self . _model_dir ) <nl> - input_fn = numpy_io . numpy_input_fn ( <nl> - x = { <nl> - ' age ' : np . array ( [ [ 2 . , 4 . , 5 . ] ] ) , <nl> - } , <nl> - y = np . array ( [ [ 46 . , 58 . 
] ] ) , <nl> - batch_size = 1 , <nl> - num_epochs = None , <nl> - shuffle = False ) <nl> - eval_metrics = linear_regressor . evaluate ( <nl> - input_fn = input_fn , steps = 1 ) <nl> - <nl> - self . assertItemsEqual ( ( <nl> - metric_keys . MetricKeys . LOSS , <nl> - metric_keys . MetricKeys . LOSS_MEAN , <nl> - ops . GraphKeys . GLOBAL_STEP <nl> - ) , eval_metrics . keys ( ) ) <nl> - <nl> - # Logit is <nl> - # [ 2 . , 4 . , 5 . ] * [ 1 . 0 , 2 . 0 ] + [ 7 . 0 , 8 . 0 ] = [ 39 , 50 ] + [ 7 . 0 , 8 . 0 ] <nl> - # [ 3 . 0 , 4 . 0 ] <nl> - # [ 5 . 0 , 6 . 0 ] <nl> - # which is [ 46 , 58 ] <nl> - self . assertAlmostEqual ( 0 , eval_metrics [ metric_keys . MetricKeys . LOSS ] ) <nl> - <nl> - def test_evaluation_for_multiple_feature_columns ( self ) : <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( [ [ 10 . 0 ] ] , name = _AGE_WEIGHT_NAME ) <nl> - variables . Variable ( [ [ 2 . 0 ] ] , name = _HEIGHT_WEIGHT_NAME ) <nl> - variables . Variable ( [ 5 . 0 ] , name = _BIAS_NAME ) <nl> - variables . Variable ( <nl> - 100 , name = ops . GraphKeys . GLOBAL_STEP , dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> - <nl> - batch_size = 2 <nl> - feature_columns = [ <nl> - feature_column_lib . numeric_column ( ' age ' ) , <nl> - feature_column_lib . numeric_column ( ' height ' ) <nl> - ] <nl> - input_fn = numpy_io . numpy_input_fn ( <nl> - x = { <nl> - ' age ' : np . array ( [ 20 , 40 ] ) , <nl> - ' height ' : np . array ( [ 4 , 8 ] ) <nl> - } , <nl> - y = np . array ( [ [ 213 . ] , [ 421 . ] ] ) , <nl> - batch_size = batch_size , <nl> - num_epochs = None , <nl> - shuffle = False ) <nl> - <nl> - est = linear . LinearRegressor ( <nl> - feature_columns = feature_columns , <nl> - model_dir = self . _model_dir ) <nl> - <nl> - eval_metrics = est . evaluate ( input_fn = input_fn , steps = 1 ) <nl> - self . assertItemsEqual ( ( <nl> - metric_keys . MetricKeys . LOSS , <nl> - metric_keys . MetricKeys . LOSS_MEAN , <nl> - ops . GraphKeys . GLOBAL_STEP <nl> - ) , eval_metrics . keys ( ) ) <nl> - <nl> - # Logit is [ ( 20 . * 10 . 0 + 4 * 2 . 0 + 5 . 0 ) , ( 40 . * 10 . 0 + 8 * 2 . 0 + 5 . 0 ) ] = <nl> - # [ 213 . 0 , 421 . 0 ] , while label is [ 213 . , 421 . ] . Loss = 0 . <nl> - self . assertAlmostEqual ( 0 , eval_metrics [ metric_keys . MetricKeys . LOSS ] ) <nl> - <nl> - <nl> - class LinearRegressorPredictTest ( test . TestCase ) : <nl> - <nl> - def setUp ( self ) : <nl> - self . _model_dir = tempfile . mkdtemp ( ) <nl> - <nl> - def tearDown ( self ) : <nl> - if self . _model_dir : <nl> - writer_cache . FileWriterCache . clear ( ) <nl> - shutil . rmtree ( self . _model_dir ) <nl> - <nl> - def test_1d ( self ) : <nl> - " " " Tests predict when all variables are one - dimensional . " " " <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( [ [ 10 . ] ] , name = ' linear / linear_model / x / weights ' ) <nl> - variables . Variable ( [ . 2 ] , name = _BIAS_NAME ) <nl> - variables . Variable ( 100 , name = ' global_step ' , dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> - <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = ( feature_column_lib . numeric_column ( ' x ' ) , ) , <nl> - model_dir = self . _model_dir ) <nl> - <nl> - predict_input_fn = numpy_io . numpy_input_fn ( <nl> - x = { ' x ' : np . array ( [ [ 2 . ] ] ) } , y = None , batch_size = 1 , num_epochs = 1 , <nl> - shuffle = False ) <nl> - predictions = linear_regressor . 
predict ( input_fn = predict_input_fn ) <nl> - predicted_scores = list ( [ x [ ' predictions ' ] for x in predictions ] ) <nl> - # x * weight + bias = 2 . * 10 . + . 2 = 20 . 2 <nl> - self . assertAllClose ( [ [ 20 . 2 ] ] , predicted_scores ) <nl> - <nl> - def testMultiDim ( self ) : <nl> - " " " Tests predict when all variables are multi - dimenstional . " " " <nl> - batch_size = 2 <nl> - label_dimension = 3 <nl> - x_dim = 4 <nl> - feature_columns = ( <nl> - feature_column_lib . numeric_column ( ' x ' , shape = ( x_dim , ) ) , ) <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( # shape = [ x_dim , label_dimension ] <nl> - [ [ 1 . , 2 . , 3 . ] , <nl> - [ 2 . , 3 . , 4 . ] , <nl> - [ 3 . , 4 . , 5 . ] , <nl> - [ 4 . , 5 . , 6 . ] ] , <nl> - name = ' linear / linear_model / x / weights ' ) <nl> - variables . Variable ( # shape = [ label_dimension ] <nl> - [ . 2 , . 4 , . 6 ] , name = _BIAS_NAME ) <nl> - variables . Variable ( 100 , name = ' global_step ' , dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> - <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = feature_columns , <nl> - label_dimension = label_dimension , <nl> - model_dir = self . _model_dir ) <nl> - <nl> - predict_input_fn = numpy_io . numpy_input_fn ( <nl> - # x shape = [ batch_size , x_dim ] <nl> - x = { ' x ' : np . array ( [ [ 1 . , 2 . , 3 . , 4 . ] , <nl> - [ 5 . , 6 . , 7 . , 8 . ] ] ) } , <nl> - y = None , batch_size = batch_size , num_epochs = 1 , shuffle = False ) <nl> - predictions = linear_regressor . predict ( input_fn = predict_input_fn ) <nl> - predicted_scores = list ( [ x [ ' predictions ' ] for x in predictions ] ) <nl> - # score = x * weight + bias , shape = [ batch_size , label_dimension ] <nl> - self . assertAllClose ( <nl> - [ [ 30 . 2 , 40 . 4 , 50 . 6 ] , [ 70 . 2 , 96 . 4 , 122 . 6 ] ] , predicted_scores ) <nl> - <nl> - def testTwoFeatureColumns ( self ) : <nl> - " " " Tests predict with two feature columns . " " " <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( [ [ 10 . ] ] , name = ' linear / linear_model / x0 / weights ' ) <nl> - variables . Variable ( [ [ 20 . ] ] , name = ' linear / linear_model / x1 / weights ' ) <nl> - variables . Variable ( [ . 2 ] , name = _BIAS_NAME ) <nl> - variables . Variable ( 100 , name = ' global_step ' , dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> - <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = ( <nl> - feature_column_lib . numeric_column ( ' x0 ' ) , <nl> - feature_column_lib . numeric_column ( ' x1 ' ) ) , <nl> - model_dir = self . _model_dir ) <nl> - <nl> - predict_input_fn = numpy_io . numpy_input_fn ( <nl> - x = { ' x0 ' : np . array ( [ [ 2 . ] ] ) , <nl> - ' x1 ' : np . array ( [ [ 3 . ] ] ) } , <nl> - y = None , batch_size = 1 , num_epochs = 1 , <nl> - shuffle = False ) <nl> - predictions = linear_regressor . predict ( input_fn = predict_input_fn ) <nl> - predicted_scores = list ( [ x [ ' predictions ' ] for x in predictions ] ) <nl> - # x0 * weight0 + x1 * weight1 + bias = 2 . * 10 . + 3 . * 20 + . 2 = 80 . 2 <nl> - self . assertAllClose ( [ [ 80 . 2 ] ] , predicted_scores ) <nl> - <nl> - <nl> - class LinearRegressorIntegrationTest ( test . TestCase ) : <nl> - <nl> - def setUp ( self ) : <nl> - self . _model_dir = tempfile . mkdtemp ( ) <nl> - <nl> - def tearDown ( self ) : <nl> - if self . _model_dir : <nl> - writer_cache . FileWriterCache . clear ( ) <nl> - shutil . 
rmtree ( self . _model_dir ) <nl> - <nl> - def _test_complete_flow ( <nl> - self , train_input_fn , eval_input_fn , predict_input_fn , input_dimension , <nl> - label_dimension , prediction_length , batch_size ) : <nl> - feature_columns = [ <nl> - feature_column_lib . numeric_column ( ' x ' , shape = ( input_dimension , ) ) <nl> - ] <nl> - est = linear . LinearRegressor ( <nl> - feature_columns = feature_columns , label_dimension = label_dimension , <nl> - model_dir = self . _model_dir ) <nl> - <nl> - # TRAIN <nl> - # learn y = x <nl> - est . train ( train_input_fn , steps = 200 ) <nl> - <nl> - # EVALUTE <nl> - scores = est . evaluate ( eval_input_fn ) <nl> - self . assertEqual ( 200 , scores [ ops . GraphKeys . GLOBAL_STEP ] ) <nl> - self . assertIn ( metric_keys . MetricKeys . LOSS , six . iterkeys ( scores ) ) <nl> - <nl> - # PREDICT <nl> - predictions = np . array ( [ <nl> - x [ ' predictions ' ] for x in est . predict ( predict_input_fn ) ] ) <nl> - self . assertAllEqual ( ( prediction_length , label_dimension ) , predictions . shape ) <nl> - <nl> - # EXPORT <nl> - feature_spec = feature_column_lib . make_parse_example_spec ( <nl> - feature_columns ) <nl> - serving_input_receiver_fn = export . build_parsing_serving_input_receiver_fn ( <nl> - feature_spec ) <nl> - export_dir = est . export_savedmodel ( tempfile . mkdtemp ( ) , <nl> - serving_input_receiver_fn ) <nl> - self . assertTrue ( gfile . Exists ( export_dir ) ) <nl> - <nl> - def test_numpy_input_fn ( self ) : <nl> - " " " Tests complete flow with numpy_input_fn . " " " <nl> - label_dimension = 2 <nl> - input_dimension = label_dimension <nl> - batch_size = 10 <nl> - prediction_length = batch_size <nl> - data = np . linspace ( 0 . , 2 . , batch_size * label_dimension , dtype = np . float32 ) <nl> - data = data . reshape ( batch_size , label_dimension ) <nl> - <nl> - train_input_fn = numpy_io . numpy_input_fn ( <nl> - x = { ' x ' : data } , y = data , batch_size = batch_size , num_epochs = None , <nl> - shuffle = True ) <nl> - eval_input_fn = numpy_io . numpy_input_fn ( <nl> - x = { ' x ' : data } , y = data , batch_size = batch_size , num_epochs = 1 , <nl> - shuffle = False ) <nl> - predict_input_fn = numpy_io . numpy_input_fn ( <nl> - x = { ' x ' : data } , y = None , batch_size = batch_size , num_epochs = 1 , <nl> - shuffle = False ) <nl> - <nl> - self . _test_complete_flow ( <nl> - train_input_fn = train_input_fn , <nl> - eval_input_fn = eval_input_fn , <nl> - predict_input_fn = predict_input_fn , <nl> - input_dimension = input_dimension , <nl> - label_dimension = label_dimension , <nl> - prediction_length = prediction_length , <nl> - batch_size = batch_size ) <nl> - <nl> - def test_pandas_input_fn ( self ) : <nl> - " " " Tests complete flow with pandas_input_fn . " " " <nl> - if not HAS_PANDAS : <nl> - return <nl> - <nl> - # Pandas DataFrame natually supports 1 dim data only . <nl> - label_dimension = 1 <nl> - input_dimension = label_dimension <nl> - batch_size = 10 <nl> - data = np . array ( [ 1 . , 2 . , 3 . , 4 . ] , dtype = np . float32 ) <nl> - x = pd . DataFrame ( { ' x ' : data } ) <nl> - y = pd . Series ( data ) <nl> - prediction_length = 4 <nl> - <nl> - train_input_fn = pandas_io . pandas_input_fn ( <nl> - x = x , <nl> - y = y , <nl> - batch_size = batch_size , <nl> - num_epochs = None , <nl> - shuffle = True ) <nl> - eval_input_fn = pandas_io . pandas_input_fn ( <nl> - x = x , <nl> - y = y , <nl> - batch_size = batch_size , <nl> - shuffle = False ) <nl> - predict_input_fn = pandas_io . 
pandas_input_fn ( <nl> - x = x , <nl> - batch_size = batch_size , <nl> - shuffle = False ) <nl> - <nl> - self . _test_complete_flow ( <nl> - train_input_fn = train_input_fn , <nl> - eval_input_fn = eval_input_fn , <nl> - predict_input_fn = predict_input_fn , <nl> - input_dimension = input_dimension , <nl> - label_dimension = label_dimension , <nl> - prediction_length = prediction_length , <nl> - batch_size = batch_size ) <nl> - <nl> - def test_input_fn_from_parse_example ( self ) : <nl> - " " " Tests complete flow with input_fn constructed from parse_example . " " " <nl> - label_dimension = 2 <nl> - input_dimension = label_dimension <nl> - batch_size = 10 <nl> - prediction_length = batch_size <nl> - data = np . linspace ( 0 . , 2 . , batch_size * label_dimension , dtype = np . float32 ) <nl> - data = data . reshape ( batch_size , label_dimension ) <nl> - <nl> - serialized_examples = [ ] <nl> - for datum in data : <nl> - example = example_pb2 . Example ( features = feature_pb2 . Features ( <nl> - feature = { <nl> - ' x ' : feature_pb2 . Feature ( <nl> - float_list = feature_pb2 . FloatList ( value = datum ) ) , <nl> - ' y ' : feature_pb2 . Feature ( <nl> - float_list = feature_pb2 . FloatList ( <nl> - value = datum [ : label_dimension ] ) ) , <nl> - } ) ) <nl> - serialized_examples . append ( example . SerializeToString ( ) ) <nl> - <nl> - feature_spec = { <nl> - ' x ' : parsing_ops . FixedLenFeature ( [ input_dimension ] , dtypes . float32 ) , <nl> - ' y ' : parsing_ops . FixedLenFeature ( [ label_dimension ] , dtypes . float32 ) , <nl> - } <nl> - <nl> - def _train_input_fn ( ) : <nl> - feature_map = parsing_ops . parse_example ( serialized_examples , feature_spec ) <nl> - features = _queue_parsed_features ( feature_map ) <nl> - labels = features . pop ( ' y ' ) <nl> - return features , labels <nl> - def _eval_input_fn ( ) : <nl> - feature_map = parsing_ops . parse_example ( <nl> - input_lib . limit_epochs ( serialized_examples , num_epochs = 1 ) , <nl> - feature_spec ) <nl> - features = _queue_parsed_features ( feature_map ) <nl> - labels = features . pop ( ' y ' ) <nl> - return features , labels <nl> - def _predict_input_fn ( ) : <nl> - feature_map = parsing_ops . parse_example ( <nl> - input_lib . limit_epochs ( serialized_examples , num_epochs = 1 ) , <nl> - feature_spec ) <nl> - features = _queue_parsed_features ( feature_map ) <nl> - features . pop ( ' y ' ) <nl> - return features , None <nl> - <nl> - self . _test_complete_flow ( <nl> - train_input_fn = _train_input_fn , <nl> - eval_input_fn = _eval_input_fn , <nl> - predict_input_fn = _predict_input_fn , <nl> - input_dimension = input_dimension , <nl> - label_dimension = label_dimension , <nl> - prediction_length = prediction_length , <nl> - batch_size = batch_size ) <nl> - <nl> - <nl> - def _assert_close ( expected , actual , rtol = 1e - 04 , name = ' assert_close ' ) : <nl> - with ops . name_scope ( name , ' assert_close ' , ( expected , actual , rtol ) ) as scope : <nl> - expected = ops . convert_to_tensor ( expected , name = ' expected ' ) <nl> - actual = ops . convert_to_tensor ( actual , name = ' actual ' ) <nl> - rdiff = math_ops . abs ( expected - actual , ' diff ' ) / math_ops . abs ( expected ) <nl> - rtol = ops . convert_to_tensor ( rtol , name = ' rtol ' ) <nl> - return check_ops . 
assert_less ( <nl> - rdiff , <nl> - rtol , <nl> - data = ( <nl> - ' Condition expected = ~ actual did not hold element - wise : ' <nl> - ' expected = ' , expected , <nl> - ' actual = ' , actual , <nl> - ' rdiff = ' , rdiff , <nl> - ' rtol = ' , rtol , <nl> - ) , <nl> - name = scope ) <nl> - <nl> - <nl> - class LinearRegressorTrainingTest ( test . TestCase ) : <nl> - <nl> - def setUp ( self ) : <nl> - self . _model_dir = tempfile . mkdtemp ( ) <nl> <nl> - def tearDown ( self ) : <nl> - if self . _model_dir : <nl> - writer_cache . FileWriterCache . clear ( ) <nl> - shutil . rmtree ( self . _model_dir ) <nl> + def _linear_regressor_fn ( * args , * * kwargs ) : <nl> + return linear . LinearRegressor ( * args , * * kwargs ) <nl> <nl> - def _mock_optimizer ( self , expected_loss = None ) : <nl> - expected_var_names = [ <nl> - ' % s / part_0 : 0 ' % _AGE_WEIGHT_NAME , <nl> - ' % s / part_0 : 0 ' % _BIAS_NAME <nl> - ] <nl> <nl> - def _minimize ( loss , global_step ) : <nl> - trainable_vars = ops . get_collection ( ops . GraphKeys . TRAINABLE_VARIABLES ) <nl> - self . assertItemsEqual ( <nl> - expected_var_names , <nl> - [ var . name for var in trainable_vars ] ) <nl> + class LinearRegressorPartitionerTest ( <nl> + linear_testing_utils . BaseLinearRegressorPartitionerTest , test . TestCase ) : <nl> <nl> - # Verify loss . We can ' t check the value directly , so we add an assert op . <nl> - self . assertEquals ( 0 , loss . shape . ndims ) <nl> - if expected_loss is None : <nl> - return state_ops . assign_add ( global_step , 1 ) . op <nl> - assert_loss = _assert_close ( <nl> - math_ops . to_float ( expected_loss , name = ' expected ' ) , loss , <nl> - name = ' assert_loss ' ) <nl> - with ops . control_dependencies ( ( assert_loss , ) ) : <nl> - return state_ops . assign_add ( global_step , 1 ) . op <nl> - <nl> - mock_optimizer = test . mock . NonCallableMock ( <nl> - spec = optimizer . Optimizer , <nl> - wraps = optimizer . Optimizer ( use_locking = False , name = ' my_optimizer ' ) ) <nl> - mock_optimizer . minimize = test . mock . MagicMock ( wraps = _minimize ) <nl> - <nl> - # NOTE : Estimator . params performs a deepcopy , which wreaks havoc with mocks . <nl> - # So , return mock_optimizer itself for deepcopy . <nl> - mock_optimizer . __deepcopy__ = lambda _ : mock_optimizer <nl> - return mock_optimizer <nl> - <nl> - def _assert_checkpoint ( <nl> - self , expected_global_step , expected_age_weight = None , expected_bias = None ) : <nl> - shapes = { <nl> - name : shape for ( name , shape ) in <nl> - checkpoint_utils . list_variables ( self . _model_dir ) <nl> - } <nl> - <nl> - self . assertEqual ( [ ] , shapes [ ops . GraphKeys . GLOBAL_STEP ] ) <nl> - self . assertEqual ( <nl> - expected_global_step , <nl> - checkpoint_utils . load_variable ( <nl> - self . _model_dir , ops . GraphKeys . GLOBAL_STEP ) ) <nl> - <nl> - self . assertEqual ( [ 1 , 1 ] , shapes [ _AGE_WEIGHT_NAME ] ) <nl> - if expected_age_weight is not None : <nl> - self . assertEqual ( <nl> - expected_age_weight , <nl> - checkpoint_utils . load_variable ( self . _model_dir , _AGE_WEIGHT_NAME ) ) <nl> - <nl> - self . assertEqual ( [ 1 ] , shapes [ _BIAS_NAME ] ) <nl> - if expected_bias is not None : <nl> - self . assertEqual ( <nl> - expected_bias , <nl> - checkpoint_utils . load_variable ( self . _model_dir , _BIAS_NAME ) ) <nl> - <nl> - def testFromScratchWithDefaultOptimizer ( self ) : <nl> - # Create LinearRegressor . <nl> - label = 5 . <nl> - age = 17 <nl> - linear_regressor = linear . 
LinearRegressor ( <nl> - feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> - model_dir = self . _model_dir ) <nl> - <nl> - # Train for a few steps , and validate final checkpoint . <nl> - num_steps = 10 <nl> - linear_regressor . train ( <nl> - input_fn = lambda : ( { ' age ' : ( ( age , ) , ) } , ( ( label , ) , ) ) , steps = num_steps ) <nl> - self . _assert_checkpoint ( num_steps ) <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + linear_testing_utils . BaseLinearRegressorPartitionerTest . __init__ ( <nl> + self , _linear_regressor_fn ) <nl> <nl> - def testTrainWithOneDimLabel ( self ) : <nl> - label_dimension = 1 <nl> - batch_size = 20 <nl> - feature_columns = [ <nl> - feature_column_lib . numeric_column ( ' age ' , shape = ( 1 , ) ) <nl> - ] <nl> - est = linear . LinearRegressor ( <nl> - feature_columns = feature_columns , label_dimension = label_dimension , <nl> - model_dir = self . _model_dir ) <nl> - data_rank_1 = np . linspace ( 0 . , 2 . , batch_size , dtype = np . float32 ) <nl> - self . assertEqual ( ( batch_size , ) , data_rank_1 . shape ) <nl> <nl> - train_input_fn = numpy_io . numpy_input_fn ( <nl> - x = { ' age ' : data_rank_1 } , y = data_rank_1 , <nl> - batch_size = batch_size , num_epochs = None , <nl> - shuffle = True ) <nl> - est . train ( train_input_fn , steps = 200 ) <nl> - self . _assert_checkpoint ( 200 ) <nl> + class LinearRegressorEvaluationTest ( <nl> + linear_testing_utils . BaseLinearRegressorEvaluationTest , test . TestCase ) : <nl> <nl> - def testTrainWithOneDimWeight ( self ) : <nl> - label_dimension = 1 <nl> - batch_size = 20 <nl> - feature_columns = [ <nl> - feature_column_lib . numeric_column ( ' age ' , shape = ( 1 , ) ) <nl> - ] <nl> - est = linear . LinearRegressor ( <nl> - feature_columns = feature_columns , label_dimension = label_dimension , <nl> - weight_feature_key = ' w ' , <nl> - model_dir = self . _model_dir ) <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + linear_testing_utils . BaseLinearRegressorEvaluationTest . __init__ ( <nl> + self , _linear_regressor_fn ) <nl> <nl> - data_rank_1 = np . linspace ( 0 . , 2 . , batch_size , dtype = np . float32 ) <nl> - self . assertEqual ( ( batch_size , ) , data_rank_1 . shape ) <nl> <nl> - train_input_fn = numpy_io . numpy_input_fn ( <nl> - x = { ' age ' : data_rank_1 , ' w ' : data_rank_1 } , y = data_rank_1 , <nl> - batch_size = batch_size , num_epochs = None , <nl> - shuffle = True ) <nl> - est . train ( train_input_fn , steps = 200 ) <nl> - self . _assert_checkpoint ( 200 ) <nl> + class LinearRegressorPredictTest ( <nl> + linear_testing_utils . BaseLinearRegressorPredictTest , test . TestCase ) : <nl> <nl> - def testFromScratch ( self ) : <nl> - # Create LinearRegressor . <nl> - label = 5 . <nl> - age = 17 <nl> - # loss = ( logits - label ) ^ 2 = ( 0 - 5 . ) ^ 2 = 25 . <nl> - mock_optimizer = self . _mock_optimizer ( expected_loss = 25 . ) <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> - model_dir = self . _model_dir , optimizer = mock_optimizer ) <nl> - self . assertEqual ( 0 , mock_optimizer . minimize . call_count ) <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . 
__init__ ( self , methodName ) <nl> + linear_testing_utils . BaseLinearRegressorPredictTest . __init__ ( <nl> + self , _linear_regressor_fn ) <nl> <nl> - # Train for a few steps , and validate optimizer and final checkpoint . <nl> - num_steps = 10 <nl> - linear_regressor . train ( <nl> - input_fn = lambda : ( { ' age ' : ( ( age , ) , ) } , ( ( label , ) , ) ) , steps = num_steps ) <nl> - self . assertEqual ( 1 , mock_optimizer . minimize . call_count ) <nl> - self . _assert_checkpoint ( <nl> - expected_global_step = num_steps , <nl> - expected_age_weight = 0 . , <nl> - expected_bias = 0 . ) <nl> <nl> - def testFromCheckpoint ( self ) : <nl> - # Create initial checkpoint . <nl> - age_weight = 10 . 0 <nl> - bias = 5 . 0 <nl> - initial_global_step = 100 <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( [ [ age_weight ] ] , name = _AGE_WEIGHT_NAME ) <nl> - variables . Variable ( [ bias ] , name = _BIAS_NAME ) <nl> - variables . Variable ( <nl> - initial_global_step , name = ops . GraphKeys . GLOBAL_STEP , <nl> - dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> + class LinearRegressorIntegrationTest ( <nl> + linear_testing_utils . BaseLinearRegressorIntegrationTest , test . TestCase ) : <nl> <nl> - # logits = age * age_weight + bias = 17 * 10 . + 5 . = 175 <nl> - # loss = ( logits - label ) ^ 2 = ( 175 - 5 ) ^ 2 = 28900 <nl> - mock_optimizer = self . _mock_optimizer ( expected_loss = 28900 . ) <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> - model_dir = self . _model_dir , optimizer = mock_optimizer ) <nl> - self . assertEqual ( 0 , mock_optimizer . minimize . call_count ) <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + linear_testing_utils . BaseLinearRegressorIntegrationTest . __init__ ( <nl> + self , _linear_regressor_fn ) <nl> <nl> - # Train for a few steps , and validate optimizer and final checkpoint . <nl> - num_steps = 10 <nl> - linear_regressor . train ( <nl> - input_fn = lambda : ( { ' age ' : ( ( 17 , ) , ) } , ( ( 5 . , ) , ) ) , steps = num_steps ) <nl> - self . assertEqual ( 1 , mock_optimizer . minimize . call_count ) <nl> - self . _assert_checkpoint ( <nl> - expected_global_step = initial_global_step + num_steps , <nl> - expected_age_weight = age_weight , <nl> - expected_bias = bias ) <nl> <nl> - def testFromCheckpointMultiBatch ( self ) : <nl> - # Create initial checkpoint . <nl> - age_weight = 10 . 0 <nl> - bias = 5 . 0 <nl> - initial_global_step = 100 <nl> - with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( [ [ age_weight ] ] , name = _AGE_WEIGHT_NAME ) <nl> - variables . Variable ( [ bias ] , name = _BIAS_NAME ) <nl> - variables . Variable ( <nl> - initial_global_step , name = ops . GraphKeys . GLOBAL_STEP , <nl> - dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> - <nl> - # logits = age * age_weight + bias <nl> - # logits [ 0 ] = 17 * 10 . + 5 . = 175 <nl> - # logits [ 1 ] = 15 * 10 . + 5 . = 155 <nl> - # loss = sum ( logits - label ) ^ 2 = ( 175 - 5 ) ^ 2 + ( 155 - 3 ) ^ 2 = 52004 <nl> - mock_optimizer = self . _mock_optimizer ( expected_loss = 52004 . ) <nl> - linear_regressor = linear . LinearRegressor ( <nl> - feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> - model_dir = self . 
_model_dir , optimizer = mock_optimizer ) <nl> - self . assertEqual ( 0 , mock_optimizer . minimize . call_count ) <nl> + class LinearRegressorTrainingTest ( <nl> + linear_testing_utils . BaseLinearRegressorTrainingTest , test . TestCase ) : <nl> <nl> - # Train for a few steps , and validate optimizer and final checkpoint . <nl> - num_steps = 10 <nl> - linear_regressor . train ( <nl> - input_fn = lambda : ( { ' age ' : ( ( 17 , ) , ( 15 , ) ) } , ( ( 5 . , ) , ( 3 . , ) ) ) , <nl> - steps = num_steps ) <nl> - self . assertEqual ( 1 , mock_optimizer . minimize . call_count ) <nl> - self . _assert_checkpoint ( <nl> - expected_global_step = initial_global_step + num_steps , <nl> - expected_age_weight = age_weight , <nl> - expected_bias = bias ) <nl> + def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl> + test . TestCase . __init__ ( self , methodName ) <nl> + linear_testing_utils . BaseLinearRegressorTrainingTest . __init__ ( <nl> + self , _linear_regressor_fn ) <nl> <nl> <nl> class _BaseLinearClassiferTrainingTest ( object ) : <nl> def tearDown ( self ) : <nl> <nl> def _mock_optimizer ( self , expected_loss = None ) : <nl> expected_var_names = [ <nl> - ' % s / part_0 : 0 ' % _AGE_WEIGHT_NAME , <nl> - ' % s / part_0 : 0 ' % _BIAS_NAME <nl> + ' % s / part_0 : 0 ' % linear_testing_utils . AGE_WEIGHT_NAME , <nl> + ' % s / part_0 : 0 ' % linear_testing_utils . BIAS_NAME <nl> ] <nl> <nl> def _minimize ( loss , global_step ) : <nl> def _minimize ( loss , global_step ) : <nl> self . assertEquals ( 0 , loss . shape . ndims ) <nl> if expected_loss is None : <nl> return state_ops . assign_add ( global_step , 1 ) . op <nl> - assert_loss = _assert_close ( <nl> - math_ops . to_float ( expected_loss , name = ' expected ' ) , loss , <nl> + assert_loss = linear_testing_utils . assert_close ( <nl> + math_ops . to_float ( expected_loss , name = ' expected ' ) , <nl> + loss , <nl> name = ' assert_loss ' ) <nl> with ops . control_dependencies ( ( assert_loss , ) ) : <nl> return state_ops . assign_add ( global_step , 1 ) . op <nl> def _assert_checkpoint ( <nl> checkpoint_utils . load_variable ( <nl> self . _model_dir , ops . GraphKeys . GLOBAL_STEP ) ) <nl> <nl> - self . assertEqual ( [ 1 , logits_dimension ] , shapes [ _AGE_WEIGHT_NAME ] ) <nl> + self . assertEqual ( [ 1 , logits_dimension ] , <nl> + shapes [ linear_testing_utils . AGE_WEIGHT_NAME ] ) <nl> if expected_age_weight is not None : <nl> - self . assertAllEqual ( <nl> - expected_age_weight , <nl> - checkpoint_utils . load_variable ( self . _model_dir , _AGE_WEIGHT_NAME ) ) <nl> + self . assertAllEqual ( expected_age_weight , <nl> + checkpoint_utils . load_variable ( <nl> + self . _model_dir , <nl> + linear_testing_utils . AGE_WEIGHT_NAME ) ) <nl> <nl> - self . assertEqual ( [ logits_dimension ] , shapes [ _BIAS_NAME ] ) <nl> + self . assertEqual ( [ logits_dimension ] , shapes [ linear_testing_utils . BIAS_NAME ] ) <nl> if expected_bias is not None : <nl> - self . assertAllEqual ( <nl> - expected_bias , <nl> - checkpoint_utils . load_variable ( self . _model_dir , _BIAS_NAME ) ) <nl> + self . assertAllEqual ( expected_bias , <nl> + checkpoint_utils . load_variable ( <nl> + self . _model_dir , linear_testing_utils . BIAS_NAME ) ) <nl> <nl> def testFromScratchWithDefaultOptimizer ( self ) : <nl> n_classes = self . _n_classes <nl> def testFromCheckpoint ( self ) : <nl> bias = [ - 35 . 0 ] if n_classes = = 2 else [ - 35 . 0 ] * n_classes <nl> initial_global_step = 100 <nl> with ops . Graph ( ) . 
as_default ( ) : <nl> - variables . Variable ( age_weight , name = _AGE_WEIGHT_NAME ) <nl> - variables . Variable ( bias , name = _BIAS_NAME ) <nl> + variables . Variable ( age_weight , name = linear_testing_utils . AGE_WEIGHT_NAME ) <nl> + variables . Variable ( bias , name = linear_testing_utils . BIAS_NAME ) <nl> variables . Variable ( <nl> initial_global_step , name = ops . GraphKeys . GLOBAL_STEP , <nl> dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> + linear_testing_utils . save_variables_to_ckpt ( self . _model_dir ) <nl> <nl> # For binary classifer : <nl> # logits = age * age_weight + bias = 17 * 2 . - 35 . = - 1 . <nl> def testFromCheckpointMultiBatch ( self ) : <nl> bias = [ - 35 . 0 ] if n_classes = = 2 else [ - 35 . 0 ] * n_classes <nl> initial_global_step = 100 <nl> with ops . Graph ( ) . as_default ( ) : <nl> - variables . Variable ( age_weight , name = _AGE_WEIGHT_NAME ) <nl> - variables . Variable ( bias , name = _BIAS_NAME ) <nl> + variables . Variable ( age_weight , name = linear_testing_utils . AGE_WEIGHT_NAME ) <nl> + variables . Variable ( bias , name = linear_testing_utils . BIAS_NAME ) <nl> variables . Variable ( <nl> initial_global_step , name = ops . GraphKeys . GLOBAL_STEP , <nl> dtype = dtypes . int64 ) <nl> - _save_variables_to_ckpt ( self . _model_dir ) <nl> + linear_testing_utils . save_variables_to_ckpt ( self . _model_dir ) <nl> <nl> # For binary classifer : <nl> # logits = age * age_weight + bias <nl> new file mode 100644 <nl> index 0000000000000 . . 841dc7bdae5e9 <nl> mmm / dev / null <nl> ppp b / tensorflow / python / estimator / canned / linear_testing_utils . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Utils for testing linear estimators . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + import os <nl> + import shutil <nl> + import tempfile <nl> + <nl> + import numpy as np <nl> + import six <nl> + <nl> + from tensorflow . core . example import example_pb2 <nl> + from tensorflow . core . example import feature_pb2 <nl> + from tensorflow . python . client import session as tf_session <nl> + from tensorflow . python . estimator import estimator <nl> + from tensorflow . python . estimator import run_config <nl> + from tensorflow . python . estimator . canned import metric_keys <nl> + from tensorflow . python . estimator . export import export <nl> + from tensorflow . python . estimator . inputs import numpy_io <nl> + from tensorflow . python . estimator . inputs import pandas_io <nl> + from tensorflow . python . 
feature_column import feature_column as feature_column_lib <nl> + from tensorflow . python . framework import dtypes <nl> + from tensorflow . python . framework import ops <nl> + from tensorflow . python . framework import sparse_tensor <nl> + from tensorflow . python . ops import check_ops <nl> + from tensorflow . python . ops import control_flow_ops <nl> + from tensorflow . python . ops import data_flow_ops <nl> + from tensorflow . python . ops import math_ops <nl> + from tensorflow . python . ops import parsing_ops <nl> + from tensorflow . python . ops import state_ops <nl> + from tensorflow . python . ops import variable_scope <nl> + from tensorflow . python . ops import variables <nl> + from tensorflow . python . platform import gfile <nl> + from tensorflow . python . platform import test <nl> + from tensorflow . python . summary . writer import writer_cache <nl> + from tensorflow . python . training import checkpoint_utils <nl> + from tensorflow . python . training import input as input_lib <nl> + from tensorflow . python . training import optimizer <nl> + from tensorflow . python . training import queue_runner <nl> + from tensorflow . python . training import saver <nl> + from tensorflow . python . training import session_run_hook <nl> + <nl> + try : <nl> + # pylint : disable = g - import - not - at - top <nl> + import pandas as pd <nl> + HAS_PANDAS = True <nl> + except IOError : <nl> + # Pandas writes a temporary file during import . If it fails , don ' t use pandas . <nl> + HAS_PANDAS = False <nl> + except ImportError : <nl> + HAS_PANDAS = False <nl> + <nl> + # pylint rules which are disabled by default for test files . <nl> + # pylint : disable = invalid - name , protected - access , missing - docstring <nl> + <nl> + # Names of variables created by model . <nl> + AGE_WEIGHT_NAME = ' linear / linear_model / age / weights ' <nl> + HEIGHT_WEIGHT_NAME = ' linear / linear_model / height / weights ' <nl> + BIAS_NAME = ' linear / linear_model / bias_weights ' <nl> + LANGUAGE_WEIGHT_NAME = ' linear / linear_model / language / weights ' <nl> + <nl> + <nl> + def assert_close ( expected , actual , rtol = 1e - 04 , name = ' assert_close ' ) : <nl> + with ops . name_scope ( name , ' assert_close ' , ( expected , actual , rtol ) ) as scope : <nl> + expected = ops . convert_to_tensor ( expected , name = ' expected ' ) <nl> + actual = ops . convert_to_tensor ( actual , name = ' actual ' ) <nl> + rdiff = math_ops . abs ( expected - actual , ' diff ' ) / math_ops . abs ( expected ) <nl> + rtol = ops . convert_to_tensor ( rtol , name = ' rtol ' ) <nl> + return check_ops . assert_less ( <nl> + rdiff , <nl> + rtol , <nl> + data = ( ' Condition expected = ~ actual did not hold element - wise : ' <nl> + ' expected = ' , expected , ' actual = ' , actual , ' rdiff = ' , rdiff , <nl> + ' rtol = ' , rtol , ) , <nl> + name = scope ) <nl> + <nl> + <nl> + def save_variables_to_ckpt ( model_dir ) : <nl> + init_all_op = [ variables . global_variables_initializer ( ) ] <nl> + with tf_session . Session ( ) as sess : <nl> + sess . run ( init_all_op ) <nl> + saver . Saver ( ) . save ( sess , os . path . join ( model_dir , ' model . ckpt ' ) ) <nl> + <nl> + <nl> + def queue_parsed_features ( feature_map ) : <nl> + tensors_to_enqueue = [ ] <nl> + keys = [ ] <nl> + for key , tensor in six . iteritems ( feature_map ) : <nl> + keys . append ( key ) <nl> + tensors_to_enqueue . append ( tensor ) <nl> + queue_dtypes = [ x . dtype for x in tensors_to_enqueue ] <nl> + input_queue = data_flow_ops . 
FIFOQueue ( capacity = 100 , dtypes = queue_dtypes ) <nl>
+ queue_runner . add_queue_runner ( <nl>
+ queue_runner . QueueRunner ( input_queue , <nl>
+ [ input_queue . enqueue ( tensors_to_enqueue ) ] ) ) <nl>
+ dequeued_tensors = input_queue . dequeue ( ) <nl>
+ return { keys [ i ] : dequeued_tensors [ i ] for i in range ( len ( dequeued_tensors ) ) } <nl>
+ <nl>
+ <nl>
+ class CheckPartitionerVarHook ( session_run_hook . SessionRunHook ) : <nl>
+ " " " A ` SessionRunHook ` to check a partitioned variable . " " " <nl>
+ <nl>
+ def __init__ ( self , test_case , var_name , var_dim , partitions ) : <nl>
+ self . _test_case = test_case <nl>
+ self . _var_name = var_name <nl>
+ self . _var_dim = var_dim <nl>
+ self . _partitions = partitions <nl>
+ <nl>
+ def begin ( self ) : <nl>
+ with variable_scope . variable_scope ( <nl>
+ variable_scope . get_variable_scope ( ) ) as scope : <nl>
+ scope . reuse_variables ( ) <nl>
+ partitioned_weight = variable_scope . get_variable ( <nl>
+ self . _var_name , shape = ( self . _var_dim , 1 ) ) <nl>
+ self . _test_case . assertTrue ( <nl>
+ isinstance ( partitioned_weight , variables . PartitionedVariable ) ) <nl>
+ for part in partitioned_weight : <nl>
+ self . _test_case . assertEqual ( self . _var_dim / / self . _partitions , <nl>
+ part . get_shape ( ) [ 0 ] ) <nl>
+ <nl>
+ <nl>
+ class BaseLinearRegressorPartitionerTest ( object ) : <nl>
+ <nl>
+ def __init__ ( self , linear_regressor_fn ) : <nl>
+ self . _linear_regressor_fn = linear_regressor_fn <nl>
+ <nl>
+ def setUp ( self ) : <nl>
+ self . _model_dir = tempfile . mkdtemp ( ) <nl>
+ <nl>
+ def tearDown ( self ) : <nl>
+ if self . _model_dir : <nl>
+ writer_cache . FileWriterCache . clear ( ) <nl>
+ shutil . rmtree ( self . _model_dir ) <nl>
+ <nl>
+ def testPartitioner ( self ) : <nl>
+ x_dim = 64 <nl>
+ partitions = 4 <nl>
+ <nl>
+ def _partitioner ( shape , dtype ) : <nl>
+ del dtype # unused ; required by Fn signature . <nl>
+ # Only partition the embedding tensor . <nl>
+ return [ partitions , 1 ] if shape [ 0 ] = = x_dim else [ 1 ] <nl>
+ <nl>
+ regressor = self . _linear_regressor_fn ( <nl>
+ feature_columns = ( feature_column_lib . categorical_column_with_hash_bucket ( <nl>
+ ' language ' , hash_bucket_size = x_dim ) , ) , <nl>
+ partitioner = _partitioner , <nl>
+ model_dir = self . _model_dir ) <nl>
+ <nl>
+ def _input_fn ( ) : <nl>
+ return { <nl>
+ ' language ' : <nl>
+ sparse_tensor . SparseTensor ( <nl>
+ values = [ ' english ' , ' spanish ' ] , <nl>
+ indices = [ [ 0 , 0 ] , [ 0 , 1 ] ] , <nl>
+ dense_shape = [ 1 , 2 ] ) <nl>
+ } , [ [ 10 . ] ] <nl>
+ <nl>
+ hook = CheckPartitionerVarHook ( self , LANGUAGE_WEIGHT_NAME , x_dim , <nl>
+ partitions ) <nl>
+ regressor . train ( input_fn = _input_fn , steps = 1 , hooks = [ hook ] ) <nl>
+ <nl>
+ def testDefaultPartitionerWithMultiplePsReplicas ( self ) : <nl>
+ partitions = 2 <nl>
+ # This results in weights larger than the default partition size of 64M , <nl>
+ # so partitioned weights are created ( each weight uses 4 bytes ) . <nl>
+ x_dim = 32 < < 20 <nl>
+ <nl>
+ class FakeRunConfig ( run_config . RunConfig ) : <nl>
+ <nl>
+ @ property <nl>
+ def num_ps_replicas ( self ) : <nl>
+ return partitions <nl>
+ <nl>
+ # Mock the device setter as ps is not available on test machines . <nl>
+ with test . mock . patch . object ( <nl>
+ estimator , <nl>
+ ' _get_replica_device_setter ' , <nl>
+ return_value = lambda _ : ' / cpu : 0 ' ) : <nl>
+ linear_regressor = self . _linear_regressor_fn ( <nl>
+ feature_columns = ( <nl>
+ feature_column_lib .
categorical_column_with_hash_bucket ( <nl> + ' language ' , hash_bucket_size = x_dim ) , ) , <nl> + config = FakeRunConfig ( ) , <nl> + model_dir = self . _model_dir ) <nl> + <nl> + def _input_fn ( ) : <nl> + return { <nl> + ' language ' : <nl> + sparse_tensor . SparseTensor ( <nl> + values = [ ' english ' , ' spanish ' ] , <nl> + indices = [ [ 0 , 0 ] , [ 0 , 1 ] ] , <nl> + dense_shape = [ 1 , 2 ] ) <nl> + } , [ [ 10 . ] ] <nl> + <nl> + hook = CheckPartitionerVarHook ( self , LANGUAGE_WEIGHT_NAME , x_dim , <nl> + partitions ) <nl> + linear_regressor . train ( input_fn = _input_fn , steps = 1 , hooks = [ hook ] ) <nl> + <nl> + <nl> + # TODO ( b / 36813849 ) : Add tests with dynamic shape inputs using placeholders . <nl> + class BaseLinearRegressorEvaluationTest ( object ) : <nl> + <nl> + def __init__ ( self , linear_regressor_fn ) : <nl> + self . _linear_regressor_fn = linear_regressor_fn <nl> + <nl> + def setUp ( self ) : <nl> + self . _model_dir = tempfile . mkdtemp ( ) <nl> + <nl> + def tearDown ( self ) : <nl> + if self . _model_dir : <nl> + writer_cache . FileWriterCache . clear ( ) <nl> + shutil . rmtree ( self . _model_dir ) <nl> + <nl> + def test_evaluation_for_simple_data ( self ) : <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + variables . Variable ( [ [ 11 . 0 ] ] , name = AGE_WEIGHT_NAME ) <nl> + variables . Variable ( [ 2 . 0 ] , name = BIAS_NAME ) <nl> + variables . Variable ( <nl> + 100 , name = ops . GraphKeys . GLOBAL_STEP , dtype = dtypes . int64 ) <nl> + save_variables_to_ckpt ( self . _model_dir ) <nl> + <nl> + linear_regressor = self . _linear_regressor_fn ( <nl> + feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> + model_dir = self . _model_dir ) <nl> + eval_metrics = linear_regressor . evaluate ( <nl> + input_fn = lambda : ( { ' age ' : ( ( 1 , ) , ) } , ( ( 10 . , ) , ) ) , steps = 1 ) <nl> + <nl> + # Logit is ( 1 . * 11 . 0 + 2 . 0 ) = 13 , while label is 10 . Loss is 3 * * 2 = 9 . <nl> + self . assertDictEqual ( { <nl> + metric_keys . MetricKeys . LOSS : 9 . , <nl> + metric_keys . MetricKeys . LOSS_MEAN : 9 . , <nl> + ops . GraphKeys . GLOBAL_STEP : 100 <nl> + } , eval_metrics ) <nl> + <nl> + def test_evaluation_batch ( self ) : <nl> + " " " Tests evaluation for batch_size = = 2 . " " " <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + variables . Variable ( [ [ 11 . 0 ] ] , name = AGE_WEIGHT_NAME ) <nl> + variables . Variable ( [ 2 . 0 ] , name = BIAS_NAME ) <nl> + variables . Variable ( <nl> + 100 , name = ops . GraphKeys . GLOBAL_STEP , dtype = dtypes . int64 ) <nl> + save_variables_to_ckpt ( self . _model_dir ) <nl> + <nl> + linear_regressor = self . _linear_regressor_fn ( <nl> + feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> + model_dir = self . _model_dir ) <nl> + eval_metrics = linear_regressor . evaluate ( <nl> + input_fn = lambda : ( { ' age ' : ( ( 1 , ) , ( 1 , ) ) } , ( ( 10 . , ) , ( 10 . , ) ) ) , steps = 1 ) <nl> + <nl> + # Logit is ( 1 . * 11 . 0 + 2 . 0 ) = 13 , while label is 10 . <nl> + # Loss per example is 3 * * 2 = 9 . <nl> + # Training loss is the sum over batch = 9 + 9 = 18 <nl> + # Average loss is the average over batch = 9 <nl> + self . assertDictEqual ( { <nl> + metric_keys . MetricKeys . LOSS : 18 . , <nl> + metric_keys . MetricKeys . LOSS_MEAN : 9 . , <nl> + ops . GraphKeys . GLOBAL_STEP : 100 <nl> + } , eval_metrics ) <nl> + <nl> + def test_evaluation_weights ( self ) : <nl> + " " " Tests evaluation with weights . " " " <nl> + with ops . Graph ( ) . 
as_default ( ) : <nl>
+ variables . Variable ( [ [ 11 . 0 ] ] , name = AGE_WEIGHT_NAME ) <nl>
+ variables . Variable ( [ 2 . 0 ] , name = BIAS_NAME ) <nl>
+ variables . Variable ( <nl>
+ 100 , name = ops . GraphKeys . GLOBAL_STEP , dtype = dtypes . int64 ) <nl>
+ save_variables_to_ckpt ( self . _model_dir ) <nl>
+ <nl>
+ def _input_fn ( ) : <nl>
+ features = { ' age ' : ( ( 1 , ) , ( 1 , ) ) , ' weights ' : ( ( 1 . , ) , ( 2 . , ) ) } <nl>
+ labels = ( ( 10 . , ) , ( 10 . , ) ) <nl>
+ return features , labels <nl>
+ <nl>
+ linear_regressor = self . _linear_regressor_fn ( <nl>
+ feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl>
+ weight_feature_key = ' weights ' , <nl>
+ model_dir = self . _model_dir ) <nl>
+ eval_metrics = linear_regressor . evaluate ( input_fn = _input_fn , steps = 1 ) <nl>
+ <nl>
+ # Logit is ( 1 . * 11 . 0 + 2 . 0 ) = 13 , while label is 10 . <nl>
+ # Loss per example is 3 * * 2 = 9 . <nl>
+ # Training loss is the weighted sum over batch = 9 + 2 * 9 = 27 <nl>
+ # Average loss is the weighted average = ( 9 + 2 * 9 ) / ( 1 + 2 ) = 9 <nl>
+ self . assertDictEqual ( { <nl>
+ metric_keys . MetricKeys . LOSS : 27 . , <nl>
+ metric_keys . MetricKeys . LOSS_MEAN : 9 . , <nl>
+ ops . GraphKeys . GLOBAL_STEP : 100 <nl>
+ } , eval_metrics ) <nl>
+ <nl>
+ def test_evaluation_for_multi_dimensions ( self ) : <nl>
+ x_dim = 3 <nl>
+ label_dim = 2 <nl>
+ with ops . Graph ( ) . as_default ( ) : <nl>
+ variables . Variable ( <nl>
+ [ [ 1 . 0 , 2 . 0 ] , [ 3 . 0 , 4 . 0 ] , [ 5 . 0 , 6 . 0 ] ] , name = AGE_WEIGHT_NAME ) <nl>
+ variables . Variable ( [ 7 . 0 , 8 . 0 ] , name = BIAS_NAME ) <nl>
+ variables . Variable ( 100 , name = ' global_step ' , dtype = dtypes . int64 ) <nl>
+ save_variables_to_ckpt ( self . _model_dir ) <nl>
+ <nl>
+ linear_regressor = self . _linear_regressor_fn ( <nl>
+ feature_columns = ( feature_column_lib . numeric_column ( <nl>
+ ' age ' , shape = ( x_dim , ) ) , ) , <nl>
+ label_dimension = label_dim , <nl>
+ model_dir = self . _model_dir ) <nl>
+ input_fn = numpy_io . numpy_input_fn ( <nl>
+ x = { <nl>
+ ' age ' : np . array ( [ [ 2 . , 4 . , 5 . ] ] ) , <nl>
+ } , <nl>
+ y = np . array ( [ [ 46 . , 58 . ] ] ) , <nl>
+ batch_size = 1 , <nl>
+ num_epochs = None , <nl>
+ shuffle = False ) <nl>
+ eval_metrics = linear_regressor . evaluate ( input_fn = input_fn , steps = 1 ) <nl>
+ <nl>
+ self . assertItemsEqual ( <nl>
+ ( metric_keys . MetricKeys . LOSS , metric_keys . MetricKeys . LOSS_MEAN , <nl>
+ ops . GraphKeys . GLOBAL_STEP ) , eval_metrics . keys ( ) ) <nl>
+ <nl>
+ # Logit is <nl>
+ # [ 2 . , 4 . , 5 . ] * [ 1 . 0 , 2 . 0 ] + [ 7 . 0 , 8 . 0 ] = [ 39 , 50 ] + [ 7 . 0 , 8 . 0 ] <nl>
+ # [ 3 . 0 , 4 . 0 ] <nl>
+ # [ 5 . 0 , 6 . 0 ] <nl>
+ # which is [ 46 , 58 ] <nl>
+ self . assertAlmostEqual ( 0 , eval_metrics [ metric_keys . MetricKeys . LOSS ] ) <nl>
+ <nl>
+ def test_evaluation_for_multiple_feature_columns ( self ) : <nl>
+ with ops . Graph ( ) . as_default ( ) : <nl>
+ variables . Variable ( [ [ 10 . 0 ] ] , name = AGE_WEIGHT_NAME ) <nl>
+ variables . Variable ( [ [ 2 . 0 ] ] , name = HEIGHT_WEIGHT_NAME ) <nl>
+ variables . Variable ( [ 5 . 0 ] , name = BIAS_NAME ) <nl>
+ variables . Variable ( <nl>
+ 100 , name = ops . GraphKeys . GLOBAL_STEP , dtype = dtypes . int64 ) <nl>
+ save_variables_to_ckpt ( self . _model_dir ) <nl>
+ <nl>
+ batch_size = 2 <nl>
+ feature_columns = [ <nl>
+ feature_column_lib . numeric_column ( ' age ' ) , <nl>
+ feature_column_lib . numeric_column ( ' height ' ) <nl>
+ ] <nl>
+ input_fn = numpy_io . numpy_input_fn ( <nl>
+ x = { ' age ' : np . array ( [ 20 , 40 ] ) , <nl>
+ ' height ' : np . array ( [ 4 , 8 ] ) } , <nl>
+ y = np . array ( [ [ 213 . ] , [ 421 . ] ] ) , <nl>
+ batch_size = batch_size , <nl>
+ num_epochs = None , <nl>
+ shuffle = False ) <nl>
+ <nl>
+ est = self . _linear_regressor_fn ( <nl>
+ feature_columns = feature_columns , model_dir = self . _model_dir ) <nl>
+ <nl>
+ eval_metrics = est . evaluate ( input_fn = input_fn , steps = 1 ) <nl>
+ self . assertItemsEqual ( <nl>
+ ( metric_keys . MetricKeys . LOSS , metric_keys . MetricKeys . LOSS_MEAN , <nl>
+ ops . GraphKeys . GLOBAL_STEP ) , eval_metrics . keys ( ) ) <nl>
+ <nl>
+ # Logit is [ ( 20 . * 10 . 0 + 4 * 2 . 0 + 5 . 0 ) , ( 40 . * 10 . 0 + 8 * 2 . 0 + 5 . 0 ) ] = <nl>
+ # [ 213 . 0 , 421 . 0 ] , while label is [ 213 . , 421 . ] . Loss = 0 . <nl>
+ self . assertAlmostEqual ( 0 , eval_metrics [ metric_keys . MetricKeys . LOSS ] ) <nl>
+ <nl>
+ <nl>
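+ # Illustrative usage sketch ( comment only ) : these Base * Test classes carry <nl>
+ # the test bodies but do not subclass test . TestCase themselves . A concrete <nl>
+ # test module mixes one in with test . TestCase and supplies its regressor <nl>
+ # constructor , mirroring the concrete classes earlier in this change : <nl>
+ # <nl>
+ # def _linear_regressor_fn ( * args , * * kwargs ) : <nl>
+ # return linear . LinearRegressor ( * args , * * kwargs ) <nl>
+ # <nl>
+ # class LinearRegressorEvaluationTest ( <nl>
+ # BaseLinearRegressorEvaluationTest , test . TestCase ) : <nl>
+ # <nl>
+ # def __init__ ( self , methodName = ' runTest ' ) : # pylint : disable = invalid - name <nl>
+ # test . TestCase . __init__ ( self , methodName ) <nl>
+ # BaseLinearRegressorEvaluationTest . __init__ ( <nl>
+ # self , _linear_regressor_fn ) <nl>
+ <nl>
+ <nl>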
+ class BaseLinearRegressorPredictTest ( object ) : <nl>
+ <nl>
+ def __init__ ( self , linear_regressor_fn ) : <nl>
+ self . _linear_regressor_fn = linear_regressor_fn <nl>
+ <nl>
+ def setUp ( self ) : <nl>
+ self . _model_dir = tempfile . mkdtemp ( ) <nl>
+ <nl>
+ def tearDown ( self ) : <nl>
+ if self . _model_dir : <nl>
+ writer_cache . FileWriterCache . clear ( ) <nl>
+ shutil . rmtree ( self . _model_dir ) <nl>
+ <nl>
+ def test_1d ( self ) : <nl>
+ " " " Tests predict when all variables are one - dimensional . " " " <nl>
+ with ops . Graph ( ) . as_default ( ) : <nl>
+ variables . Variable ( [ [ 10 . ] ] , name = ' linear / linear_model / x / weights ' ) <nl>
+ variables . Variable ( [ . 2 ] , name = BIAS_NAME ) <nl>
+ variables . Variable ( 100 , name = ' global_step ' , dtype = dtypes . int64 ) <nl>
+ save_variables_to_ckpt ( self . _model_dir ) <nl>
+ <nl>
+ linear_regressor = self . _linear_regressor_fn ( <nl>
+ feature_columns = ( feature_column_lib . numeric_column ( ' x ' ) , ) , <nl>
+ model_dir = self . _model_dir ) <nl>
+ <nl>
+ predict_input_fn = numpy_io . numpy_input_fn ( <nl>
+ x = { ' x ' : np . array ( [ [ 2 . ] ] ) } , <nl>
+ y = None , <nl>
+ batch_size = 1 , <nl>
+ num_epochs = 1 , <nl>
+ shuffle = False ) <nl>
+ predictions = linear_regressor . predict ( input_fn = predict_input_fn ) <nl>
+ predicted_scores = list ( [ x [ ' predictions ' ] for x in predictions ] ) <nl>
+ # x * weight + bias = 2 . * 10 . + . 2 = 20 . 2 <nl>
+ self . assertAllClose ( [ [ 20 . 2 ] ] , predicted_scores ) <nl>
+ <nl>
+ def testMultiDim ( self ) : <nl>
+ " " " Tests predict when all variables are multi - dimensional . " " " <nl>
+ batch_size = 2 <nl>
+ label_dimension = 3 <nl>
+ x_dim = 4 <nl>
+ feature_columns = ( feature_column_lib . numeric_column ( ' x ' , shape = ( x_dim , ) ) , ) <nl>
+ with ops . Graph ( ) . as_default ( ) : <nl>
+ variables . Variable ( # shape = [ x_dim , label_dimension ] <nl>
+ [ [ 1 . , 2 . , 3 . ] , [ 2 . , 3 . , 4 . ] , [ 3 . , 4 . , 5 . ] , [ 4 . , 5 . , 6 . ] ] , <nl>
+ name = ' linear / linear_model / x / weights ' ) <nl>
+ variables . Variable ( # shape = [ label_dimension ] <nl>
+ [ . 2 , . 4 , . 6 ] , name = BIAS_NAME ) <nl>
+ variables . Variable ( 100 , name = ' global_step ' , dtype = dtypes . int64 ) <nl>
+ save_variables_to_ckpt ( self . _model_dir ) <nl>
+ <nl>
+ linear_regressor = self . _linear_regressor_fn ( <nl>
+ feature_columns = feature_columns , <nl>
+ label_dimension = label_dimension , <nl>
+ model_dir = self . _model_dir ) <nl>
+ <nl>
+ predict_input_fn = numpy_io . numpy_input_fn ( <nl>
+ # x shape = [ batch_size , x_dim ] <nl>
+ x = { ' x ' : np . array ( [ [ 1 . , 2 . , 3 . , 4 . ] , [ 5 . , 6 . , 7 . , 8 . ] ] ) } , <nl>
+ y = None , <nl>
+ batch_size = batch_size , <nl>
+ num_epochs = 1 , <nl>
+ shuffle = False ) <nl>
+ predictions = linear_regressor . predict ( input_fn = predict_input_fn ) <nl>
+ predicted_scores = list ( [ x [ ' predictions ' ] for x in predictions ] ) <nl>
+ # score = x * weight + bias , shape = [ batch_size , label_dimension ] <nl>
+ self . assertAllClose ( [ [ 30 . 2 , 40 . 4 , 50 . 6 ] , [ 70 . 2 , 96 . 4 , 122 . 6 ] ] , <nl>
+ predicted_scores ) <nl>
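+ # Worked arithmetic for the first row ( illustrative ) : with x = [ 1 . , 2 . , 3 . , 4 . ] , <nl>
+ # score [ 0 ] = 1 * 1 + 2 * 2 + 3 * 3 + 4 * 4 + . 2 = 30 . 2 <nl>
+ # score [ 1 ] = 1 * 2 + 2 * 3 + 3 * 4 + 4 * 5 + . 4 = 40 . 4 <nl>
+ # score [ 2 ] = 1 * 3 + 2 * 4 + 3 * 5 + 4 * 6 + . 6 = 50 . 6 <nl>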
+ <nl>
+ def testTwoFeatureColumns ( self ) : <nl>
+ " " " Tests predict with two feature columns . " " " <nl>
+ with ops . Graph ( ) . as_default ( ) : <nl>
+ variables . Variable ( [ [ 10 . ] ] , name = ' linear / linear_model / x0 / weights ' ) <nl>
+ variables . Variable ( [ [ 20 . ] ] , name = ' linear / linear_model / x1 / weights ' ) <nl>
+ variables . Variable ( [ . 2 ] , name = BIAS_NAME ) <nl>
+ variables . Variable ( 100 , name = ' global_step ' , dtype = dtypes . int64 ) <nl>
+ save_variables_to_ckpt ( self . _model_dir ) <nl>
+ <nl>
+ linear_regressor = self . _linear_regressor_fn ( <nl>
+ feature_columns = ( feature_column_lib . numeric_column ( ' x0 ' ) , <nl>
+ feature_column_lib . numeric_column ( ' x1 ' ) ) , <nl>
+ model_dir = self . _model_dir ) <nl>
+ <nl>
+ predict_input_fn = numpy_io . numpy_input_fn ( <nl>
+ x = { ' x0 ' : np . array ( [ [ 2 . ] ] ) , <nl>
+ ' x1 ' : np . array ( [ [ 3 . ] ] ) } , <nl>
+ y = None , <nl>
+ batch_size = 1 , <nl>
+ num_epochs = 1 , <nl>
+ shuffle = False ) <nl>
+ predictions = linear_regressor . predict ( input_fn = predict_input_fn ) <nl>
+ predicted_scores = list ( [ x [ ' predictions ' ] for x in predictions ] ) <nl>
+ # x0 * weight0 + x1 * weight1 + bias = 2 . * 10 . + 3 . * 20 . + . 2 = 80 . 2 <nl>
+ self . assertAllClose ( [ [ 80 . 2 ] ] , predicted_scores ) <nl>
+ <nl>
+ <nl>
+ class BaseLinearRegressorIntegrationTest ( object ) : <nl>
+ <nl>
+ def __init__ ( self , linear_regressor_fn ) : <nl>
+ self . _linear_regressor_fn = linear_regressor_fn <nl>
+ <nl>
+ def setUp ( self ) : <nl>
+ self . _model_dir = tempfile . mkdtemp ( ) <nl>
+ <nl>
+ def tearDown ( self ) : <nl>
+ if self . _model_dir : <nl>
+ writer_cache . FileWriterCache . clear ( ) <nl>
+ shutil . rmtree ( self . _model_dir ) <nl>
+ <nl>
+ def _test_complete_flow ( self , train_input_fn , eval_input_fn , predict_input_fn , <nl>
+ input_dimension , label_dimension , prediction_length ) : <nl>
+ feature_columns = [ <nl>
+ feature_column_lib . numeric_column ( ' x ' , shape = ( input_dimension , ) ) <nl>
+ ] <nl>
+ est = self . _linear_regressor_fn ( <nl>
+ feature_columns = feature_columns , <nl>
+ label_dimension = label_dimension , <nl>
+ model_dir = self . _model_dir ) <nl>
+ <nl>
+ # TRAIN <nl>
+ # learn y = x <nl>
+ est . train ( train_input_fn , steps = 200 ) <nl>
+ <nl>
+ # EVALUATE <nl>
+ scores = est . evaluate ( eval_input_fn ) <nl>
+ self . assertEqual ( 200 , scores [ ops . GraphKeys . GLOBAL_STEP ] ) <nl>
+ self . assertIn ( metric_keys . MetricKeys . LOSS , six . iterkeys ( scores ) ) <nl>
+ <nl>
+ # PREDICT <nl>
+ predictions = np . array ( <nl>
+ [ x [ ' predictions ' ] for x in est . predict ( predict_input_fn ) ] ) <nl>
+ self . assertAllEqual ( ( prediction_length , label_dimension ) , predictions . shape ) <nl>
+ <nl>
+ # EXPORT <nl>
+ feature_spec = feature_column_lib . make_parse_example_spec ( feature_columns ) <nl>
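+ # For the numeric column above , make_parse_example_spec yields ( roughly ) <nl>
+ # { ' x ' : parsing_ops . FixedLenFeature ( [ input_dimension ] , dtypes . float32 ) } , <nl>
+ # the same spec that test_input_fn_from_parse_example below builds by hand . <nl>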
+ serving_input_receiver_fn = export . build_parsing_serving_input_receiver_fn ( <nl>
+ feature_spec ) <nl>
+ export_dir = est . export_savedmodel ( tempfile . mkdtemp ( ) , <nl>
+ serving_input_receiver_fn ) <nl>
+ self . assertTrue ( gfile . Exists ( export_dir ) ) <nl>
+ <nl>
+ def test_numpy_input_fn ( self ) : <nl>
+ " " " Tests complete flow with numpy_input_fn . " " " <nl>
+ label_dimension = 2 <nl>
+ input_dimension = label_dimension <nl>
+ batch_size = 10 <nl>
+ prediction_length = batch_size <nl>
+ data = np . linspace ( 0 . , 2 . , batch_size * label_dimension , dtype = np . float32 ) <nl>
+ data = data . reshape ( batch_size , label_dimension ) <nl>
+ <nl>
+ train_input_fn = numpy_io . numpy_input_fn ( <nl>
+ x = { ' x ' : data } , <nl>
+ y = data , <nl>
+ batch_size = batch_size , <nl>
+ num_epochs = None , <nl>
+ shuffle = True ) <nl>
+ eval_input_fn = numpy_io . numpy_input_fn ( <nl>
+ x = { ' x ' : data } , <nl>
+ y = data , <nl>
+ batch_size = batch_size , <nl>
+ num_epochs = 1 , <nl>
+ shuffle = False ) <nl>
+ predict_input_fn = numpy_io . numpy_input_fn ( <nl>
+ x = { ' x ' : data } , <nl>
+ y = None , <nl>
+ batch_size = batch_size , <nl>
+ num_epochs = 1 , <nl>
+ shuffle = False ) <nl>
+ <nl>
+ self . _test_complete_flow ( <nl>
+ train_input_fn = train_input_fn , <nl>
+ eval_input_fn = eval_input_fn , <nl>
+ predict_input_fn = predict_input_fn , <nl>
+ input_dimension = input_dimension , <nl>
+ label_dimension = label_dimension , <nl>
+ prediction_length = prediction_length ) <nl>
+ <nl>
+ def test_pandas_input_fn ( self ) : <nl>
+ " " " Tests complete flow with pandas_input_fn . " " " <nl>
+ if not HAS_PANDAS : <nl>
+ return <nl>
+ <nl>
+ # Pandas DataFrame naturally supports 1 dim data only . <nl>
+ label_dimension = 1 <nl>
+ input_dimension = label_dimension <nl>
+ batch_size = 10 <nl>
+ data = np . array ( [ 1 . , 2 . , 3 . , 4 . ] , dtype = np . float32 ) <nl>
+ x = pd . DataFrame ( { ' x ' : data } ) <nl>
+ y = pd . Series ( data ) <nl>
+ prediction_length = 4 <nl>
+ <nl>
+ train_input_fn = pandas_io . pandas_input_fn ( <nl>
+ x = x , y = y , batch_size = batch_size , num_epochs = None , shuffle = True ) <nl>
+ eval_input_fn = pandas_io . pandas_input_fn ( <nl>
+ x = x , y = y , batch_size = batch_size , shuffle = False ) <nl>
+ predict_input_fn = pandas_io . pandas_input_fn ( <nl>
+ x = x , batch_size = batch_size , shuffle = False ) <nl>
+ <nl>
+ self . _test_complete_flow ( <nl>
+ train_input_fn = train_input_fn , <nl>
+ eval_input_fn = eval_input_fn , <nl>
+ predict_input_fn = predict_input_fn , <nl>
+ input_dimension = input_dimension , <nl>
+ label_dimension = label_dimension , <nl>
+ prediction_length = prediction_length ) <nl>
+ <nl>
+ def test_input_fn_from_parse_example ( self ) : <nl>
+ " " " Tests complete flow with input_fn constructed from parse_example . " " " <nl>
+ label_dimension = 2 <nl>
+ input_dimension = label_dimension <nl>
+ batch_size = 10 <nl>
+ prediction_length = batch_size <nl>
+ data = np . linspace ( 0 . , 2 . , batch_size * label_dimension , dtype = np . float32 ) <nl>
+ data = data . reshape ( batch_size , label_dimension ) <nl>
+ <nl>
+ serialized_examples = [ ] <nl>
+ for datum in data : <nl>
+ example = example_pb2 . Example ( features = feature_pb2 . Features ( <nl>
+ feature = { <nl>
+ ' x ' : <nl>
+ feature_pb2 . Feature ( float_list = feature_pb2 . FloatList ( <nl>
+ value = datum ) ) , <nl>
+ ' y ' : <nl>
+ feature_pb2 . Feature ( float_list = feature_pb2 .
FloatList ( <nl> + value = datum [ : label_dimension ] ) ) , <nl> + } ) ) <nl> + serialized_examples . append ( example . SerializeToString ( ) ) <nl> + <nl> + feature_spec = { <nl> + ' x ' : parsing_ops . FixedLenFeature ( [ input_dimension ] , dtypes . float32 ) , <nl> + ' y ' : parsing_ops . FixedLenFeature ( [ label_dimension ] , dtypes . float32 ) , <nl> + } <nl> + <nl> + def _train_input_fn ( ) : <nl> + feature_map = parsing_ops . parse_example ( serialized_examples , feature_spec ) <nl> + features = queue_parsed_features ( feature_map ) <nl> + labels = features . pop ( ' y ' ) <nl> + return features , labels <nl> + <nl> + def _eval_input_fn ( ) : <nl> + feature_map = parsing_ops . parse_example ( <nl> + input_lib . limit_epochs ( serialized_examples , num_epochs = 1 ) , <nl> + feature_spec ) <nl> + features = queue_parsed_features ( feature_map ) <nl> + labels = features . pop ( ' y ' ) <nl> + return features , labels <nl> + <nl> + def _predict_input_fn ( ) : <nl> + feature_map = parsing_ops . parse_example ( <nl> + input_lib . limit_epochs ( serialized_examples , num_epochs = 1 ) , <nl> + feature_spec ) <nl> + features = queue_parsed_features ( feature_map ) <nl> + features . pop ( ' y ' ) <nl> + return features , None <nl> + <nl> + self . _test_complete_flow ( <nl> + train_input_fn = _train_input_fn , <nl> + eval_input_fn = _eval_input_fn , <nl> + predict_input_fn = _predict_input_fn , <nl> + input_dimension = input_dimension , <nl> + label_dimension = label_dimension , <nl> + prediction_length = prediction_length ) <nl> + <nl> + <nl> + class BaseLinearRegressorTrainingTest ( object ) : <nl> + <nl> + def __init__ ( self , linear_regressor_fn ) : <nl> + self . _linear_regressor_fn = linear_regressor_fn <nl> + <nl> + def setUp ( self ) : <nl> + self . _model_dir = tempfile . mkdtemp ( ) <nl> + <nl> + def tearDown ( self ) : <nl> + if self . _model_dir : <nl> + writer_cache . FileWriterCache . clear ( ) <nl> + shutil . rmtree ( self . _model_dir ) <nl> + <nl> + def _mock_optimizer ( self , expected_loss = None ) : <nl> + expected_var_names = [ <nl> + ' % s / part_0 : 0 ' % AGE_WEIGHT_NAME , <nl> + ' % s / part_0 : 0 ' % BIAS_NAME <nl> + ] <nl> + <nl> + def _minimize ( loss , global_step = None , var_list = None ) : <nl> + trainable_vars = var_list or ops . get_collection ( <nl> + ops . GraphKeys . TRAINABLE_VARIABLES ) <nl> + self . assertItemsEqual ( expected_var_names , <nl> + [ var . name for var in trainable_vars ] ) <nl> + <nl> + # Verify loss . We can ' t check the value directly , so we add an assert op . <nl> + self . assertEquals ( 0 , loss . shape . ndims ) <nl> + if expected_loss is None : <nl> + if global_step is not None : <nl> + return state_ops . assign_add ( global_step , 1 ) . op <nl> + return control_flow_ops . no_op ( ) <nl> + assert_loss = assert_close ( <nl> + math_ops . to_float ( expected_loss , name = ' expected ' ) , <nl> + loss , <nl> + name = ' assert_loss ' ) <nl> + with ops . control_dependencies ( ( assert_loss , ) ) : <nl> + if global_step is not None : <nl> + return state_ops . assign_add ( global_step , 1 ) . op <nl> + return control_flow_ops . no_op ( ) <nl> + <nl> + mock_optimizer = test . mock . NonCallableMock ( <nl> + spec = optimizer . Optimizer , <nl> + wraps = optimizer . Optimizer ( use_locking = False , name = ' my_optimizer ' ) ) <nl> + mock_optimizer . minimize = test . mock . MagicMock ( wraps = _minimize ) <nl> + <nl> + # NOTE : Estimator . params performs a deepcopy , which wreaks havoc with mocks . 
<nl> + # So , return mock_optimizer itself for deepcopy . <nl> + mock_optimizer . __deepcopy__ = lambda _ : mock_optimizer <nl> + return mock_optimizer <nl> + <nl> + def _assert_checkpoint ( self , <nl> + expected_global_step , <nl> + expected_age_weight = None , <nl> + expected_bias = None ) : <nl> + shapes = { <nl> + name : shape <nl> + for ( name , shape ) in checkpoint_utils . list_variables ( self . _model_dir ) <nl> + } <nl> + <nl> + self . assertEqual ( [ ] , shapes [ ops . GraphKeys . GLOBAL_STEP ] ) <nl> + self . assertEqual ( expected_global_step , <nl> + checkpoint_utils . load_variable ( self . _model_dir , <nl> + ops . GraphKeys . GLOBAL_STEP ) ) <nl> + <nl> + self . assertEqual ( [ 1 , 1 ] , shapes [ AGE_WEIGHT_NAME ] ) <nl> + if expected_age_weight is not None : <nl> + self . assertEqual ( expected_age_weight , <nl> + checkpoint_utils . load_variable ( self . _model_dir , <nl> + AGE_WEIGHT_NAME ) ) <nl> + <nl> + self . assertEqual ( [ 1 ] , shapes [ BIAS_NAME ] ) <nl> + if expected_bias is not None : <nl> + self . assertEqual ( expected_bias , <nl> + checkpoint_utils . load_variable ( self . _model_dir , <nl> + BIAS_NAME ) ) <nl> + <nl> + def testFromScratchWithDefaultOptimizer ( self ) : <nl> + # Create LinearRegressor . <nl> + label = 5 . <nl> + age = 17 <nl> + linear_regressor = self . _linear_regressor_fn ( <nl> + feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> + model_dir = self . _model_dir ) <nl> + <nl> + # Train for a few steps , and validate final checkpoint . <nl> + num_steps = 10 <nl> + linear_regressor . train ( <nl> + input_fn = lambda : ( { ' age ' : ( ( age , ) , ) } , ( ( label , ) , ) ) , steps = num_steps ) <nl> + self . _assert_checkpoint ( num_steps ) <nl> + <nl> + def testTrainWithOneDimLabel ( self ) : <nl> + label_dimension = 1 <nl> + batch_size = 20 <nl> + feature_columns = [ feature_column_lib . numeric_column ( ' age ' , shape = ( 1 , ) ) ] <nl> + est = self . _linear_regressor_fn ( <nl> + feature_columns = feature_columns , <nl> + label_dimension = label_dimension , <nl> + model_dir = self . _model_dir ) <nl> + data_rank_1 = np . linspace ( 0 . , 2 . , batch_size , dtype = np . float32 ) <nl> + self . assertEqual ( ( batch_size , ) , data_rank_1 . shape ) <nl> + <nl> + train_input_fn = numpy_io . numpy_input_fn ( <nl> + x = { ' age ' : data_rank_1 } , <nl> + y = data_rank_1 , <nl> + batch_size = batch_size , <nl> + num_epochs = None , <nl> + shuffle = True ) <nl> + est . train ( train_input_fn , steps = 200 ) <nl> + self . _assert_checkpoint ( 200 ) <nl> + <nl> + def testTrainWithOneDimWeight ( self ) : <nl> + label_dimension = 1 <nl> + batch_size = 20 <nl> + feature_columns = [ feature_column_lib . numeric_column ( ' age ' , shape = ( 1 , ) ) ] <nl> + est = self . _linear_regressor_fn ( <nl> + feature_columns = feature_columns , <nl> + label_dimension = label_dimension , <nl> + weight_feature_key = ' w ' , <nl> + model_dir = self . _model_dir ) <nl> + <nl> + data_rank_1 = np . linspace ( 0 . , 2 . , batch_size , dtype = np . float32 ) <nl> + self . assertEqual ( ( batch_size , ) , data_rank_1 . shape ) <nl> + <nl> + train_input_fn = numpy_io . numpy_input_fn ( <nl> + x = { ' age ' : data_rank_1 , <nl> + ' w ' : data_rank_1 } , <nl> + y = data_rank_1 , <nl> + batch_size = batch_size , <nl> + num_epochs = None , <nl> + shuffle = True ) <nl> + est . train ( train_input_fn , steps = 200 ) <nl> + self . _assert_checkpoint ( 200 ) <nl> + <nl> + def testFromScratch ( self ) : <nl> + # Create LinearRegressor . 
<nl> + label = 5 . <nl> + age = 17 <nl> + # loss = ( logits - label ) ^ 2 = ( 0 - 5 . ) ^ 2 = 25 . <nl> + mock_optimizer = self . _mock_optimizer ( expected_loss = 25 . ) <nl> + linear_regressor = self . _linear_regressor_fn ( <nl> + feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> + model_dir = self . _model_dir , <nl> + optimizer = mock_optimizer ) <nl> + self . assertEqual ( 0 , mock_optimizer . minimize . call_count ) <nl> + <nl> + # Train for a few steps , and validate optimizer and final checkpoint . <nl> + num_steps = 10 <nl> + linear_regressor . train ( <nl> + input_fn = lambda : ( { ' age ' : ( ( age , ) , ) } , ( ( label , ) , ) ) , steps = num_steps ) <nl> + self . assertEqual ( 1 , mock_optimizer . minimize . call_count ) <nl> + self . _assert_checkpoint ( <nl> + expected_global_step = num_steps , <nl> + expected_age_weight = 0 . , <nl> + expected_bias = 0 . ) <nl> + <nl> + def testFromCheckpoint ( self ) : <nl> + # Create initial checkpoint . <nl> + age_weight = 10 . 0 <nl> + bias = 5 . 0 <nl> + initial_global_step = 100 <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + variables . Variable ( [ [ age_weight ] ] , name = AGE_WEIGHT_NAME ) <nl> + variables . Variable ( [ bias ] , name = BIAS_NAME ) <nl> + variables . Variable ( <nl> + initial_global_step , <nl> + name = ops . GraphKeys . GLOBAL_STEP , <nl> + dtype = dtypes . int64 ) <nl> + save_variables_to_ckpt ( self . _model_dir ) <nl> + <nl> + # logits = age * age_weight + bias = 17 * 10 . + 5 . = 175 <nl> + # loss = ( logits - label ) ^ 2 = ( 175 - 5 ) ^ 2 = 28900 <nl> + mock_optimizer = self . _mock_optimizer ( expected_loss = 28900 . ) <nl> + linear_regressor = self . _linear_regressor_fn ( <nl> + feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> + model_dir = self . _model_dir , <nl> + optimizer = mock_optimizer ) <nl> + self . assertEqual ( 0 , mock_optimizer . minimize . call_count ) <nl> + <nl> + # Train for a few steps , and validate optimizer and final checkpoint . <nl> + num_steps = 10 <nl> + linear_regressor . train ( <nl> + input_fn = lambda : ( { ' age ' : ( ( 17 , ) , ) } , ( ( 5 . , ) , ) ) , steps = num_steps ) <nl> + self . assertEqual ( 1 , mock_optimizer . minimize . call_count ) <nl> + self . _assert_checkpoint ( <nl> + expected_global_step = initial_global_step + num_steps , <nl> + expected_age_weight = age_weight , <nl> + expected_bias = bias ) <nl> + <nl> + def testFromCheckpointMultiBatch ( self ) : <nl> + # Create initial checkpoint . <nl> + age_weight = 10 . 0 <nl> + bias = 5 . 0 <nl> + initial_global_step = 100 <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + variables . Variable ( [ [ age_weight ] ] , name = AGE_WEIGHT_NAME ) <nl> + variables . Variable ( [ bias ] , name = BIAS_NAME ) <nl> + variables . Variable ( <nl> + initial_global_step , <nl> + name = ops . GraphKeys . GLOBAL_STEP , <nl> + dtype = dtypes . int64 ) <nl> + save_variables_to_ckpt ( self . _model_dir ) <nl> + <nl> + # logits = age * age_weight + bias <nl> + # logits [ 0 ] = 17 * 10 . + 5 . = 175 <nl> + # logits [ 1 ] = 15 * 10 . + 5 . = 155 <nl> + # loss = sum ( logits - label ) ^ 2 = ( 175 - 5 ) ^ 2 + ( 155 - 3 ) ^ 2 = 52004 <nl> + mock_optimizer = self . _mock_optimizer ( expected_loss = 52004 . ) <nl> + linear_regressor = self . _linear_regressor_fn ( <nl> + feature_columns = ( feature_column_lib . numeric_column ( ' age ' ) , ) , <nl> + model_dir = self . _model_dir , <nl> + optimizer = mock_optimizer ) <nl> + self . assertEqual ( 0 , mock_optimizer . 
minimize . call_count ) <nl> + <nl> + # Train for a few steps , and validate optimizer and final checkpoint . <nl> + num_steps = 10 <nl> + linear_regressor . train ( <nl> + input_fn = lambda : ( { ' age ' : ( ( 17 , ) , ( 15 , ) ) } , ( ( 5 . , ) , ( 3 . , ) ) ) , <nl> + steps = num_steps ) <nl> + self . assertEqual ( 1 , mock_optimizer . minimize . call_count ) <nl> + self . _assert_checkpoint ( <nl> + expected_global_step = initial_global_step + num_steps , <nl> + expected_age_weight = age_weight , <nl> + expected_bias = bias ) <nl> mmm a / tensorflow / python / estimator / export / export . py <nl> ppp b / tensorflow / python / estimator / export / export . py <nl> def build_parsing_serving_input_receiver_fn ( feature_spec , <nl> default_batch_size = None ) : <nl> " " " Build a serving_input_receiver_fn expecting fed tf . Examples . <nl> <nl> - Creates an input_fn that expects a serialized tf . Example fed into a string <nl> - placeholder . The function parses the tf . Example according to the provided <nl> - feature_spec , and returns all parsed Tensors as features . This input_fn is <nl> - for use at serving time , so the labels return value is always None . <nl> + Creates a serving_input_receiver_fn that expects a serialized tf . Example fed <nl> + into a string placeholder . The function parses the tf . Example according to <nl> + the provided feature_spec , and returns all parsed Tensors as features . <nl> <nl> Args : <nl> feature_spec : a dict of string to ` VarLenFeature ` / ` FixedLenFeature ` . <nl> mmm a / tensorflow / python / framework / constant_op . py <nl> ppp b / tensorflow / python / framework / constant_op . py <nl> def constant ( value , dtype = None , shape = None , name = " Const " , verify_shape = False ) : <nl> return const_tensor <nl> <nl> <nl> + def is_constant ( tensor_or_op ) : <nl> + if isinstance ( tensor_or_op , ops . Tensor ) : <nl> + op = tensor_or_op . op <nl> + else : <nl> + op = tensor_or_op <nl> + return op . type = = " Const " <nl> + <nl> + <nl> def _constant_tensor_conversion_function ( v , dtype = None , name = None , <nl> as_ref = False ) : <nl> _ = as_ref <nl> mmm a / tensorflow / python / framework / cpp_shape_inference . cc <nl> ppp b / tensorflow / python / framework / cpp_shape_inference . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / python / framework / cpp_shape_inference . h " <nl> <nl> - # include " tensorflow / core / framework / graph . pb . h " <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / shape_inference . h " <nl> # include " tensorflow / core / lib / core / errors . h " <nl> mmm a / tensorflow / python / framework / meta_graph . py <nl> ppp b / tensorflow / python / framework / meta_graph . py <nl> def create_meta_graph_def ( meta_info_def = None , <nl> meta_graph_def . saver_def . MergeFrom ( saver_def ) <nl> <nl> # Adds collection_list . <nl> - if collection_list : <nl> + if collection_list is not None : <nl> clist = collection_list <nl> else : <nl> clist = graph . get_all_collection_keys ( ) <nl> def import_scoped_meta_graph ( meta_graph_or_file , <nl> the argument is a file containing a ` MetaGraphDef ` protocol buffer , <nl> it constructs a protocol buffer from the file content . The function <nl> then adds all the nodes from the ` graph_def ` field to the <nl> - current graph , recreates the desired collections , and returns a saver <nl> - constructed from the ` saver_def ` field . 
<nl> + current graph , recreates the desired collections , and returns a dictionary of <nl> + all the Variables imported into the name scope . <nl> <nl> In combination with ` export_scoped_meta_graph ( ) ` , this function can be used to <nl> <nl> mmm a / tensorflow / python / framework / python_op_gen . cc <nl> ppp b / tensorflow / python / framework / python_op_gen . cc <nl> limitations under the License . <nl> # include < unordered_map > <nl> # include " tensorflow / core / framework / attr_value . pb . h " <nl> # include " tensorflow / core / framework / op . h " <nl> - # include " tensorflow / core / framework / op_def . pb . h " <nl> # include " tensorflow / core / framework / op_def . pb_text . h " <nl> + # include " tensorflow / core / framework / op_def . pb . h " <nl> # include " tensorflow / core / framework / op_def_util . h " <nl> # include " tensorflow / core / framework / op_gen_lib . h " <nl> - # include " tensorflow / core / framework / tensor . pb . h " <nl> # include " tensorflow / core / framework / tensor . pb_text . h " <nl> + # include " tensorflow / core / framework / tensor . pb . h " <nl> + # include " tensorflow / core / framework / tensor_shape . pb . h " <nl> # include " tensorflow / core / framework / types . h " <nl> # include " tensorflow / core / framework / types . pb . h " <nl> # include " tensorflow / core / lib / gtl / map_util . h " <nl> mmm a / tensorflow / python / kernel_tests / atrous_convolution_test . py <nl> ppp b / tensorflow / python / kernel_tests / atrous_convolution_test . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + import contextlib <nl> + <nl> import numpy as np <nl> <nl> from tensorflow . python . framework import constant_op <nl> def upsample_filters ( filters , rate ) : <nl> return output <nl> <nl> <nl> + @ contextlib . contextmanager <nl> + def delay_checks ( sess ) : <nl> + " " " Context manager for combining checks depending on tensor evaluations . <nl> + <nl> + Each call to Session . run has some overhead , and this overhead can easily <nl> + account for the majority of the time spent in tests that call Session . run ( or <nl> + Tensor . eval ) many times . <nl> + <nl> + This context manager provides a mechanism for registering callback functions <nl> + and associated tensors . When the context is exited , all of the tensors <nl> + associated with all of the registrations are evaluated with a single call to <nl> + Session . run , and then each registered callback function is called with the <nl> + values of its associated tensors . <nl> + <nl> + Args : <nl> + sess : The session to use to evaluate the tensors . <nl> + <nl> + Yields : <nl> + A function ` add_check ( check , * args , * * kwargs ) ` where ` check ` is the <nl> + callback function to be invoked , and ` * args ` and ` * * kwargs ` specify the <nl> + associated Tensors . <nl> + " " " <nl> + checks = [ ] <nl> + def add_check ( check , * args , * * kwargs ) : <nl> + checks . append ( ( check , args , kwargs ) ) <nl> + yield add_check <nl> + all_values = sess . run ( [ [ args , kwargs ] for _ , args , kwargs in checks ] ) <nl> + for ( check , _ , _ ) , ( args , kwargs ) in zip ( checks , all_values ) : <nl> + check ( * args , * * kwargs ) <nl> + <nl> + <nl> class AtrousConvolutionTest ( test . TestCase ) : <nl> <nl> - def _test_atrous_convolution ( self , input_shape , filter_shape , dilation_rate , <nl> - * * kwargs ) : <nl> - filters = np . arange ( <nl> - np . prod ( filter_shape ) , dtype = np . float32 ) . 
reshape ( filter_shape ) <nl> + def _test_atrous_convolution ( self , add_check , input_shape , filter_shape , <nl> + dilation_rate , * * kwargs ) : <nl> + filters = np . arange ( np . prod ( filter_shape ) , <nl> + dtype = np . float32 ) . reshape ( filter_shape ) <nl> filters_upsampled = upsample_filters ( filters , dilation_rate ) <nl> x = np . arange ( np . prod ( input_shape ) , dtype = np . float32 ) . reshape ( input_shape ) <nl> - y1 = nn_ops . convolution ( <nl> - input = x , filter = filters , dilation_rate = dilation_rate , * * kwargs ) <nl> + y1 = nn_ops . convolution ( input = x , filter = filters , <nl> + dilation_rate = dilation_rate , * * kwargs ) <nl> y2 = nn_ops . convolution ( input = x , filter = filters_upsampled , * * kwargs ) <nl> - self . assertAllClose ( y1 . eval ( ) , y2 . eval ( ) , rtol = 1e - 2 , atol = 1e - 2 ) <nl> + <nl> + def check ( y1_eval , y2_eval ) : <nl> + self . assertAllClose ( y1_eval , y2_eval , rtol = 1e - 2 , atol = 1e - 2 ) <nl> + <nl> + add_check ( check , y1 , y2 ) <nl> <nl> def testAtrousConvolution2D ( self ) : <nl> - with self . test_session ( ) : <nl> - for padding in [ " SAME " , " VALID " ] : <nl> - for height , width in [ [ 9 , 9 ] , [ 9 , 10 ] ] : <nl> - for kernel_height , kernel_width in [ [ 1 , 1 ] , [ 2 , 2 ] , [ 2 , 3 ] ] : <nl> - for dilation_rate in [ [ 1 , 1 ] , [ 3 , 2 ] , [ 2 , 1 ] ] : <nl> - self . _test_atrous_convolution ( <nl> - input_shape = [ 2 , height , width , 2 ] , <nl> - filter_shape = [ kernel_height , kernel_width , 2 , 2 ] , <nl> - padding = padding , <nl> - dilation_rate = dilation_rate ) <nl> + with self . test_session ( ) as sess : <nl> + with delay_checks ( sess ) as add_check : <nl> + for padding in [ " SAME " , " VALID " ] : <nl> + for height , width in [ [ 9 , 9 ] , [ 9 , 10 ] ] : <nl> + for kernel_height , kernel_width in [ [ 1 , 1 ] , [ 2 , 2 ] , [ 2 , 3 ] ] : <nl> + for dilation_rate in [ [ 1 , 1 ] , [ 3 , 2 ] , [ 2 , 1 ] ] : <nl> + self . _test_atrous_convolution ( <nl> + add_check = add_check , <nl> + input_shape = [ 2 , height , width , 2 ] , <nl> + filter_shape = [ kernel_height , kernel_width , 2 , 2 ] , <nl> + padding = padding , <nl> + dilation_rate = dilation_rate , <nl> + ) <nl> <nl> def testAtrousConvolution3D ( self ) : <nl> - with self . test_session ( ) : <nl> - for padding in [ " SAME " , " VALID " ] : <nl> - for depth , height , width in [ [ 9 , 9 , 10 ] , [ 9 , 10 , 9 ] ] : <nl> - for kernel_depth , kernel_height , kernel_width in [ [ 3 , 3 , 3 ] , <nl> - [ 3 , 2 , 2 ] , <nl> - [ 2 , 1 , 3 ] ] : <nl> - for dilation_rate in [ [ 1 , 1 , 1 ] , [ 3 , 3 , 3 ] , [ 3 , 2 , 3 ] , [ 3 , 1 , 2 ] ] : <nl> - self . _test_atrous_convolution ( <nl> - input_shape = [ 2 , depth , height , width , 2 ] , <nl> - filter_shape = [ <nl> - kernel_depth , kernel_height , kernel_width , 2 , 2 <nl> - ] , <nl> - padding = padding , <nl> - dilation_rate = dilation_rate ) <nl> + with self . test_session ( ) as sess : <nl> + with delay_checks ( sess ) as add_check : <nl> + for padding in [ " SAME " , " VALID " ] : <nl> + for depth , height , width in [ [ 9 , 9 , 10 ] , [ 9 , 10 , 9 ] ] : <nl> + for kernel_depth , kernel_height , kernel_width in [ [ 3 , 3 , <nl> + 3 ] , [ 3 , 2 , 2 ] , <nl> + [ 2 , 1 , 3 ] ] : <nl> + for dilation_rate in [ [ 1 , 1 , 1 ] , [ 3 , 3 , 3 ] , [ 3 , 2 , 3 ] , [ 3 , 1 , 2 ] ] : <nl> + self . 
_test_atrous_convolution ( <nl> + add_check = add_check , <nl> + input_shape = [ 2 , depth , height , width , 2 ] , <nl> + filter_shape = [ <nl> + kernel_depth , kernel_height , kernel_width , 2 , 2 <nl> + ] , <nl> + padding = padding , <nl> + dilation_rate = dilation_rate , <nl> + ) <nl> <nl> def testAtrousConvolution1D ( self ) : <nl> - with self . test_session ( ) : <nl> - for padding in [ " SAME " , " VALID " ] : <nl> - for width in [ 9 , 10 ] : <nl> - for kernel_width in range ( 1 , 4 ) : <nl> - for rate in range ( 1 , 4 ) : <nl> - self . _test_atrous_convolution ( <nl> - input_shape = [ 2 , width , 2 ] , <nl> - filter_shape = [ kernel_width , 2 , 2 ] , <nl> - padding = padding , <nl> - dilation_rate = [ rate ] ) <nl> + with self . test_session ( ) as sess : <nl> + with delay_checks ( sess ) as add_check : <nl> + for padding in [ " SAME " , " VALID " ] : <nl> + for width in [ 9 , 10 ] : <nl> + for kernel_width in range ( 1 , 4 ) : <nl> + for rate in range ( 1 , 4 ) : <nl> + self . _test_atrous_convolution ( <nl> + add_check = add_check , <nl> + input_shape = [ 2 , width , 2 ] , <nl> + filter_shape = [ kernel_width , 2 , 2 ] , <nl> + padding = padding , <nl> + dilation_rate = [ rate ] , <nl> + ) <nl> <nl> def testAtrousConvolutionNC ( self ) : <nl> if test . is_gpu_available ( cuda_only = True ) : <nl> # " NCW " and " NCHW " formats are currently supported only on CUDA . <nl> - with self . test_session ( use_gpu = True ) : <nl> - for padding in [ " SAME " , " VALID " ] : <nl> - self . _test_atrous_convolution ( <nl> - input_shape = [ 2 , 2 , 9 ] , <nl> - padding = padding , <nl> - filter_shape = [ 3 , 2 , 2 ] , <nl> - dilation_rate = [ 2 ] , <nl> - data_format = " NCW " ) <nl> - self . _test_atrous_convolution ( <nl> - input_shape = [ 2 , 2 , 9 , 5 ] , <nl> - padding = padding , <nl> - filter_shape = [ 3 , 3 , 2 , 2 ] , <nl> - dilation_rate = [ 2 , 1 ] , <nl> - data_format = " NCHW " ) <nl> + with self . test_session ( use_gpu = True ) as sess : <nl> + with delay_checks ( sess ) as add_check : <nl> + for padding in [ " SAME " , " VALID " ] : <nl> + self . _test_atrous_convolution ( <nl> + add_check = add_check , <nl> + input_shape = [ 2 , 2 , 9 ] , <nl> + padding = padding , <nl> + filter_shape = [ 3 , 2 , 2 ] , <nl> + dilation_rate = [ 2 ] , <nl> + data_format = " NCW " , <nl> + ) <nl> + self . _test_atrous_convolution ( <nl> + add_check = add_check , <nl> + input_shape = [ 2 , 2 , 9 , 5 ] , <nl> + padding = padding , <nl> + filter_shape = [ 3 , 3 , 2 , 2 ] , <nl> + dilation_rate = [ 2 , 1 ] , <nl> + data_format = " NCHW " , <nl> + ) <nl> <nl> def testAtrousSequence ( self ) : <nl> " " " Tests optimization of sequence of atrous convolutions . <nl> <nl> See the documentation of with_space_to_batch . <nl> " " " <nl> - with self . test_session ( ) : <nl> - for padding in [ " SAME " , " VALID " ] : <nl> - for height in range ( 15 , 17 ) : <nl> - for width in range ( 15 , 17 ) : <nl> - x_shape = [ 3 , height , width , 2 ] <nl> - x = np . random . random_sample ( x_shape ) . astype ( np . float32 ) <nl> - <nl> - kernel_sizes = [ 1 , 3 ] if padding = = " SAME " else range ( 1 , 3 ) <nl> - for kernel in kernel_sizes : <nl> - f_shape = [ kernel , kernel , 2 , 2 ] <nl> - f1 = 1e - 2 * np . random . random_sample ( f_shape ) . astype ( np . float32 ) <nl> - f2 = 1e - 2 * np . random . random_sample ( f_shape ) . astype ( np . float32 ) <nl> - <nl> - def combined_op ( converted_input , num_spatial_dims , padding_arg ) : # pylint : disable = unused - argument <nl> - result = nn_ops . 
convolution ( <nl> - input = converted_input , filter = f1 , <nl> - padding = padding ) # pylint : disable = cell - var - from - loop <nl> - result = nn_ops . convolution ( <nl> - input = result , filter = f2 , <nl> - padding = padding ) # pylint : disable = cell - var - from - loop <nl> - return result <nl> - <nl> - for rate_height in range ( 2 , 4 ) : <nl> - for rate_width in range ( 2 , 4 ) : <nl> - dilation_rate = [ rate_height , rate_width ] <nl> - y1 = nn_ops . convolution ( <nl> - input = x , <nl> - filter = f1 , <nl> - padding = padding , <nl> - dilation_rate = dilation_rate ) <nl> - y1 = nn_ops . convolution ( <nl> - input = y1 , <nl> - filter = f2 , <nl> - padding = padding , <nl> - dilation_rate = dilation_rate ) <nl> - y2 = nn_ops . with_space_to_batch ( <nl> - input = x , <nl> - dilation_rate = dilation_rate , <nl> - op = combined_op , <nl> - padding = " VALID " ) <nl> - self . assertAllClose ( <nl> - y1 . eval ( ) , y2 . eval ( ) , rtol = 1e - 2 , atol = 1e - 2 ) <nl> + with self . test_session ( ) as sess : <nl> + with delay_checks ( sess ) as add_check : <nl> + for padding in [ " SAME " , " VALID " ] : <nl> + for height in range ( 15 , 17 ) : <nl> + for width in range ( 15 , 17 ) : <nl> + x_shape = [ 3 , height , width , 2 ] <nl> + x = np . random . random_sample ( x_shape ) . astype ( np . float32 ) <nl> + <nl> + kernel_sizes = [ 1 , 3 ] if padding = = " SAME " else range ( 1 , 3 ) <nl> + for kernel in kernel_sizes : <nl> + f_shape = [ kernel , kernel , 2 , 2 ] <nl> + f1 = 1e - 2 * np . random . random_sample ( f_shape ) . astype ( np . float32 ) <nl> + f2 = 1e - 2 * np . random . random_sample ( f_shape ) . astype ( np . float32 ) <nl> + <nl> + def combined_op ( converted_input , num_spatial_dims , padding_arg ) : # pylint : disable = unused - argument <nl> + # pylint : disable = cell - var - from - loop <nl> + result = nn_ops . convolution ( input = converted_input , filter = f1 , <nl> + padding = padding ) <nl> + result = nn_ops . convolution ( input = result , filter = f2 , <nl> + padding = padding ) <nl> + # pylint : enable = cell - var - from - loop <nl> + return result <nl> + <nl> + for rate_height in range ( 2 , 4 ) : <nl> + for rate_width in range ( 2 , 4 ) : <nl> + dilation_rate = [ rate_height , rate_width ] <nl> + y1 = nn_ops . convolution ( input = x , filter = f1 , padding = padding , <nl> + dilation_rate = dilation_rate ) <nl> + y1 = nn_ops . convolution ( input = y1 , filter = f2 , <nl> + padding = padding , <nl> + dilation_rate = dilation_rate ) <nl> + y2 = nn_ops . with_space_to_batch ( <nl> + input = x , dilation_rate = dilation_rate , op = combined_op , <nl> + padding = " VALID " ) <nl> + <nl> + def check ( y1_eval , y2_eval ) : <nl> + self . assertAllClose ( y1_eval , y2_eval , rtol = 1e - 2 , <nl> + atol = 1e - 2 ) <nl> + add_check ( check , y1 , y2 ) <nl> <nl> def _test_gradient ( self , x_shape , f_shape , dilation_rate , padding ) : <nl> x_val = np . random . random_sample ( x_shape ) . astype ( np . float32 ) <nl> mmm a / tensorflow / python / kernel_tests / lookup_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / lookup_ops_test . py <nl> <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import sparse_tensor <nl> from tensorflow . python . framework import test_util <nl> + from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import lookup_ops <nl> from tensorflow . python . platform import test <nl> from tensorflow . python . 
training import server_lib <nl> def test_string_index_table_from_file_tensor_filename ( self ) : <nl> self . assertRaises ( errors_impl . OpError , ids . eval ) <nl> lookup_ops . tables_initializer ( ) . run ( ) <nl> self . assertAllEqual ( ( 1 , 2 , 3 ) , ids . eval ( ) ) <nl> + self . assertEqual ( 1 , <nl> + len ( ops . get_collection ( ops . GraphKeys . ASSET_FILEPATHS ) ) ) <nl> + <nl> + def test_string_index_table_from_file_placeholder_filename ( self ) : <nl> + vocabulary_file = self . _createVocabFile ( " f2i_vocab1 . txt " ) <nl> + with self . test_session ( ) : <nl> + vocabulary_placeholder = array_ops . placeholder ( dtypes . string , [ ] ) <nl> + table = lookup_ops . index_table_from_file ( <nl> + vocabulary_file = vocabulary_placeholder , num_oov_buckets = 1 ) <nl> + ids = table . lookup ( constant_op . constant ( [ " salad " , " surgery " , " tarkus " ] ) ) <nl> + <nl> + self . assertRaises ( errors_impl . OpError , ids . eval ) <nl> + feed_dict = { vocabulary_placeholder . name : vocabulary_file } <nl> + lookup_ops . tables_initializer ( ) . run ( feed_dict = feed_dict ) <nl> + self . assertAllEqual ( ( 1 , 2 , 3 ) , ids . eval ( ) ) <nl> + self . assertEqual ( 0 , <nl> + len ( ops . get_collection ( ops . GraphKeys . ASSET_FILEPATHS ) ) ) <nl> <nl> def test_int32_index_table_from_file ( self ) : <nl> vocabulary_file = self . _createVocabFile ( <nl> mmm a / tensorflow / python / kernel_tests / losses_test . py <nl> ppp b / tensorflow / python / kernel_tests / losses_test . py <nl> def testMixedQuadraticLinear ( self ) : <nl> expected_loss = ( quadratic + linear ) / 2 . <nl> self . assertAllClose ( loss . eval ( ) , expected_loss , atol = 1e - 5 ) <nl> <nl> + def testAllQuadraticDelta ( self ) : <nl> + with self . test_session ( ) : <nl> + delta = 0 . 5 <nl> + predictions = constant_op . constant ( [ 1 . 5 , - 1 . 4 , - 0 . 5 , 0 . 0 ] ) <nl> + labels = constant_op . constant ( [ 1 . 0 , - 1 . 0 , 0 . 0 , 0 . 5 ] ) <nl> + expected = 0 . 5 * np . array ( [ 0 . 5 * * 2 , 0 . 4 * * 2 , 0 . 5 * * 2 , 0 . 5 * * 2 ] ) . mean ( ) <nl> + loss = losses . huber_loss ( labels , predictions , delta = delta ) <nl> + self . assertAllClose ( expected , loss . eval ( ) , atol = 1e - 5 ) <nl> + <nl> + def testAllLinearDelta ( self ) : <nl> + delta = 0 . 5 <nl> + predictions = constant_op . constant ( [ 1 . 5 , - 1 . 4 , - 1 . 0 , 0 . 0 ] ) <nl> + labels = constant_op . constant ( [ 0 . 0 , 1 . 0 , 0 . 0 , 1 . 5 ] ) <nl> + expected = delta * np . array ( [ 1 . 5 , 2 . 4 , 1 . 0 , 1 . 5 ] ) . mean ( ) <nl> + expected - = 0 . 5 * delta * * 2 <nl> + loss = losses . huber_loss ( labels , predictions , delta = delta ) <nl> + with self . test_session ( ) : <nl> + self . assertAllClose ( expected , loss . eval ( ) , atol = 1e - 5 ) <nl> + <nl> <nl> class MeanSquaredErrorTest ( test . TestCase ) : <nl> <nl> mmm a / tensorflow / python / kernel_tests / resource_variable_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / resource_variable_ops_test . py <nl> def testAssignMethod ( self ) : <nl> v . assign ( 2 . 0 ) . eval ( ) <nl> self . assertEqual ( 2 . 0 , v . value ( ) . eval ( ) ) <nl> <nl> + def testLoad ( self ) : <nl> + with self . test_session ( ) : <nl> + v = resource_variable_ops . ResourceVariable ( 1 . 0 ) <nl> + variables . global_variables_initializer ( ) . run ( ) <nl> + v . load ( 2 . 0 ) <nl> + self . assertEqual ( 2 . 0 , v . value ( ) . eval ( ) ) <nl> + <nl> def testToFromProto ( self ) : <nl> with self . test_session ( ) : <nl> v = resource_variable_ops . 
ResourceVariable ( 1 . 0 ) <nl> mmm a / tensorflow / python / layers / convolutional . py <nl> ppp b / tensorflow / python / layers / convolutional . py <nl> def conv1d ( inputs , <nl> activity_regularizer = activity_regularizer , <nl> trainable = trainable , <nl> name = name , <nl> + dtype = inputs . dtype . base_dtype , <nl> _reuse = reuse , <nl> _scope = name ) <nl> return layer . apply ( inputs ) <nl> def conv2d ( inputs , <nl> activity_regularizer = activity_regularizer , <nl> trainable = trainable , <nl> name = name , <nl> + dtype = inputs . dtype . base_dtype , <nl> _reuse = reuse , <nl> _scope = name ) <nl> return layer . apply ( inputs ) <nl> def conv2d_transpose ( inputs , <nl> activity_regularizer = activity_regularizer , <nl> trainable = trainable , <nl> name = name , <nl> + dtype = inputs . dtype . base_dtype , <nl> _reuse = reuse , <nl> _scope = name ) <nl> return layer . apply ( inputs ) <nl> mmm a / tensorflow / python / layers / convolutional_test . py <nl> ppp b / tensorflow / python / layers / convolutional_test . py <nl> def testCreateConv2D ( self ) : <nl> self . assertListEqual ( layer . kernel . get_shape ( ) . as_list ( ) , [ 3 , 3 , 4 , 32 ] ) <nl> self . assertListEqual ( layer . bias . get_shape ( ) . as_list ( ) , [ 32 ] ) <nl> <nl> + def testConv2DFloat16 ( self ) : <nl> + height , width = 7 , 9 <nl> + images = random_ops . random_uniform ( ( 5 , height , width , 4 ) , dtype = ' float16 ' ) <nl> + output = conv_layers . conv2d ( images , 32 , [ 3 , 3 ] , activation = nn_ops . relu ) <nl> + self . assertListEqual ( output . get_shape ( ) . as_list ( ) , <nl> + [ 5 , height - 2 , width - 2 , 32 ] ) <nl> + <nl> def testCreateConv2DIntegerKernelSize ( self ) : <nl> height , width = 7 , 9 <nl> images = random_ops . random_uniform ( ( 5 , height , width , 4 ) ) <nl> def testCreateConv1D ( self ) : <nl> self . assertListEqual ( layer . kernel . get_shape ( ) . as_list ( ) , [ 3 , 4 , 32 ] ) <nl> self . assertListEqual ( layer . bias . get_shape ( ) . as_list ( ) , [ 32 ] ) <nl> <nl> + def testConv1DFloat16 ( self ) : <nl> + width = 7 <nl> + data = random_ops . random_uniform ( ( 5 , width , 4 ) , dtype = ' float16 ' ) <nl> + output = conv_layers . conv1d ( data , 32 , 3 , activation = nn_ops . relu ) <nl> + self . assertListEqual ( output . get_shape ( ) . as_list ( ) , [ 5 , width - 2 , 32 ] ) <nl> + <nl> def testCreateConv1DChannelsFirst ( self ) : <nl> width = 7 <nl> data = random_ops . random_uniform ( ( 5 , 4 , width ) ) <nl> def testCreateConv2DTranspose ( self ) : <nl> self . assertListEqual ( layer . kernel . get_shape ( ) . as_list ( ) , [ 3 , 3 , 32 , 4 ] ) <nl> self . assertListEqual ( layer . bias . get_shape ( ) . as_list ( ) , [ 32 ] ) <nl> <nl> + def testConv2DTransposeFloat16 ( self ) : <nl> + height , width = 7 , 9 <nl> + images = random_ops . random_uniform ( ( 5 , height , width , 4 ) , dtype = ' float16 ' ) <nl> + output = conv_layers . conv2d_transpose ( images , 32 , [ 3 , 3 ] , <nl> + activation = nn_ops . relu ) <nl> + self . assertListEqual ( output . get_shape ( ) . as_list ( ) , <nl> + [ 5 , height + 2 , width + 2 , 32 ] ) <nl> + <nl> def testCreateConv2DTransposeIntegerKernelSize ( self ) : <nl> height , width = 7 , 9 <nl> images = random_ops . random_uniform ( ( 5 , height , width , 4 ) ) <nl> mmm a / tensorflow / python / ops / lookup_ops . py <nl> ppp b / tensorflow / python / ops / lookup_ops . py <nl> <nl> import collections <nl> import functools <nl> <nl> + from tensorflow . python . framework import constant_op <nl> from tensorflow . 
python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import sparse_tensor <nl> def initialize ( self , table ) : <nl> name = scope ) <nl> # pylint : enable = protected - access <nl> ops . add_to_collection ( ops . GraphKeys . TABLE_INITIALIZERS , init_op ) <nl> - ops . add_to_collection ( ops . GraphKeys . ASSET_FILEPATHS , filename ) <nl> + # If the filename tensor is anything other than a string constant ( e . g . , if <nl> + # it is a placeholder ) then it does not make sense to track it as an asset . <nl> + if constant_op . is_constant ( filename ) : <nl> + ops . add_to_collection ( ops . GraphKeys . ASSET_FILEPATHS , filename ) <nl> return init_op <nl> <nl> <nl> mmm a / tensorflow / python / ops / resource_variable_ops . py <nl> ppp b / tensorflow / python / ops / resource_variable_ops . py <nl> def _init_from_args ( self , <nl> gen_resource_variable_ops . var_is_initialized_op ( self . _handle ) ) <nl> if initial_value is not None : <nl> with ops . name_scope ( " Assign " ) as n , ops . colocate_with ( self . _handle ) : <nl> - self . _initialize_op = gen_resource_variable_ops . assign_variable_op ( <nl> + self . _initializer_op = gen_resource_variable_ops . assign_variable_op ( <nl> self . _handle , self . _initial_value , name = n ) <nl> with ops . name_scope ( " Read " ) , ops . colocate_with ( self . _handle ) : <nl> # Manually assign reads to the handle ' s device to avoid log messages . <nl> def _init_from_proto ( self , variable_def , import_scope = None ) : <nl> self . _handle = g . as_graph_element ( <nl> ops . prepend_name_scope ( variable_def . variable_name , <nl> import_scope = import_scope ) ) <nl> - self . _initialize_op = g . as_graph_element ( <nl> + self . _initializer_op = g . as_graph_element ( <nl> ops . prepend_name_scope ( variable_def . initializer_name , <nl> import_scope = import_scope ) ) <nl> if variable_def . snapshot_name : <nl> def get_shape ( self ) : <nl> @ property <nl> def create ( self ) : <nl> " " " The op responsible for initializing this variable . " " " <nl> - return self . _initialize_op <nl> + return self . _initializer_op <nl> <nl> @ property <nl> def handle ( self ) : <nl> def _as_graph_element ( self ) : <nl> @ property <nl> def initializer ( self ) : <nl> " " " The op responsible for initializing this variable . " " " <nl> - return self . _initialize_op <nl> + return self . _initializer_op <nl> <nl> @ property <nl> def initial_value ( self ) : <nl> mmm a / tensorflow / python / ops / summary_ops . py <nl> ppp b / tensorflow / python / ops / summary_ops . py <nl> def _Collect ( val , collections , default_collections ) : <nl> ops . add_to_collection ( key , val ) <nl> <nl> <nl> + # TODO ( dandelion ) : As currently implemented , this op has several problems . <nl> + # The ' summary_description ' field is passed but not used by the kernel . <nl> + # The ' name ' field is used to create a scope and passed down via name = scope , <nl> + # but gen_logging_ops . _tensor_summary ignores this parameter and uses the <nl> + # kernel ' s op name as the name . This is ok because scope and the op name <nl> + # are identical , but it ' s probably worthwhile to fix . <nl> + # Finally , because of the complications above , this currently does not <nl> + # support the family = attribute added to other summaries in cl / 156791589 . <nl> def tensor_summary ( # pylint : disable = invalid - name <nl> name , <nl> tensor ,
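The asset-tracking guard added to lookup_ops.py above hinges on the new `constant_op.is_constant` helper introduced earlier in this diff: a vocabulary filename is recorded under `GraphKeys.ASSET_FILEPATHS` only when it is a graph constant, and a fed placeholder is skipped, which is exactly what `test_string_index_table_from_file_placeholder_filename` asserts. A minimal graph-mode sketch of the distinction, using the same internal modules as the tests (the "vocab.txt" value is illustrative only):

    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import dtypes
    from tensorflow.python.ops import array_ops

    const_filename = constant_op.constant("vocab.txt")       # op type "Const"
    fed_filename = array_ops.placeholder(dtypes.string, [])  # op type "Placeholder"

    assert constant_op.is_constant(const_filename)      # would be tracked as an asset
    assert not constant_op.is_constant(fed_filename)    # skipped: not a Const op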
mmm a / tensorflow / python / summary / summary . py <nl> ppp b / tensorflow / python / summary / summary . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + import contextlib as _contextlib <nl> import re as _re <nl> <nl> from google . protobuf import json_format as _json_format <nl> def _clean_tag ( name ) : <nl> return name <nl> <nl> <nl> - def scalar ( name , tensor , collections = None ) : <nl> + @ _contextlib . contextmanager <nl> + def _summary_scope ( name , family = None , default_name = None , values = None ) : <nl> + " " " Enters a scope used for the summary and yields both the name and tag . <nl> + <nl> + To ensure that the summary tag name is always unique , we create a name scope <nl> + based on ` name ` and use the full scope name in the tag . <nl> + <nl> + If ` family ` is set , then the tag name will be ' < family > / < scope_name > ' , where <nl> + ` scope_name ` is ` < outer_scope > / < family > / < name > ` . This ensures that ` family ` <nl> + is always the prefix of the tag ( and unmodified ) , while ensuring the scope <nl> + respects the outer scope from which this summary was created . <nl> + <nl> + Args : <nl> + name : A name for the generated summary node . <nl> + family : Optional ; if provided , used as the prefix of the summary tag name . <nl> + default_name : Optional ; if provided , used as default name of the summary . <nl> + values : Optional ; passed as ` values ` parameter to name_scope . <nl> + <nl> + Yields : <nl> + A tuple ` ( tag , scope ) ` , both of which are unique and should be used for the <nl> + tag and the scope for the summary to output . <nl> + " " " <nl> + name = _clean_tag ( name ) <nl> + family = _clean_tag ( family ) <nl> + # Use family name in the scope to ensure uniqueness of scope / tag . <nl> + scope_base_name = name if family is None else ' { } / { } ' . format ( family , name ) <nl> + with _ops . name_scope ( scope_base_name , default_name , values = values ) as scope : <nl> + if family is None : <nl> + tag = scope . rstrip ( ' / ' ) <nl> + else : <nl> + # Prefix our scope with family again so it displays in the right tab . <nl> + tag = ' { } / { } ' . format ( family , scope . rstrip ( ' / ' ) ) <nl> + # Note : tag is not 100 % unique if the user explicitly enters a scope with <nl> + # the same name as family , then later enters it again before summaries . <nl> + # This is very contrived though , and we opt here to let it be a runtime <nl> + # exception if tags do indeed collide . <nl> + yield ( tag , scope ) <nl> + <nl> + <nl>
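The tag and scope that `_summary_scope` yields are easiest to see with concrete values; the strings below are the ones `testScalarSummaryWithFamily` asserts later in this diff (assuming the summary module is imported as `summary_lib`, as in summary_test.py):

    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import ops
    from tensorflow.python.summary import summary as summary_lib

    with ops.name_scope('outer'):
        s = summary_lib.scalar('inner', constant_op.constant(7), family='family')

    # Scope, and therefore op name: 'outer/family/inner'.
    # Tag in the emitted Summary proto: 'family/outer/family/inner',
    # so 'family' remains the unmodified prefix of the tag.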
+ def scalar ( name , tensor , collections = None , family = None ) : <nl> " " " Outputs a ` Summary ` protocol buffer containing a single scalar value . <nl> <nl> The generated Summary has a Tensor . proto containing the input Tensor . <nl> def scalar ( name , tensor , collections = None ) : <nl> tensor : A real numeric Tensor containing a single value . <nl> collections : Optional list of graph collections keys . The new summary op is <nl> added to these collections . Defaults to ` [ GraphKeys . SUMMARIES ] ` . <nl> + family : Optional ; if provided , used as the prefix of the summary tag name , <nl> + which controls the tab name used for display on TensorBoard . <nl> <nl> Returns : <nl> A scalar ` Tensor ` of type ` string ` , which contains a ` Summary ` protobuf . <nl> def scalar ( name , tensor , collections = None ) : <nl> Raises : <nl> ValueError : If tensor has the wrong shape or type . <nl> " " " <nl> - name = _clean_tag ( name ) <nl> - with _ops . name_scope ( name , None , [ tensor ] ) as scope : <nl> + with _summary_scope ( name , family , values = [ tensor ] ) as ( tag , scope ) : <nl> # pylint : disable = protected - access <nl> - val = _gen_logging_ops . _scalar_summary ( <nl> - tags = scope . rstrip ( ' / ' ) , values = tensor , name = scope ) <nl> + val = _gen_logging_ops . _scalar_summary ( tags = tag , values = tensor , name = scope ) <nl> _collect ( val , collections , [ _ops . GraphKeys . SUMMARIES ] ) <nl> return val <nl> <nl> <nl> - def image ( name , tensor , max_outputs = 3 , collections = None ) : <nl> + def image ( name , tensor , max_outputs = 3 , collections = None , family = None ) : <nl> " " " Outputs a ` Summary ` protocol buffer with images . <nl> <nl> The summary has up to ` max_outputs ` summary values containing images . The <nl> def image ( name , tensor , max_outputs = 3 , collections = None ) : <nl> max_outputs : Max number of batch elements to generate images for . <nl> collections : Optional list of ops . GraphKeys . The collections to add the <nl> summary to . Defaults to [ _ops . GraphKeys . SUMMARIES ] <nl> + family : Optional ; if provided , used as the prefix of the summary tag name , <nl> + which controls the tab name used for display on TensorBoard . <nl> <nl> Returns : <nl> A scalar ` Tensor ` of type ` string ` . The serialized ` Summary ` protocol <nl> buffer . <nl> " " " <nl> - name = _clean_tag ( name ) <nl> - with _ops . name_scope ( name , None , [ tensor ] ) as scope : <nl> + with _summary_scope ( name , family , values = [ tensor ] ) as ( tag , scope ) : <nl> # pylint : disable = protected - access <nl> val = _gen_logging_ops . _image_summary ( <nl> - tag = scope . rstrip ( ' / ' ) , <nl> - tensor = tensor , <nl> - max_images = max_outputs , <nl> - name = scope ) <nl> + tag = tag , tensor = tensor , max_images = max_outputs , name = scope ) <nl> _collect ( val , collections , [ _ops . GraphKeys . SUMMARIES ] ) <nl> return val <nl> <nl> <nl> - def histogram ( name , values , collections = None ) : <nl> + def histogram ( name , values , collections = None , family = None ) : <nl> # pylint : disable = line - too - long <nl> " " " Outputs a ` Summary ` protocol buffer with a histogram . <nl> <nl> def histogram ( name , values , collections = None ) : <nl> build the histogram . <nl> collections : Optional list of graph collections keys . The new summary op is <nl> added to these collections . Defaults to ` [ GraphKeys . SUMMARIES ] ` . <nl> + family : Optional ; if provided , used as the prefix of the summary tag name , <nl> + which controls the tab name used for display on TensorBoard . <nl> <nl> Returns : <nl> A scalar ` Tensor ` of type ` string ` . The serialized ` Summary ` protocol <nl> buffer . <nl> " " " <nl> - # pylint : enable = line - too - long <nl> - name = _clean_tag ( name ) <nl> - with _ops . name_scope ( name , ' HistogramSummary ' , [ values ] ) as scope : <nl> + with _summary_scope ( name , family , values = [ values ] , <nl> + default_name = ' HistogramSummary ' ) as ( tag , scope ) : <nl> # pylint : disable = protected - access <nl> val = _gen_logging_ops . _histogram_summary ( <nl> - tag = scope . rstrip ( ' / ' ) , values = values , name = scope ) <nl> + tag = tag , values = values , name = scope ) <nl> _collect ( val , collections , [ _ops . GraphKeys .
SUMMARIES ] ) <nl> return val <nl> <nl> <nl> - def audio ( name , tensor , sample_rate , max_outputs = 3 , collections = None ) : <nl> + def audio ( name , tensor , sample_rate , max_outputs = 3 , collections = None , <nl> + family = None ) : <nl> # pylint : disable = line - too - long <nl> " " " Outputs a ` Summary ` protocol buffer with audio . <nl> <nl> def audio ( name , tensor , sample_rate , max_outputs = 3 , collections = None ) : <nl> max_outputs : Max number of batch elements to generate audio for . <nl> collections : Optional list of ops . GraphKeys . The collections to add the <nl> summary to . Defaults to [ _ops . GraphKeys . SUMMARIES ] <nl> + family : Optional ; if provided , used as the prefix of the summary tag name , <nl> + which controls the tab name used for display on TensorBoard . <nl> <nl> Returns : <nl> A scalar ` Tensor ` of type ` string ` . The serialized ` Summary ` protocol <nl> buffer . <nl> " " " <nl> - # pylint : enable = line - too - long <nl> - name = _clean_tag ( name ) <nl> - with _ops . name_scope ( name , None , [ tensor ] ) as scope : <nl> + with _summary_scope ( name , family = family , values = [ tensor ] ) as ( tag , scope ) : <nl> # pylint : disable = protected - access <nl> sample_rate = _ops . convert_to_tensor ( <nl> sample_rate , dtype = _dtypes . float32 , name = ' sample_rate ' ) <nl> val = _gen_logging_ops . _audio_summary_v2 ( <nl> - tag = scope . rstrip ( ' / ' ) , <nl> - tensor = tensor , <nl> - max_outputs = max_outputs , <nl> - sample_rate = sample_rate , <nl> - name = scope ) <nl> + tag = tag , tensor = tensor , max_outputs = max_outputs , <nl> + sample_rate = sample_rate , name = scope ) <nl> _collect ( val , collections , [ _ops . GraphKeys . SUMMARIES ] ) <nl> return val <nl> <nl> mmm a / tensorflow / python / summary / summary_test . py <nl> ppp b / tensorflow / python / summary / summary_test . py <nl> <nl> <nl> from six . moves import xrange # pylint : disable = redefined - builtin <nl> <nl> - from google . protobuf import json_format <nl> - <nl> from tensorflow . core . framework import summary_pb2 <nl> - from tensorflow . core . framework import types_pb2 <nl> from tensorflow . python . framework import constant_op <nl> + from tensorflow . python . framework import meta_graph <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import variables <nl> def testScalarSummary ( self ) : <nl> self . assertEqual ( values [ 0 ] . tag , ' outer / inner ' ) <nl> self . assertEqual ( values [ 0 ] . simple_value , 3 . 0 ) <nl> <nl> + def testScalarSummaryWithFamily ( self ) : <nl> + with self . test_session ( ) as s : <nl> + i = constant_op . constant ( 7 ) <nl> + with ops . name_scope ( ' outer ' ) : <nl> + im1 = summary_lib . scalar ( ' inner ' , i , family = ' family ' ) <nl> + self . assertEquals ( im1 . op . name , ' outer / family / inner ' ) <nl> + im2 = summary_lib . scalar ( ' inner ' , i , family = ' family ' ) <nl> + self . assertEquals ( im2 . op . name , ' outer / family / inner_1 ' ) <nl> + sm1 , sm2 = s . run ( [ im1 , im2 ] ) <nl> + summary = summary_pb2 . Summary ( ) <nl> + <nl> + summary . ParseFromString ( sm1 ) <nl> + values = summary . value <nl> + self . assertEqual ( len ( values ) , 1 ) <nl> + self . assertEqual ( values [ 0 ] . tag , ' family / outer / family / inner ' ) <nl> + self . assertEqual ( values [ 0 ] . simple_value , 7 . 0 ) <nl> + <nl> + summary . ParseFromString ( sm2 ) <nl> + values = summary . value <nl> + self .
assertEqual ( len ( values ) , 1 ) <nl> + self . assertEqual ( values [ 0 ] . tag , ' family / outer / family / inner_1 ' ) <nl> + self . assertEqual ( values [ 0 ] . simple_value , 7 . 0 ) <nl> + <nl> def testSummarizingVariable ( self ) : <nl> with self . test_session ( ) as s : <nl> c = constant_op . constant ( 42 . 0 ) <nl> def testImageSummary ( self ) : <nl> expected = sorted ( ' outer / inner / image / { } ' . format ( i ) for i in xrange ( 3 ) ) <nl> self . assertEqual ( tags , expected ) <nl> <nl> + def testImageSummaryWithFamily ( self ) : <nl> + with self . test_session ( ) as s : <nl> + i = array_ops . ones ( ( 5 , 2 , 3 , 1 ) ) <nl> + with ops . name_scope ( ' outer ' ) : <nl> + im = summary_lib . image ( ' inner ' , i , max_outputs = 3 , family = ' family ' ) <nl> + self . assertEquals ( im . op . name , ' outer / family / inner ' ) <nl> + summary_str = s . run ( im ) <nl> + summary = summary_pb2 . Summary ( ) <nl> + summary . ParseFromString ( summary_str ) <nl> + values = summary . value <nl> + self . assertEqual ( len ( values ) , 3 ) <nl> + tags = sorted ( v . tag for v in values ) <nl> + expected = sorted ( ' family / outer / family / inner / image / { } ' . format ( i ) <nl> + for i in xrange ( 3 ) ) <nl> + self . assertEqual ( tags , expected ) <nl> + <nl> def testHistogramSummary ( self ) : <nl> with self . test_session ( ) as s : <nl> i = array_ops . ones ( ( 5 , 4 , 4 , 3 ) ) <nl> def testHistogramSummary ( self ) : <nl> self . assertEqual ( len ( summary . value ) , 1 ) <nl> self . assertEqual ( summary . value [ 0 ] . tag , ' outer / inner ' ) <nl> <nl> + def testHistogramSummaryWithFamily ( self ) : <nl> + with self . test_session ( ) as s : <nl> + i = array_ops . ones ( ( 5 , 4 , 4 , 3 ) ) <nl> + with ops . name_scope ( ' outer ' ) : <nl> + summ_op = summary_lib . histogram ( ' inner ' , i , family = ' family ' ) <nl> + self . assertEquals ( summ_op . op . name , ' outer / family / inner ' ) <nl> + summary_str = s . run ( summ_op ) <nl> + summary = summary_pb2 . Summary ( ) <nl> + summary . ParseFromString ( summary_str ) <nl> + self . assertEqual ( len ( summary . value ) , 1 ) <nl> + self . assertEqual ( summary . value [ 0 ] . tag , ' family / outer / family / inner ' ) <nl> + <nl> + def testAudioSummary ( self ) : <nl> + with self . test_session ( ) as s : <nl> + i = array_ops . ones ( ( 5 , 3 , 4 ) ) <nl> + with ops . name_scope ( ' outer ' ) : <nl> + aud = summary_lib . audio ( ' inner ' , i , 0 . 2 , max_outputs = 3 ) <nl> + summary_str = s . run ( aud ) <nl> + summary = summary_pb2 . Summary ( ) <nl> + summary . ParseFromString ( summary_str ) <nl> + values = summary . value <nl> + self . assertEqual ( len ( values ) , 3 ) <nl> + tags = sorted ( v . tag for v in values ) <nl> + expected = sorted ( ' outer / inner / audio / { } ' . format ( i ) for i in xrange ( 3 ) ) <nl> + self . assertEqual ( tags , expected ) <nl> + <nl> + def testAudioSummaryWithFamily ( self ) : <nl> + with self . test_session ( ) as s : <nl> + i = array_ops . ones ( ( 5 , 3 , 4 ) ) <nl> + with ops . name_scope ( ' outer ' ) : <nl> + aud = summary_lib . audio ( ' inner ' , i , 0 . 2 , max_outputs = 3 , family = ' family ' ) <nl> + self . assertEquals ( aud . op . name , ' outer / family / inner ' ) <nl> + summary_str = s . run ( aud ) <nl> + summary = summary_pb2 . Summary ( ) <nl> + summary . ParseFromString ( summary_str ) <nl> + values = summary . value <nl> + self . assertEqual ( len ( values ) , 3 ) <nl> + tags = sorted ( v . 
tag for v in values ) <nl> + expected = sorted ( ' family / outer / family / inner / audio / { } ' . format ( i ) <nl> + for i in xrange ( 3 ) ) <nl> + self . assertEqual ( tags , expected ) <nl> + <nl> def testSummaryNameConversion ( self ) : <nl> c = constant_op . constant ( 3 ) <nl> s = summary_lib . scalar ( ' name with spaces ' , c ) <nl> def testSummaryNameConversion ( self ) : <nl> s3 = summary_lib . scalar ( ' / name / with / leading / slash ' , c ) <nl> self . assertEqual ( s3 . op . name , ' name / with / leading / slash ' ) <nl> <nl> + def testSummaryWithFamilyMetaGraphExport ( self ) : <nl> + with ops . name_scope ( ' outer ' ) : <nl> + i = constant_op . constant ( 11 ) <nl> + summ = summary_lib . scalar ( ' inner ' , i ) <nl> + self . assertEquals ( summ . op . name , ' outer / inner ' ) <nl> + summ_f = summary_lib . scalar ( ' inner ' , i , family = ' family ' ) <nl> + self . assertEquals ( summ_f . op . name , ' outer / family / inner ' ) <nl> + <nl> + metagraph_def , _ = meta_graph . export_scoped_meta_graph ( export_scope = ' outer ' ) <nl> + <nl> + with ops . Graph ( ) . as_default ( ) as g : <nl> + meta_graph . import_scoped_meta_graph ( metagraph_def , graph = g , <nl> + import_scope = ' new_outer ' ) <nl> + # The summaries should exist , but with outer scope renamed . <nl> + new_summ = g . get_tensor_by_name ( ' new_outer / inner : 0 ' ) <nl> + new_summ_f = g . get_tensor_by_name ( ' new_outer / family / inner : 0 ' ) <nl> + <nl> + # However , the tags are unaffected . <nl> + with self . test_session ( ) as s : <nl> + new_summ_str , new_summ_f_str = s . run ( [ new_summ , new_summ_f ] ) <nl> + new_summ_pb = summary_pb2 . Summary ( ) <nl> + new_summ_pb . ParseFromString ( new_summ_str ) <nl> + self . assertEquals ( ' outer / inner ' , new_summ_pb . value [ 0 ] . tag ) <nl> + new_summ_f_pb = summary_pb2 . Summary ( ) <nl> + new_summ_f_pb . ParseFromString ( new_summ_f_str ) <nl> + self . assertEquals ( ' family / outer / family / inner ' , <nl> + new_summ_f_pb . value [ 0 ] . tag ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl> mmm a / tensorflow / stream_executor / cuda / cuda_dnn . cc <nl> ppp b / tensorflow / stream_executor / cuda / cuda_dnn . cc <nl> DeviceMemory < T > CudnnSupport : : MaybeTransformLayout ( <nl> return ( * transform_scratch ) - > device_memory ( ) ; <nl> } <nl> <nl> + bool CudnnSupport : : DoTransformTensor ( Stream * stream , <nl> + const dnn : : BatchDescriptor & input_desc , <nl> + const DeviceMemory < float > & input_data , <nl> + const dnn : : BatchDescriptor & output_desc , <nl> + DeviceMemory < float > * output_data ) { <nl> + mutex_lock lock { dnn_handle_mutex_ } ; <nl> + float alpha = 1 . 0f ; <nl> + float beta = 0 . 0f ; <nl> + ScopedTensorDescriptor input_tensor_desc ( parent_ , input_desc , <nl> + CUDNN_DATA_FLOAT ) ; <nl> + ScopedTensorDescriptor output_tensor_desc ( parent_ , output_desc , <nl> + CUDNN_DATA_FLOAT ) ; <nl> + cudnnStatus_t status = wrap : : cudnnTransformTensor ( <nl> + parent_ , ToHandle ( dnn_handle_ ) , & alpha , input_tensor_desc . handle ( ) , <nl> + input_data . opaque ( ) , & beta , output_tensor_desc . handle ( ) , <nl> + output_data - > opaque ( ) ) ; <nl> + if ( status ! = CUDNN_STATUS_SUCCESS ) { <nl> + LOG ( ERROR ) < < " Could not transform a tensor from layout " <nl> + < < input_desc . ToShortString ( ) < < " to " <nl> + < < output_desc . 
ToShortString ( ) ; <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> template < class T > <nl> bool CudnnSupport : : DoConvolveBackwardDataImpl ( <nl> Stream * stream , <nl> mmm a / tensorflow / stream_executor / cuda / cuda_dnn . h <nl> ppp b / tensorflow / stream_executor / cuda / cuda_dnn . h <nl> class CudnnSupport : public dnn : : DnnSupport { <nl> std : : unique_ptr < TemporaryDeviceMemory < T > > * transform_scratch ) <nl> EXCLUSIVE_LOCKS_REQUIRED ( dnn_handle_mutex_ ) ; <nl> <nl> + bool DoTransformTensor ( Stream * stream , const dnn : : BatchDescriptor & input_desc , <nl> + const DeviceMemory < float > & input_data , <nl> + const dnn : : BatchDescriptor & output_desc , <nl> + DeviceMemory < float > * output_data ) override ; <nl> + <nl> template < class T > <nl> bool DoBatchNormalizationForwardImpl ( <nl> Stream * stream , dnn : : DataType data_type , const DeviceMemory < T > & x , <nl> mmm a / tensorflow / stream_executor / dnn . h <nl> ppp b / tensorflow / stream_executor / dnn . h <nl> class DnnSupport { <nl> return false ; <nl> } <nl> <nl> + / / Transforms a tensor into another tensor with a different layout and / or data <nl> + / / type . <nl> + / / <nl> + / / Arguments : <nl> + / / stream : pointer to the stream where this operation should be enqueued to . <nl> + / / input_desc : descriptor for the input tensor . <nl> + / / input_data : the device memory region that contains the input tensor . <nl> + / / output_desc : descriptor for the output tensor . <nl> + / / output_data : the device memory region that contains the output tensor . <nl> + virtual bool DoTransformTensor ( Stream * stream , <nl> + const dnn : : BatchDescriptor & input_desc , <nl> + const DeviceMemory < float > & input_data , <nl> + const dnn : : BatchDescriptor & output_desc , <nl> + DeviceMemory < float > * output_data ) { <nl> + return false ; <nl> + } <nl> + <nl> private : <nl> SE_DISALLOW_COPY_AND_ASSIGN ( DnnSupport ) ; <nl> } ; <nl> mmm a / tensorflow / stream_executor / stream . cc <nl> ppp b / tensorflow / stream_executor / stream . cc <nl> Stream & Stream : : ThenMemZero ( DeviceMemoryBase * location , uint64 size ) { <nl> return * this ; <nl> } <nl> <nl> - Stream & Stream : : ThenMemset32 ( DeviceMemoryBase * location , const uint32 & pattern , <nl> + Stream & Stream : : ThenMemset32 ( DeviceMemoryBase * location , uint32 pattern , <nl> uint64 size ) { <nl> VLOG_CALL ( PARAM ( location ) , PARAM ( pattern ) , PARAM ( size ) ) ; <nl> <nl> Stream & Stream : : ThenRnnBackward ( <nl> return * this ; <nl> } <nl> <nl> + Stream & Stream : : ThenTransformTensor ( const dnn : : BatchDescriptor & input_desc , <nl> + const DeviceMemory < float > & input_data , <nl> + const dnn : : BatchDescriptor & output_desc , <nl> + DeviceMemory < float > * output_data ) { <nl> + VLOG_CALL ( PARAM ( input_desc ) , PARAM ( input_data ) , PARAM ( output_desc ) , <nl> + PARAM ( output_data ) ) ; <nl> + if ( ok ( ) ) { <nl> + if ( dnn : : DnnSupport * dnn = parent_ - > AsDnn ( ) ) { <nl> + CheckError ( dnn - > DoTransformTensor ( this , input_desc , input_data , <nl> + output_desc , output_data ) ) ; <nl> + } else { <nl> + SetErrorAndLogNoDnnSupport ( ) ; <nl> + } <nl> + } <nl> + return * this ; <nl> + } <nl> + <nl> Stream & Stream : : ThenDoHostCallbackForTest ( std : : function < void ( ) > callback ) { <nl> VLOG_CALL ( PARAM ( callback ) ) ; <nl> <nl> mmm a / tensorflow / stream_executor / stream . h <nl> ppp b / tensorflow / stream_executor / stream . 
h <nl> class Stream { <nl> return ThenMemcpy ( gpu_dst , gpu_src , size ) ; <nl> } <nl> <nl> - / / Entrain onto the stream : a memset of zero at a GPU location of size <nl> - / / bytes . <nl> + / / Entrain onto the stream : a memset of zero at a GPU location of size bytes . <nl> / / The location must not be null . <nl> Stream & ThenMemZero ( DeviceMemoryBase * location , uint64 size ) ; <nl> <nl> - / / Entrain onto the stream : a memset of a 32 - bit pattern at a GPU location <nl> - / / of <nl> - / / size bytes , where bytes must be evenly 32 - bit sized ( i . e . evenly <nl> - / / divisible <nl> + / / Entrain onto the stream : a memset of a 32 - bit pattern at a GPU location of <nl> + / / size bytes , where bytes must be evenly 32 - bit sized ( i . e . evenly divisible <nl> / / by 4 ) . The location must not be null . <nl> - Stream & ThenMemset32 ( DeviceMemoryBase * location , const uint32 & pattern , <nl> - uint64 size ) ; <nl> + Stream & ThenMemset32 ( DeviceMemoryBase * location , uint32 pattern , uint64 size ) ; <nl> <nl> / / Enqueue a forward operation of the RNN model onto the stream . <nl> / / See DnnSupport : : DoRnnForward for more details . <nl> class Stream { <nl> DeviceMemory < uint8 > * reserve_space_data , <nl> ScratchAllocator * workspace_allocator ) ; <nl> <nl> + / / Enqueue onto the stream an operation that transforms a tensor . <nl> + / / See DnnSupport : : DoTransformTensor for more details . <nl> + Stream & ThenTransformTensor ( const dnn : : BatchDescriptor & input_desc , <nl> + const DeviceMemory < float > & input_data , <nl> + const dnn : : BatchDescriptor & output_desc , <nl> + DeviceMemory < float > * output_data ) ; <nl> + <nl> / / ( Synchronously ) block the host code waiting for the operations <nl> / / entrained on the stream ( enqueued to this point in program <nl> / / execution ) to complete . <nl> mmm a / tensorflow / tensorboard / BUILD <nl> ppp b / tensorflow / tensorboard / BUILD <nl> filegroup ( <nl> ] , <nl> ) <nl> <nl> + filegroup ( <nl> + name = " ts_web_library_default_typings " , <nl> + srcs = [ <nl> + # Ordering probably matters . <nl> + " @ com_microsoft_typescript / / : lib . es6 . d . ts " , <nl> + " @ io_angular_clutz / / : src / resources / closure . lib . d . ts " , <nl> + " clutz . d . ts " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> + <nl> filegroup ( <nl> name = " all_files " , <nl> srcs = glob ( <nl> new file mode 100644 <nl> index 0000000000000 . . 47cf307d2619a <nl> mmm / dev / null <nl> ppp b / tensorflow / tensorboard / clutz . d . ts <nl> <nl> + / / Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + / / <nl> + / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + / / you may not use this file except in compliance with the License . <nl> + / / You may obtain a copy of the License at <nl> + / / <nl> + / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + / / <nl> + / / Unless required by applicable law or agreed to in writing , software <nl> + / / distributed under the License is distributed on an " AS IS " BASIS , <nl> + / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + / / See the License for the specific language governing permissions and <nl> + / / limitations under the License . <nl> + <nl> + / / tslint : disable <nl> + declare namespace ಠ_ಠ .
clutz { <nl> + interface IteratorIterable < T > extends Iterator < T > , Iterable < T > { } <nl> + interface IIterableResult < T > extends IteratorResult < T > { } <nl> + } <nl> mmm a / tensorflow / tensorboard / components / tf_graph / tf - graph - scene . html <nl> ppp b / tensorflow / tensorboard / components / tf_graph / tf - graph - scene . html <nl> <nl> display : none ; <nl> } <nl> <nl> + / * Reference Edge * / <nl> + : : content . edge > path . edgeline . referenceedge { <nl> + stroke : # FFB74D ; <nl> + opacity : 1 ; <nl> + } <nl> + <nl> / * mmm Series Nodes mmm * / <nl> <nl> / * Hide the rect for a series ' annotation . * / <nl> <nl> fill : # 666 ; <nl> } <nl> <nl> - : : content . ref - arrowhead { <nl> + : : content . dataflow - arrowhead { <nl> fill : # bbb ; <nl> } <nl> <nl> + : : content . reference - arrowhead { <nl> + fill : # FFB74D ; <nl> + } <nl> + <nl> : : content . edge . control - dep { <nl> stroke - dasharray : 2 , 2 ; <nl> } <nl> <nl> < svg id = " svg " > <nl> < defs > <nl> <nl> - < ! - - Arrow heads for edge paths of different predefined sizes . - - > <nl> - < path id = " ref - arrowhead - path " d = " M 10 , 0 L 0 , 5 L 10 , 10 C 7 , 7 7 , 3 10 , 0 " / > <nl> - < marker class = " ref - arrowhead " id = " ref - arrowhead - small " viewBox = " 0 0 10 10 " markerWidth = " 10 " markerHeight = " 10 " <nl> - refX = " 8 " refY = " 5 " orient = " auto " markerUnits = " userSpaceOnUse " > <nl> - < use xlink : href = " # ref - arrowhead - path " / > <nl> + < ! - - Arrow heads for reference edge paths of different predefined sizes per color . - - > <nl> + < path id = " reference - arrowhead - path " d = " M 0 , 0 L 10 , 5 L 0 , 10 C 3 , 7 3 , 3 0 , 0 " / > <nl> + < marker class = " reference - arrowhead " id = " reference - arrowhead - small " viewBox = " 0 0 10 10 " markerWidth = " 5 " markerHeight = " 5 " <nl> + refX = " 2 " refY = " 5 " orient = " auto - start - reverse " markerUnits = " userSpaceOnUse " > <nl> + < use xlink : href = " # reference - arrowhead - path " / > <nl> + < / marker > <nl> + < marker class = " reference - arrowhead " id = " reference - arrowhead - medium " viewBox = " 0 0 10 10 " markerWidth = " 13 " markerHeight = " 13 " <nl> + refX = " 2 " refY = " 5 " orient = " auto - start - reverse " markerUnits = " userSpaceOnUse " > <nl> + < use xlink : href = " # reference - arrowhead - path " / > <nl> + < / marker > <nl> + < marker class = " reference - arrowhead " id = " reference - arrowhead - large " viewBox = " 0 0 10 10 " markerWidth = " 16 " markerHeight = " 16 " <nl> + refX = " 2 " refY = " 5 " orient = " auto - start - reverse " markerUnits = " userSpaceOnUse " > <nl> + < use xlink : href = " # reference - arrowhead - path " / > <nl> + < / marker > <nl> + < marker class = " reference - arrowhead " id = " reference - arrowhead - xlarge " viewBox = " 0 0 10 10 " markerWidth = " 20 " markerHeight = " 20 " <nl> + refX = " 2 " refY = " 5 " orient = " auto - start - reverse " markerUnits = " userSpaceOnUse " > <nl> + < use xlink : href = " # reference - arrowhead - path " / > <nl> + < / marker > <nl> + <nl> + < ! - - Arrow heads for dataflow edge paths of different predefined sizes per color . 
- - > <nl> + < path id = " dataflow - arrowhead - path " d = " M 0 , 0 L 10 , 5 L 0 , 10 C 3 , 7 3 , 3 0 , 0 " / > <nl> + < marker class = " dataflow - arrowhead " id = " dataflow - arrowhead - small " viewBox = " 0 0 10 10 " markerWidth = " 5 " markerHeight = " 5 " <nl> + refX = " 2 " refY = " 5 " orient = " auto - start - reverse " markerUnits = " userSpaceOnUse " > <nl> + < use xlink : href = " # dataflow - arrowhead - path " / > <nl> < / marker > <nl> - < marker class = " ref - arrowhead " id = " ref - arrowhead - medium " viewBox = " 0 0 10 10 " markerWidth = " 13 " markerHeight = " 13 " <nl> - refX = " 8 " refY = " 5 " orient = " auto " markerUnits = " userSpaceOnUse " > <nl> - < use xlink : href = " # ref - arrowhead - path " / > <nl> + < marker class = " dataflow - arrowhead " id = " dataflow - arrowhead - medium " viewBox = " 0 0 10 10 " markerWidth = " 13 " markerHeight = " 13 " <nl> + refX = " 2 " refY = " 5 " orient = " auto - start - reverse " markerUnits = " userSpaceOnUse " > <nl> + < use xlink : href = " # dataflow - arrowhead - path " / > <nl> < / marker > <nl> - < marker class = " ref - arrowhead " id = " ref - arrowhead - large " viewBox = " 0 0 10 10 " markerWidth = " 16 " markerHeight = " 16 " <nl> - refX = " 8 " refY = " 5 " orient = " auto " markerUnits = " userSpaceOnUse " > <nl> - < use xlink : href = " # ref - arrowhead - path " / > <nl> + < marker class = " dataflow - arrowhead " id = " dataflow - arrowhead - large " viewBox = " 0 0 10 10 " markerWidth = " 16 " markerHeight = " 16 " <nl> + refX = " 2 " refY = " 5 " orient = " auto - start - reverse " markerUnits = " userSpaceOnUse " > <nl> + < use xlink : href = " # dataflow - arrowhead - path " / > <nl> < / marker > <nl> - < marker class = " ref - arrowhead " id = " ref - arrowhead - xlarge " viewBox = " 0 0 10 10 " markerWidth = " 20 " markerHeight = " 20 " <nl> - refX = " 8 " refY = " 5 " orient = " auto " markerUnits = " userSpaceOnUse " > <nl> - < use xlink : href = " # ref - arrowhead - path " / > <nl> + < marker class = " dataflow - arrowhead " id = " dataflow - arrowhead - xlarge " viewBox = " 0 0 10 10 " markerWidth = " 20 " markerHeight = " 20 " <nl> + refX = " 2 " refY = " 5 " orient = " auto - start - reverse " markerUnits = " userSpaceOnUse " > <nl> + < use xlink : href = " # dataflow - arrowhead - path " / > <nl> < / marker > <nl> <nl> < ! - - Arrow head for annotation edge paths . - - > <nl> mmm a / tensorflow / tensorboard / components / tf_graph_board / tf - graph - board . html <nl> ppp b / tensorflow / tensorboard / components / tf_graph_board / tf - graph - board . 
html <nl> <nl> render - hierarchy = " { { renderHierarchy } } " <nl> devices - for - stats = " [ [ devicesForStats ] ] " <nl> stats = " [ [ stats ] ] " <nl> - selected - node = " { { _selectedNode } } " <nl> + selected - node = " { { selectedNode } } " <nl> highlighted - node = " { { _highlightedNode } } " <nl> color - by = " [ [ colorBy ] ] " <nl> color - by - params = " { { colorByParams } } " <nl> <nl> graph - hierarchy = " [ [ graphHierarchy ] ] " <nl> render - hierarchy = " [ [ renderHierarchy ] ] " <nl> graph = " [ [ graph ] ] " <nl> - selected - node = " { { _selectedNode } } " <nl> + selected - node = " { { selectedNode } } " <nl> selected - node - include = " { { _selectedNodeInclude } } " <nl> highlighted - node = " { { _highlightedNode } } " <nl> color - by = " [ [ colorBy ] ] " <nl> color - by - params = " [ [ colorByParams ] ] " <nl> debugger - data - enabled = " [ [ debuggerDataEnabled ] ] " <nl> are - health - pills - loading = " [ [ areHealthPillsLoading ] ] " <nl> + debugger - numeric - alerts = " [ [ debuggerNumericAlerts ] ] " <nl> node - names - to - health - pills = " [ [ nodeNamesToHealthPills ] ] " <nl> all - steps - mode - enabled = " { { allStepsModeEnabled } } " <nl> specific - health - pill - step = " { { specificHealthPillStep } } " <nl> <nl> debuggerDataEnabled : Boolean , <nl> / / Whether health pills are currently being loaded . <nl> areHealthPillsLoading : Boolean , <nl> + / / An array of alerts ( in chronological order ) provided by debugging libraries on when bad <nl> + / / values ( NaN , + / - Inf ) appear . <nl> + debuggerNumericAlerts : { <nl> + type : Array , <nl> + notify : true , <nl> + } , <nl> / / A mapping between node name to the tf . graph . scene . HealthPill to render . <nl> nodeNamesToHealthPills : Object , <nl> / / Whether the user can request health pills for individual steps from the server . This can be <nl> <nl> / / The step of health pills to show throughout the graph . <nl> healthPillStepIndex : Number , <nl> / / Private API : Data routing between child components . <nl> - _selectedNode : String , <nl> + selectedNode : { <nl> + type : String , <nl> + notify : true , <nl> + } , <nl> / / The enum value of the include property of the selected node . <nl> _selectedNodeInclude : Number , <nl> _highlightedNode : String <nl> <nl> ' node - toggle - extract ' : ' _nodeToggleExtract ' <nl> } , <nl> observers : [ <nl> - ' _updateNodeInclude ( _selectedNode ) ' <nl> + ' _updateNodeInclude ( selectedNode ) ' <nl> ] , <nl> / * * True if the progress is not complete yet ( < 100 % ) . * / <nl> _isNotComplete : function ( progress ) { <nl> <nl> node ? node . include : tf . graph . InclusionType . UNSPECIFIED ) ; <nl> } , <nl> _nodeToggleExtract : function ( ) { <nl> - this . _updateNodeInclude ( this . _selectedNode ) ; <nl> + this . _updateNodeInclude ( this . selectedNode ) ; <nl> } <nl> } ) ; <nl> < / script > <nl> mmm a / tensorflow / tensorboard / components / tf_graph_common / edge . ts <nl> ppp b / tensorflow / tensorboard / components / tf_graph_common / edge . ts <nl> function adjustPathPointsForMarker ( points : render . Point [ ] , <nl> let refX = + marker . attr ( ' refX ' ) ; <nl> let pathNode = < SVGPathElement > path . node ( ) ; <nl> if ( isStart ) { <nl> - let fractionStickingOut = refX / viewBoxWidth ; <nl> - let length = markerWidth * fractionStickingOut ; <nl> - let point = pathNode . getPointAtLength ( length ) ; <nl> + / / The edge flows downwards . Do not make the edge go the whole way , lest we <nl> + / / clobber the arrowhead . 
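+ / / ( With the reference arrowhead markers defined in tf - graph - scene . html , <nl> + / / refX = 2 and viewBoxWidth = 10 , so fractionStickingOut = 0 . 8 and a small <nl> + / / marker with markerWidth = 5 trims 5 * 0 . 8 = 4 units off the start of the path . )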
<nl> + const fractionStickingOut = 1 - refX / viewBoxWidth ; <nl> + const length = markerWidth * fractionStickingOut ; <nl> + const point = pathNode . getPointAtLength ( length ) ; <nl> / / Figure out how many segments of the path we need to remove in order <nl> / / to shorten the path . <nl> - let segIndex = pathNode . getPathSegAtLength ( length ) ; <nl> + const segIndex = pathNode . getPathSegAtLength ( length ) ; <nl> / / Update the very first segment . <nl> points [ segIndex - 1 ] = { x : point . x , y : point . y } ; <nl> / / Ignore every point before segIndex - 1 . <nl> return points . slice ( segIndex - 1 ) ; <nl> } else { <nl> - let fractionStickingOut = 1 - refX / viewBoxWidth ; <nl> - let length = pathNode . getTotalLength ( ) - markerWidth * fractionStickingOut ; <nl> - let point = pathNode . getPointAtLength ( length ) ; <nl> + / / The edge flows upwards . Do not make the edge go the whole way , lest we <nl> + / / clobber the arrowhead . <nl> + const fractionStickingOut = 1 - refX / viewBoxWidth ; <nl> + const length = <nl> + pathNode . getTotalLength ( ) - markerWidth * fractionStickingOut ; <nl> + const point = pathNode . getPointAtLength ( length ) ; <nl> / / Figure out how many segments of the path we need to remove in order <nl> / / to shorten the path . <nl> - let segIndex = pathNode . getPathSegAtLength ( length ) ; <nl> + const segIndex = pathNode . getPathSegAtLength ( length ) ; <nl> / / Update the very last segment . <nl> points [ segIndex ] = { x : point . x , y : point . y } ; <nl> / / Ignore every point after segIndex . <nl> export function appendEdge ( edgeGroup , d : EdgeData , <nl> if ( d . label & & d . label . structural ) { <nl> edgeClass + = ' ' + Class . Edge . STRUCTURAL ; <nl> } <nl> + if ( d . label & & d . label . metaedge & & d . label . metaedge . numRefEdges ) { <nl> + edgeClass + = ' ' + Class . Edge . REFERENCE_EDGE ; <nl> + } <nl> / / Give the path a unique id , which will be used to link <nl> / / the textPath ( edge label ) to this path . <nl> let pathId = ' path_ ' + getEdgeKey ( d ) ; <nl> export function appendEdge ( edgeGroup , d : EdgeData , <nl> . style ( ' stroke - width ' , strokeWidth + ' px ' ) ; <nl> <nl> / / Check if there is a reference edge and add an arrowhead of the right size . <nl> - if ( d . label & & d . label . metaedge & & d . label . metaedge . numRefEdges ) { <nl> - let markerId = ` ref - arrowhead - $ { arrowheadMap ( strokeWidth ) } ` ; <nl> - path . style ( ' marker - start ' , ` url ( # $ { markerId } ) ` ) ; <nl> - d . label . startMarkerId = markerId ; <nl> + if ( d . label & & d . label . metaedge ) { <nl> + if ( d . label . metaedge . numRefEdges ) { <nl> + / / We have a reference edge . <nl> + const markerId = ` reference - arrowhead - $ { arrowheadMap ( strokeWidth ) } ` ; <nl> + path . style ( ' marker - start ' , ` url ( # $ { markerId } ) ` ) ; <nl> + d . label . startMarkerId = markerId ; <nl> + } else { <nl> + / / We have a dataflow edge . <nl> + const markerId = ` dataflow - arrowhead - $ { arrowheadMap ( strokeWidth ) } ` ; <nl> + path . style ( ' marker - end ' , ` url ( # $ { markerId } ) ` ) ; <nl> + d . label . endMarkerId = markerId ; <nl> + } <nl> } <nl> <nl> if ( d . label = = null | | d . label . metaedge = = null ) { <nl> mmm a / tensorflow / tensorboard / components / tf_graph_common / render . ts <nl> ppp b / tensorflow / tensorboard / components / tf_graph_common / render . 
ts <nl> function extractHighDegrees ( renderNode : RenderGroupNodeInfo ) { <nl> } <nl> } ) ; <nl> } <nl> + <nl> + / * * <nl> + * Expands nodes in the graph until the desired node is visible . <nl> + * <nl> + * @ param scene The scene polymer component . <nl> + * @ param renderHierarchy The render hierarchy . <nl> + * @ param tensorName The name of a tensor . <nl> + * @ return A string that is the name of the node representing the given tensor . <nl> + * Note that the original tensor name might differ from this returned node <nl> + * name . For instance , the tensor name usually ends with an output slot <nl> + * index ( such as : 0 ) , while the node name lacks that suffix . <nl> + * / <nl> + export function expandUntilNodeIsShown ( <nl> + scene , renderHierarchy , tensorName : string ) { <nl> + const splitTensorName = tensorName . split ( ' / ' ) ; <nl> + <nl> + / / Graph names do not take into account the output slot . Strip it . <nl> + / / match ( ) returns null when the name lacks an output slot suffix . <nl> + const lastNodeNameMatch = <nl> + splitTensorName [ splitTensorName . length - 1 ] . match ( / ( . * ) : \ d + / ) ; <nl> + if ( lastNodeNameMatch & & lastNodeNameMatch . length = = = 2 ) { <nl> + splitTensorName [ splitTensorName . length - 1 ] = lastNodeNameMatch [ 1 ] ; <nl> + } <nl> + <nl> + let nodeName = splitTensorName [ 0 ] ; <nl> + let renderNode = renderHierarchy . getRenderNodeByName ( nodeName ) ; <nl> + for ( let i = 1 ; i < splitTensorName . length ; i + + ) { <nl> + / / Op nodes are not expandable . <nl> + if ( renderNode . node . type = = = tf . graph . NodeType . OP ) { <nl> + break ; <nl> + } <nl> + renderHierarchy . buildSubhierarchy ( nodeName ) ; <nl> + renderNode . expanded = true ; <nl> + scene . setNodeExpanded ( renderNode ) ; <nl> + nodeName + = ' / ' + splitTensorName [ i ] ; <nl> + renderNode = renderHierarchy . getRenderNodeByName ( nodeName ) ; <nl> + } <nl> + <nl> + return renderNode . node . name ; <nl> + } <nl> + <nl> } / / close module tf . graph . render <nl> mmm a / tensorflow / tensorboard / components / tf_graph_common / scene . ts <nl> ppp b / tensorflow / tensorboard / components / tf_graph_common / scene . ts <nl> module tf . graph . scene { <nl> CONTAINER : ' edges ' , <nl> GROUP : ' edge ' , <nl> LINE : ' edgeline ' , <nl> + REFERENCE_EDGE : ' referenceedge ' , <nl> REF_LINE : ' refline ' , <nl> STRUCTURAL : ' structural ' <nl> } , <nl> mmm a / tensorflow / tensorboard / components / tf_graph_common / util . ts <nl> ppp b / tensorflow / tensorboard / components / tf_graph_common / util . ts <nl> module tf . graph . util { <nl> <nl> return _ . object ( queryParams ) ; <nl> } <nl> + <nl> + / * * <nl> + * Given a timestamp in microseconds , return a human - friendly string denoting <nl> + * how long ago the timestamp was . <nl> + * / <nl> + export function computeHumanFriendlyTime ( timeInMicroseconds : number ) { <nl> + var timeDifferenceInMs = <nl> + + ( new Date ( ) ) - + ( new Date ( timeInMicroseconds / 1e3 ) ) ; <nl> + if ( timeDifferenceInMs < 30000 ) { <nl> + return ' just now ' ; <nl> + } else if ( timeDifferenceInMs < 60000 ) { <nl> + return Math . floor ( timeDifferenceInMs / 1000 ) + ' seconds ago ' ; <nl> + } else if ( timeDifferenceInMs < 120000 ) { <nl> + return ' a minute ago ' ; <nl> + } else if ( timeDifferenceInMs < 3600000 ) { <nl> + return Math . floor ( timeDifferenceInMs / 60000 ) + ' minutes ago ' ; <nl> + } else if ( Math . floor ( timeDifferenceInMs / 3600000 ) = = 1 ) { <nl> + return ' an hour ago ' ; <nl> + } else if ( timeDifferenceInMs < 86400000 ) { <nl> + return Math .
floor ( timeDifferenceInMs / 3600000 ) + ' hours ago ' ; <nl> + } else if ( timeDifferenceInMs < 172800000 ) { <nl> + return ' yesterday ' ; <nl> + } <nl> + return Math . floor ( timeDifferenceInMs / 86400000 ) + ' days ago ' ; <nl> + } <nl> } <nl> mmm a / tensorflow / tensorboard / components / tf_graph_controls / tf - graph - controls . html <nl> ppp b / tensorflow / tensorboard / components / tf_graph_controls / tf - graph - controls . html <nl> <nl> # color - by - radio - group paper - radio - button { <nl> pointer - events : auto ! important ; <nl> } <nl> + <nl> + . legend - clarifier { <nl> + color : # 266236 ; <nl> + cursor : help ; <nl> + display : inline - block ; <nl> + text - decoration : underline ; <nl> + } <nl> + <nl> + . legend - clarifier paper - tooltip { <nl> + width : 150px ; <nl> + } <nl> < / style > <nl> < svg width = " 0 " height = " 0 " > <nl> < defs > <nl> <nl> disabled = " [ [ ! _xlaClustersProvided ( renderHierarchy ) ] ] " > <nl> XLA Cluster <nl> < / paper - radio - button > <nl> - < paper - tooltip for = " xla - cluster - radio - button " position = " right " > <nl> + < paper - tooltip animation - delay = " 0 " for = " xla - cluster - radio - button " position = " right " > <nl> Coloring by XLA cluster is only enabled if at least 1 op specifies an XLA cluster . <nl> < / paper - tooltip > <nl> <nl> <nl> disabled = " [ [ ! stats ] ] " > <nl> Compute time <nl> < / paper - radio - button > <nl> - < paper - tooltip for = " compute - time - radio - button " position = " right " > <nl> + < paper - tooltip animation - delay = " 0 " for = " compute - time - radio - button " position = " right " > <nl> Coloring by compute time is only enabled if the RunMetadata proto is passed to the <nl> FileWriter when a specific session is run . <nl> < / paper - tooltip > <nl> <nl> disabled = " [ [ ! stats ] ] " > <nl> Memory <nl> < / paper - radio - button > <nl> - < paper - tooltip for = " memory - radio - button " position = " right " > <nl> + < paper - tooltip animation - delay = " 0 " for = " memory - radio - button " position = " right " > <nl> Coloring by memory is only enabled if the RunMetadata proto is passed to the <nl> FileWriter when a specific session is run . <nl> < / paper - tooltip > <nl> <nl> rx = " 5 " ry = " 5 " / > <nl> < / svg > <nl> < / td > <nl> - < td > Namespace < span class = " gray " > * < / span > < / td > <nl> + < td > <nl> + Namespace < span class = " gray " > * < / span > <nl> + < div class = " legend - clarifier " > <nl> + < span > ? < / span > <nl> + < paper - tooltip animation - delay = " 0 " position = " right " > <nl> + Encapsulates a set of nodes . Namespace is hierarchical and based on scope . <nl> + < / paper - tooltip > <nl> + < / div > <nl> + < / td > <nl> < / tr > <nl> < tr > <nl> < td > <nl> <nl> y = " 6 " / > <nl> < / svg > <nl> < / td > <nl> - < td > OpNode < / td > <nl> + < td > <nl> + OpNode <nl> + < div class = " legend - clarifier " > <nl> + < span > ? < / span > <nl> + < paper - tooltip animation - delay = " 0 " position = " right " > <nl> + Node that performs an operation . These nodes cannot expand . <nl> + < / paper - tooltip > <nl> + < / div > <nl> + < / td > <nl> < / tr > <nl> < tr > <nl> < td > <nl> <nl> stroke = " # ccc " x = " 2 " y = " 2 " / > <nl> < / svg > <nl> < / td > <nl> - < td > Unconnected series < span class = " gray " > * < / span > < / td > <nl> + < td > <nl> + Unconnected series < span class = " gray " > * < / span > <nl> + < div class = " legend - clarifier " > <nl> + < span > ? 
< / span > <nl> + < paper - tooltip animation - delay = " 0 " position = " right " > <nl> + Sequence of numbered nodes that are not connected to each other . <nl> + < / paper - tooltip > <nl> + < / div > <nl> + < / td > <nl> < / tr > <nl> < tr > <nl> < td > <nl> <nl> fill = " white " stroke = " # ccc " x = " 2 " y = " 2 " / > <nl> < / svg > <nl> < / td > <nl> - < td > Connected series < span class = " gray " > * < / span > < / td > <nl> + < td > <nl> + Connected series < span class = " gray " > * < / span > <nl> + < div class = " legend - clarifier " > <nl> + < span > ? < / span > <nl> + < paper - tooltip animation - delay = " 0 " position = " right " > <nl> + Sequence of numbered nodes that are connected to each other . <nl> + < / paper - tooltip > <nl> + < / div > <nl> + < / td > <nl> < / tr > <nl> < tr > <nl> < td > <nl> <nl> < circle fill = " white " stroke = " # 848484 " cx = " 10 " cy = " 10 " r = " 5 " / > <nl> < / svg > <nl> < / td > <nl> - < td > Constant < / td > <nl> + < td > <nl> + Constant <nl> + < div class = " legend - clarifier " > <nl> + < span > ? < / span > <nl> + < paper - tooltip animation - delay = " 0 " position = " right " > <nl> + Node that outputs a constant value . <nl> + < / paper - tooltip > <nl> + < / div > <nl> + < / td > <nl> < / tr > <nl> < tr > <nl> < td > <nl> <nl> < use x = " 0 " y = " 0 " class = " image - icon " xlink : href = " # summary - icon " / > <nl> < / svg > <nl> < / td > <nl> - < td > Summary < / td > <nl> + < td > <nl> + Summary <nl> + < div class = " legend - clarifier " > <nl> + < span > ? < / span > <nl> + < paper - tooltip animation - delay = " 0 " position = " right " > <nl> + Node that collects data for visualization within TensorBoard . <nl> + < / paper - tooltip > <nl> + < / div > <nl> + < / td > <nl> < / tr > <nl> < tr > <nl> < td > <nl> < svg class = " icon " height = " 15px " <nl> preserveAspectRatio = " xMinYMid meet " viewBox = " 0 0 15 15 " > <nl> < defs > <nl> - < marker id = " ref - arrowhead - legend " fill = " # bbb " markerWidth = " 10 " <nl> - markerHeight = " 10 " refX = " 1 " refY = " 5 " orient = " auto " > <nl> - < path d = " M 10 , 0 L 0 , 5 L 10 , 10 C 7 , 7 7 , 3 10 , 0 " / > <nl> + < marker id = " dataflow - arrowhead - legend " fill = " # bbb " markerWidth = " 10 " <nl> + markerHeight = " 10 " refX = " 9 " refY = " 5 " orient = " auto - start - reverse " > <nl> + < path d = " M 0 , 0 L 10 , 5 L 0 , 10 C 3 , 7 3 , 3 0 , 0 " / > <nl> < / marker > <nl> < / defs > <nl> - < path stroke = " # bbb " <nl> - d = " M2 9 l 23 0 " stroke - linecap = " round " / > <nl> + < path marker - end = " url ( # dataflow - arrowhead - legend ) " <nl> + stroke = " # bbb " d = " M2 9 l 29 0 " <nl> + stroke - linecap = " round " / > <nl> < / svg > <nl> < / td > <nl> - < td > Dataflow edge < / td > <nl> + < td > <nl> + Dataflow edge <nl> + < div class = " legend - clarifier " > <nl> + < span > ? < / span > <nl> + < paper - tooltip animation - delay = " 0 " position = " right " > <nl> + Edge showing the data flow between operations . Edges flow upwards unless arrowheads specify otherwise . 
<nl> + < / paper - tooltip > <nl> + < / div > <nl> + < / td > <nl> < / tr > <nl> < tr > <nl> < td > <nl> < svg class = " icon " height = " 15px " <nl> preserveAspectRatio = " xMinYMid meet " viewBox = " 0 0 15 15 " > <nl> < path stroke = " # bbb " <nl> - d = " M2 9 l 23 0 " stroke - linecap = " round " stroke - dasharray = " 2 , 2 " / > <nl> + d = " M2 9 l 29 0 " stroke - linecap = " round " stroke - dasharray = " 2 , 2 " / > <nl> < / svg > <nl> < / td > <nl> - < td > Control dependency edge < / td > <nl> + < td > <nl> + Control dependency edge <nl> + < div class = " legend - clarifier " > <nl> + < span > ? < / span > <nl> + < paper - tooltip animation - delay = " 0 " position = " right " > <nl> + Edge showing the control dependency between operations . <nl> + < / paper - tooltip > <nl> + < / div > <nl> + < / td > <nl> < / tr > <nl> < tr > <nl> < td > <nl> < svg class = " icon " height = " 15px " <nl> preserveAspectRatio = " xMinYMid meet " viewBox = " 0 0 15 15 " > <nl> - < path marker - start = " url ( # ref - arrowhead - legend ) " <nl> - stroke = " # bbb " d = " M2 9 l 23 0 " <nl> - stroke - linecap = " round " / > <nl> + < defs > <nl> + < marker id = " reference - arrowhead - legend " fill = " # FFB74D " markerWidth = " 10 " <nl> + markerHeight = " 10 " refX = " 9 " refY = " 5 " orient = " auto - start - reverse " > <nl> + < path d = " M 0 , 0 L 10 , 5 L 0 , 10 C 3 , 7 3 , 3 0 , 0 " / > <nl> + < / marker > <nl> + < / defs > <nl> + < path marker - end = " url ( # reference - arrowhead - legend ) " <nl> + stroke = " # FFB74D " d = " M2 9 l 29 0 " <nl> + stroke - linecap = " round " / > <nl> < / svg > <nl> < / td > <nl> - < td > Reference edge < / td > <nl> + < td > <nl> + Reference edge <nl> + < div class = " legend - clarifier " > <nl> + < span > ? < / span > <nl> + < paper - tooltip animation - delay = " 0 " position = " right " > <nl> + Edge showing that the outgoing operation node can mutate the incoming tensor . <nl> + < / paper - tooltip > <nl> + < / div > <nl> + < / td > <nl> < / tr > <nl> < / table > <nl> < / div > <nl> mmm a / tensorflow / tensorboard / components / tf_graph_dashboard / tf - graph - dashboard . html <nl> ppp b / tensorflow / tensorboard / components / tf_graph_dashboard / tf - graph - dashboard . html <nl> <nl> progress = " [ [ _progress ] ] " <nl> debugger - data - enabled = " [ [ debuggerDataEnabled ] ] " <nl> are - health - pills - loading = " [ [ _areHealthPillsLoading ] ] " <nl> + debugger - numeric - alerts = " [ [ _debuggerNumericAlerts ] ] " <nl> node - names - to - health - pills = " [ [ _nodeNamesToHealthPills ] ] " <nl> all - steps - mode - enabled = " { { allStepsModeEnabled } } " <nl> specific - health - pill - step = " { { specificHealthPillStep } } " <nl> health - pill - step - index = " [ [ _healthPillStepIndex ] ] " <nl> render - hierarchy = " { { _renderHierarchy } } " <nl> + selected - node = " { { _selectedNode } } " <nl> stats = " [ [ _stats ] ] " <nl> > < / tf - graph - board > <nl> < / div > <nl> <nl> allStepsModeEnabled : Boolean , <nl> specificHealthPillStep : { type : Number , value : 0 } , <nl> healthPillsToggledOn : { type : Boolean , value : true , observer : ' _healthPillsToggledOnChanged ' } , <nl> + _selectedNode : Object , <nl> _isAttached : Boolean , <nl> / / Whether this dashboard is initialized . This dashboard should only be initialized once . <nl> _initialized : Boolean , <nl> / / Whether health pills are currently being loaded , in which case we may want to say show a <nl> / / spinner . 
<nl> _areHealthPillsLoading : Boolean , <nl> + / / An array of alerts ( in chronological order ) provided by debugging libraries on when bad <nl> + / / values ( NaN , + / - Inf ) appear . <nl> + _debuggerNumericAlerts : { <nl> + type : Array , <nl> + value : [ ] , <nl> + notify : true , <nl> + } , <nl> / / Maps the names of nodes to an array of health pills ( HealthPillDatums ) . <nl> _nodeNamesToHealthPills : { <nl> type : Object , <nl> <nl> ' node - toggle - expand ' : ' _handleNodeToggleExpand ' , <nl> } , <nl> observers : [ <nl> - ' _maybeFetchHealthPills ( allStepsModeEnabled , specificHealthPillStep ) ' , <nl> + ' _maybeFetchHealthPills ( allStepsModeEnabled , specificHealthPillStep , _selectedNode ) ' , <nl> ' _maybeInitializeDashboard ( backend , _isAttached ) ' , <nl> ] , <nl> attached : function ( ) { <nl> <nl> } , <nl> _requestHealthPills : function ( ) { <nl> this . set ( ' _areHealthPillsLoading ' , true ) ; <nl> - const requestId = + + this . _healthPillRequestId ; <nl> + var requestId = + + this . _healthPillRequestId ; <nl> <nl> if ( this . _healthPillStepRequestTimerId ! = = null ) { <nl> / / A request for health pills is already scheduled to be initiated . Clear it , and schedule a <nl> <nl> return ; <nl> } <nl> <nl> - const specificStep = this . allStepsModeEnabled ? this . specificHealthPillStep : undefined ; <nl> - this . backend . healthPills ( this . _renderHierarchy . getNamesOfRenderedOps ( ) , specificStep ) . then ( <nl> + var specificStep = this . allStepsModeEnabled ? this . specificHealthPillStep : undefined ; <nl> + <nl> + var healthPillsPromise = this . backend . healthPills ( <nl> + this . _renderHierarchy . getNamesOfRenderedOps ( ) , specificStep ) ; <nl> + var alertsPromise = this . backend . debuggerNumericsAlerts ( ) ; <nl> + <nl> + Promise . all ( [ healthPillsPromise , alertsPromise ] ) . then ( <nl> function ( result ) { <nl> + var healthPillsResult = result [ 0 ] ; <nl> + var alertsResult = result [ 1 ] ; <nl> + <nl> if ( ! this . healthPillsToggledOn ) { <nl> / / The user has opted to hide health pills via the toggle button . <nl> return ; <nl> <nl> / / Set the index for which step to show for the health pills . By default , show the last step . <nl> / / A precondition we assume ( that Tensorboard ' s reservoir sampling guarantees ) is that all <nl> / / node names should be mapped to the same number of steps . <nl> - for ( let nodeName in result ) { <nl> - this . set ( ' _healthPillStepIndex ' , result [ nodeName ] . length - 1 ) ; <nl> + for ( var nodeName in healthPillsResult ) { <nl> + this . set ( ' _healthPillStepIndex ' , healthPillsResult [ nodeName ] . length - 1 ) ; <nl> break ; <nl> } <nl> <nl> - this . set ( ' _nodeNamesToHealthPills ' , result ) ; <nl> + this . set ( ' _debuggerNumericAlerts ' , alertsResult ) ; <nl> + this . set ( ' _nodeNamesToHealthPills ' , healthPillsResult ) ; <nl> this . set ( ' _areHealthPillsLoading ' , false ) ; <nl> this . set ( ' _healthPillStepRequestTimerId ' , null ) ; <nl> } . bind ( this ) ) ; <nl> new file mode 100644 <nl> index 0000000000000 . . c74aacd84e7b0 <nl> mmm / dev / null <nl> ppp b / tensorflow / tensorboard / components / tf_graph_debugger_data_card / BUILD <nl> <nl> + package ( default_visibility = [ " / / tensorflow : internal " ] ) <nl> + <nl> + load ( " / / tensorflow / tensorboard : defs . bzl " , " tensorboard_webcomponent_library " ) <nl> + load ( " / / tensorflow / tensorboard : web . bzl " , " ts_web_library " ) <nl> + <nl> + licenses ( [ " notice " ] ) # Apache 2 . 
0 <nl> + <nl> + ts_web_library ( <nl> + name = " tf_graph_debugger_data_card " , <nl> + srcs = [ <nl> + " tf - graph - debugger - data - card . html " , <nl> + ] , <nl> + path = " / tf - graph - debugger - data - card " , <nl> + deps = [ <nl> + " / / tensorflow / tensorboard / components / tf_dashboard_common " , <nl> + " / / tensorflow / tensorboard / components / tf_graph_common " , <nl> + " / / tensorflow / tensorboard / components / tf_imports : polymer " , <nl> + " @ org_polymer_paper_slider " , <nl> + " @ org_polymer_paper_spinner " , <nl> + ] , <nl> + ) <nl> + <nl> + tensorboard_webcomponent_library ( <nl> + name = " legacy " , <nl> + srcs = [ " : tf_graph_debugger_data_card " ] , <nl> + destdir = " tf - graph - debugger - data - card " , <nl> + deps = [ <nl> + " / / tensorflow / tensorboard / components / tf_dashboard_common : legacy " , <nl> + " / / tensorflow / tensorboard / components / tf_graph_common : legacy " , <nl> + " / / third_party / javascript / polymer / v1 / iron - collapse : lib " , <nl> + " / / third_party / javascript / polymer / v1 / iron - list : lib " , <nl> + " / / third_party / javascript / polymer / v1 / paper - icon - button : lib " , <nl> + " / / third_party / javascript / polymer / v1 / paper - item : lib " , <nl> + " / / third_party / javascript / polymer / v1 / paper - slider : lib " , <nl> + " / / third_party / javascript / polymer / v1 / paper - spinner : lib " , <nl> + " / / third_party / javascript / polymer / v1 / polymer : lib " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " all_files " , <nl> + srcs = glob ( [ " * * " ] ) , <nl> + tags = [ " notsan " ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . 2395a671c3c22 <nl> mmm / dev / null <nl> ppp b / tensorflow / tensorboard / components / tf_graph_debugger_data_card / demo / BUILD <nl> <nl> + package ( default_visibility = [ " / / tensorflow : internal " ] ) <nl> + <nl> + load ( " / / tensorflow / tensorboard : web . bzl " , " ts_web_library " ) <nl> + <nl> + licenses ( [ " notice " ] ) # Apache 2 . 0 <nl> + <nl> + # bazel run / / third_party / tensorflow / tensorboard / components / tf_graph_debugger_data_card / demo <nl> + ts_web_library ( <nl> + name = " demo " , <nl> + srcs = [ " index . html " ] + glob ( [ " data / * * " ] ) , <nl> + path = " / tf - graph - debugger - data - card / demo " , <nl> + deps = [ <nl> + " / / tensorflow / tensorboard / components / tf_graph_debugger_data_card " , <nl> + " / / tensorflow / tensorboard / components / tf_imports : webcomponentsjs " , <nl> + " @ org_polymer_iron_demo_helpers " , <nl> + " @ org_polymer_paper_styles " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " all_files " , <nl> + srcs = glob ( [ " * * " ] ) , <nl> + tags = [ " notsan " ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . 934e4f86a8372 <nl> mmm / dev / null <nl> ppp b / tensorflow / tensorboard / components / tf_graph_debugger_data_card / demo / index . html <nl> <nl> + < ! doctype html > <nl> + < ! - - <nl> + @ license <nl> + Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + - - > <nl> + < script src = " . . / . . / webcomponentsjs / webcomponents - lite . min . js " > < / script > <nl> + < link rel = " import " href = " . . / tf - graph - debugger - data - card . html " > <nl> + < link rel = " import " href = " . . / . . / iron - demo - helpers / demo - snippet . html " > <nl> + < title > TF Graph Debugger Data Card Demo < / title > <nl> + < style > <nl> + # demo - container { <nl> + border : 2px solid # 808080 ; <nl> + width : 1000px ; <nl> + height : 600px ; <nl> + } <nl> + < / style > <nl> + < demo - snippet > <nl> + < template > <nl> + < div id = ' demo - container ' > <nl> + < ! - - This simple demo starts up a page with a health pill legend . - - > <nl> + < tf - graph - debugger - data - card > < / tf - graph - debugger - data - card > <nl> + < / div > <nl> + < / template > <nl> + < / demo - snippet > <nl> new file mode 100644 <nl> index 0000000000000 . . 6cc99a327cb3f <nl> mmm / dev / null <nl> ppp b / tensorflow / tensorboard / components / tf_graph_debugger_data_card / tf - graph - debugger - data - card . html <nl> <nl> + < ! - - <nl> + @ license <nl> + Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + - - > <nl> + <nl> + < link rel = " import " href = " . . / polymer / polymer . html " > <nl> + < link rel = " import " href = " . . / paper - slider / paper - slider . html " > <nl> + < link rel = " import " href = " . . / paper - spinner / paper - spinner - lite . html " > <nl> + < link rel = " import " href = " . . / tf - graph - common / tf - graph - common . html " > <nl> + <nl> + < dom - module id = " tf - graph - debugger - data - card " > <nl> + < template > <nl> + < style > <nl> + : host { <nl> + font - size : 12px ; <nl> + margin : 0 ; <nl> + padding : 0 ; <nl> + display : block ; <nl> + } <nl> + <nl> + h2 { <nl> + padding : 0 ; <nl> + text - align : center ; <nl> + margin : 0 ; <nl> + } <nl> + <nl> + . health - pill - legend { <nl> + padding : 15px ; <nl> + } <nl> + <nl> + . health - pill - legend h2 { <nl> + text - align : left ; <nl> + } <nl> + <nl> + . health - pill - entry { <nl> + margin : 10px 10px 10px 0 ; <nl> + } <nl> + <nl> + . health - pill - entry . color - preview { <nl> + width : 26px ; <nl> + height : 26px ; <nl> + border - radius : 3px ; <nl> + display : inline - block ; <nl> + margin : 0 10px 0 0 ; <nl> + } <nl> + <nl> + . health - pill - entry . color - label , . health - pill - entry .
tensor - count { <nl> + color : # 777 ; <nl> + display : inline - block ; <nl> + height : 26px ; <nl> + font - size : 22px ; <nl> + line - height : 26px ; <nl> + vertical - align : top ; <nl> + } <nl> + <nl> + . health - pill - entry . tensor - count { <nl> + float : right ; <nl> + } <nl> + <nl> + # health - pill - step - slider { <nl> + width : 100 % ; <nl> + margin : 0 0 0 - 15px ; <nl> + / * 31 comes from adding a padding of 15px from both sides of the paper - slider , subtracting <nl> + * 1px so that the slider width aligns with the image ( the last slider marker takes up 1px ) , <nl> + * and adding 2px to account for a border of 1px on both sides of the image . 30 - 1 + 2 . <nl> + * Apparently , the paper - slider lacks a mixin for those padding values . * / <nl> + width : calc ( 100 % + 31px ) ; <nl> + } <nl> + <nl> + # health - pills - loading - spinner { <nl> + width : 20px ; <nl> + height : 20px ; <nl> + vertical - align : top ; <nl> + } <nl> + <nl> + # health - pill - step - number - input { <nl> + text - align : center ; <nl> + vertical - align : top ; <nl> + } <nl> + <nl> + # numeric - alerts - table - container { <nl> + max - height : 400px ; <nl> + overflow - x : hidden ; <nl> + overflow - y : auto ; <nl> + } <nl> + <nl> + # numeric - alerts - table { <nl> + text - align : left ; <nl> + } <nl> + <nl> + # numeric - alerts - table td { <nl> + vertical - align : top ; <nl> + } <nl> + <nl> + # numeric - alerts - table . first - offense - td { <nl> + display : inline - block ; <nl> + } <nl> + <nl> + . first - offense - td { <nl> + width : 80px ; <nl> + } <nl> + <nl> + . tensor - device - td { <nl> + max - width : 140px ; <nl> + word - wrap : break - word ; <nl> + } <nl> + <nl> + . tensor - section - within - table { <nl> + color : # 266236 ; <nl> + cursor : pointer ; <nl> + opacity : 0 . 8 ; <nl> + text - decoration : underline ; <nl> + } <nl> + <nl> + . tensor - section - within - table : hover { <nl> + opacity : 1 ; <nl> + } <nl> + <nl> + . device - section - within - table { <nl> + color : # 666 ; <nl> + } <nl> + <nl> + . mini - health - pill { <nl> + width : 130px ; <nl> + } <nl> + <nl> + . mini - health - pill > div { <nl> + height : 100 % ; <nl> + width : 60px ; <nl> + border - radius : 3px ; <nl> + } <nl> + <nl> + # event - counts - th { <nl> + padding : 0 0 0 10px ; <nl> + } <nl> + <nl> + . negative - inf - mini - health - pill - section { <nl> + background : rgb ( 255 , 141 , 0 ) ; <nl> + width : 20px ; <nl> + } <nl> + <nl> + . positive - inf - mini - health - pill - section { <nl> + background : rgb ( 0 , 62 , 212 ) ; <nl> + width : 20px ; <nl> + } <nl> + <nl> + . nan - mini - health - pill - section { <nl> + background : rgb ( 204 , 47 , 44 ) ; <nl> + width : 20px ; <nl> + } <nl> + <nl> + . negative - inf - mini - health - pill - section , <nl> + . positive - inf - mini - health - pill - section , <nl> + . nan - mini - health - pill - section { <nl> + color : # fff ; <nl> + display : inline - block ; <nl> + height : 100 % ; <nl> + line - height : 20px ; <nl> + margin : 0 0 0 10px ; <nl> + text - align : center ; <nl> + } <nl> + <nl> + . no - numeric - alerts - notification { <nl> + margin : 0 ; <nl> + } <nl> + < / style > <nl> + < paper - material elevation = " 1 " class = " card health - pill - legend " > <nl> + < div class = " title " > <nl> + Enable all ( not just sampled ) steps . Requires slow disk read . 
<nl> + < / div > <nl> + < paper - toggle - button id = " enableAllStepsModeToggle " checked = " { { allStepsModeEnabled } } " > <nl> + < / paper - toggle - button > <nl> + < h2 > <nl> + Step of Health Pills : <nl> + < template is = " dom - if " if = " [ [ allStepsModeEnabled ] ] " > <nl> + < input type = " number " <nl> + id = " health - pill - step - number - input " <nl> + min = " 0 " <nl> + max = " [ [ _biggestStepEverSeen ] ] " <nl> + value = " { { specificHealthPillStep : : input } } " > <nl> + < / template > <nl> + < template is = " dom - if " if = " [ [ ! allStepsModeEnabled ] ] " > <nl> + [ [ _currentStepDisplayValue ] ] <nl> + < / template > <nl> + < paper - spinner - lite active <nl> + hidden $ = [ [ ! areHealthPillsLoading ] ] <nl> + id = " health - pills - loading - spinner " > < / paper - spinner - lite > <nl> + < / h2 > <nl> + < template is = " dom - if " if = " [ [ allStepsModeEnabled ] ] " > <nl> + < paper - slider <nl> + id = " health - pill - step - slider " <nl> + immediate - value = " { { specificHealthPillStep } } " <nl> + max = " [ [ _biggestStepEverSeen ] ] " <nl> + snaps <nl> + step = " 1 " <nl> + value = " { { specificHealthPillStep } } " > < / paper - slider > <nl> + < / template > <nl> + < template is = " dom - if " if = " [ [ ! allStepsModeEnabled ] ] " > <nl> + < template is = " dom - if " if = " [ [ _maxStepIndex ] ] " > <nl> + < paper - slider <nl> + id = " health - pill - step - slider " <nl> + immediate - value = " { { healthPillStepIndex } } " <nl> + max = " [ [ _maxStepIndex ] ] " <nl> + snaps <nl> + step = " 1 " <nl> + value = " { { healthPillStepIndex } } " > < / paper - slider > <nl> + < / template > <nl> + < / template > <nl> + < h2 > <nl> + Health Pill <nl> + < template is = " dom - if " if = " [ [ healthPillValuesForSelectedNode ] ] " > <nl> + Counts for Selected Node <nl> + < / template > <nl> + < template is = " dom - if " if = " [ [ ! healthPillValuesForSelectedNode ] ] " > <nl> + Legend <nl> + < / template > <nl> + < / h2 > <nl> + < template is = " dom - repeat " items = " [ [ healthPillEntries ] ] " > <nl> + < div class = " health - pill - entry " > <nl> + < div class = " color - preview " style = " background : [ [ item . background_color ] ] " > < / div > <nl> + < div class = " color - label " > [ [ item . label ] ] < / div > <nl> + < div class = " tensor - count " > <nl> + [ [ _computeTensorCountString ( healthPillValuesForSelectedNode , index ) ] ] <nl> + < / div > <nl> + < / div > <nl> + < / template > <nl> + < div hidden $ = " [ [ ! _hasDebuggerNumericAlerts ( debuggerNumericAlerts ) ] ] " > <nl> + < h2 id = " numeric - alerts - header " > Numeric Alerts < / h2 > <nl> + < p > <nl> + Alerts are sorted from top to bottom by increasing timestamp . <nl> + < / p > <nl> + < div id = ' numeric - alerts - table - container ' > <nl> + < table id = " numeric - alerts - table " > <nl> + < thead > <nl> + < tr > <nl> + < th > First Offense < / th > <nl> + < th > Tensor ( Device ) < / th > <nl> + < th id = ' event - counts - th ' > Event Counts < / th > <nl> + < / tr > <nl> + < / thead > <nl> + < tbody id = " numeric - alerts - body " > <nl> + < / tbody > <nl> + < / table > <nl> + < / div > <nl> + < / div > <nl> + < template is = " dom - if " if = " [ [ ! _hasDebuggerNumericAlerts ( debuggerNumericAlerts ) ] ] " > <nl> + < p class = " no - numeric - alerts - notification " > <nl> + No numeric alerts so far . That is likely good . Alerts indicate the presence of NaN <nl> + or ( + / - ) Infinity values , which may be concerning . 
<nl> + < / p > <nl> + < / template > <nl> + < / paper - material > <nl> + < / template > <nl> + < script > <nl> + " use strict " ; <nl> + <nl> + ( function ( ) { <nl> + Polymer ( { <nl> + is : ' tf - graph - debugger - data - card ' , <nl> + <nl> + properties : { <nl> + renderHierarchy : Object , <nl> + debuggerNumericAlerts : { <nl> + type : Array , <nl> + notify : true , <nl> + } , <nl> + nodeNamesToHealthPills : Object , <nl> + healthPillStepIndex : { <nl> + type : Number , <nl> + notify : true , <nl> + } , <nl> + / / Only relevant if we are in all steps mode , in which case the user may want to view health <nl> + / / pills for a specific step . <nl> + specificHealthPillStep : { <nl> + type : Number , <nl> + value : 0 , <nl> + notify : true , <nl> + } , <nl> + / / Two - ways <nl> + selectedNode : { <nl> + type : String , <nl> + notify : true <nl> + } , <nl> + highlightedNode : { <nl> + type : String , <nl> + notify : true <nl> + } , <nl> + / / The enum value of the include property of the selected node . <nl> + selectedNodeInclude : { <nl> + type : Number , <nl> + notify : true <nl> + } , <nl> + / / Whether health pills are currently being loaded , in which case we show a spinner ( and the <nl> + / / current health pills shown might be out of date ) . <nl> + areHealthPillsLoading : Boolean , <nl> + healthPillEntries : { <nl> + type : Array , <nl> + value : tf . graph . scene . healthPillEntries , <nl> + readOnly : true , <nl> + } , <nl> + healthPillValuesForSelectedNode : { <nl> + type : Array , <nl> + computed : ' _computeHealthPillForNode ( nodeNamesToHealthPills , healthPillStepIndex , selectedNode , allStepsModeEnabled , areHealthPillsLoading ) ' , <nl> + } , <nl> + / / When all - steps mode is enabled , the user can request health pills for any step . In this <nl> + / / mode , Tensorboard makes a request every time the user drags the slider to a different step . <nl> + allStepsModeEnabled : { <nl> + type : Boolean , <nl> + notify : true , <nl> + } , <nl> + / / The biggest step value ever seen . Used to determine what steps of health pills to let the <nl> + / / user fetch in all steps mode . <nl> + _biggestStepEverSeen : { <nl> + type : Number , <nl> + computed : ' _computeBiggestStepEverSeen ( nodeNamesToHealthPills ) ' , <nl> + } , <nl> + _maxStepIndex : { <nl> + type : Number , <nl> + computed : ' _computeMaxStepIndex ( nodeNamesToHealthPills ) ' , <nl> + } , <nl> + _currentStepDisplayValue : { <nl> + type : String , <nl> + computed : ' _computeCurrentStepDisplayValue ( nodeNamesToHealthPills , healthPillStepIndex , allStepsModeEnabled , specificHealthPillStep , areHealthPillsLoading ) ' , <nl> + } , <nl> + } , <nl> + observers : [ <nl> + ' _updateAlertsList ( debuggerNumericAlerts ) ' , <nl> + ] , <nl> + ready : function ( ) { <nl> + var mainContainer = document . getElementById ( ' mainContainer ' ) ; <nl> + var scrollbarContainer = document . querySelector ( ' tf - dashboard - layout . scrollbar ' ) ; <nl> + if ( mainContainer & & scrollbarContainer ) { <nl> + / / If this component is being used inside of TensorBoard ' s dashboard layout , it may easily <nl> + / / cause the dashboard layout element to overflow , giving the user 2 scroll bars . Prevent <nl> + / / that by hiding whatever content overflows - the user will have to expand the viewport to <nl> + / / use this debugging card . <nl> + mainContainer . style . overflow = ' hidden ' ; <nl> + scrollbarContainer . style . 
overflow = ' hidden ' ; <nl> + } <nl> + } , <nl> + _healthPillsAvailable : function ( debuggerDataEnabled , nodeNamesToHealthPills ) { <nl> + / / So long as there is a mapping ( even if empty ) from node name to health pills , show the <nl> + / / legend and slider . We do that because , even if no health pills exist at the current step , <nl> + / / the user may desire to change steps , and the slider must show for the user to do that . <nl> + return debuggerDataEnabled & & nodeNamesToHealthPills ; <nl> + } , <nl> + _computeTensorCountString : function ( healthPillValuesForSelectedNode , valueIndex ) { <nl> + if ( ! healthPillValuesForSelectedNode ) { <nl> + / / No health pill data is available . <nl> + return ' ' ; <nl> + } <nl> + <nl> + return healthPillValuesForSelectedNode [ valueIndex ] . toFixed ( 0 ) ; <nl> + } , <nl> + _computeHealthPillForNode : function ( <nl> + nodeNamesToHealthPills , healthPillStepIndex , selectedNode , allStepsModeEnabled , areHealthPillsLoading ) { <nl> + if ( areHealthPillsLoading ) { <nl> + / / Health pills are loading . Do not render data that is out of date . <nl> + return null ; <nl> + } <nl> + <nl> + if ( ! selectedNode ) { <nl> + / / No node is selected . <nl> + return null ; <nl> + } <nl> + <nl> + const healthPills = nodeNamesToHealthPills [ selectedNode ] ; <nl> + if ( ! healthPills ) { <nl> + / / This node lacks a health pill . <nl> + return null ; <nl> + } <nl> + <nl> + / / If all steps mode is enabled , we use the first health pill in the list because the JSON <nl> + / / response from the server is a mapping between node name and a list of 1 health pill . <nl> + const healthPill = healthPills [ allStepsModeEnabled ? 0 : healthPillStepIndex ] ; <nl> + if ( ! healthPill ) { <nl> + / / This node lacks a health pill at the current step . <nl> + return null ; <nl> + } <nl> + <nl> + / / The health pill count values start at 2 . Each health pill contains 6 values . <nl> + return healthPill . value . slice ( 2 , 8 ) ; <nl> + } , <nl> + _computeCurrentStepDisplayValue : function ( <nl> + nodeNamesToHealthPills , <nl> + healthPillStepIndex , <nl> + allStepsModeEnabled , <nl> + specificHealthPillStep , <nl> + areHealthPillsLoading ) { <nl> + if ( allStepsModeEnabled ) { <nl> + / / The user seeks health pills for specific step from the server . <nl> + return specificHealthPillStep . toFixed ( 0 ) ; <nl> + } <nl> + <nl> + if ( areHealthPillsLoading ) { <nl> + / / The current step is undefined . <nl> + return 0 ; <nl> + } <nl> + <nl> + for ( let nodeName in nodeNamesToHealthPills ) { <nl> + / / All nodes have the same number of steps stored , so only examine 1 node . We cannot <nl> + / / directly index into the nodeNamesToHealthPills object because we do not have a key . <nl> + / / If all steps mode is enabled , we only have 1 step to show . <nl> + return nodeNamesToHealthPills [ nodeName ] [ healthPillStepIndex ] . step . toFixed ( 0 ) ; <nl> + } <nl> + <nl> + / / The current step could not be computed . <nl> + return 0 ; <nl> + } , <nl> + _computeBiggestStepEverSeen : function ( nodeNamesToHealthPills ) { <nl> + for ( let nodeName in nodeNamesToHealthPills ) { <nl> + / / All nodes have the same number of steps stored , so only examine 1 node . <nl> + / / The index is 1 less than the count . Tensorboard backend logic guarantees that the length <nl> + / / of the array will be greater than 1 . <nl> + var healthPills = nodeNamesToHealthPills [ nodeName ] ; <nl> + return Math . max ( this . _biggestStepEverSeen , healthPills [ healthPills . length - 1 ] . 
step ) ; <nl> + } <nl> + <nl> + / / No steps seen so far . Default to 0 . <nl> + return this . _biggestStepEverSeen | | 0 ; <nl> + } , <nl> + _computeMaxStepIndex : function ( nodeNamesToHealthPills ) { <nl> + for ( let nodeName in nodeNamesToHealthPills ) { <nl> + / / All nodes have the same number of steps stored , so only examine 1 node . <nl> + / / The index is 1 less than the count . Tensorboard backend logic guarantees that the length <nl> + / / of the array will be greater than 1 . <nl> + return nodeNamesToHealthPills [ nodeName ] . length - 1 ; <nl> + } <nl> + <nl> + / / Return a falsy value . The slider should be hidden . <nl> + return 0 ; <nl> + } , <nl> + _hasDebuggerNumericAlerts : function ( debuggerNumericAlerts ) { <nl> + return debuggerNumericAlerts & & debuggerNumericAlerts . length ; <nl> + } , <nl> + _updateAlertsList : function ( debuggerNumericAlerts ) { <nl> + var alertBody = this . $ $ ( ' # numeric - alerts - body ' ) ; <nl> + if ( ! alertBody ) { <nl> + return ; <nl> + } <nl> + <nl> + alertBody . innerHTML = ' ' ; <nl> + <nl> + for ( var i = 0 ; i < debuggerNumericAlerts . length ; i + + ) { <nl> + var alert = debuggerNumericAlerts [ i ] ; <nl> + var tableRow = document . createElement ( ' tr ' ) ; <nl> + <nl> + var timestampTd = document . createElement ( ' td ' ) ; <nl> + timestampTd . innerHTML = tf . graph . util . computeHumanFriendlyTime ( alert . first_timestamp ) ; <nl> + timestampTd . classList . add ( ' first - offense - td ' ) ; <nl> + tableRow . appendChild ( timestampTd ) ; <nl> + <nl> + var tensorDeviceTd = document . createElement ( ' td ' ) ; <nl> + tensorDeviceTd . classList . add ( ' tensor - device - td ' ) <nl> + <nl> + var tensorSection = document . createElement ( ' div ' ) ; <nl> + tensorSection . classList . add ( ' tensor - section - within - table ' ) ; <nl> + tensorSection . innerHTML = alert . tensor_name ; <nl> + this . _addOpExpansionListener ( tensorSection , alert . tensor_name ) ; <nl> + tensorDeviceTd . appendChild ( tensorSection ) ; <nl> + <nl> + var deviceSection = document . createElement ( ' div ' ) ; <nl> + deviceSection . classList . add ( ' device - section - within - table ' ) ; <nl> + deviceSection . innerHTML = ' ( ' + alert . device_name + ' ) ' ; <nl> + tensorDeviceTd . appendChild ( deviceSection ) ; <nl> + tableRow . appendChild ( tensorDeviceTd ) ; <nl> + <nl> + var miniHealthPill = document . createElement ( ' div ' ) ; <nl> + miniHealthPill . classList . add ( ' mini - health - pill ' ) ; <nl> + <nl> + var miniHealthPillTd = document . createElement ( ' td ' ) ; <nl> + miniHealthPillTd . classList . add ( ' mini - health - pill - td ' ) ; <nl> + miniHealthPillTd . appendChild ( miniHealthPill ) ; <nl> + tableRow . appendChild ( miniHealthPillTd ) ; <nl> + <nl> + if ( alert . neg_inf_event_count ) { <nl> + var negativeInfCountSection = document . createElement ( ' div ' ) ; <nl> + negativeInfCountSection . classList . add ( ' negative - inf - mini - health - pill - section ' ) ; <nl> + negativeInfCountSection . innerHTML = alert . neg_inf_event_count ; <nl> + negativeInfCountSection . setAttribute ( <nl> + ' title ' , alert . neg_inf_event_count + ' events with - ∞ ' ) <nl> + miniHealthPill . appendChild ( negativeInfCountSection ) ; <nl> + } <nl> + <nl> + if ( alert . pos_inf_event_count ) { <nl> + var positiveInfCountSection = document . createElement ( ' div ' ) ; <nl> + positiveInfCountSection . classList . add ( ' positive - inf - mini - health - pill - section ' ) ; <nl> + positiveInfCountSection . 
innerHTML = alert . pos_inf_event_count ; <nl> + positiveInfCountSection . setAttribute ( <nl> + ' title ' , alert . pos_inf_event_count + ' events with + ∞ ' ) <nl> + miniHealthPill . appendChild ( positiveInfCountSection ) ; <nl> + } <nl> + <nl> + if ( alert . nan_event_count ) { <nl> + var nanCountSection = document . createElement ( ' div ' ) ; <nl> + nanCountSection . classList . add ( ' nan - mini - health - pill - section ' ) ; <nl> + nanCountSection . innerHTML = alert . nan_event_count ; <nl> + nanCountSection . setAttribute ( <nl> + ' title ' , alert . nan_event_count + ' events with NaN ' ) <nl> + miniHealthPill . appendChild ( nanCountSection ) ; <nl> + } <nl> + <nl> + Polymer . dom ( alertBody ) . appendChild ( tableRow ) ; <nl> + } <nl> + } , <nl> + / / Adds a listener to an element , so that when that element is clicked , the tensor with <nl> + / / tensorName expands . <nl> + _addOpExpansionListener : function ( clickableElement , tensorName ) { <nl> + clickableElement . addEventListener ( ' click ' , ( ) = > { <nl> + / / When the user clicks on a tensor name , expand all nodes until the user can see the <nl> + / / associated node . <nl> + var nameOfNodeToSelect = tf . graph . render . expandUntilNodeIsShown ( <nl> + document . getElementById ( ' scene ' ) , this . renderHierarchy , tensorName ) ; <nl> + <nl> + / / Store the current scroll of the graph info card . Node selection alters that scroll , and <nl> + / / we restore the scroll later . <nl> + var previousScrollFromBottom ; <nl> + var graphInfoCard = document . querySelector ( ' tf - graph - info # graph - info ' ) ; <nl> + if ( graphInfoCard ) { <nl> + previousScrollFromBottom = graphInfoCard . scrollHeight - graphInfoCard . scrollTop ; <nl> + } <nl> + <nl> + / / Update the selected node within graph logic . <nl> + var previousSelectedNode = this . selectedNode ; <nl> + this . set ( ' selectedNode ' , nameOfNodeToSelect ) ; <nl> + <nl> + / / Scroll the graph info card back down if necessary so that user can see the alerts section <nl> + / / again . Selecting the node causes the info card to scroll to the top , which may mean the <nl> + / / user no longer sees the list of alerts . <nl> + var scrollToOriginalLocation = ( ) = > { <nl> + graphInfoCard . scrollTop = graphInfoCard . scrollHeight - previousScrollFromBottom ; <nl> + } ; <nl> + if ( graphInfoCard ) { <nl> + / / This component is used within an info card . Restore the original scroll . <nl> + if ( previousSelectedNode ) { <nl> + / / The card for the selected node has already opened . Immediately restore the scroll . <nl> + scrollToOriginalLocation ( ) ; <nl> + } else { <nl> + / / Give some time for the DOM of the info card to be created before scrolling down . <nl> + window . 
setTimeout ( scrollToOriginalLocation , 20 ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> + } , <nl> + } ) ; <nl> + } ) ( ) ; <nl> + < / script > <nl> + < / dom - module > <nl> mmm a / tensorflow / tensorboard / components / tf_graph_info / BUILD <nl> ppp b / tensorflow / tensorboard / components / tf_graph_info / BUILD <nl> ts_web_library ( <nl> deps = [ <nl> " / / tensorflow / tensorboard / components / tf_dashboard_common " , <nl> " / / tensorflow / tensorboard / components / tf_graph_common " , <nl> + " / / tensorflow / tensorboard / components / tf_graph_debugger_data_card " , <nl> " / / tensorflow / tensorboard / components / tf_imports : polymer " , <nl> " @ org_polymer_iron_collapse " , <nl> " @ org_polymer_iron_list " , <nl> tensorboard_webcomponent_library ( <nl> deps = [ <nl> " / / tensorflow / tensorboard / components / tf_dashboard_common : legacy " , <nl> " / / tensorflow / tensorboard / components / tf_graph_common : legacy " , <nl> + " / / tensorflow / tensorboard / components / tf_graph_debugger_data_card : legacy " , <nl> " / / third_party / javascript / polymer / v1 / iron - collapse : lib " , <nl> " / / third_party / javascript / polymer / v1 / iron - list : lib " , <nl> " / / third_party / javascript / polymer / v1 / paper - icon - button : lib " , <nl> mmm a / tensorflow / tensorboard / components / tf_graph_info / tf - graph - info . html <nl> ppp b / tensorflow / tensorboard / components / tf_graph_info / tf - graph - info . html <nl> <nl> < link rel = " import " href = " . . / paper - slider / paper - slider . html " > <nl> < link rel = " import " href = " . . / paper - spinner / paper - spinner - lite . html " > <nl> < link rel = " import " href = " . . / tf - graph - common / tf - graph - common . html " > <nl> + < link rel = " import " href = " . . / tf - graph - debugger - data - card / tf - graph - debugger - data - card . html " > <nl> < link rel = " import " href = " tf - node - info . html " > <nl> <nl> < dom - module id = " tf - graph - info " > <nl> <nl> margin : 0 ; <nl> padding : 0 ; <nl> display : block ; <nl> + max - height : 650px ; <nl> + overflow - x : hidden ; <nl> + overflow - y : auto ; <nl> } <nl> <nl> h2 { <nl> <nl> text - align : center ; <nl> margin : 0 ; <nl> } <nl> - <nl> - . health - pill - legend { <nl> - padding : 15px ; <nl> - } <nl> - <nl> - . health - pill - legend h2 { <nl> - text - align : left ; <nl> - } <nl> - <nl> - . health - pill - entry { <nl> - margin : 10px 10px 10px 0 ; <nl> - } <nl> - <nl> - . health - pill - entry . color - preview { <nl> - width : 26px ; <nl> - height : 26px ; <nl> - border - radius : 3px ; <nl> - display : inline - block ; <nl> - margin : 0 10px 0 0 ; <nl> - } <nl> - <nl> - . health - pill - entry . color - label , . health - pill - entry . tensor - count { <nl> - color : # 777 ; <nl> - display : inline - block ; <nl> - height : 26px ; <nl> - font - size : 22px ; <nl> - line - height : 26px ; <nl> - vertical - align : top ; <nl> - } <nl> - <nl> - . health - pill - entry . tensor - count { <nl> - float : right ; <nl> - } <nl> - <nl> - # health - pill - step - slider { <nl> - width : 100 % ; <nl> - margin : 0 0 0 - 15px ; <nl> - / * 31 comes from adding a padding of 15px from both sides of the paper - slider , subtracting <nl> - * 1px so that the slider width aligns with the image ( the last slider marker takes up 1px ) , <nl> - * and adding 2px to account for a border of 1px on both sides of the image . 30 - 1 + 2 . <nl> - * Apparently , the paper - slider lacks a mixin for those padding values . 
* / <nl> - width : calc ( 100 % + 31px ) ; <nl> - } <nl> - <nl> - # health - pills - loading - spinner { <nl> - width : 20px ; <nl> - height : 20px ; <nl> - vertical - align : top ; <nl> - } <nl> - <nl> - # health - pill - step - number - input { <nl> - text - align : center ; <nl> - vertical - align : top ; <nl> - } <nl> < / style > <nl> < template is = " dom - if " if = " { { selectedNode } } " > <nl> < paper - material elevation = " 1 " class = " card " > <nl> <nl> < / paper - material > <nl> < / template > <nl> < template is = " dom - if " if = " [ [ _healthPillsAvailable ( debuggerDataEnabled , nodeNamesToHealthPills ) ] ] " > <nl> - < paper - material elevation = " 1 " class = " card health - pill - legend " > <nl> - < div class = " title " > <nl> - Enable all ( not just sampled ) steps . Requires slow disk read . <nl> - < / div > <nl> - < paper - toggle - button id = " enableAllStepsModeToggle " checked = " { { allStepsModeEnabled } } " > <nl> - < / paper - toggle - button > <nl> - < h2 > <nl> - Step of Health Pills : <nl> - < template is = " dom - if " if = " [ [ allStepsModeEnabled ] ] " > <nl> - < input type = " number " <nl> - id = " health - pill - step - number - input " <nl> - min = " 0 " <nl> - max = " [ [ _biggestStepEverSeen ] ] " <nl> - value = " { { specificHealthPillStep : : input } } " > <nl> - < / template > <nl> - < template is = " dom - if " if = " [ [ ! allStepsModeEnabled ] ] " > <nl> - [ [ _currentStepDisplayValue ] ] <nl> - < / template > <nl> - <nl> - < paper - spinner - lite active <nl> - hidden $ = [ [ ! areHealthPillsLoading ] ] <nl> - id = " health - pills - loading - spinner " > < / paper - spinner - lite > <nl> - < / h2 > <nl> - < template is = " dom - if " if = " [ [ allStepsModeEnabled ] ] " > <nl> - < paper - slider <nl> - id = " health - pill - step - slider " <nl> - immediate - value = " { { specificHealthPillStep } } " <nl> - max = " [ [ _biggestStepEverSeen ] ] " <nl> - snaps <nl> - step = " 1 " <nl> - value = " { { specificHealthPillStep } } " > < / paper - slider > <nl> - < / template > <nl> - < template is = " dom - if " if = " [ [ ! allStepsModeEnabled ] ] " > <nl> - < template is = " dom - if " if = " [ [ _maxStepIndex ] ] " > <nl> - < paper - slider <nl> - id = " health - pill - step - slider " <nl> - immediate - value = " { { healthPillStepIndex } } " <nl> - max = " [ [ _maxStepIndex ] ] " <nl> - snaps <nl> - step = " 1 " <nl> - value = " { { healthPillStepIndex } } " > < / paper - slider > <nl> - < / template > <nl> - < / template > <nl> - < h2 > <nl> - Health Pill <nl> - < template is = " dom - if " if = " [ [ healthPillValuesForSelectedNode ] ] " > <nl> - Counts for Selected Node <nl> - < / template > <nl> - < template is = " dom - if " if = " [ [ ! healthPillValuesForSelectedNode ] ] " > <nl> - Legend <nl> - < / template > <nl> - < / h2 > <nl> - < template is = " dom - repeat " items = " [ [ healthPillEntries ] ] " > <nl> - < div class = " health - pill - entry " > <nl> - < div class = " color - preview " style = " background : [ [ item . background_color ] ] " > < / div > <nl> - < div class = " color - label " > [ [ item . 
label ] ] < / div > <nl> - < div class = " tensor - count " > <nl> - [ [ _computeTensorCountString ( healthPillValuesForSelectedNode , index ) ] ] <nl> - < / div > <nl> - < / div > <nl> - < / template > <nl> - < / paper - material > <nl> + < tf - graph - debugger - data - card render - hierarchy = " [ [ renderHierarchy ] ] " <nl> + debugger - numeric - alerts = " [ [ debuggerNumericAlerts ] ] " <nl> + node - names - to - health - pills = " [ [ nodeNamesToHealthPills ] ] " <nl> + selected - node = " { { selectedNode } } " <nl> + highlighted - node = " { { highlightedNode } } " <nl> + are - health - pills - loading = " [ [ areHealthPillsLoading ] ] " <nl> + all - steps - mode - enabled = " { { allStepsModeEnabled } } " <nl> + specific - health - pill - step = " { { specificHealthPillStep } } " <nl> + health - pill - step - index = " { { healthPillStepIndex } } " > <nl> + < / tf - graph - debugger - data - card > <nl> < / template > <nl> < / template > <nl> < script > <nl> < h2 > <nl> type : Number , <nl> notify : true , <nl> } , <nl> - / / Only relevant if we are in all steps mode , in which case the user may want to view health <nl> - / / pills for a specific step . <nl> - specificHealthPillStep : { <nl> - type : Number , <nl> - value : 0 , <nl> - notify : true , <nl> - } , <nl> colorBy : String , <nl> / / Two - ways <nl> selectedNode : { <nl> < h2 > <nl> debuggerDataEnabled : Boolean , <nl> / / Whether health pills are currently being loaded , in which case we show a spinner ( and the <nl> / / current health pills shown might be out of date ) . <nl> - areHealthPillsLoading : Boolean , <nl> - healthPillEntries : { <nl> - type : Array , <nl> - value : tf . graph . scene . healthPillEntries , <nl> - readOnly : true , <nl> - } , <nl> - healthPillValuesForSelectedNode : { <nl> - type : Array , <nl> - computed : ' _computeHealthPillForNode ( nodeNamesToHealthPills , healthPillStepIndex , selectedNode , allStepsModeEnabled , areHealthPillsLoading ) ' , <nl> - } , <nl> - / / When all - steps mode is enabled , the user can request health pills for any step . In this <nl> - / / mode , Tensorboard makes a request every time the user drags the slider to a different step . <nl> - allStepsModeEnabled : { <nl> - type : Boolean , <nl> - notify : true , <nl> - } , <nl> - / / The biggest step value ever seen . Used to determine what steps of health pills to let the <nl> - / / user fetch in all steps mode . <nl> - _biggestStepEverSeen : { <nl> - type : Number , <nl> - computed : ' _computeBiggestStepEverSeen ( nodeNamesToHealthPills ) ' , <nl> - } , <nl> - _maxStepIndex : { <nl> - type : Number , <nl> - computed : ' _computeMaxStepIndex ( nodeNamesToHealthPills ) ' , <nl> - } , <nl> - _currentStepDisplayValue : { <nl> - type : String , <nl> - computed : ' _computeCurrentStepDisplayValue ( nodeNamesToHealthPills , healthPillStepIndex , allStepsModeEnabled , specificHealthPillStep , areHealthPillsLoading ) ' , <nl> - } , <nl> } , <nl> listeners : { <nl> ' node - list - item - click ' : ' _nodeListItemClicked ' , <nl> < h2 > <nl> / / the user may desire to change steps , and the slider must show for the user to do that . <nl> return debuggerDataEnabled & & nodeNamesToHealthPills ; <nl> } , <nl> - _computeTensorCountString : function ( healthPillValuesForSelectedNode , valueIndex ) { <nl> - if ( ! healthPillValuesForSelectedNode ) { <nl> - / / No health pill data is available .
<nl> - return ' ' ; <nl> - } <nl> - <nl> - return healthPillValuesForSelectedNode [ valueIndex ] . toFixed ( 0 ) ; <nl> - } , <nl> - _computeHealthPillForNode : function ( <nl> - nodeNamesToHealthPills , healthPillStepIndex , selectedNode , allStepsModeEnabled , areHealthPillsLoading ) { <nl> - if ( areHealthPillsLoading ) { <nl> - / / Health pills are loading . Do not render data that is out of date . <nl> - return null ; <nl> - } <nl> - <nl> - if ( ! selectedNode ) { <nl> - / / No node is selected . <nl> - return null ; <nl> - } <nl> - <nl> - const healthPills = nodeNamesToHealthPills [ selectedNode ] ; <nl> - if ( ! healthPills ) { <nl> - / / This node lacks a health pill . <nl> - return null ; <nl> - } <nl> - <nl> - / / If all steps mode is enabled , we use the first health pill in the list because the JSON <nl> - / / response from the server is a mapping between node name and a list of 1 health pill . <nl> - const healthPill = healthPills [ allStepsModeEnabled ? 0 : healthPillStepIndex ] ; <nl> - if ( ! healthPill ) { <nl> - / / This node lacks a health pill at the current step . <nl> - return null ; <nl> - } <nl> - <nl> - / / The health pill count values start at 2 . Each health pill contains 6 values . <nl> - return healthPill . value . slice ( 2 , 8 ) ; <nl> - } , <nl> - _computeCurrentStepDisplayValue : function ( <nl> - nodeNamesToHealthPills , <nl> - healthPillStepIndex , <nl> - allStepsModeEnabled , <nl> - specificHealthPillStep , <nl> - areHealthPillsLoading ) { <nl> - if ( allStepsModeEnabled ) { <nl> - / / The user seeks health pills for specific step from the server . <nl> - return specificHealthPillStep . toFixed ( 0 ) ; <nl> - } <nl> - <nl> - if ( areHealthPillsLoading ) { <nl> - / / The current step is undefined . <nl> - return 0 ; <nl> - } <nl> - <nl> - for ( let nodeName in nodeNamesToHealthPills ) { <nl> - / / All nodes have the same number of steps stored , so only examine 1 node . We cannot <nl> - / / directly index into the nodeNamesToHealthPills object because we do not have a key . <nl> - / / If all steps mode is enabled , we only have 1 step to show . <nl> - return nodeNamesToHealthPills [ nodeName ] [ healthPillStepIndex ] . step . toFixed ( 0 ) ; <nl> - } <nl> - <nl> - / / The current step could not be computed . <nl> - return 0 ; <nl> - } , <nl> - _computeBiggestStepEverSeen : function ( nodeNamesToHealthPills ) { <nl> - for ( let nodeName in nodeNamesToHealthPills ) { <nl> - / / All nodes have the same number of steps stored , so only examine 1 node . <nl> - / / The index is 1 less than the count . Tensorboard backend logic guarantees that the length <nl> - / / of the array will be greater than 1 . <nl> - var healthPills = nodeNamesToHealthPills [ nodeName ] ; <nl> - return Math . max ( this . _biggestStepEverSeen , healthPills [ healthPills . length - 1 ] . step ) ; <nl> - } <nl> - <nl> - / / No steps seen so far . Default to 0 . <nl> - return this . _biggestStepEverSeen | | 0 ; <nl> - } , <nl> - _computeMaxStepIndex : function ( nodeNamesToHealthPills ) { <nl> - for ( let nodeName in nodeNamesToHealthPills ) { <nl> - / / All nodes have the same number of steps stored , so only examine 1 node . <nl> - / / The index is 1 less than the count . Tensorboard backend logic guarantees that the length <nl> - / / of the array will be greater than 1 . <nl> - return nodeNamesToHealthPills [ nodeName ] . length - 1 ; <nl> - } <nl> - <nl> - / / Return a falsy value . The slider should be hidden . 
<nl> - return 0 ; <nl> - } , <nl> } ) ; <nl> } ) ( ) ; <nl> < / script > <nl> mmm a / tensorflow / tensorboard / defs . bzl <nl> ppp b / tensorflow / tensorboard / defs . bzl <nl> <nl> def tensorboard_webcomponent_library ( * * kwargs ) : <nl> " " " Rules referencing this will be deleted from the codebase soon . " " " <nl> pass <nl> + <nl> + def _legacy_js_impl ( target , ctx ) : <nl> + return struct ( ) <nl> + <nl> + legacy_js = aspect ( <nl> + implementation = _legacy_js_impl , <nl> + attr_aspects = [ " exports " ] ) <nl> mmm a / tensorflow / tensorboard / java / org / tensorflow / tensorboard / vulcanize / Vulcanize . java <nl> ppp b / tensorflow / tensorboard / java / org / tensorflow / tensorboard / vulcanize / Vulcanize . java <nl> <nl> import com . google . common . base . Optional ; <nl> import com . google . common . base . Splitter ; <nl> import com . google . common . collect . HashMultimap ; <nl> - import com . google . common . collect . ImmutableList ; <nl> import com . google . common . collect . ImmutableMultimap ; <nl> import com . google . common . collect . ImmutableSet ; <nl> import com . google . common . collect . Iterables ; <nl> <nl> import com . google . javascript . jscomp . DiagnosticGroups ; <nl> import com . google . javascript . jscomp . DiagnosticType ; <nl> import com . google . javascript . jscomp . JSError ; <nl> + import com . google . javascript . jscomp . ModuleIdentifier ; <nl> import com . google . javascript . jscomp . PropertyRenamingPolicy ; <nl> import com . google . javascript . jscomp . Result ; <nl> import com . google . javascript . jscomp . SourceFile ; <nl> <nl> import java . nio . file . Path ; <nl> import java . nio . file . Paths ; <nl> import java . nio . file . StandardOpenOption ; <nl> + import java . util . ArrayDeque ; <nl> import java . util . ArrayList ; <nl> import java . util . Collection ; <nl> + import java . util . Deque ; <nl> import java . util . HashMap ; <nl> import java . util . HashSet ; <nl> import java . util . LinkedHashMap ; <nl> <nl> import java . util . Set ; <nl> import java . util . regex . Matcher ; <nl> import java . util . regex . Pattern ; <nl> + import java . util . stream . Collectors ; <nl> import org . jsoup . Jsoup ; <nl> import org . jsoup . nodes . Attribute ; <nl> import org . jsoup . nodes . Comment ; <nl> <nl> private static final Set < String > legalese = new HashSet < > ( ) ; <nl> private static final List < String > licenses = new ArrayList < > ( ) ; <nl> private static final List < Webpath > stack = new ArrayList < > ( ) ; <nl> + private static final List < SourceFile > externs = new ArrayList < > ( ) ; <nl> private static final List < SourceFile > sourcesFromJsLibraries = new ArrayList < > ( ) ; <nl> private static final Map < Webpath , String > sourcesFromScriptTags = new LinkedHashMap < > ( ) ; <nl> private static final Map < Webpath , Node > sourceTags = new LinkedHashMap < > ( ) ; <nl> public static void main ( String [ ] args ) throws IOException { <nl> Path output = Paths . get ( args [ 4 ] ) ; <nl> for ( int i = 5 ; i < args . length ; i + + ) { <nl> if ( args [ i ] . endsWith ( " . js " ) ) { <nl> - sourcesFromJsLibraries . add ( SourceFile . fromFile ( args [ i ] ) ) ; <nl> + String code = new String ( Files . readAllBytes ( Paths . get ( args [ i ] ) ) , UTF_8 ) ; <nl> + SourceFile sourceFile = SourceFile . fromCode ( args [ i ] , code ) ; <nl> + if ( code . contains ( " @ externs " ) ) { <nl> + externs . add ( sourceFile ) ; <nl> + } else { <nl> + sourcesFromJsLibraries . 
add ( sourceFile ) ; <nl> + } <nl> continue ; <nl> } <nl> if ( ! args [ i ] . endsWith ( " . pbtxt " ) ) { <nl> private static void compile ( ) { <nl> options . setRemoveUnusedPrototypePropertiesInExterns ( false ) ; <nl> options . setRemoveUnusedClassProperties ( false ) ; <nl> <nl> - / / Closure pass . <nl> + / / Dependency management . <nl> options . setClosurePass ( true ) ; <nl> options . setManageClosureDependencies ( true ) ; <nl> options . getDependencyOptions ( ) . setDependencyPruning ( true ) ; <nl> - options . getDependencyOptions ( ) . setDependencySorting ( false ) ; <nl> + options . getDependencyOptions ( ) . setDependencySorting ( true ) ; <nl> options . getDependencyOptions ( ) . setMoocherDropping ( false ) ; <nl> + options . getDependencyOptions ( ) <nl> + . setEntryPoints ( <nl> + sourceTags <nl> + . keySet ( ) <nl> + . stream ( ) <nl> + . map ( Webpath : : toString ) <nl> + . map ( ModuleIdentifier : : forFile ) <nl> + . collect ( Collectors . toList ( ) ) ) ; <nl> <nl> / / Polymer pass . <nl> options . setPolymerVersion ( 1 ) ; <nl> private static void compile ( ) { <nl> new WarningsGuard ( ) { <nl> @ Override <nl> public CheckLevel level ( JSError error ) { <nl> + if ( error . sourceName = = null ) { <nl> + return null ; <nl> + } <nl> + if ( error . sourceName . startsWith ( " javascript / externs " ) <nl> + | | error . sourceName . contains ( " com_google_javascript_closure_compiler_externs " ) ) { <nl> + / / TODO ( jart ) : Figure out why these " mismatch of the removeEventListener property on <nl> + / / type " warnings are showing up . <nl> + / / https : / / github . com / google / closure - compiler / pull / 1959 <nl> + return CheckLevel . OFF ; <nl> + } <nl> if ( IGNORE_PATHS_PATTERN . matcher ( error . sourceName ) . matches ( ) ) { <nl> return CheckLevel . OFF ; <nl> } <nl> public CheckLevel level ( JSError error ) { <nl> / / Compile everything into a single script . <nl> Compiler compiler = new Compiler ( ) ; <nl> compiler . disableThreads ( ) ; <nl> - Result result = compiler . compile ( ImmutableList . < SourceFile > of ( ) , sauce , options ) ; <nl> + Result result = compiler . compile ( externs , sauce , options ) ; <nl> if ( ! result . success ) { <nl> System . exit ( 1 ) ; <nl> } <nl> String jsBlob = compiler . toSource ( ) ; <nl> <nl> / / Split apart the JS blob and put it back in the original < script > locations . <nl> + Deque < Map . Entry < Webpath , Node > > tags = new ArrayDeque < > ( ) ; <nl> + tags . addAll ( sourceTags . entrySet ( ) ) ; <nl> Matcher matcher = WEBPATH_PATTERN . matcher ( jsBlob ) ; <nl> - Webpath path = null ; <nl> - String pureJsDeps = " " ; <nl> - int start = - 1 ; <nl> + verify ( matcher . find ( ) , " Nothing found in compiled JS blob ! " ) ; <nl> + Webpath path = Webpath . get ( matcher . group ( 1 ) ) ; <nl> + int start = 0 ; <nl> while ( matcher . find ( ) ) { <nl> - if ( ! sourceTags . containsKey ( Webpath . get ( matcher . group ( 1 ) ) ) ) { <nl> - continue ; / / Skip over js_library dependencies , which must group at beginning of args . <nl> - } <nl> - if ( path ! = null ) { <nl> - swapScript ( path , pureJsDeps + jsBlob . substring ( start , matcher . start ( ) ) ) ; <nl> - pureJsDeps = " " ; <nl> - } else { <nl> - pureJsDeps = jsBlob . substring ( 0 , matcher . start ( ) ) ; <nl> + if ( sourceTags . containsKey ( path ) ) { <nl> + swapScript ( tags , path , jsBlob . substring ( start , matcher . start ( ) ) ) ; <nl> + start = matcher . start ( ) ; <nl> } <nl> path = Webpath . get ( matcher . 
group ( 1 ) ) ; <nl> - start = matcher . start ( ) ; <nl> - } <nl> - swapScript ( path , pureJsDeps + jsBlob . substring ( start ) ) ; <nl> - if ( ! sourceTags . isEmpty ( ) ) { <nl> - throw new RuntimeException ( " Couldn ' t pull out : " + ImmutableSet . copyOf ( sourceTags . keySet ( ) ) ) ; <nl> } <nl> + swapScript ( tags , path , jsBlob . substring ( start ) ) ; <nl> + verify ( tags . isEmpty ( ) , " < script > wasn ' t compiled : % s " , tags ) ; <nl> } <nl> <nl> - private static void swapScript ( Webpath path , String script ) { <nl> - Node tag = sourceTags . get ( path ) ; <nl> + private static void swapScript ( <nl> + Deque < Map . Entry < Webpath , Node > > tags , Webpath path , String script ) { <nl> + verify ( ! tags . isEmpty ( ) , " jscomp compiled % s after last < script > ? ! " , path ) ; <nl> + Webpath want = tags . getFirst ( ) . getKey ( ) ; <nl> + verify ( path . equals ( want ) , " < script > tag for % s should come before % s " , path , want ) ; <nl> + Node tag = tags . removeFirst ( ) . getValue ( ) ; <nl> tag . replaceWith ( <nl> new Element ( Tag . valueOf ( " script " ) , tag . baseUri ( ) ) <nl> . appendChild ( new DataNode ( script , tag . baseUri ( ) ) ) ) ; <nl> - sourceTags . remove ( path ) ; <nl> } <nl> <nl> private static void handleLicense ( String text ) { <nl> mmm a / tensorflow / tensorboard / plugins / audio / BUILD <nl> ppp b / tensorflow / tensorboard / plugins / audio / BUILD <nl> py_test ( <nl> ] , <nl> ) <nl> <nl> + py_binary ( <nl> + name = " audio_demo " , <nl> + srcs = [ " audio_demo . py " ] , <nl> + srcs_version = " PY2AND3 " , <nl> + deps = [ <nl> + " / / tensorflow : tensorflow_py " , <nl> + " @ six_archive / / : six " , <nl> + ] , <nl> + ) <nl> + <nl> filegroup ( <nl> name = " all_files " , <nl> srcs = glob ( [ " * * " ] ) , <nl> new file mode 100644 <nl> index 0000000000000 . . b89310d3a8aa8 <nl> mmm / dev / null <nl> ppp b / tensorflow / tensorboard / plugins / audio / audio_demo . py <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + " " " Sample data exhibiting audio summaries , via a waveform generator . " " " <nl> + <nl> + from __future__ import absolute_import <nl> + from __future__ import division <nl> + from __future__ import print_function <nl> + <nl> + import math <nl> + import os . path <nl> + <nl> + from six . moves import xrange # pylint : disable = redefined - builtin <nl> + import tensorflow as tf <nl> + <nl> + <nl> + FLAGS = tf . flags . FLAGS <nl> + <nl> + tf . flags . DEFINE_string ( ' logdir ' , ' / tmp / audio_demo ' , <nl> + ' Directory into which to write TensorBoard data . ' ) <nl> + <nl> + tf . flags . 
DEFINE_integer ( ' steps ' , 500 , <nl> + ' Number of frequencies of each waveform to generate . ' ) <nl> + <nl> + # Parameters for the audio output . <nl> + tf . flags . DEFINE_integer ( ' sample_rate ' , 44100 , ' Sample rate , in Hz . ' ) <nl> + tf . flags . DEFINE_float ( ' duration ' , 2 . 0 , ' Duration of each waveform , in s . ' ) <nl> + <nl> + <nl> + def _samples ( ) : <nl> + " " " Compute how many samples should be included in each waveform . " " " <nl> + return int ( FLAGS . sample_rate * FLAGS . duration ) <nl> + <nl> + <nl> + def run ( logdir , run_name , wave_name , wave_constructor ) : <nl> + " " " Generate wave data of the given form . <nl> + <nl> + The provided function ` wave_constructor ` should accept a scalar tensor <nl> + of type float32 , representing the frequency ( in Hz ) at which to <nl> + construct a wave , and return a tensor of shape [ 1 , _samples ( ) , ` n ` ] <nl> + representing audio data ( for some number of channels ` n ` ) . <nl> + <nl> + Waves will be generated at frequencies ranging from A4 to A5 . <nl> + <nl> + Arguments : <nl> + logdir : the top - level directory into which to write summary data <nl> + run_name : the name of this run ; will be created as a subdirectory <nl> + under logdir <nl> + wave_name : the name of the wave being generated <nl> + wave_constructor : see above <nl> + " " " <nl> + tf . reset_default_graph ( ) <nl> + tf . set_random_seed ( 0 ) <nl> + <nl> + # On each step ` i ` , we ' ll set this placeholder to ` i ` . This allows us <nl> + # to know " what time it is " at each step . <nl> + step_placeholder = tf . placeholder ( tf . float32 , shape = [ ] ) <nl> + <nl> + # We want to linearly interpolate a frequency between A4 ( 440 Hz ) and <nl> + # A5 ( 880 Hz ) . <nl> + f_min = 440 . 0 <nl> + f_max = 880 . 0 <nl> + t = step_placeholder / ( FLAGS . steps - 1 ) <nl> + frequency = f_min * ( 1 . 0 - t ) + f_max * t <nl> + <nl> + # Let ' s log this frequency , just so that we can make sure that it ' s as <nl> + # expected . <nl> + tf . summary . scalar ( ' frequency ' , frequency ) <nl> + <nl> + # Now , we pass this to the wave constructor to get our waveform . Doing <nl> + # so within a name scope means that any summaries that the wave <nl> + # constructor produces will be namespaced . <nl> + with tf . name_scope ( wave_name ) : <nl> + waveform = wave_constructor ( frequency ) <nl> + <nl> + # Here ' s the crucial piece : we interpret this result as audio . <nl> + tf . summary . audio ( ' waveform ' , waveform , FLAGS . sample_rate ) <nl> + <nl> + # Now , we can collect up all the summaries and begin the run . <nl> + summ = tf . summary . merge_all ( ) <nl> + <nl> + sess = tf . Session ( ) <nl> + writer = tf . summary . FileWriter ( os . path . join ( logdir , run_name ) ) <nl> + writer . add_graph ( sess . graph ) <nl> + sess . run ( tf . global_variables_initializer ( ) ) <nl> + for step in xrange ( FLAGS . steps ) : <nl> + s = sess . run ( summ , feed_dict = { step_placeholder : float ( step ) } ) <nl> + writer . add_summary ( s , global_step = step ) <nl> + writer . close ( ) <nl> + <nl> + <nl> + # Now , let ' s take a look at the kinds of waves that we can generate . <nl> + <nl> + <nl> + def sine_wave ( frequency ) : <nl> + " " " Emit a sine wave at the given frequency . " " " <nl> + xs = tf . reshape ( tf . range ( _samples ( ) , dtype = tf . float32 ) , [ 1 , _samples ( ) , 1 ] ) <nl> + ts = xs / FLAGS . sample_rate <nl> + return tf . sin ( 2 * math . 
pi * frequency * ts ) <nl> + <nl> + <nl> + def square_wave ( frequency ) : <nl> + " " " Emit a square wave at the given frequency . " " " <nl> + # The square is just the sign of the sine ! <nl> + return tf . sign ( sine_wave ( frequency ) ) <nl> + <nl> + <nl> + def triangle_wave ( frequency ) : <nl> + " " " Emit a triangle wave at the given frequency . " " " <nl> + xs = tf . reshape ( tf . range ( _samples ( ) , dtype = tf . float32 ) , [ 1 , _samples ( ) , 1 ] ) <nl> + ts = xs / FLAGS . sample_rate <nl> + # <nl> + # A triangle wave looks like this : <nl> + # <nl> + # / \ / \ <nl> + # / \ / \ <nl> + # \ / \ / <nl> + # \ / \ / <nl> + # <nl> + # If we look at just half a period ( the first four slashes in the <nl> + # diagram above ) , we can see that it looks like a transformed absolute <nl> + # value function . <nl> + # <nl> + # Let ' s start by computing the times relative to the start of each <nl> + # half - wave pulse ( each individual " mountain " or " valley " , of which <nl> + # there are four in the above diagram ) . <nl> + half_pulse_index = ts * ( frequency * 2 ) <nl> + half_pulse_angle = half_pulse_index % 1 . 0 # in [ 0 , 1 ] <nl> + # <nl> + # Now , we can see that each positive half - pulse ( " mountain " ) has <nl> + # amplitude given by A ( z ) = 0 . 5 - abs ( z - 0 . 5 ) , and then normalized : <nl> + absolute_amplitude = ( 0 . 5 - tf . abs ( half_pulse_angle - 0 . 5 ) ) / 0 . 5 <nl> + # <nl> + # But every other half - pulse is negative , so we should invert these . <nl> + half_pulse_parity = tf . sign ( 1 - ( half_pulse_index % 2 . 0 ) ) <nl> + amplitude = half_pulse_parity * absolute_amplitude <nl> + # <nl> + # This is precisely the desired result , so we ' re done ! <nl> + return amplitude <nl> + <nl> + <nl> + # If we want to get fancy , we can use our above waves as primitives to <nl> + # build more interesting waves . <nl> + <nl> + <nl> + def bisine_wave ( frequency ) : <nl> + " " " Emit two sine waves , in stereo at different octaves . " " " <nl> + # <nl> + # We can first use our existing sine generator to generate two different <nl> + # waves . <nl> + f_hi = frequency <nl> + f_lo = frequency / 2 . 0 <nl> + with tf . name_scope ( ' hi ' ) : <nl> + sine_hi = sine_wave ( f_hi ) <nl> + with tf . name_scope ( ' lo ' ) : <nl> + sine_lo = sine_wave ( f_lo ) <nl> + # <nl> + # Now , we have two tensors of shape [ 1 , _samples ( ) , 1 ] . By concatenating <nl> + # them along axis 2 , we get a tensor of shape [ 1 , _samples ( ) , 2 ] , a <nl> + # stereo waveform . <nl> + return tf . concat ( [ sine_lo , sine_hi ] , axis = 2 ) <nl> + <nl> + <nl> + def bisine_wahwah_wave ( frequency ) : <nl> + " " " Emit two sine waves with balance oscillating left and right . " " " <nl> + # <nl> + # This is clearly intended to build on the bisine wave defined above , <nl> + # so we can start by generating that . <nl> + waves_a = bisine_wave ( frequency ) <nl> + # <nl> + # Then , by reversing axis 2 , we swap the stereo channels . By mixing <nl> + # this with ` waves_a ` , we ' ll be able to create the desired effect . <nl> + waves_b = tf . reverse ( waves_a , axis = [ 2 ] ) <nl> + # <nl> + # Let ' s have the balance oscillate from left to right four times . <nl> + iterations = 4 <nl> + # <nl> + # Now , we compute the balance for each sample : ` ts ` has values <nl> + # in [ 0 , 1 ] that indicate how much we should use ` waves_a ` . <nl> + xs = tf . reshape ( tf . range ( _samples ( ) , dtype = tf . 
float32 ) , [ 1 , _samples ( ) , 1 ] ) <nl> + thetas = xs / _samples ( ) * iterations <nl> + ts = ( tf . sin ( math . pi * 2 * thetas ) + 1 ) / 2 <nl> + # <nl> + # Finally , we can mix the two together , and we ' re done . <nl> + return ts * waves_a + ( 1 . 0 - ts ) * waves_b <nl> + <nl> + <nl> + def run_all ( logdir , verbose = False ) : <nl> + " " " Generate waves of the shapes defined above . <nl> + <nl> + Arguments : <nl> + logdir : the directory into which to store all the runs ' data <nl> + verbose : if true , print out each run ' s name as it begins <nl> + " " " <nl> + waves = [ sine_wave , square_wave , triangle_wave , <nl> + bisine_wave , bisine_wahwah_wave ] <nl> + for ( i , wave_constructor ) in enumerate ( waves ) : <nl> + wave_name = wave_constructor . __name__ <nl> + run_name = ' wave : % 02d , % s ' % ( i + 1 , wave_name ) <nl> + if verbose : <nl> + print ( ' mmm Running : % s ' % run_name ) <nl> + run ( logdir , run_name , wave_name , wave_constructor ) <nl> + <nl> + <nl> + def main ( unused_argv ) : <nl> + print ( ' Saving output to % s . ' % FLAGS . logdir ) <nl> + run_all ( FLAGS . logdir , verbose = True ) <nl> + print ( ' Done . Output saved to % s . ' % FLAGS . logdir ) <nl> + <nl> + <nl> + if __name__ = = ' __main__ ' : <nl> + tf . app . run ( ) <nl> mmm a / tensorflow / tensorboard / vulcanize . bzl <nl> ppp b / tensorflow / tensorboard / vulcanize . bzl <nl> <nl> # See the License for the specific language governing permissions and <nl> # limitations under the License . <nl> <nl> - load ( " @ io_bazel_rules_closure / / closure / private : defs . bzl " , " unfurl " , " long_path " ) <nl> + load ( " / / tensorflow / tensorboard : defs . bzl " , " legacy_js " ) <nl> + load ( " @ io_bazel_rules_closure / / closure / private : defs . bzl " , " collect_js " , " unfurl " , " long_path " ) <nl> load ( " / / tensorflow / tensorboard : web . bzl " , " web_aspect " ) <nl> <nl> def _tensorboard_html_binary ( ctx ) : <nl> deps = unfurl ( ctx . attr . deps , provider = " webfiles " ) <nl> - manifests = depset ( order = " topological " ) <nl> - files = depset ( ) <nl> - jslibs = depset ( ctx . files . _jslibs ) <nl> - webpaths = depset ( ) <nl> + manifests = set ( order = " topological " ) <nl> + files = set ( ) <nl> + webpaths = set ( ) <nl> for dep in deps : <nl> manifests + = dep . webfiles . manifests <nl> webpaths + = dep . webfiles . webpaths <nl> files + = dep . data_runfiles . files <nl> - if hasattr ( dep . webfiles , " jslibs " ) : <nl> - jslibs + = dep . webfiles . jslibs <nl> - if hasattr ( dep , " closure_js_library " ) : <nl> - jslibs + = getattr ( dep . closure_js_library , " srcs " , [ ] ) <nl> webpaths + = [ ctx . attr . output_path ] <nl> + closure_js_library = collect_js ( <nl> + ctx , unfurl ( ctx . attr . deps , provider = " closure_js_library " ) ) <nl> <nl> # vulcanize <nl> + jslibs = depset ( ctx . files . _jslibs ) + closure_js_library . srcs <nl> ctx . action ( <nl> inputs = list ( manifests | files | jslibs ) , <nl> outputs = [ ctx . outputs . html ] , <nl> tensorboard_html_binary = rule ( <nl> " input_path " : attr . string ( mandatory = True ) , <nl> " output_path " : attr . string ( mandatory = True ) , <nl> " data " : attr . label_list ( cfg = " data " , allow_files = True ) , <nl> - " deps " : attr . label_list ( aspects = [ web_aspect ] , mandatory = True ) , <nl> + " deps " : attr . label_list ( <nl> + aspects = [ <nl> + web_aspect , <nl> + legacy_js , <nl> + ] , <nl> + mandatory = True ) , <nl> " external_assets " : attr . 
string_dict ( default = { " / _ / runfiles " : " . " } ) , <nl> " _jslibs " : attr . label ( <nl> default = Label ( " / / tensorflow / tensorboard / java / org / tensorflow / tensorboard / vulcanize : jslibs " ) , <nl> mmm a / tensorflow / tensorboard / web . bzl <nl> ppp b / tensorflow / tensorboard / web . bzl <nl> <nl> <nl> " " " Same as web_library but supports TypeScript . " " " <nl> <nl> + load ( " / / tensorflow / tensorboard : defs . bzl " , " legacy_js " ) <nl> + <nl> + load ( " / / third_party : clutz . bzl " , <nl> + " CLUTZ_ATTRIBUTES " , <nl> + " CLUTZ_OUTPUTS " , <nl> + " clutz_aspect " , <nl> + " extract_dts_from_closure_libraries " ) <nl> + <nl> load ( " @ io_bazel_rules_closure / / closure / private : defs . bzl " , <nl> + " CLOSURE_LIBRARY_BASE_ATTR " , <nl> + " CLOSURE_LIBRARY_DEPS_ATTR " , <nl> + " collect_js " , <nl> " collect_runfiles " , <nl> " convert_path_to_es6_module_name " , <nl> " create_argfile " , <nl> load ( " @ io_bazel_rules_closure / / closure / private : defs . bzl " , <nl> " long_path " , <nl> " unfurl " ) <nl> <nl> + _ASPECT_SLURP_FILE_TYPE = FileType ( [ <nl> + " . html " , " . js " , " . css " , " . gss " , " . png " , " . jpg " , " . gif " , " . ico " , " . svg " ] ) <nl> + <nl> + _CLOSURE_WORKER = attr . label ( <nl> + default = Label ( " @ io_bazel_rules_closure / / java / io / bazel / rules / closure : ClosureWorker " ) , <nl> + executable = True , <nl> + cfg = " host " ) <nl> + <nl> def _ts_web_library ( ctx ) : <nl> if not ctx . attr . srcs : <nl> if ctx . attr . deps : <nl> def _ts_web_library ( ctx ) : <nl> # process what came before <nl> deps = unfurl ( ctx . attr . deps , provider = " webfiles " ) <nl> webpaths = depset ( ) <nl> - manifests = depset ( order = " topological " ) <nl> - jslibs = depset ( order = " postorder " ) <nl> - ts_typings = depset ( ctx . files . _es6dts ) <nl> - ts_typings_paths = depset ( ) <nl> + ts_typings = depset ( ctx . files . _default_typings ) <nl> + ts_typings_paths = depset ( <nl> + [ long_path ( ctx , f ) for f in ctx . files . _default_typings ] ) <nl> ts_typings_execroots = depset ( ) <nl> + aspect_runfiles = depset ( ) <nl> for dep in deps : <nl> webpaths + = dep . webfiles . webpaths <nl> - manifests + = dep . webfiles . manifests <nl> if hasattr ( dep . webfiles , " ts_typings " ) : <nl> ts_typings + = dep . webfiles . ts_typings <nl> if hasattr ( dep . webfiles , " ts_typings_paths " ) : <nl> ts_typings_paths + = dep . webfiles . ts_typings_paths <nl> if hasattr ( dep . webfiles , " ts_typings_execroots " ) : <nl> ts_typings_execroots + = dep . webfiles . ts_typings_execroots <nl> - if hasattr ( dep . webfiles , " jslibs " ) : <nl> - jslibs + = dep . webfiles . jslibs <nl> - if hasattr ( dep , " closure_js_library " ) : <nl> - jslibs + = getattr ( dep . closure_js_library , " srcs " , [ ] ) <nl> + if hasattr ( dep . webfiles , " aspect_runfiles " ) : <nl> + aspect_runfiles + = dep . webfiles . aspect_runfiles <nl> <nl> # process what comes now <nl> manifest_srcs = [ ] <nl> new_webpaths = [ ] <nl> ts_inputs = depset ( ) <nl> ts_outputs = [ ] <nl> - ts_files = [ " lib . es6 . d . ts " ] + list ( ts_typings_paths ) <nl> + ts_files = list ( ts_typings_paths ) <nl> new_typings = [ ] <nl> new_typings_paths = [ ] <nl> new_typings_execroot = struct ( inputs = [ ] ) <nl> execroot = struct ( <nl> - inputs = [ ( " lib . es6 . d . ts " , ctx . files . _es6dts [ 0 ] . path ) ] , <nl> + inputs = [ ( long_path ( ctx , f ) , f . path ) for f in ctx . files . 
_default_typings ] , <nl> outputs = [ ] , <nl> program = [ ctx . executable . _tsc . path , " - p " ] ) <nl> web_srcs = [ ] <nl> def _ts_web_library ( ctx ) : <nl> else : <nl> web_srcs . append ( src ) <nl> <nl> - # create webfiles manifest <nl> - manifest = ctx . new_file ( ctx . configuration . bin_dir , <nl> - " % s . pbtxt " % ctx . label . name ) <nl> - ctx . file_action ( <nl> - output = manifest , <nl> - content = struct ( <nl> - label = str ( ctx . label ) , <nl> - src = manifest_srcs ) . to_proto ( ) ) <nl> - manifests + = [ manifest ] <nl> - webpaths + = new_webpaths <nl> + # get typings for closure code <nl> + clutz_dts = extract_dts_from_closure_libraries ( ctx ) <nl> + if clutz_dts : <nl> + entry = ( long_path ( ctx , clutz_dts ) , clutz_dts . path ) <nl> + ts_inputs + = [ clutz_dts ] <nl> + ts_files . append ( entry [ 0 ] ) <nl> + execroot . inputs . append ( entry ) <nl> <nl> # compile typescript <nl> workspace = " " <nl> if ctx . label . workspace_root : <nl> workspace = " / " + ctx . label . workspace_root <nl> if execroot . outputs : <nl> - ts_config = ctx . new_file ( ctx . bin_dir , " % s - tsc . json " % ctx . label . name ) <nl> + ts_config = _new_file ( ctx , " - tsc . json " ) <nl> execroot . inputs . append ( ( " tsconfig . json " , ts_config . path ) ) <nl> ctx . file_action ( <nl> output = ts_config , <nl> def _ts_web_library ( ctx ) : <nl> noResolve = True , <nl> target = " es5 " , <nl> ) , <nl> - files = list ( ts_files ) , <nl> + files = ts_files , <nl> ) . to_json ( ) ) <nl> - er_config = ctx . new_file ( ctx . bin_dir , <nl> - " % s - tsc - execroot . json " % ctx . label . name ) <nl> + er_config = _new_file ( ctx , " - tsc - execroot . json " ) <nl> ctx . file_action ( output = er_config , content = execroot . to_json ( ) ) <nl> ts_inputs + = collect_runfiles ( [ ctx . attr . _tsc ] ) <nl> ts_inputs + = ctx . files . _tsc <nl> def _ts_web_library ( ctx ) : <nl> outputs = ts_outputs , <nl> executable = ctx . executable . _execrooter , <nl> arguments = [ er_config . path ] + [ f . path for f in ts_typings_execroots ] , <nl> - progress_message = " Compiling % d TypeScript files " % len ( ts_files ) ) <nl> + progress_message = " Compiling % d TypeScript files % s " % ( <nl> + len ( ts_files ) , ctx . label ) ) <nl> <nl> # perform strict dependency checking <nl> - inputs = [ manifest ] <nl> - direct_manifests = depset ( [ manifest ] ) <nl> - args = [ " WebfilesValidator " , <nl> - " - - dummy " , ctx . outputs . dummy . path , <nl> - " - - target " , manifest . path ] <nl> - for category in ctx . attr . suppress : <nl> - args . append ( " - - suppress " ) <nl> - args . append ( category ) <nl> - inputs . extend ( web_srcs ) <nl> - for dep in deps : <nl> - inputs . append ( dep . webfiles . dummy ) <nl> - for f in dep . files : <nl> - inputs . append ( f ) <nl> - direct_manifests + = [ dep . webfiles . manifest ] <nl> - inputs . append ( dep . webfiles . manifest ) <nl> - args . append ( " - - direct_dep " ) <nl> - args . append ( dep . webfiles . manifest . path ) <nl> - for man in difference ( manifests , direct_manifests ) : <nl> - inputs . append ( man ) <nl> - args . append ( " - - transitive_dep " ) <nl> - args . append ( man . path ) <nl> - argfile = create_argfile ( ctx , args ) <nl> - inputs . append ( argfile ) <nl> - ctx . action ( <nl> - inputs = inputs , <nl> - outputs = [ ctx . outputs . dummy ] , <nl> - executable = ctx . executable . _ClosureWorker , <nl> - arguments = [ " @ @ " + argfile . 
path ] , <nl> - mnemonic = " Closure " , <nl> - execution_requirements = { " supports - workers " : " 1 " } , <nl> - progress_message = " Checking webfiles in % s " % ctx . label ) <nl> - web_srcs . append ( ctx . outputs . dummy ) <nl> + manifest = _make_manifest ( ctx , manifest_srcs ) <nl> + webpaths + = new_webpaths <nl> + dummy , manifests = _run_webfiles_validator ( ctx , web_srcs , deps , manifest ) <nl> + web_srcs . append ( dummy ) <nl> <nl> # define development web server that only applies to this transitive closure <nl> params = struct ( <nl> def _ts_web_library ( ctx ) : <nl> manifest = [ long_path ( ctx , man ) for man in manifests ] , <nl> external_asset = [ struct ( webpath = k , path = v ) <nl> for k , v in ctx . attr . external_assets . items ( ) ] ) <nl> - params_file = ctx . new_file ( ctx . bin_dir , " % s_params . pbtxt " % ctx . label . name ) <nl> + params_file = _new_file ( ctx , " - params . pbtxt " ) <nl> ctx . file_action ( output = params_file , content = params . to_proto ( ) ) <nl> ctx . file_action ( <nl> executable = True , <nl> def _ts_web_library ( ctx ) : <nl> long_path ( ctx , params_file ) ) ) <nl> <nl> if new_typings : <nl> - er_config = ctx . new_file ( ctx . bin_dir , <nl> - " % s - typings - execroot . json " % ctx . label . name ) <nl> + er_config = _new_file ( ctx , " - typings - execroot . json " ) <nl> ctx . file_action ( output = er_config , content = new_typings_execroot . to_json ( ) ) <nl> ts_typings + = new_typings <nl> ts_typings_paths + = new_typings_paths <nl> def _ts_web_library ( ctx ) : <nl> <nl> # export data to parent rules <nl> return struct ( <nl> - files = depset ( web_srcs ) , <nl> + files = depset ( web_srcs + [ dummy ] ) , <nl> exports = unfurl ( ctx . attr . exports ) , <nl> webfiles = struct ( <nl> manifest = manifest , <nl> manifests = manifests , <nl> webpaths = webpaths , <nl> - dummy = ctx . outputs . dummy , <nl> - jslibs = jslibs , <nl> + dummy = dummy , <nl> ts_typings = ts_typings , <nl> ts_typings_paths = ts_typings_paths , <nl> ts_typings_execroots = ts_typings_execroots ) , <nl> + closure_js_library = collect_js ( <nl> + ctx , unfurl ( ctx . attr . deps , provider = " closure_js_library " ) ) , <nl> runfiles = ctx . runfiles ( <nl> files = ctx . files . srcs + ctx . files . data + ts_outputs + [ <nl> manifest , <nl> params_file , <nl> ctx . outputs . executable , <nl> - ctx . outputs . dummy ] , <nl> + dummy ] , <nl> transitive_files = ( collect_runfiles ( [ ctx . attr . _WebfilesServer ] ) | <nl> collect_runfiles ( deps ) | <nl> - collect_runfiles ( ctx . attr . data ) ) ) ) <nl> + collect_runfiles ( ctx . attr . data ) | <nl> + aspect_runfiles ) ) ) <nl> <nl> def _web_aspect_impl ( target , ctx ) : <nl> - if ctx . rule . kind in ( " js_library " , " pinto_library " ) : <nl> - return _web_aspect_js_library ( target , ctx , [ ] , depset ( ) ) <nl> - if hasattr ( target , " js " ) : <nl> - return _web_aspect_js_library ( <nl> - target , <nl> - ctx , <nl> - target . files , <nl> - target . js . full_tc ( True ) ) <nl> - return struct ( ) <nl> - <nl> - def _web_aspect_js_library ( target , ctx , extra_srcs , extra_transitive ) : <nl> - deps = unfurl ( ( ctx . rule . attr . deps + <nl> - getattr ( ctx . rule . attr , ' sticky_deps ' , [ ] ) ) , <nl> - provider = " webfiles " ) <nl> - # process what came before <nl> + if hasattr ( target , " webfiles " ) : <nl> + return struct ( ) <nl> + srcs = [ ] <nl> + deps = [ ] <nl> + if hasattr ( ctx . rule . files , " srcs " ) : <nl> + srcs . extend ( _ASPECT_SLURP_FILE_TYPE . 
filter ( ctx . rule . files . srcs ) ) <nl> + for attr in ( " deps " , " sticky_deps " , " module_deps " ) : <nl> + value = getattr ( ctx . rule . attr , attr , None ) <nl> + if value : <nl> + deps . extend ( value ) <nl> + deps = unfurl ( deps , provider = " webfiles " ) <nl> webpaths = depset ( ) <nl> - manifests = depset ( order = " topological " ) <nl> - jslibs = depset ( order = " postorder " ) <nl> + aspect_runfiles = depset ( srcs ) <nl> for dep in deps : <nl> webpaths + = dep . webfiles . webpaths <nl> - manifests + = dep . webfiles . manifests <nl> - if hasattr ( dep . webfiles , " jslibs " ) : <nl> - jslibs + = dep . webfiles . jslibs <nl> - # process what comes now <nl> - srcs = ctx . rule . files . srcs + extra_srcs <nl> - jslibs + = [ src for src in srcs if src . path . endswith ( " . js " ) ] <nl> + if hasattr ( dep . webfiles , " aspect_runfiles " ) : <nl> + aspect_runfiles + = dep . webfiles . aspect_runfiles <nl> manifest_srcs = [ ] <nl> new_webpaths = [ ] <nl> - web_srcs = [ ] <nl> for src in srcs : <nl> webpath = " / " + long_path ( ctx , src ) <nl> _add_webpath ( ctx , src , webpath , webpaths , new_webpaths , manifest_srcs ) <nl> - web_srcs . append ( src ) <nl> - # create webfiles manifest <nl> - manifest = ctx . new_file ( ctx . configuration . bin_dir , <nl> - " % s - webfiles . pbtxt " % ctx . label . name ) <nl> - ctx . file_action ( <nl> - output = manifest , <nl> - content = struct ( <nl> - label = str ( ctx . label ) , <nl> - src = manifest_srcs ) . to_proto ( ) ) <nl> - manifests + = [ manifest ] <nl> webpaths + = new_webpaths <nl> + manifest = _make_manifest ( ctx , manifest_srcs ) <nl> + dummy , manifests = _run_webfiles_validator ( ctx , srcs , deps , manifest ) <nl> + aspect_runfiles + = [ dummy , manifest ] <nl> return struct ( <nl> - exports = [ ] if srcs else deps , <nl> webfiles = struct ( <nl> manifest = manifest , <nl> manifests = manifests , <nl> webpaths = webpaths , <nl> - dummy = manifest , <nl> - jslibs = jslibs ) , <nl> - closure_legacy_js_runfiles = ( depset ( srcs + ctx . rule . files . data ) | <nl> - extra_transitive | <nl> - collect_runfiles ( deps ) | <nl> - collect_runfiles ( ctx . rule . files . data ) ) ) <nl> + dummy = dummy , <nl> + aspect_runfiles = aspect_runfiles ) ) <nl> + <nl> + def _make_manifest ( ctx , src_list ) : <nl> + manifest = _new_file ( ctx , " - webfiles . pbtxt " ) <nl> + ctx . file_action ( <nl> + output = manifest , <nl> + content = struct ( <nl> + label = str ( ctx . label ) , <nl> + src = src_list ) . to_proto ( ) ) <nl> + return manifest <nl> + <nl> + def _run_webfiles_validator ( ctx , srcs , deps , manifest ) : <nl> + dummy = _new_file ( ctx , " - webfiles . ignoreme " ) <nl> + manifests = depset ( order = " topological " ) <nl> + for dep in deps : <nl> + manifests + = dep . webfiles . manifests <nl> + if srcs : <nl> + args = [ " WebfilesValidator " , <nl> + " - - dummy " , dummy . path , <nl> + " - - target " , manifest . path ] <nl> + if hasattr ( ctx , " attr " ) and hasattr ( ctx . attr , " suppress " ) : <nl> + for category in ctx . attr . suppress : <nl> + args . append ( " - - suppress " ) <nl> + args . append ( category ) <nl> + inputs = [ manifest ] <nl> + inputs . extend ( srcs ) <nl> + direct_manifests = depset ( ) <nl> + for dep in deps : <nl> + inputs . append ( dep . webfiles . dummy ) <nl> + for f in dep . files : <nl> + inputs . append ( f ) <nl> + direct_manifests + = [ dep . webfiles . manifest ] <nl> + inputs . append ( dep . webfiles . manifest ) <nl> + args . 
append ( " - - direct_dep " ) <nl> + args . append ( dep . webfiles . manifest . path ) <nl> + for man in difference ( manifests , direct_manifests ) : <nl> + inputs . append ( man ) <nl> + args . append ( " - - transitive_dep " ) <nl> + args . append ( man . path ) <nl> + argfile = _new_file ( ctx , " - webfiles - checker - args . txt " ) <nl> + ctx . file_action ( output = argfile , content = " \ n " . join ( args ) ) <nl> + inputs . append ( argfile ) <nl> + ctx . action ( <nl> + inputs = inputs , <nl> + outputs = [ dummy ] , <nl> + executable = ( getattr ( ctx . executable , " _ClosureWorker " , None ) or <nl> + getattr ( ctx . executable , " _ClosureWorkerAspect " , None ) ) , <nl> + arguments = [ " @ @ " + argfile . path ] , <nl> + mnemonic = " Closure " , <nl> + execution_requirements = { " supports - workers " : " 1 " } , <nl> + progress_message = " Checking webfiles % s " % ctx . label ) <nl> + else : <nl> + ctx . file_action ( output = dummy , content = " BOO ! " ) <nl> + manifests + = [ manifest ] <nl> + return dummy , manifests <nl> + <nl> + def _new_file ( ctx , suffix ) : <nl> + return ctx . new_file ( ctx . bin_dir , " % s % s " % ( ctx . label . name , suffix ) ) <nl> <nl> def _add_webpath ( ctx , src , webpath , webpaths , new_webpaths , manifest_srcs ) : <nl> if webpath in new_webpaths : <nl> def _get_strip ( ctx ) : <nl> <nl> web_aspect = aspect ( <nl> implementation = _web_aspect_impl , <nl> - attr_aspects = [ " deps " ] ) <nl> + attr_aspects = [ " deps " , " sticky_deps " , " module_deps " ] , <nl> + attrs = { " _ClosureWorkerAspect " : _CLOSURE_WORKER } ) <nl> <nl> ts_web_library = rule ( <nl> implementation = _ts_web_library , <nl> executable = True , <nl> - attrs = { <nl> + attrs = CLUTZ_ATTRIBUTES + { <nl> " path " : attr . string ( ) , <nl> " srcs " : attr . label_list ( allow_files = True ) , <nl> - " deps " : attr . label_list ( aspects = [ web_aspect ] ) , <nl> + " deps " : attr . label_list ( <nl> + aspects = [ <nl> + web_aspect , <nl> + clutz_aspect , <nl> + legacy_js , <nl> + ] ) , <nl> " exports " : attr . label_list ( ) , <nl> " data " : attr . label_list ( cfg = " data " , allow_files = True ) , <nl> " suppress " : attr . string_list ( ) , <nl> " strip_prefix " : attr . string ( ) , <nl> " external_assets " : attr . string_dict ( default = { " / _ / runfiles " : " . " } ) , <nl> + " clutz_entry_points " : attr . string_list ( ) , <nl> " _execrooter " : attr . label ( <nl> - default = Label ( <nl> - " / / tensorflow / tensorboard / scripts : execrooter " ) , <nl> + default = Label ( " / / tensorflow / tensorboard / scripts : execrooter " ) , <nl> executable = True , <nl> cfg = " host " ) , <nl> " _tsc " : attr . label ( <nl> - default = Label ( <nl> - " @ com_microsoft_typescript / / : tsc " ) , <nl> + default = Label ( " @ com_microsoft_typescript / / : tsc " ) , <nl> allow_files = True , <nl> executable = True , <nl> cfg = " host " ) , <nl> - " _es6dts " : attr . label ( <nl> - default = Label ( <nl> - " @ com_microsoft_typescript / / : lib . es6 . d . ts " ) , <nl> + " _default_typings " : attr . label ( <nl> + default = Label ( " / / tensorflow / tensorboard : ts_web_library_default_typings " ) , <nl> allow_files = True ) , <nl> - " _ClosureWorker " : attr . label ( <nl> - default = Label ( " @ io_bazel_rules_closure / / java / io / bazel / rules / closure : ClosureWorker " ) , <nl> - executable = True , <nl> - cfg = " host " ) , <nl> " _WebfilesServer " : attr . 
label ( <nl> - default = Label ( <nl> - " @ io_bazel_rules_closure / / java / io / bazel / rules / closure / webfiles / server : WebfilesServer " ) , <nl> + default = Label ( " @ io_bazel_rules_closure / / java / io / bazel / rules / closure / webfiles / server : WebfilesServer " ) , <nl> executable = True , <nl> cfg = " host " ) , <nl> + " _ClosureWorker " : _CLOSURE_WORKER , <nl> + " _closure_library_base " : CLOSURE_LIBRARY_BASE_ATTR , <nl> + " _closure_library_deps " : CLOSURE_LIBRARY_DEPS_ATTR , <nl> } , <nl> - outputs = { <nl> - " dummy " : " % { name } . ignoreme " , <nl> - } ) <nl> + outputs = CLUTZ_OUTPUTS ) <nl> mmm a / tensorflow / tools / api / golden / tensorflow . summary . pbtxt <nl> ppp b / tensorflow / tools / api / golden / tensorflow . summary . pbtxt <nl> tf_module { <nl> } <nl> member_method { <nl> name : " audio " <nl> - argspec : " args = [ \ ' name \ ' , \ ' tensor \ ' , \ ' sample_rate \ ' , \ ' max_outputs \ ' , \ ' collections \ ' ] , varargs = None , keywords = None , defaults = [ \ ' 3 \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' name \ ' , \ ' tensor \ ' , \ ' sample_rate \ ' , \ ' max_outputs \ ' , \ ' collections \ ' , \ ' family \ ' ] , varargs = None , keywords = None , defaults = [ \ ' 3 \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " get_summary_description " <nl> tf_module { <nl> } <nl> member_method { <nl> name : " histogram " <nl> - argspec : " args = [ \ ' name \ ' , \ ' values \ ' , \ ' collections \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' name \ ' , \ ' values \ ' , \ ' collections \ ' , \ ' family \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " image " <nl> - argspec : " args = [ \ ' name \ ' , \ ' tensor \ ' , \ ' max_outputs \ ' , \ ' collections \ ' ] , varargs = None , keywords = None , defaults = [ \ ' 3 \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' name \ ' , \ ' tensor \ ' , \ ' max_outputs \ ' , \ ' collections \ ' , \ ' family \ ' ] , varargs = None , keywords = None , defaults = [ \ ' 3 \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " merge " <nl> tf_module { <nl> } <nl> member_method { <nl> name : " scalar " <nl> - argspec : " args = [ \ ' name \ ' , \ ' tensor \ ' , \ ' collections \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' name \ ' , \ ' tensor \ ' , \ ' collections \ ' , \ ' family \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " tensor_summary " <nl> mmm a / tensorflow / tools / benchmark / benchmark_model . cc <nl> ppp b / tensorflow / tools / benchmark / benchmark_model . cc <nl> limitations under the License . <nl> # include < vector > <nl> <nl> # include " tensorflow / core / framework / graph . pb . h " <nl> + # include " tensorflow / core / framework / step_stats . pb . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / graph / algorithm . h " <nl> # include " tensorflow / core / graph / graph . 
h " <nl> mmm a / tensorflow / tools / graph_transforms / BUILD <nl> ppp b / tensorflow / tools / graph_transforms / BUILD <nl> cc_library ( <nl> " / / tensorflow / core : framework " , <nl> " / / tensorflow / core : framework_internal " , <nl> " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : protos_all_cc " , <nl> ] , <nl> ) <nl> <nl> mmm a / tensorflow / tools / graph_transforms / freeze_requantization_ranges . cc <nl> ppp b / tensorflow / tools / graph_transforms / freeze_requantization_ranges . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> + # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / lib / strings / str_util . h " <nl> # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / tools / graph_transforms / transform_utils . h " <nl> mmm a / tensorflow / tools / graph_transforms / set_device . cc <nl> ppp b / tensorflow / tools / graph_transforms / set_device . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> + # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / tools / graph_transforms / transform_utils . h " <nl> <nl> namespace tensorflow { <nl> mmm a / tensorflow / tools / graph_transforms / summarize_graph_main . cc <nl> ppp b / tensorflow / tools / graph_transforms / summarize_graph_main . cc <nl> limitations under the License . <nl> / / bazel - bin / tensorflow / tools / graph_transforms / summarize_graph \ <nl> / / - - in_graph = my_graph . pb <nl> <nl> + # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / lib / strings / str_util . h " <nl> # include " tensorflow / core / platform / env . h " <nl> mmm a / tensorflow / tools / graph_transforms / transform_graph . cc <nl> ppp b / tensorflow / tools / graph_transforms / transform_graph . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / tools / graph_transforms / transform_graph . h " <nl> <nl> + # include " tensorflow / core / framework / function . pb . h " <nl> # include " tensorflow / core / lib / strings / scanner . h " <nl> # include " tensorflow / core / lib / strings / str_util . h " <nl> # include " tensorflow / core / platform / env . h " <nl> mmm a / tensorflow / tools / graph_transforms / transform_utils . h <nl> ppp b / tensorflow / tools / graph_transforms / transform_utils . h <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / framework / attr_value_util . h " <nl> # include " tensorflow / core / framework / graph . pb . h " <nl> + # include " tensorflow / core / framework / node_def . pb . h " <nl> # include " tensorflow / core / framework / tensor . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> <nl> mmm a / tensorflow / tools / proto_text / gen_proto_text_functions_lib_test . cc <nl> ppp b / tensorflow / tools / proto_text / gen_proto_text_functions_lib_test . cc <nl> limitations under the License . 
<nl> # include " tensorflow / core / lib / strings / strcat . h " <nl> # include " tensorflow / core / platform / protobuf . h " <nl> # include " tensorflow / core / platform / test . h " <nl> - # include " tensorflow / tools / proto_text / test . pb . h " <nl> # include " tensorflow / tools / proto_text / test . pb_text . h " <nl> + # include " tensorflow / tools / proto_text / test . pb . h " <nl> <nl> namespace tensorflow { <nl> namespace test { <nl> mmm a / tensorflow / tools / tfprof / README . md <nl> ppp b / tensorflow / tools / tfprof / README . md <nl> <nl> - # tfprof : A Profiling Tool for TensorFlow Models <nl> + # tfprof : TensorFlow Profiling Tool <nl> <nl> - Author : Xin Pan ( xpan @ google . com , github : panyx0718 ) , Jon Shlens , Yao Zhang <nl> + # # # Features <nl> <nl> - Consultants : Jon Shlens , Pete Warden <nl> + * Profile model architectures <nl> + * parameters , tensor shapes , float operations , device placement , etc . <nl> + * Profile model performance <nl> + * execution time , memory consumption <nl> + * Profile multiple steps . <nl> <nl> + # # # Interfaces <nl> <nl> - # # # Major Features <nl> + * Python API <nl> + * Command Line <nl> + * Visualization <nl> + * C + + API ( Not public , contact us if needed . ) <nl> <nl> - 1 . Measure model parameters , float operations , tensor shapes . <nl> - 2 . Profile op execution times , requested memory size and device placement . <nl> - 3 . Inspect checkpoint tensors ' shapes and their values . <nl> - 4 . Selectively group , filter , account and order ops . <nl> + # # # Views and Options <nl> <nl> - # # # # tfprof supports 4 views to organize TensorFlow model profiles <nl> + tfprof provides 4 different views to organize the statistics . <nl> <nl> - * code view : graph nodes are grouped by Python codes that generate them . <nl> - * op view : graph nodes are grouped by operation type ( E . g . MatMul , Conv2D ) of the graph nodes . <nl> - * scope view : graph nodes are organized based on name scope hierarchies . <nl> - * graph view : graph nodes are organized based on op input / output . <nl> + * code view : operations are grouped by Python codes that generate them . <nl> + * op view : operations are grouped by operation type ( E . g . MatMul , Conv2D ) . <nl> + * scope view : operations are organized based on name scope hierarchies . <nl> + * graph view : operations are organized based on input / output . <nl> <nl> - # # # # For each view , there are 3 ways to display outputs : <nl> + tfprof provides options to help users select , filter and order statistics . <nl> + See [ Options ] ( g3doc / options . md ) for detailed instructions . <nl> <nl> - * stdout : Results are written to stdout . <nl> - * timeline : Visualized in chrome browser as time series . <nl> - * file : Results are dumped to file . <nl> - <nl> - <nl> - [ Demo ] ( # demo ) <nl> - <nl> - [ Python API Tutorials ] ( # python - api - tutorials ) : How to use directly from <nl> - Python codes . <nl> - <nl> - [ CLI Tutorials ] ( # cli - tutorials ) : How to run from interactive command line . <nl> + ` ` ` <nl> + - max_depth 10 <nl> + - min_bytes 0 <nl> + - min_micros 0 <nl> + - min_params 0 <nl> + - min_float_ops 0 <nl> + - min_occurrence 0 <nl> + - step - 1 <nl> + - order_by name <nl> + - account_type_regexes . * <nl> + - start_name_regexes . * <nl> + - trim_name_regexes <nl> + - show_name_regexes . 
* <nl> + - hide_name_regexes <nl> + - account_displayed_op_only false <nl> + - select params <nl> + - output stdout : <nl> + ` ` ` <nl> <nl> - [ Options ] ( # options ) : <nl> - tfprof supports many options to selectively account / display / order ops and <nl> - statistics . <nl> + # # # Tutorials <nl> <nl> + * [ Python API ] ( g3doc / python_api . md ) <nl> + * [ Command Line Interface ] ( g3doc / command_line . md ) <nl> + * [ Profile Time ] ( g3doc / profile_time . md ) <nl> + * [ Profile Memory ] ( g3doc / profile_memory . md ) <nl> + * [ Profile Model Architecture ] ( g3doc / profile_model_architecture . md ) <nl> + * [ Options ] ( g3doc / options . md ) <nl> <nl> # # Demo <nl> - # # # Attribute the TensorFlow graph running time to your Python codes . <nl> + <nl> + # # # Attribute TensorFlow graph running time to your Python codes . <nl> ` ` ` shell <nl> tfprof > code - max_depth 1000 - show_name_regexes . * model_analyzer . * py . * - select micros - account_type_regexes . * - order_by micros <nl> _TFProfRoot ( 0us / 22 . 44ms ) <nl> Sigmoid 152 . 57MB ( 85 . 28 % , 0 . 21 % ) , 96 . 66ms ( 23 . 46 % , <nl> [ CodeTimeline ] ( g3doc / graph_timeline . png ) <nl> < / left > <nl> <nl> - # # Python API Tutorials <nl> - <nl> - tfprof is part of TensorFlow core . Simply ` ` ` import tensorflow as tf ` ` ` . <nl> - <nl> - # # # Examine the shapes and sizes of all trainable Variables . <nl> - ` ` ` python <nl> - # Print trainable variable parameter statistics to stdout . <nl> - # By default , statistics are associated with each graph node . <nl> - param_stats = tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl> - tf . get_default_graph ( ) , <nl> - tfprof_options = tf . contrib . tfprof . model_analyzer . <nl> - TRAINABLE_VARS_PARAMS_STAT_OPTIONS ) <nl> - <nl> - <nl> - # Set tfprof_cmd = ' code ' to associate statistics with Python codes . <nl> - opts = tf . contrib . tfprof . model_analyzer . TRAINABLE_VARS_PARAMS_STAT_OPTIONS <nl> - opts [ ' show_name_regexes ' ] = [ ' . * my_code1 . py . * ' , ' . * my_code2 . py . * ' ] <nl> - param_stats = tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl> - tf . get_default_graph ( ) , <nl> - tfprof_cmd = ' code ' <nl> - tfprof_options = opts ) <nl> - <nl> - # param_stats is tensorflow . tfprof . TFGraphNodeProto proto . <nl> - # Let ' s print the root below . <nl> - sys . stdout . write ( ' total_params : % d \ n ' % param_stats . total_parameters ) <nl> - ` ` ` <nl> - <nl> - # # # Examine the number of floating point operations <nl> - ` ` ` python <nl> - # Print to stdout an analysis of the number of floating point operations in the <nl> - # model broken down by individual operations . <nl> - # <nl> - # Note : Only Ops with RegisterStatistics ( ' flops ' ) defined have flop stats . It <nl> - # also requires complete shape information . It is common that shape is unknown <nl> - # statically . To complete the shape , provide run - time shape information with <nl> - # tf . RunMetadata to the API ( See next example on how to provide RunMetadata ) . <nl> - # <nl> - tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl> - tf . get_default_graph ( ) , <nl> - tfprof_options = tf . contrib . tfprof . model_analyzer . FLOAT_OPS_OPTIONS ) <nl> - ` ` ` <nl> - <nl> - # # # Examine the timing and memory usage <nl> - You will first need to run the following set up in your model in order to <nl> - compute the memory and timing statistics . 
<nl> - <nl> - ` ` ` python <nl> - # Generate the meta information for the model that contains the memory usage <nl> - # and timing information . <nl> - # <nl> - # Note : When run on GPU , a kernel is first scheduled ( enqueued ) and then <nl> - # executed asynchronously . tfprof only tracks the execution time . <nl> - # In addition , a substantial of time might be spent between Python and <nl> - # TensorFlow runtime , which is also not tracked by tfprof . <nl> - # <nl> - run_metadata = tf . RunMetadata ( ) <nl> - with tf . Session ( ) as sess : <nl> - _ = sess . run ( train_op , <nl> - options = tf . RunOptions ( trace_level = tf . RunOptions . FULL_TRACE ) , <nl> - run_metadata = run_metadata ) <nl> - ` ` ` <nl> - <nl> - Finally , you may run ` print_model_analysis ` to explore the timing and memory <nl> - demands of the model . <nl> - <nl> - ` ` ` python <nl> - # See model_analyzer_test . py for more examples . <nl> - # <nl> - # Print to stdout an analysis of the memory usage and the timing information <nl> - # broken down by python codes . <nl> - opts = tf . contrib . tfprof . model_analyzer . PRINT_ALL_TIMING_MEMORY . copy ( ) <nl> - opts [ ' show_name_regexes ' ] = [ ' . * my_code . py . * ' ] <nl> - tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl> - tf . get_default_graph ( ) , <nl> - run_meta = run_metadata , <nl> - tfprof_cmd = ' code ' , <nl> - tfprof_options = opts ) <nl> - <nl> - # Print to stdout an analysis of the memory usage and the timing information <nl> - # broken down by operations . <nl> - tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl> - tf . get_default_graph ( ) , <nl> - run_meta = run_metadata , <nl> - tfprof_options = tf . contrib . tfprof . model_analyzer . PRINT_ALL_TIMING_MEMORY ) <nl> - ` ` ` <nl> - <nl> - # # # Visualize <nl> - <nl> - ` ` ` <nl> - For example set opts [ ' output ' ] = ' timeline : outfile = < filename > ' to <nl> - generate a timeline json file . Open a Chrome Browser , open URL <nl> - chrome : / / tracing , and load the json file . Below are 2 examples of graph <nl> - view and scope view . See code view example in later examples . <nl> - ` ` ` <nl> - <nl> - < left > <nl> - [ CodeTimeline ] ( g3doc / graph_timeline . png ) <nl> - [ CodeTimeline ] ( g3doc / scope_timeline . png ) <nl> - < / left > <nl> - <nl> - <nl> - # # CLI Tutorials <nl> - <nl> - Tutorials below are based on a 32 layers ResNet . <nl> - <nl> - TODO ( xpan ) : Provide graph . pbtxt , model . ckpt , tfprof_log and run_meta download . <nl> - <nl> - # # # Examples <nl> - <nl> - 1 ) Start ` tfprof ` command line tool <nl> - <nl> - ` ` ` shell <nl> - # Build the tool . <nl> - bazel build - - config opt tensorflow / tools / tfprof / . . . <nl> - <nl> - # Help information , including detail ' option ' instructions . <nl> - bazel - bin / tensorflow / tools / tfprof / tfprof help <nl> - # <nl> - # The following commands will start tfprof interactive mode . <nl> - # <nl> - # Profile model shapes and parameters only . <nl> - bazel - bin / tensorflow / tools / tfprof / tfprof \ <nl> - - - graph_path = graph . pbtxt <nl> - # <nl> - # Additionally profile ops requested memory and timing . <nl> - # See CLI Input Files section on generating run_meta file . <nl> - bazel - bin / tensorflow / tools / tfprof / tfprof \ <nl> - - - graph_path = graph . pbtxt \ <nl> - - - run_meta_path = run_meta \ <nl> - # <nl> - # Additionally profile checkpoint statistics and values . 
<nl> - # Use ' - account_type_regexes _checkpoint_variables ' to select <nl> - # checkpoint tensors . <nl> - bazel - bin / tensorflow / tools / tfprof / tfprof \ <nl> - - - graph_path = graph . pbtxt \ <nl> - - - run_meta_path = run_meta \ <nl> - - - checkpoint_path = model . ckpt <nl> - # <nl> - # tfprof_log is used to define customized op types , float ops and code traces . <nl> - # Use tfprof_logger . write_op_log ( ) to create tfprof_log . <nl> - # See 12 ) in Examples section on generating tfprof_log file . <nl> - bazel - bin / tensorflow / tools / tfprof / tfprof \ <nl> - - - graph_path = graph . pbtxt \ <nl> - - - run_meta_path = run_meta \ <nl> - - - op_log_path = tfprof_log \ <nl> - - - checkpoint_path = model . ckpt <nl> - # <nl> - # The following command start tfprof in one - shot mode . <nl> - # <nl> - bazel - bin / tensorflow / tools / tfprof / tfprof scope \ <nl> - - - graph_path = graph . pbtxt \ <nl> - - - max_depth = 3 <nl> - ` ` ` <nl> - Note that ` graph . pbtxt ` is an ASCII text format . <nl> - <nl> - 2 ) Press enter to show the default options <nl> - <nl> - ` ` ` shell <nl> - tfprof > <nl> - - max_depth 4 <nl> - - min_bytes 0 <nl> - - min_micros 0 <nl> - - min_params 0 <nl> - - min_float_ops 0 <nl> - - min_occurrence 0 <nl> - - step - 1 <nl> - - order_by name <nl> - - account_type_regexes Variable , VariableV2 <nl> - - start_name_regexes . * <nl> - - trim_name_regexes <nl> - - show_name_regexes . * <nl> - - hide_name_regexes IsVariableInitialized_ [ 0 - 9 ] + , save \ / . * , ^ zeros [ 0 - 9_ ] * <nl> - - account_displayed_op_only false <nl> - # supported select fields . Availability depends on - - [ run_meta | checkpoint | op_log ] _path . <nl> - # [ bytes | micros | params | float_ops | occurrence | tensor_value | device | op_types ] <nl> - - select params <nl> - # format : output_type : key = value , key = value . . . <nl> - # output_types : stdout ( default ) , timeline , file . <nl> - # key = value pairs : <nl> - # 1 . timeline : outfile = < filename > <nl> - # 2 . file : outfile = < filename > <nl> - # 3 . stdout : None . <nl> - # E . g . timeline : outfile = / tmp / timeline . json <nl> - - output <nl> - ` ` ` <nl> - <nl> - 3 ) I want to see which line of my python codes costs most time ! <nl> - <nl> - ` ` ` shell <nl> - # Requires - - graph_path - - op_log_path <nl> - tfprof > code - max_depth 1000 - show_name_regexes . * model_analyzer . * py . * - select micros - account_type_regexes . * - order_by micros <nl> - _TFProfRoot ( 0us / 22 . 44ms ) <nl> - model_analyzer_test . py : 149 : run_filename_as_m . . . : none ( 0us / 22 . 44ms ) <nl> - model_analyzer_test . py : 33 : _run_code_in_main : none ( 0us / 22 . 44ms ) <nl> - model_analyzer_test . py : 208 : < module > : test . main ( ) ( 0us / 22 . 44ms ) <nl> - model_analyzer_test . py : 132 : testComplexCodeView : x = lib . BuildFull . . . ( 0us / 22 . 44ms ) <nl> - model_analyzer_testlib . py : 63 : BuildFullModel : return sgd_op . min . . . ( 0us / 21 . 83ms ) <nl> - model_analyzer_testlib . py : 58 : BuildFullModel : cell , array_ops . c . . . ( 0us / 333us ) <nl> - model_analyzer_testlib . py : 54 : BuildFullModel : seq . append ( array_ . . . ( 0us / 254us ) <nl> - model_analyzer_testlib . py : 42 : BuildSmallModel : x = nn_ops . conv2d . . . ( 0us / 134us ) <nl> - model_analyzer_testlib . py : 46 : BuildSmallModel : initializer = init_ . . . ( 0us / 40us ) <nl> - . . . <nl> - model_analyzer_testlib . py : 61 : BuildFullModel : loss = nn_ops . l2_ . . . ( 0us / 28us ) <nl> - model_analyzer_testlib . 
py : 60 : BuildFullModel : target = array_op . . . ( 0us / 0us ) <nl> - model_analyzer_test . py : 134 : testComplexCodeView : sess . run ( variable . . . ( 0us / 0us ) <nl> - ` ` ` <nl> - <nl> - Set ` ` ` - output timeline : outfile = < filename > ` ` ` to generate timeline instead of stdout . <nl> - < left > <nl> - [ CodeTimeline ] ( g3doc / code_timeline . png ) <nl> - < / left > <nl> - <nl> - <nl> - 4 ) I want to see the ` BatchNorm ` ' s gamma value in checkpoint . <nl> - <nl> - ` ` ` shell <nl> - # Requires - - graph_path , - - checkpoint_path . <nl> - tfprof > scope - show_name_regexes unit_1_0 . * gamma - select tensor_value - max_depth 5 <nl> - _TFProfRoot ( ) <nl> - unit_1_0 / shared_activation / init_bn / gamma ( ) <nl> - [ 1 . 80 2 . 10 2 . 06 1 . 91 2 . 26 1 . 86 1 . 81 1 . 37 1 . 78 1 . 85 1 . 96 1 . 54 2 . 04 2 . 34 2 . 22 1 . 99 ] , <nl> - unit_1_0 / sub2 / bn2 / gamma ( ) <nl> - [ 1 . 57 1 . 83 1 . 30 1 . 25 1 . 59 1 . 14 1 . 26 0 . 82 1 . 19 1 . 10 1 . 48 1 . 01 0 . 82 1 . 23 1 . 21 1 . 14 ] , <nl> - ` ` ` <nl> - <nl> - 5 ) I want to see my checkpoint tensors shape and number of parameters . <nl> - <nl> - ` ` ` shell <nl> - # Requires - - graph_path , - - checkpoint_path . <nl> - # Increase - max_depth to see all tensors . <nl> - tfprof > scope - account_type_regexes _checkpoint_variables - select params - max_depth 4 <nl> - _TFProfRoot ( - - / 930 . 58k params ) <nl> - global_step ( 0 / 0 params ) <nl> - init / init_conv / DW ( 3x3x3x16 , 432 / 864 params ) <nl> - pool_logit / DW ( 64x10 , 640 / 1 . 28k params ) <nl> - pool_logit / DW / Momentum ( 64x10 , 640 / 640 params ) <nl> - pool_logit / biases ( 10 , 10 / 20 params ) <nl> - pool_logit / biases / Momentum ( 10 , 10 / 10 params ) <nl> - unit_last / final_bn / beta ( 64 , 64 / 128 params ) <nl> - unit_last / final_bn / gamma ( 64 , 64 / 128 params ) <nl> - unit_last / final_bn / moving_mean ( 64 , 64 / 64 params ) <nl> - unit_last / final_bn / moving_variance ( 64 , 64 / 64 params ) <nl> - ` ` ` <nl> - <nl> - 6 ) I defined an op named ‘ cost ’ to calculate the loss . I want to know what ops <nl> - it depends on take a long time to run . Hint : Use the ‘ graph ’ command to explore <nl> - graph dependencies . <nl> - <nl> - ` ` ` shell <nl> - # Requires - - graph_path , - - run_meta_path . <nl> - tfprof > graph - start_name_regexes cost . * - max_depth 100 - min_micros 10000 - select micros - account_type_regexes . * <nl> - _TFProfRoot ( 0us / 3 . 61sec ) <nl> - init / init_conv / Conv2D ( 11 . 75ms / 3 . 10sec ) <nl> - random_shuffle_queue_DequeueMany ( 3 . 09sec / 3 . 09sec ) <nl> - unit_1_0 / sub2 / conv2 / Conv2D ( 74 . 14ms / 3 . 19sec ) <nl> - unit_1_3 / sub2 / conv2 / Conv2D ( 60 . 75ms / 3 . 34sec ) <nl> - unit_2_4 / sub2 / conv2 / Conv2D ( 73 . 58ms / 3 . 54sec ) <nl> - unit_3_3 / sub2 / conv2 / Conv2D ( 10 . 26ms / 3 . 60sec ) <nl> - ` ` ` <nl> - <nl> - 7 ) I want to know the expensive operations during the back propagation . <nl> - Hint : tensorflow prepend ‘ gradient ’ to your defined name scopes . Use the ‘ scope ’ <nl> - command to explore based on name scope hierarchies . <nl> - <nl> - ` ` ` shell <nl> - # Requires - - graph_path , - - run_meta_path . <nl> - tfprof > scope - start_name_regexes gradient . * - max_depth 100 - min_micros 20000 - select micros - account_type_regexes . * <nl> - _TFProfRoot ( 0us / 2 . 29sec ) <nl> - gradients / unit_1_0 / sub1 / conv1 / Conv2D_grad / Conv2DBackpropFilter ( 54 . 96ms / 54 . 
96ms ) <nl> - gradients / unit_1_0 / sub2 / conv2 / Conv2D_grad / Conv2DBackpropFilter ( 83 . 63ms / 83 . 63ms ) <nl> - gradients / unit_1_1 / sub1 / conv1 / Conv2D_grad / Conv2DBackpropFilter ( 99 . 25ms / 99 . 25ms ) <nl> - gradients / unit_1_2 / sub1 / conv1 / Conv2D_grad / Conv2DBackpropFilter ( 95 . 40ms / 95 . 40ms ) <nl> - gradients / unit_1_2 / sub2 / conv2 / Conv2D_grad / Conv2DBackpropFilter ( 99 . 83ms / 99 . 83ms ) <nl> - gradients / unit_1_3 / sub1 / conv1 / Conv2D_grad / Conv2DBackpropFilter ( 95 . 39ms / 95 . 39ms ) <nl> - . . . <nl> - ` ` ` <nl> - <nl> - 8 ) Show the number of float operations in the model . <nl> - Note : float operations calculation depends on <nl> - 1 ) op . RegisterStatistics . If an op doesn ’ t <nl> - have RegisterStatistics defined , its float operations cannot be counted . <nl> - 2 ) fully defined shape is also necessary in order to calculate flops . Sometimes <nl> - full shape is not available statically . Use RunMetadata to get run - time shape . <nl> - float operations number is provided by tensorflow : : tfprof : : OpLog logged from <nl> - Python API . <nl> - <nl> - ` ` ` shell <nl> - # Requires - - graph_path , - - op_log_path . <nl> - tfprof > scope - min_float_ops 1 - max_depth 10 - select float_ops - account_type_regexes . * <nl> - _TFProfRoot ( 0 / 17 . 63b flops ) <nl> - gradients / pool_logit / xw_plus_b / MatMul_grad / MatMul ( 163 . 84k / 163 . 84k flops ) <nl> - gradients / pool_logit / xw_plus_b / MatMul_grad / MatMul_1 ( 163 . 84k / 163 . 84k flops ) <nl> - init / init_conv / Conv2D ( 113 . 25m / 113 . 25m flops ) <nl> - pool_logit / xw_plus_b ( 1 . 28k / 165 . 12k flops ) <nl> - pool_logit / xw_plus_b / MatMul ( 163 . 84k / 163 . 84k flops ) <nl> - unit_1_0 / sub1 / conv1 / Conv2D ( 603 . 98m / 603 . 98m flops ) <nl> - unit_1_0 / sub2 / conv2 / Conv2D ( 603 . 98m / 603 . 98m flops ) <nl> - unit_1_1 / sub1 / conv1 / Conv2D ( 603 . 98m / 603 . 98m flops ) <nl> - unit_1_1 / sub2 / conv2 / Conv2D ( 603 . 98m / 603 . 98m flops ) <nl> - . . . <nl> - ` ` ` <nl> - <nl> - 9 ) Show the number of parameters of all ` tf . trainable_variables ( ) ` in the model . <nl> - <nl> - ` ` ` shell <nl> - # Requires - - graph_path - - op_log_path . <nl> - # store option for future commands . <nl> - tfprof > set - account_type_regexes _trainable_variables <nl> - tfprof > scope - max_depth 4 - select params <nl> - _TFProfRoot ( - - / 464 . 15k params ) <nl> - init / init_conv / DW ( 3x3x3x16 , 432 / 432 params ) <nl> - pool_logit / DW ( 64x10 , 640 / 640 params ) <nl> - pool_logit / biases ( 10 , 10 / 10 params ) <nl> - unit_last / final_bn / beta ( 64 , 64 / 64 params ) <nl> - unit_last / final_bn / gamma ( 64 , 64 / 64 params ) <nl> - ` ` ` <nl> - <nl> - Where does “ _trainable_variables ” come from ? It is from the OpLog file <nl> - generated by write_op_log ( ) Python API . write_op_log ( ) help users create some <nl> - common op types implicitly . Users can define their own op types and log it <nl> - through the write_op_log ( ) API . <nl> - <nl> - 109 ) What if I ’ m lazy and don ’ t want to define op type ? I have given my ops <nl> - well - defined names in my model ’ s code . And want to use names to select a group <nl> - of ops . Let ’ s try it ! <nl> - <nl> - ` ` ` shell <nl> - tfprof > set - account_type_regexes . * <nl> - tfprof > scope - show_name_regexes unit_2_1 . * DW - max_depth 100 - account_displayed_op_only <nl> - _TFProfRoot ( 0 / 18 . 43k params ) <nl> - unit_2_1 / sub1 / conv1 / DW ( 3x3x32x32 , 9 . 22k / 9 . 
22k params ) <nl> - unit_2_1 / sub2 / conv2 / DW ( 3x3x32x32 , 9 . 22k / 9 . 22k params ) <nl> - ` ` ` <nl> - <nl> - The above command allows you to filter ops that match specific names . <nl> - ` - account_displayed_op_only ` asks tfprof to only account ops displayed <nl> - in terminal . Otherwise , tfprof accounts all ops matched by <nl> - ` - account_type_regexes ` recursively even if they are hidden due to some <nl> - options such as - max_depth . <nl> - <nl> - 11 ) TensorFlow has built - in op types . For example , built - in op type ` Variable ` <nl> - seems to include ` Variable ' s ` created by your model . However , be careful when <nl> - depending on it because TensorFlow creates extra ` Variable ` ops implicitly and <nl> - the implicitly created ops can have the same prefix as the ` Variable ' s ` you <nl> - defined . <nl> - <nl> - In the following example , extra ` Variables ` are created and “ / Momentum ” is <nl> - appended to their names . This might cause you “ model capacity ” calculation <nl> - to get wrong . <nl> - <nl> - ` ` ` shell <nl> - tfprof > scope - account_type_regexes VariableV2 - max_depth 4 - select params <nl> - _TFProfRoot ( - - / 930 . 58k params ) <nl> - global_step ( 1 / 1 params ) <nl> - init / init_conv / DW ( 3x3x3x16 , 432 / 864 params ) <nl> - pool_logit / DW ( 64x10 , 640 / 1 . 28k params ) <nl> - pool_logit / DW / Momentum ( 64x10 , 640 / 640 params ) <nl> - pool_logit / biases ( 10 , 10 / 20 params ) <nl> - pool_logit / biases / Momentum ( 10 , 10 / 10 params ) <nl> - unit_last / final_bn / beta ( 64 , 64 / 128 params ) <nl> - unit_last / final_bn / gamma ( 64 , 64 / 128 params ) <nl> - unit_last / final_bn / moving_mean ( 64 , 64 / 64 params ) <nl> - unit_last / final_bn / moving_variance ( 64 , 64 / 64 params ) <nl> - ` ` ` <nl> - <nl> - <nl> - 12 ) A example of defining extra op type for ops using ` OpLog ` <nl> - <nl> - First , in Python code , create an ` OpLog ` proto and add op type <nl> - information to it : <nl> - <nl> - ` ` ` python <nl> - <nl> - op_log = tfprof_log_pb2 . OpLog ( ) <nl> - entry = op_log . log_entries . add ( ) <nl> - entry . name = ' pool_logit / DW ' <nl> - entry . types . append ( ' pool_logit ' ) <nl> - entry = op_log . log_entries . add ( ) <nl> - entry . name = ' pool_logit / biases ' <nl> - # Alternatively : <nl> - # var = tf . get_variable ( xxx ) <nl> - # entry . name = var . op . name <nl> - entry . types . append ( ' pool_logit ' ) <nl> - ` ` ` <nl> - <nl> - Second , call write_op_log to write the OpLog proto . <nl> - <nl> - ` ` ` python <nl> - tf . contrib . tfprof . tfprof_logger . write_op_log ( <nl> - sess . graph , / tmp / my_op_log_dir , op_log ) <nl> - <nl> - # Get run - time shape information in order to fill shapes and get flops . <nl> - tf . contrib . tfprof . tfprof_logger . write_op_log ( <nl> - sess . graph , / tmp / my_op_log_dir , op_log , run_meta ) <nl> - ` ` ` <nl> - <nl> - Third , when starting the tfprof tool , specify <nl> - " - - op_log_path / tmp / my_op_log_dir / op_log " <nl> - <nl> - ` ` ` shell <nl> - tfprof > scope - account_type_regexes pool_logit - max_depth 4 - select params <nl> - _TFProfRoot ( - - / 650 params ) <nl> - pool_logit / DW ( 64x10 , 640 / 640 params ) <nl> - pool_logit / biases ( 10 , 10 / 10 params ) <nl> - ` ` ` <nl> - <nl> - Note that when you call <nl> - ` tf . contrib . tfprof . tfprof_logger . write_op_log ( . . . ) ` , <nl> - the tool adds all ` Variables ` inside ` tf . trainable_variables ( ) ` to <nl> - ` _trainable_variables ` . 
<nl> - <nl> - 12 ) Run tfprof in one - shot mode and dump result to file . <nl> - <nl> - ` ` ` shell <nl> - # By default output to stdout . Use - output option to change output types . <nl> - tfprof scope - - graph_path = graph . pbtxt \ <nl> - - - max_depth = 3 \ <nl> - - - output = " file : outfile = / tmp / dump " <nl> - Reading Files . . . <nl> - Parsing GraphDef . . . <nl> - Preparing Views . . . <nl> - <nl> - cat / tmp / dump <nl> - _TFProfRoot ( - - / 930 . 58k params ) <nl> - global_step ( 0 / 0 params ) <nl> - pool_logit / DW ( 64x10 , 640 / 1 . 28k params ) <nl> - pool_logit / biases ( 10 , 10 / 20 params ) <nl> - ` ` ` <nl> - <nl> - 13 ) Analyze how balanced Variable are on parameter servers . <nl> - <nl> - In this tutorial , I ' m going to use a seq2seq model , which are split <nl> - on several gpus at workers and several parameter servers . <nl> - <nl> - In tfprof , ' device ' is an op_type . For example , if op1 and op2 are placed on <nl> - gpu0 . They share an op_type called ' gpu0 ' . <nl> - <nl> - ` ` ` shell <nl> - bazel - bin / tensorflow / tools / tfprof / tfprof \ <nl> - - - graph_path ~ / tfprof / textsum / graph . pbtxt \ <nl> - - - run_meta_path ~ / tfprof / textsum / run_meta <nl> - <nl> - # Looks like ps task 1 is holding twice more parameters than task 0 . <nl> - tfprof > scope - select device , params - account_type_regexes . * ps . * task : 0 . * - max_depth 1 <nl> - _TFProfRoot ( - - / 25 . 81m params ) <nl> - tfprof > scope - select device , params - account_type_regexes . * ps . * task : 1 . * - max_depth 1 <nl> - _TFProfRoot ( - - / 58 . 84m params ) <nl> - ` ` ` <nl> - <nl> - # # # CLI Input Files <nl> - <nl> - tfprof command line inference ( CLI ) loads dumped files from a tensorflow model . <nl> - Convert them into in - memory data structures . To use it , users need to specify <nl> - the locations of the dumped files . The following are the dumped files loaded <nl> - by tfprof : <nl> - <nl> - < b > - - graph_path : < / b > GraphDef text file ( required ) . Used to build in - memory <nl> - representation of the model . For example , graph . pbtxt written by tf . Supervisor <nl> - is a candidate . If you are not using tf . Supervisor , you can easily get GraphDef <nl> - using tf . Graph . as_graph_def ( ) or other API . <nl> - <nl> - < b > - - run_meta_path : < / b > tensorflow : : RunMetadata . <nl> - Used to get the memory and time consumption of <nl> - each op of the model . Users need to enable it . For example , the following code <nl> - snippet writes a RunMetadata file : <nl> - <nl> - ` ` ` python <nl> - run_options = config_pb2 . RunOptions ( trace_level = config_pb2 . RunOptions . FULL_TRACE ) <nl> - run_metadata = config_pb2 . RunMetadata ( ) <nl> - # Once a while , call it the get the RunMeta . <nl> - _ = self . _sess . run ( . . . , options = run_options , run_metadata = run_metadata ) <nl> - with gfile . Open ( os . path . join ( output_dir , " run_meta " ) , " w " ) as f : <nl> - f . write ( run_metadata . SerializeToString ( ) ) <nl> - ` ` ` <nl> - <nl> - < b > - - op_log_path : < / b > <nl> - tensorflow : : tfprof : : OpLog . A proto used to provide extra op information <nl> - for ops . By giving a group of ops a type name , users can easily aggregate the <nl> - statistics for those ops without accidentally missing or including extra ops . <nl> - tfprof exposes the following Python API to add op information and logging . <nl> - <nl> - ` ` ` python <nl> - tf . contrib . tfprof . tfprof_logger . 
write_op_log ( graph , log_dir , op_log = None ) <nl> - ` ` ` <nl> - <nl> - < b > - - checkpoint_path : < / b > <nl> - TensorFlow checkpoint . It defines _checkpoint_variable op type . It also <nl> - provides checkpointed tensors ' values . <nl> - <nl> - <nl> - # # Options <nl> - <nl> - ` - max_depth ` : Show ops that are at most this number of hops from starting op in the tree / graph structure . <nl> - <nl> - ` - min_bytes ` : Show ops that request at least this number of bytes . <nl> - <nl> - ` - min_micros ` : Show ops that spend at least this number of microseconds to run . <nl> - <nl> - ` - min_params ` : Show ops that contains at least this number of parameters . <nl> - <nl> - ` - min_float_ops ` : Show ops that contain at least this number of float operations . Only available if an op has op . RegisterStatistics ( ) defined and OpLog is provided <nl> - <nl> - ` - min_occurrence ` : Show ops that appear at least this number of times . Only available in " op " view . <nl> - <nl> - ` - step ` : Show the stats of the this step when multiple steps of RunMetadata were added . By default , show the average of all steps . " <nl> - <nl> - ` - order_by ` : Order the results by [ name | depth | bytes | micros | params | float_ops | occurrence ] <nl> - <nl> - ` - account_type_regexes ` : Account and display the ops whose types match one of the type regexes specified . tfprof allow user to define extra op types for ops through tensorflow . tfprof . OpLog proto . regexes are comma - separated . <nl> - <nl> - ` - start_name_regexes ` : Show ops starting from the ops that matches the regexes , recursively . regexes are comma - separated . <nl> - <nl> - ` - trim_name_regexes ` : Hide ops starting from the ops that matches the regexes , recursively , regexes are comma - separated . <nl> - <nl> - ` - show_name_regexes ` : Show ops that match the regexes . regexes are comma - separated . <nl> - <nl> - ` - hide_name_regexes ` : Hide ops that match the regexes . regexes are comma - separated . <nl> - <nl> - Notes : For each op , ` - account_type_regexes ` is first evaluated , only ops with <nl> - types matching the specified regexes are accounted and selected for displayed . <nl> - ` - start / trim / show / hide_name_regexes ` are used to further filter ops for display . <nl> - ` - start_name_regexes ` is evaluated first to search the starting ops to display . <nl> - Descendants of starting ops are then evaluated against ` - show / hide_name_regexes ` <nl> - to make display decision . If an op matches trim_name_regexes , all its <nl> - descendants are hidden . Ops statistics are * accounted even if they are hidden * <nl> - as long as they match the ` - account_xxx ` options . <nl> - <nl> - ` - account_displayed_op_only ` : If True , only account the statistics of ops eventually displayed . If False , account all op statistics matching - account_type_regexes recursively . <nl> - <nl> - ` - select ` : Comma - separated list of metrics to show : [ bytes | micros | params | float_ops | occurrence | tensor_value | device | op_types ] . <nl> + # # # Teams <nl> <nl> - ` - output ` : Output results as stdout , file or timeline . <nl> - The format is ` ` ` output_type : key = value , key = value ` ` ` . <nl> - For example : ` ` ` timeline : outfile = < filename > ` ` ` . <nl> - timeline : key = outfile , value = < filename > . <nl> - stdout : none . <nl> - file : key = outfile , value = < filename > . <nl> + * Xin Pan ( xpan @ google . 
com , github : panyx0718 ) <nl>
+ * Jon Shlens <nl>
+ * Yao Zhang <nl>
new file mode 100644 <nl>
index 0000000000000 . . 0d8d56809acb5 <nl>
mmm / dev / null <nl>
ppp b / tensorflow / tools / tfprof / g3doc / command_line . md <nl>
<nl>
+ # # Command Line Interface Tutorials <nl>
+ <nl>
+ * [ Command Line Inputs ] ( # command - line - inputs ) <nl>
+ * [ Start ` tfprof ` ] ( # start - tfprof ) <nl>
+ * [ Examples ] ( # examples ) <nl>
+ * [ Profile Python Time ] ( # profile - python - time ) <nl>
+ * [ Profile Graph Time ] ( # profile - graph - time ) <nl>
+ * [ Profile Checkpoint Value ] ( # profile - checkpoint - value ) <nl>
+ * [ Profile Model Parameter ] ( # profile - model - parameter ) <nl>
+ * [ Profile Device Placement ] ( # profile - device - placement ) <nl>
+ * [ Define Customized Operation Type ] ( # define - customized - operation - type ) <nl>
+ * [ Non - interactive Mode ] ( # non - interactive - mode ) <nl>
+ <nl>
+ <nl>
+ # # # Command Line Inputs <nl>
+ <nl>
+ The tfprof command line tool uses the following inputs : <nl>
+ <nl>
+ < b > - - graph_path : < / b > GraphDef text file ( required ) . Used to build an in - memory <nl>
+ representation of the model architecture . For example , graph . pbtxt written by tf . Supervisor <nl>
+ can be passed to - - graph_path . You can also easily get a GraphDef using <nl>
+ tf . get_default_graph ( ) . as_graph_def ( add_shapes = True ) or other APIs . <nl>
+ <nl>
+ < b > - - run_meta_path : < / b > tensorflow : : RunMetadata ( optional ) . <nl>
+ Used to get the memory consumption and execution time of <nl>
+ each op of the model . <nl>
+ <nl>
+ The following code snippet writes a RunMetadata file : <nl>
+ <nl>
+ ` ` ` python <nl>
+ run_options = config_pb2 . RunOptions ( trace_level = config_pb2 . RunOptions . FULL_TRACE ) <nl>
+ run_metadata = config_pb2 . RunMetadata ( ) <nl>
+ _ = self . _sess . run ( . . . , options = run_options , run_metadata = run_metadata ) <nl>
+ with tf . gfile . Open ( os . path . join ( output_dir , " run_meta " ) , " w " ) as f : <nl>
+ f . write ( run_metadata . SerializeToString ( ) ) <nl>
+ ` ` ` <nl>
+ <nl>
+ < b > - - op_log_path : < / b > <nl>
+ tensorflow : : tfprof : : OpLog ( optional ) . A proto used to provide extra operation <nl>
+ information : 1 ) float operations ; 2 ) code traces ; 3 ) customized operation <nl>
+ types for the - account_type_regexes option . <nl>
+ <nl>
+ The following code snippet writes an OpLog file : <nl>
+ <nl>
+ ` ` ` python <nl>
+ tf . contrib . tfprof . tfprof_logger . write_op_log ( graph , log_dir , op_log = None ) <nl>
+ ` ` ` <nl>
+ <nl>
+ < b > - - checkpoint_path : < / b > TensorFlow checkpoint ( optional ) . <nl>
+ It defines the _checkpoint_variable op type . It also provides checkpointed tensors ' values . <nl>
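+ <nl>
+ To tie these together , here is a minimal sketch that writes all four input <nl>
+ files from one training session . It assumes an already - built model with a <nl>
+ ` train_op ` and an ` output_dir ` variable ; those names are illustrative , not <nl>
+ part of tfprof . <nl>
+ <nl>
+ ` ` ` python <nl>
+ import os <nl>
+ import tensorflow as tf <nl>
+ <nl>
+ with tf . Session ( ) as sess : <nl>
+ sess . run ( tf . global_variables_initializer ( ) ) <nl>
+ # 1 . graph . pbtxt : GraphDef in text format . <nl>
+ tf . train . write_graph ( sess . graph_def , output_dir , ' graph . pbtxt ' ) <nl>
+ # 2 . run_meta : serialized RunMetadata from one traced step . <nl>
+ run_metadata = tf . RunMetadata ( ) <nl>
+ sess . run ( train_op , <nl>
+ options = tf . RunOptions ( trace_level = tf . RunOptions . FULL_TRACE ) , <nl>
+ run_metadata = run_metadata ) <nl>
+ with tf . gfile . Open ( os . path . join ( output_dir , ' run_meta ' ) , ' w ' ) as f : <nl>
+ f . write ( run_metadata . SerializeToString ( ) ) <nl>
+ # 3 . tfprof_log : OpLog with code traces and float operations . <nl>
+ tf . contrib . tfprof . tfprof_logger . write_op_log ( <nl>
+ sess . graph , output_dir , run_meta = run_metadata ) <nl>
+ # 4 . model . ckpt : checkpoint with the variable values . <nl>
+ tf . train . Saver ( ) . save ( sess , os . path . join ( output_dir , ' model . ckpt ' ) ) <nl>
+ ` ` ` <nl>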
<nl>
+ <nl>
+ # # # Start ` tfprof ` <nl>
+ <nl>
+ # # # # Build ` tfprof ` <nl>
+ <nl>
+ ` ` ` shell <nl>
+ # Build the tool . <nl>
+ bazel build - - config opt tensorflow / tools / tfprof / . . . <nl>
+ <nl>
+ # Help information , including detailed ' option ' instructions . <nl>
+ bazel - bin / tensorflow / tools / tfprof / tfprof help <nl>
+ ` ` ` <nl>
+ <nl>
+ # # # # Start ` tfprof ` Interactive Mode <nl>
+ ` ` ` shell <nl>
+ # The following commands will start tfprof interactive mode . <nl>
+ # <nl>
+ # - - graph_path contains the model architecture and tensor shapes . <nl>
+ # - - run_meta_path contains the memory and time information . <nl>
+ # - - op_log_path contains float operations and code traces . <nl>
+ # - - checkpoint_path contains the model checkpoint data . <nl>
+ # <nl>
+ # Only includes model architecture , parameters and shapes . <nl>
+ bazel - bin / tensorflow / tools / tfprof / tfprof \ <nl>
+ - - graph_path = graph . pbtxt <nl>
+ # <nl>
+ # Additionally profile ops ' memory and timing . <nl>
+ bazel - bin / tensorflow / tools / tfprof / tfprof \ <nl>
+ - - graph_path = graph . pbtxt \ <nl>
+ - - run_meta_path = run_meta \ <nl>
+ # <nl>
+ # tfprof_log is used to define customized op types , float ops and code traces . <nl>
+ # Use tfprof_logger . write_op_log ( ) to create tfprof_log . <nl>
+ bazel - bin / tensorflow / tools / tfprof / tfprof \ <nl>
+ - - graph_path = graph . pbtxt \ <nl>
+ - - run_meta_path = run_meta \ <nl>
+ - - op_log_path = tfprof_log \ <nl>
+ # <nl>
+ # Additionally profile checkpoint statistics and values . <nl>
+ # Use ' - account_type_regexes _checkpoint_variables ' to select <nl>
+ # checkpoint tensors . <nl>
+ bazel - bin / tensorflow / tools / tfprof / tfprof \ <nl>
+ - - graph_path = graph . pbtxt \ <nl>
+ - - run_meta_path = run_meta \ <nl>
+ - - op_log_path = tfprof_log \ <nl>
+ - - checkpoint_path = model . ckpt <nl>
+ ` ` ` <nl>
+ <nl>
+ # # # # Start ` tfprof ` Non - interactive Mode <nl>
+ <nl>
+ ` ` ` shell <nl>
+ # Run tfprof in one - shot mode . <nl>
+ bazel - bin / tensorflow / tools / tfprof / tfprof scope \ <nl>
+ - - graph_path = graph . pbtxt \ <nl>
+ - - max_depth = 3 <nl>
+ ` ` ` <nl>
+ <nl>
+ # # # # Press enter to show the default options <nl>
+ <nl>
+ Refer to [ Options ] ( options . md ) for option instructions . <nl>
+ <nl>
+ ` ` ` shell <nl>
+ tfprof > <nl>
+ - max_depth 4 <nl>
+ - min_bytes 0 <nl>
+ - min_micros 0 <nl>
+ - min_params 0 <nl>
+ - min_float_ops 0 <nl>
+ - min_occurrence 0 <nl>
+ - step - 1 <nl>
+ - order_by name <nl>
+ - account_type_regexes Variable , VariableV2 <nl>
+ - start_name_regexes . * <nl>
+ - trim_name_regexes <nl>
+ - show_name_regexes . * <nl>
+ - hide_name_regexes IsVariableInitialized_ [ 0 - 9 ] + , save \ / . * , ^ zeros [ 0 - 9_ ] * <nl>
+ - account_displayed_op_only false <nl>
+ # supported select fields . Availability depends on - - [ run_meta | checkpoint | op_log ] _path . <nl>
+ # [ bytes | micros | params | float_ops | occurrence | tensor_value | device | op_types ] <nl>
+ - select params <nl>
+ # format : output_type : key = value , key = value . . . <nl>
+ # output_types : stdout ( default ) , timeline , file . <nl>
+ # key = value pairs : <nl>
+ # 1 . timeline : outfile = < filename > <nl>
+ # 2 . file : outfile = < filename > <nl>
+ # 3 . stdout : None . <nl>
+ # E . g . timeline : outfile = / tmp / timeline . json <nl>
+ - output <nl>
+ ` ` ` <nl>
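+ <nl>
+ The same options drive the Python API , where they are plain dict entries . A <nl>
+ minimal sketch , assuming the option keys mirror the command line flags the way <nl>
+ the ` model_analyzer ` examples in these docs use them : <nl>
+ <nl>
+ ` ` ` python <nl>
+ import tensorflow as tf <nl>
+ <nl>
+ # Start from a predefined option set and override a few fields . <nl>
+ opts = tf . contrib . tfprof . model_analyzer . TRAINABLE_VARS_PARAMS_STAT_OPTIONS . copy ( ) <nl>
+ opts [ ' max_depth ' ] = 4 <nl>
+ opts [ ' account_type_regexes ' ] = [ ' Variable ' , ' VariableV2 ' ] <nl>
+ opts [ ' select ' ] = [ ' params ' ] <nl>
+ # Write the result to a file instead of stdout . <nl>
+ opts [ ' output ' ] = ' file : outfile = / tmp / profile . txt ' <nl>
+ <nl>
+ tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl>
+ tf . get_default_graph ( ) , tfprof_options = opts ) <nl>
+ ` ` ` <nl>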
<nl>
+ # # # Examples <nl>
+ <nl>
+ # # # # Profile Python Time <nl>
+ ` ` ` shell <nl>
+ # Requires - - graph_path and - - op_log_path . <nl>
+ tfprof > code - max_depth 1000 - show_name_regexes . * model_analyzer . * py . * - select micros - account_type_regexes . * - order_by micros <nl>
+ _TFProfRoot ( 0us / 22 . 44ms ) <nl>
+ model_analyzer_test . py : 149 : run_filename_as_m . . . : none ( 0us / 22 . 44ms ) <nl>
+ model_analyzer_test . py : 33 : _run_code_in_main : none ( 0us / 22 . 44ms ) <nl>
+ model_analyzer_test . py : 208 : < module > : test . main ( ) ( 0us / 22 . 44ms ) <nl>
+ model_analyzer_test . py : 132 : testComplexCodeView : x = lib . BuildFull . . . ( 0us / 22 . 44ms ) <nl>
+ model_analyzer_testlib . py : 63 : BuildFullModel : return sgd_op . min . . . ( 0us / 21 . 83ms ) <nl>
+ model_analyzer_testlib . py : 58 : BuildFullModel : cell , array_ops . c . . . ( 0us / 333us ) <nl>
+ model_analyzer_testlib . py : 54 : BuildFullModel : seq . append ( array_ . . . ( 0us / 254us ) <nl>
+ model_analyzer_testlib . py : 42 : BuildSmallModel : x = nn_ops . conv2d . . . ( 0us / 134us ) <nl>
+ model_analyzer_testlib . py : 46 : BuildSmallModel : initializer = init_ . . . ( 0us / 40us ) <nl>
+ . . . <nl>
+ model_analyzer_testlib . py : 61 : BuildFullModel : loss = nn_ops . l2_ . . . ( 0us / 28us ) <nl>
+ model_analyzer_testlib . py : 60 : BuildFullModel : target = array_op . . . ( 0us / 0us ) <nl>
+ model_analyzer_test . py : 134 : testComplexCodeView : sess . run ( variable . . . ( 0us / 0us ) <nl>
+ ` ` ` <nl>
+ <nl>
+ Set ` ` ` - output timeline : outfile = < filename > ` ` ` to generate a timeline instead of stdout output . <nl>
+ < left > <nl>
+ ! [ CodeTimeline ] ( code_timeline . png ) <nl>
+ < / left > <nl>
+ <nl>
+ # # # # Profile Graph Time <nl>
+ <nl>
+ ` ` ` shell <nl>
+ # I defined an op named ‘ cost ’ to calculate the loss . I want to know which <nl>
+ # of the ops it depends on take a long time to run . <nl>
+ <nl>
+ # Requires - - graph_path , - - run_meta_path . <nl>
+ tfprof > graph - start_name_regexes cost . * - max_depth 100 - min_micros 10000 - select micros - account_type_regexes . * <nl>
+ _TFProfRoot ( 0us / 3 . 61sec ) <nl>
+ init / init_conv / Conv2D ( 11 . 75ms / 3 . 10sec ) <nl>
+ random_shuffle_queue_DequeueMany ( 3 . 09sec / 3 . 09sec ) <nl>
+ unit_1_0 / sub2 / conv2 / Conv2D ( 74 . 14ms / 3 . 19sec ) <nl>
+ unit_1_3 / sub2 / conv2 / Conv2D ( 60 . 75ms / 3 . 34sec ) <nl>
+ unit_2_4 / sub2 / conv2 / Conv2D ( 73 . 58ms / 3 . 54sec ) <nl>
+ unit_3_3 / sub2 / conv2 / Conv2D ( 10 . 26ms / 3 . 60sec ) <nl>
+ ` ` ` <nl>
+ <nl>
+ # # # # Profile Checkpoint Value <nl>
+ ` ` ` shell <nl>
+ # Requires - - graph_path , - - checkpoint_path . <nl>
+ tfprof > scope - show_name_regexes unit_1_0 . * gamma - select tensor_value - max_depth 5 <nl>
+ _TFProfRoot ( ) <nl>
+ unit_1_0 / shared_activation / init_bn / gamma ( ) <nl>
+ [ 1 . 80 2 . 10 2 . 06 1 . 91 2 . 26 1 . 86 1 . 81 1 . 37 1 . 78 1 . 85 1 . 96 1 . 54 2 . 04 2 . 34 2 . 22 1 . 99 ] , <nl>
+ unit_1_0 / sub2 / bn2 / gamma ( ) <nl>
+ [ 1 . 57 1 . 83 1 . 30 1 . 25 1 . 59 1 . 14 1 . 26 0 . 82 1 . 19 1 . 10 1 . 48 1 . 01 0 . 82 1 . 23 1 . 21 1 . 14 ] , <nl>
+ ` ` ` <nl>
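+ <nl>
+ To double - check the same values outside tfprof , plain TensorFlow can read the <nl>
+ checkpoint directly . A minimal sketch ( this is not part of tfprof ; the <nl>
+ checkpoint path and tensor name are taken from the example above ) : <nl>
+ <nl>
+ ` ` ` python <nl>
+ import tensorflow as tf <nl>
+ <nl>
+ # Load the checkpoint and read one tensor by name . <nl>
+ reader = tf . train . NewCheckpointReader ( ' model . ckpt ' ) <nl>
+ print ( reader . get_tensor ( ' unit_1_0 / shared_activation / init_bn / gamma ' ) ) <nl>
+ ` ` ` <nl>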
<nl>
+ # # # # Profile Model Parameter <nl>
+ <nl>
+ ` ` ` shell <nl>
+ # Show the number of parameters of all ` tf . trainable_variables ( ) ` in the model . <nl>
+ # Requires - - graph_path and - - op_log_path . <nl>
+ # Store the option for future commands . <nl>
+ tfprof > set - account_type_regexes _trainable_variables <nl>
+ tfprof > scope - max_depth 4 - select params <nl>
+ _TFProfRoot ( - - / 464 . 15k params ) <nl>
+ init / init_conv / DW ( 3x3x3x16 , 432 / 432 params ) <nl>
+ pool_logit / DW ( 64x10 , 640 / 640 params ) <nl>
+ pool_logit / biases ( 10 , 10 / 10 params ) <nl>
+ unit_last / final_bn / beta ( 64 , 64 / 64 params ) <nl>
+ unit_last / final_bn / gamma ( 64 , 64 / 64 params ) <nl>
+ ` ` ` <nl>
+ <nl>
+ Where does ` _trainable_variables ` come from ? It is a customized operation type <nl>
+ defined through the OpLog file . <nl>
+ Users can also [ Define Customized Operation Type ] ( # define - customized - operation - type ) themselves . <nl>
+ <nl>
+ < b > The following example shows the importance of defining a customized operation type . < / b > <nl>
+ In this example , extra ` Variables ` are created by TensorFlow <nl>
+ implicitly and “ / Momentum ” is appended to their names . They shouldn ' t be <nl>
+ included in your “ model capacity ” calculation . <nl>
+ <nl>
+ ` ` ` shell <nl>
+ tfprof > scope - account_type_regexes VariableV2 - max_depth 4 - select params <nl>
+ _TFProfRoot ( - - / 930 . 58k params ) <nl>
+ global_step ( 1 / 1 params ) <nl>
+ init / init_conv / DW ( 3x3x3x16 , 432 / 864 params ) <nl>
+ pool_logit / DW ( 64x10 , 640 / 1 . 28k params ) <nl>
+ pool_logit / DW / Momentum ( 64x10 , 640 / 640 params ) <nl>
+ pool_logit / biases ( 10 , 10 / 20 params ) <nl>
+ pool_logit / biases / Momentum ( 10 , 10 / 10 params ) <nl>
+ unit_last / final_bn / beta ( 64 , 64 / 128 params ) <nl>
+ unit_last / final_bn / gamma ( 64 , 64 / 128 params ) <nl>
+ unit_last / final_bn / moving_mean ( 64 , 64 / 64 params ) <nl>
+ unit_last / final_bn / moving_variance ( 64 , 64 / 64 params ) <nl>
+ ` ` ` <nl>
+ <nl>
+ # # # # Profile Device Placement <nl>
+ <nl>
+ In this tutorial , a model is split <nl>
+ across several GPUs on workers and several parameter servers . <nl>
+ <nl>
+ In tfprof , ' device ' is an op_type . For example , if op1 and op2 are placed on <nl>
+ gpu : 0 , they share an operation type . <nl>
+ <nl>
+ ` ` ` shell <nl>
+ bazel - bin / tensorflow / tools / tfprof / tfprof \ <nl>
+ - - graph_path = / tmp / graph . pbtxt \ <nl>
+ - - run_meta_path = / tmp / run_meta <nl>
+ <nl>
+ # Looks like ps task 1 is holding twice as many parameters as task 0 . <nl>
+ tfprof > scope - select device , params - account_type_regexes . * ps . * task : 0 . * - max_depth 1 <nl>
+ _TFProfRoot ( - - / 25 . 81m params ) <nl>
+ tfprof > scope - select device , params - account_type_regexes . * ps . * task : 1 . * - max_depth 1 <nl>
+ _TFProfRoot ( - - / 58 . 84m params ) <nl>
+ ` ` ` <nl>
+ <nl>
+ # # # # Define Customized Operation Type <nl>
+ <nl>
+ First , in Python code , create an ` OpLog ` proto and add op type <nl>
+ information to it : <nl>
+ <nl>
+ ` ` ` python <nl>
+ # Assumes tfprof_log_pb2 is the Python module generated from <nl>
+ # tensorflow / tools / tfprof / tfprof_log . proto . <nl>
+ op_log = tfprof_log_pb2 . OpLog ( ) <nl>
+ entry = op_log . log_entries . add ( ) <nl>
+ entry . name = ' pool_logit / DW ' <nl>
+ entry . types . append ( ' pool_logit ' ) <nl>
+ entry = op_log . log_entries . add ( ) <nl>
+ entry . name = ' pool_logit / biases ' <nl>
+ entry . types . append ( ' pool_logit ' ) <nl>
+ ` ` ` <nl>
+ <nl>
+ Second , call write_op_log to write the OpLog proto . <nl>
+ <nl>
+ ` ` ` python <nl>
+ tf . contrib . tfprof . tfprof_logger . write_op_log ( <nl>
+ sess . graph , ' / tmp / my_op_log_dir ' , op_log ) <nl>
+ <nl>
+ # Get run - time shape information in order to fill shapes and get flops . <nl>
+ tf . contrib . tfprof . tfprof_logger . write_op_log ( <nl>
+ sess . graph , ' / tmp / my_op_log_dir ' , op_log , run_meta ) <nl>
+ ` ` ` <nl>
+ <nl>
+ Third , when starting the tfprof tool , specify <nl>
+ " - - op_log_path / tmp / my_op_log_dir / op_log " <nl>
+ <nl>
+ ` ` ` shell <nl>
+ tfprof > scope - account_type_regexes pool_logit - max_depth 4 - select params <nl>
+ _TFProfRoot ( - - / 650 params ) <nl>
+ pool_logit / DW ( 64x10 , 640 / 640 params ) <nl>
+ pool_logit / biases ( 10 , 10 / 10 params ) <nl>
+ ` ` ` <nl>
+ <nl>
+ Note that ` tf . contrib . tfprof . tfprof_logger . write_op_log ( . . . ) ` automatically <nl>
+ assigns all ` Variables ` inside ` tf . trainable_variables ( ) ` a customized <nl>
+ operation type : ` _trainable_variables ` . <nl>
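+ <nl>
+ You can tag your own groups of variables the same way . A minimal sketch , <nl>
+ assuming you want every trainable variable under the ' pool_logit ' name scope <nl>
+ to share one customized type : <nl>
+ <nl>
+ ` ` ` python <nl>
+ op_log = tfprof_log_pb2 . OpLog ( ) <nl>
+ for var in tf . trainable_variables ( ) : <nl>
+ # var . op . name is the node name tfprof sees , e . g . ' pool_logit / DW ' . <nl>
+ if var . op . name . startswith ( ' pool_logit ' ) : <nl>
+ entry = op_log . log_entries . add ( ) <nl>
+ entry . name = var . op . name <nl>
+ entry . types . append ( ' pool_logit ' ) <nl>
+ <nl>
+ tf . contrib . tfprof . tfprof_logger . write_op_log ( <nl>
+ sess . graph , ' / tmp / my_op_log_dir ' , op_log ) <nl>
+ ` ` ` <nl>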
<nl>
+ <nl>
+ # # # # Non - interactive Mode <nl>
+ <nl>
+ Run tfprof in one - shot mode and dump the result to a file . <nl>
+ <nl>
+ ` ` ` shell <nl>
+ # By default , output goes to stdout . Use the - output option to change the output type . <nl>
+ tfprof scope - - graph_path = graph . pbtxt \ <nl>
+ - - max_depth = 3 \ <nl>
+ - - output = " file : outfile = / tmp / dump " <nl>
+ Reading Files . . . <nl>
+ Parsing GraphDef . . . <nl>
+ Preparing Views . . . <nl>
+ <nl>
+ cat / tmp / dump <nl>
+ _TFProfRoot ( - - / 930 . 58k params ) <nl>
+ global_step ( 0 / 0 params ) <nl>
+ pool_logit / DW ( 64x10 , 640 / 1 . 28k params ) <nl>
+ pool_logit / biases ( 10 , 10 / 20 params ) <nl>
+ ` ` ` <nl>
new file mode 100644 <nl>
index 0000000000000 . . 78d7f6a85e48a <nl>
mmm / dev / null <nl>
ppp b / tensorflow / tools / tfprof / g3doc / options . md <nl>
<nl>
+ # # Options <nl>
+ <nl>
+ # # # Overview <nl>
+ <nl>
+ For all tfprof views , the statistics are processed with the following procedures : <nl>
+ <nl>
+ 1 ) An in - memory data structure is used to represent the view . <nl>
+ <nl>
+ 2 ) ` - account_type_regexes ` is used to first select the operations that match <nl>
+ the specified operation types . An operation has its default type <nl>
+ ( e . g . MatMul , Conv2D ) . ` tfprof ` also considers device as an operation type . <nl>
+ Users can also define customized operation types . Hence , an operation has <nl>
+ multiple types . Operations with matched <nl>
+ types are selected for display and their statistics are aggregated <nl>
+ by the in - memory data structure . <nl>
+ <nl>
+ 3 ) Various ` - xxx_name_regexes ` , ` - min_xxx ` , ` - max_depth ` etc . options are then <nl>
+ applied to further filter based on names and values . <nl>
+ The ' name ' is not limited to the operation name . In code view , <nl>
+ it ' s the code trace . In op view , it ' s the operation type name . Different <nl>
+ from ` - account_type_regexes ` , statistics are used even if a name is not displayed . <nl>
+ For example , in code view , a callee might be hidden , but its statistics are <nl>
+ still aggregated by its caller . ` - account_displayed_op_only ` , however , <nl>
+ breaks this rule and only uses the statistics of displayed names . <nl>
+ <nl>
+ 4 ) Finally , the filtered data structure is displayed in a format depending <nl>
+ on the ` - output ` option . <nl>
+ <nl>
+ # # # # Option Semantics In Different Views <nl>
+ Options usually have the same semantics in different views . However , some <nl>
+ can vary . For example , ` - max_depth ` in scope view means the depth of the <nl>
+ name scope < b > tree < / b > . In op view , it means the length of the operation < b > list < / b > . <nl>
+ In graph view , it means the number of hops in the < b > graph < / b > . <nl>
+ <nl>
+ <nl>
+ # # # Docs <nl>
+ <nl>
+ ` - max_depth ` : Show ops that are at most this number of hops from starting op in the tree / graph structure . <nl>
+ <nl>
+ ` - min_bytes ` : Show ops that request at least this number of bytes . <nl>
+ <nl>
+ ` - min_micros ` : Show ops that spend at least this number of microseconds to run . <nl>
+ <nl>
+ ` - min_params ` : Show ops that contain at least this number of parameters . <nl>
+ <nl>
+ ` - min_float_ops ` : Show ops that contain at least this number of float operations . Only available if an op has op . RegisterStatistics ( ) defined and an OpLog is provided . <nl>
+ <nl>
+ ` - min_occurrence ` : Show ops that appear at least this number of times . Only available in " op " view . <nl>
+ <nl>
+ ` - step ` : Show the stats of this step when multiple steps of RunMetadata were added . By default , show the average of all steps . <nl>
+ <nl>
+ ` - order_by ` : Order the results by [ name | depth | bytes | micros | params | float_ops | occurrence ] . <nl>
+ <nl>
+ ` - account_type_regexes ` : Account and display the ops whose types match one of the type regexes specified . tfprof allows users to define extra op types for ops through the tensorflow . tfprof . OpLog proto . regexes are comma - separated . <nl>
+ <nl>
+ ` - start_name_regexes ` : Show ops starting from the ops that match the regexes , recursively . regexes are comma - separated . <nl>
+ <nl>
+ ` - trim_name_regexes ` : Hide ops starting from the ops that match the regexes , recursively . regexes are comma - separated . <nl>
+ <nl>
+ ` - show_name_regexes ` : Show ops that match the regexes . regexes are comma - separated . <nl>
+ <nl>
+ ` - hide_name_regexes ` : Hide ops that match the regexes . regexes are comma - separated . <nl>
+ <nl>
+ Notes : For each op , ` - account_type_regexes ` is first evaluated ; only ops with <nl>
+ types matching the specified regexes are accounted and selected for display . <nl>
+ ` - start / trim / show / hide_name_regexes ` are used to further filter ops for display . <nl>
+ ` - start_name_regexes ` is evaluated first to search the starting ops to display . <nl>
+ Descendants of starting ops are then evaluated against ` - show / hide_name_regexes ` <nl>
+ to make the display decision . If an op matches trim_name_regexes , all its <nl>
+ descendants are hidden . Ops statistics are * accounted even if they are hidden * <nl>
+ as long as they match the ` - account_xxx ` options . <nl>
+ <nl>
+ ` - account_displayed_op_only ` : If True , only account the statistics of ops eventually displayed . If False , account all op statistics matching - account_type_regexes recursively . <nl>
+ <nl>
+ ` - select ` : Comma - separated list of metrics to show : [ bytes | micros | params | float_ops | occurrence | tensor_value | device | op_types ] . <nl>
+ <nl>
+ ` - output ` : Output results as stdout , file or timeline . <nl>
+ The format is ` ` ` output_type : key = value , key = value ` ` ` . <nl>
+ For example : ` ` ` - output timeline : outfile = < filename > ` ` ` . <nl>
+ <nl>
+ ` ` ` shell <nl>
+ timeline : key = outfile , value = < filename > . <nl>
+ stdout : none . <nl>
+ file : key = outfile , value = < filename > . <nl>
+ ` ` ` <nl>
\ No newline at end of file <nl>
new file mode 100644 <nl>
index 0000000000000 . . e897967d3b7b9 <nl>
mmm / dev / null <nl>
ppp b / tensorflow / tools / tfprof / g3doc / profile_memory . md <nl>
<nl>
+ # # Profile Memory <nl>
+ <nl>
+ It is generally a good idea to visualize the memory usage in a timeline . <nl>
+ It allows you to see the memory consumption of each GPU over time . <nl>
+ <nl>
+ ` ` ` shell <nl>
+ # To get memory information , you need - - graph_path and - - run_meta_path . <nl>
+ tfprof > graph - max_depth 10000000 - step 0 - account_type_regexes . * - output timeline : outfile = < filename > <nl>
+ generating trace file . <nl>
+ <nl>
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl>
+ Timeline file is written to < filename > . <nl>
+ Open a Chrome browser , enter URL chrome : / / tracing and load the timeline file . <nl>
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl>
+ ` ` ` <nl>
+ <nl>
+ < left > <nl>
+ TODO ( xpan ) : Show the image correctly in github . <nl>
+ ! [ Timeline ] ( graph_timeline . png ) <nl>
+ < / left > <nl>
+ <nl>
+ <nl>
+ ` ` ` shell <nl>
+ # You can also visualize the memory information through other methods . <nl>
+ <nl>
+ # With op view , it shows you the aggregated output tensor bytes of each <nl>
+ # operation type . <nl>
+ tfprof > op - select bytes - order_by bytes <nl>
+ node name | output bytes <nl>
+ Identity 32515 . 37MB ( 100 . 00 % , 27 . 02 % ) <nl>
+ FusedBatchNormGrad 10802 . 14MB ( 72 . 98 % , 8 . 98 % ) <nl>
+ FusedBatchNorm 10517 . 52MB ( 64 . 01 % , 8 . 74 % ) <nl>
+ Conv2D 10509 . 25MB ( 55 . 27 % , 8 . 73 % ) <nl>
+ Conv2DBackpropInput 9701 . 39MB ( 46 . 54 % , 8 . 06 % ) <nl>
+ ReluGrad 9206 . 45MB ( 38 . 48 % , 7 . 65 % ) <nl>
+ Relu 8462 . 80MB ( 30 . 83 % , 7 . 03 % ) <nl>
+ DepthwiseConv2dNativeBackpropInput 7899 . 35MB ( 23 . 80 % , 6 . 56 % ) <nl>
+ DepthwiseConv2dNative 7425 . 17MB ( 17 . 23 % , 6 . 17 % ) <nl>
+ MaxPoolGrad 3015 . 44MB ( 11 . 06 % , 2 . 51 % ) <nl>
+ AddN 2741 . 49MB ( 8 . 56 % , 2 . 28 % ) <nl>
+ <nl>
+ # With scope view , you can see the operations that output the largest tensors . <nl>
+ tfprof > scope - order_by bytes - select bytes - min_bytes 100000000 <nl>
+ node name | output bytes <nl>
+ _TFProfRoot ( - - / 120356 . 38MB ) <nl>
+ tower_3 / SepConv2d_2b_3x3 / separable_conv2d ( 346 . 85MB / 854 . 00MB ) <nl>
+ tower_3 / SepConv2d_2b_3x3 / separable_conv2d / depthwise ( 507 . 15MB / 507 . 15MB ) <nl>
+ tower_0 / SepConv2d_2b_3x3 / separable_conv2d ( 346 . 85MB / 693 . 71MB ) <nl>
+ tower_0 / SepConv2d_2b_3x3 / separable_conv2d / depthwise ( 346 . 85MB / 346 . 85MB ) <nl>
+ tower_2 / SepConv2d_2b_3x3 / separable_conv2d ( 346 . 85MB / 693 . 71MB ) <nl>
+ tower_2 / SepConv2d_2b_3x3 / separable_conv2d / depthwise ( 346 . 85MB / 346 . 85MB ) <nl>
+ tower_1 / SepConv2d_2b_3x3 / separable_conv2d ( 346 . 85MB / 693 . 71MB ) <nl>
+ tower_1 / SepConv2d_2b_3x3 / separable_conv2d / depthwise ( 346 . 85MB / 346 . 85MB ) <nl>
+ tower_3 / SepConv2d_2a_3x3 / separable_conv2d ( 346 . 85MB / 520 . 28MB ) <nl>
+ tower_3 / SepConv2d_2a_3x3 / separable_conv2d / depthwise ( 173 . 43MB / 173 . 43MB ) <nl>
+ tower_2 / SepConv2d_2a_3x3 / separable_conv2d ( 346 . 85MB / 520 . 28MB ) <nl>
+ tower_2 / SepConv2d_2a_3x3 / separable_conv2d / depthwise ( 173 . 43MB / 173 . 43MB ) <nl>
+ tower_0 / SepConv2d_2a_3x3 / separable_conv2d ( 346 . 85MB / 520 . 28MB ) <nl>
+ tower_0 / SepConv2d_2a_3x3 / separable_conv2d / depthwise ( 173 . 43MB / 173 . 43MB ) <nl>
+ . . . <nl>
+ <nl>
+ # Code view . <nl>
+ tfprof > code - max_depth 10 - select bytes - order_by bytes - start_name_regexes . * seq2seq . * - min_bytes 1 <nl>
+ node name | output bytes <nl>
+ _TFProfRoot ( - - / 74148 . 60MB ) <nl>
+ seq2seq_attention . py ' > : 168 : run_filename_from . . . : none ( 0B / 74148 . 60MB ) <nl>
+ seq2seq_attention . py ' > : 33 : _run_code_in_main : none ( 0B / 74148 . 60MB ) <nl>
+ seq2seq_attention . py : 316 : < module > : app . run ( ) ( 0B / 74148 . 60MB ) <nl>
+ app . py : 432 : run : _run_main ( main or . . . ( 0B / 74148 . 60MB ) <nl>
+ app . py : 352 : _run_main : sys . exit ( main ( arg . . . ( 0B / 74148 . 60MB ) <nl>
+ seq2seq_attention . py : 270 : main : _Train ( model , bat . . . ( 0B / 74148 . 60MB ) <nl>
+ seq2seq_attention . py : 128 : _Train : model . build_graph ( ) ( 0B / 74148 . 60MB ) <nl>
+ seq2seq_attention_model . py : 363 : build_graph : self . _add_train_o . . . ( 0B / 48931 . 86MB ) <nl>
+ seq2seq_attention_model . py : 307 : _add_train_op : tf . gradients ( self . . . ( 0B / 46761 . 06MB ) <nl>
+ seq2seq_attention_model . py : 322 : _add_train_op : zip ( grads , tvars ) . . . ( 0B / 2170 . 80MB ) <nl>
+ seq2seq_attention_model . py : 312 : _add_train_op : tf . train . exponent . . . ( 0B / 2 . 56KB ) <nl>
+ seq2seq_attention_model . py : 308 : _add_train_op : tf . summary . scalar . . . ( 0B / 64B ) <nl>
+ seq2seq_attention_model . py : 320 : _add_train_op : tf . summary . scalar . . . ( 0B / 64B ) <nl>
+ seq2seq_attention_model . py : 360 : build_graph : self . _add_seq2seq ( ) ( 0B / 25216 . 74MB ) <nl>
+ seq2seq_attention_model . py : 192 : _add_seq2seq : sequence_length = a . . . ( 0B / 21542 . 55MB ) <nl>
+ ` ` ` <nl>
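+ <nl>
+ The Python API can produce the same data . A minimal sketch , assuming a <nl>
+ ` run_metadata ` collected with FULL_TRACE as in the other tutorials , and <nl>
+ assuming the ` bytes ` metric selects the tensor bytes shown above : <nl>
+ <nl>
+ ` ` ` python <nl>
+ opts = tf . contrib . tfprof . model_analyzer . PRINT_ALL_TIMING_MEMORY . copy ( ) <nl>
+ opts [ ' select ' ] = [ ' bytes ' ] <nl>
+ opts [ ' order_by ' ] = ' bytes ' <nl>
+ opts [ ' min_bytes ' ] = 100000000 <nl>
+ <nl>
+ # Scope view of the operations that output the largest tensors . <nl>
+ tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl>
+ tf . get_default_graph ( ) , <nl>
+ run_meta = run_metadata , <nl>
+ tfprof_options = opts ) <nl>
+ ` ` ` <nl>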
\ No newline at end of file <nl>
new file mode 100644 <nl>
index 0000000000000 . . 5ad5a56513b8c <nl>
mmm / dev / null <nl>
ppp b / tensorflow / tools / tfprof / g3doc / profile_model_architecture . md <nl>
<nl>
+ # # Profile Model Architecture <nl>
+ <nl>
+ * [ Profile Model Parameters ] ( # profile - model - parameters ) <nl>
+ * [ Profile Model Float Operations ] ( # profile - model - float - operations ) <nl>
+ <nl>
+ # # # Profile Model Parameters <nl>
+ <nl>
+ < b > Notes : < / b > <nl>
+ The ` VariableV2 ` operation type might contain variables created by TensorFlow <nl>
+ implicitly . Users normally don ' t want to count them as " model capacity " . <nl>
+ We can use a customized operation type to select a subset of variables . <nl>
+ For example , ` _trainable_variables ` is created automatically by the tfprof <nl>
+ Python API . Users can also define customized operation types . <nl>
+ <nl>
+ ` ` ` <nl>
+ # Parameters are created by operation type ' VariableV2 ' ( for older models , <nl>
+ # it ' s ' Variable ' ) . scope view is usually suitable in this case . <nl>
+ tfprof > scope - account_type_regexes VariableV2 - max_depth 4 - select params <nl>
+ _TFProfRoot ( - - / 930 . 58k params ) <nl>
+ global_step ( 1 / 1 params ) <nl>
+ init / init_conv / DW ( 3x3x3x16 , 432 / 864 params ) <nl>
+ pool_logit / DW ( 64x10 , 640 / 1 . 28k params ) <nl>
+ pool_logit / DW / Momentum ( 64x10 , 640 / 640 params ) <nl>
+ pool_logit / biases ( 10 , 10 / 20 params ) <nl>
+ pool_logit / biases / Momentum ( 10 , 10 / 10 params ) <nl>
+ unit_last / final_bn / beta ( 64 , 64 / 128 params ) <nl>
+ unit_last / final_bn / gamma ( 64 , 64 / 128 params ) <nl>
+ unit_last / final_bn / moving_mean ( 64 , 64 / 64 params ) <nl>
+ unit_last / final_bn / moving_variance ( 64 , 64 / 64 params ) <nl>
+ <nl>
+ # The Python API profiles tf . trainable_variables ( ) instead of VariableV2 . <nl>
+ # <nl>
+ # By default , it ' s printed to stdout . Users can update tfprof_options [ ' output ' ] <nl>
+ # to write to a file . The result is always returned as a protocol buffer . <nl>
+ param_stats = tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl>
+ tf . get_default_graph ( ) , <nl>
+ tfprof_options = tf . contrib . tfprof . model_analyzer . <nl>
+ TRAINABLE_VARS_PARAMS_STAT_OPTIONS ) <nl>
+ sys . stdout . write ( ' total_params : % d \ n ' % param_stats . total_parameters ) <nl>
+ ` ` ` <nl>
+ <nl>
+ # # # Profile Model Float Operations <nl>
+ <nl>
+ # # # # Caveats <nl>
+ <nl>
+ For an operation to have float operation statistics : <nl>
+ <nl>
+ * It must have ` RegisterStatistics ( ' flops ' ) ` defined in TensorFlow . tfprof <nl>
+ uses the definition to calculate float operations . Contributions are welcome . <nl>
+ <nl>
+ * It must have known " shape " information for RegisterStatistics ( ' flops ' ) <nl>
+ to calculate the statistics . It is suggested to pass in ` - run_meta_path ` if <nl>
+ the shape is only known during runtime . tfprof can fill in the missing shape with <nl>
+ the runtime shape information from RunMetadata . <nl>
+ <nl>
+ Hence , it is suggested to use the ` - account_displayed_op_only ` <nl>
+ option so that you know the statistics are only for the operations printed out . <nl>
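+ <nl>
+ Putting the two caveats together : a minimal Python sketch , assuming a traced <nl>
+ ` run_metadata ` is available to fill in runtime shapes ( as in the other <nl>
+ tutorials ) : <nl>
+ <nl>
+ ` ` ` python <nl>
+ # Pass run_meta so flops of ops whose shapes are only known at <nl>
+ # runtime are counted too . <nl>
+ tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl>
+ tf . get_default_graph ( ) , <nl>
+ run_meta = run_metadata , <nl>
+ tfprof_options = tf . contrib . tfprof . model_analyzer . FLOAT_OPS_OPTIONS ) <nl>
+ ` ` ` <nl>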
+ <nl>
+ <nl>
+ ` ` ` python <nl>
+ # To profile float operations in the command line , you need to pass - - graph_path <nl>
+ # and - - op_log_path . <nl>
+ tfprof > scope - min_float_ops 1 - select float_ops - account_displayed_op_only <nl>
+ node name | # float_ops <nl>
+ _TFProfRoot ( - - / 17 . 63b flops ) <nl>
+ gradients / pool_logit / xw_plus_b / MatMul_grad / MatMul ( 163 . 84k / 163 . 84k flops ) <nl>
+ gradients / pool_logit / xw_plus_b / MatMul_grad / MatMul_1 ( 163 . 84k / 163 . 84k flops ) <nl>
+ init / init_conv / Conv2D ( 113 . 25m / 113 . 25m flops ) <nl>
+ pool_logit / xw_plus_b ( 1 . 28k / 165 . 12k flops ) <nl>
+ pool_logit / xw_plus_b / MatMul ( 163 . 84k / 163 . 84k flops ) <nl>
+ unit_1_0 / sub1 / conv1 / Conv2D ( 603 . 98m / 603 . 98m flops ) <nl>
+ unit_1_0 / sub2 / conv2 / Conv2D ( 603 . 98m / 603 . 98m flops ) <nl>
+ unit_1_1 / sub1 / conv1 / Conv2D ( 603 . 98m / 603 . 98m flops ) <nl>
+ unit_1_1 / sub2 / conv2 / Conv2D ( 603 . 98m / 603 . 98m flops ) <nl>
+ <nl>
+ # Some might prefer op view that aggregates by operation type . <nl>
+ tfprof > op - min_float_ops 1 - select float_ops - account_displayed_op_only - order_by float_ops <nl>
+ node name | # float_ops <nl>
+ Conv2D 17 . 63b float_ops ( 100 . 00 % , 100 . 00 % ) <nl>
+ MatMul 491 . 52k float_ops ( 0 . 00 % , 0 . 00 % ) <nl>
+ BiasAdd 1 . 28k float_ops ( 0 . 00 % , 0 . 00 % ) <nl>
+ <nl>
+ # You can also do that in the Python API . <nl>
+ tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl>
+ tf . get_default_graph ( ) , <nl>
+ tfprof_options = tf . contrib . tfprof . model_analyzer . FLOAT_OPS_OPTIONS ) <nl>
+ ` ` ` <nl>
new file mode 100644 <nl>
index 0000000000000 . . c89d7b0b03f38 <nl>
mmm / dev / null <nl>
ppp b / tensorflow / tools / tfprof / g3doc / profile_time . md <nl>
<nl>
+ # # Profile Time <nl>
+ <nl>
+ * [ Profile by Python Code ] ( # profile - by - python - code ) <nl>
+ * [ Profile by Operation Type ] ( # profile - by - operation - type ) <nl>
+ * [ Profile by Graph ] ( # profile - by - graph ) <nl>
+ * [ Profile by Name Scope ] ( # profile - by - name - scope ) <nl>
+ <nl>
+ # # # Profile by Python Code <nl>
+ ` ` ` python <nl>
+ # In code view , the time of each line of Python code is the aggregated <nl>
+ # time of all operations created by that line . <nl>
+ # In the command line , it requires - - graph_path , - - op_log_path and - - run_meta_path . <nl>
+ # - - op_log_path provides the code trace information . <nl>
+ # - - run_meta_path provides the time information . <nl>
+ <nl>
+ tfprof > code - show_name_regexes seq2seq_attention . * - max_depth 10 - select micros - order_by micros <nl>
+ node name | execution time <nl>
+ _TFProfRoot ( - - / 3 . 74sec ) <nl>
+ seq2seq_attention . py ' > : 168 : run_filename_from . . . : none ( 0us / 3 . 74sec ) <nl>
+ seq2seq_attention . py ' > : 33 : _run_code_in_main : none ( 0us / 3 . 74sec ) <nl>
+ seq2seq_attention . py : 316 : < module > : app . run ( ) ( 0us / 3 . 74sec ) <nl>
+ seq2seq_attention . py : 270 : main : _Train ( model , bat . . . ( 0us / 3 . 74sec ) <nl>
+ seq2seq_attention . py : 128 : _Train : model . build_graph ( ) ( 0us / 3 . 74sec ) <nl>
+ seq2seq_attention_model .
py : 360 : build_graph : self . _add_seq2seq ( ) ( 0us / 2 . 79sec ) <nl> + seq2seq_attention_model . py : 293 : _add_seq2seq : decoder_outputs , . . . ( 0us / 2 . 46sec ) <nl> + seq2seq_attention_model . py : 192 : _add_seq2seq : sequence_length = a . . . ( 0us / 265 . 31ms ) <nl> + seq2seq_attention_model . py : 253 : _add_seq2seq : initial_state_att . . . ( 0us / 50 . 35ms ) <nl> + seq2seq_attention_model . py : 173 : _add_seq2seq : for x in encoder_ . . . ( 0us / 8 . 72ms ) <nl> + seq2seq_attention_model . py : 218 : _add_seq2seq : w_t = tf . transpos . . . ( 0us / 2 . 39ms ) <nl> + . . . <nl> + seq2seq_attention_model . py : 363 : build_graph : self . _add_train_o . . . ( 0us / 949 . 10ms ) <nl> + seq2seq_attention_model . py : 307 : _add_train_op : tf . gradients ( self . . . ( 0us / 641 . 44ms ) <nl> + seq2seq_attention_model . py : 322 : _add_train_op : zip ( grads , tvars ) . . . ( 0us / 307 . 56ms ) <nl> + . . . <nl> + seq2seq_attention_model . py : 364 : build_graph : self . _summaries = . . . ( 0us / 13us ) <nl> + seq2seq_attention_model . py : 361 : build_graph : self . global_step . . . ( 0us / 12us ) <nl> + . . . <nl> + seq2seq_attention . py : 129 : _Train : saver = tf . train . . . . ( 0us / 0us ) <nl> + seq2seq_attention . py : 140 : _Train : global_step = model . . . ( 0us / 0us ) <nl> + <nl> + # Sometimes you want to explore a specific function . You can do that <nl> + # with - start_name_regexes . <nl> + tfprof > code - start_name_regexes . * _add_seq2seq . * - show_name_regexes seq2seq_attention . * - max_depth 10 - select micros - order_by micros <nl> + node name | execution time <nl> + _TFProfRoot ( - - / 3 . 74sec ) <nl> + seq2seq_attention_model . py : 360 : build_graph : self . _add_seq2seq ( ) ( 0us / 2 . 79sec ) <nl> + seq2seq_attention_model . py : 293 : _add_seq2seq : decoder_outputs , . . . ( 0us / 2 . 46sec ) <nl> + seq2seq_attention_model . py : 289 : sampled_loss_func : num_classes = vsize ) ( 0us / 2 . 46sec ) <nl> + seq2seq_attention_model . py : 282 : sampled_loss_func : labels = tf . resha . . . ( 0us / 164us ) <nl> + <nl> + # You can also dive deeper into tensorflow ' s libraries . <nl> + tfprof > code - max_depth 5 - select micros - order_by micros - start_name_regexes . * _add_seq2seq . * - min_micros 100000 <nl> + _TFProfRoot ( - - / 3 . 74sec ) <nl> + seq2seq_attention_model . py : 360 : build_graph : self . _add_seq2seq ( ) ( 0us / 2 . 79sec ) <nl> + seq2seq_attention_model . py : 293 : _add_seq2seq : decoder_outputs , . . . ( 0us / 2 . 46sec ) <nl> + seq2seq_lib . py : 181 : sampled_sequence_ . . . : average_across_ti . . . ( 0us / 2 . 46sec ) <nl> + seq2seq_lib . py : 147 : sequence_loss_by_ . . . : crossent = loss_f . . . ( 0us / 2 . 46sec ) <nl> + seq2seq_attention_model . py : 192 : _add_seq2seq : sequence_length = a . . . ( 0us / 265 . 31ms ) <nl> + seq2seq_lib . py : 104 : bidirectional_rnn : sequence_length , . . . ( 0us / 127 . 27ms ) <nl> + core_rnn . py : 195 : static_rnn : state_size = cell . s . . . ( 0us / 127 . 20ms ) <nl> + seq2seq_lib . py : 110 : bidirectional_rnn : initial_state_bw , . . . ( 0us / 125 . 96ms ) <nl> + core_rnn . py : 195 : static_rnn : state_size = cell . s . . . ( 0us / 125 . 86ms ) <nl> + <nl> + <nl> + # It can also be done in Python API <nl> + opts = model_analyzer . TRAINABLE_VARS_PARAMS_STAT_OPTIONS . copy ( ) <nl> + opts [ ' account_type_regexes ' ] = [ ' . * ' ] <nl> + opts [ ' show_name_regexes ' ] = [ ' . * model_analyzer_testlib . py . 
* ' ] <nl>
+ opts [ ' account_displayed_op_only ' ] = False <nl>
+ opts [ ' select ' ] = [ ' micros ' ] <nl>
+ <nl>
+ tfprof_node = model_analyzer . print_model_analysis ( <nl>
+ sess . graph , run_meta , tfprof_cmd = ' code ' , tfprof_options = opts ) <nl>
+ ` ` ` <nl>
+ <nl>
+ You can also generate a visualization in code view . <nl>
+ Set ` ` ` - output timeline : outfile = < filename > ` ` ` to generate a timeline instead of stdout output . <nl>
+ < left > <nl>
+ ! [ CodeTimeline ] ( code_timeline . png ) <nl>
+ < / left > <nl>
+ <nl>
+ <nl>
+ # # # Profile by Operation Type <nl>
+ ` ` ` shell <nl>
+ # In op view , you can view the aggregated time of each operation type . <nl>
+ tfprof > op - select micros , occurrence - order_by micros <nl>
+ node name | execution time | op occurrence <nl>
+ SoftmaxCrossEntropyWithLogits 1 . 37sec ( 100 . 00 % , 36 . 44 % ) , 30 <nl>
+ MatMul 618 . 97ms ( 63 . 56 % , 16 . 51 % ) , 3450 <nl>
+ Add 273 . 76ms ( 47 . 06 % , 7 . 30 % ) , 2180 <nl>
+ Sub 215 . 41ms ( 39 . 76 % , 5 . 74 % ) , 4372 <nl>
+ ConcatV2 203 . 88ms ( 34 . 01 % , 5 . 44 % ) , 6098 <nl>
+ Mul 134 . 32ms ( 28 . 58 % , 3 . 58 % ) , 9427 <nl>
+ ApplyAdam 92 . 66ms ( 25 . 00 % , 2 . 47 % ) , 27 <nl>
+ Switch 72 . 43ms ( 22 . 53 % , 1 . 93 % ) , 30654 <nl>
+ LogUniformCandidateSampler 69 . 01ms ( 20 . 59 % , 1 . 84 % ) , 30 <nl>
+ Unique 53 . 50ms ( 18 . 75 % , 1 . 43 % ) , 2 <nl>
+ AddN 50 . 10ms ( 17 . 33 % , 1 . 34 % ) , 5481 <nl>
+ <nl>
+ # You might be surprised to see that SoftmaxCrossEntropyWithLogits is <nl>
+ # that expensive . As shown below , it is placed on cpu . <nl>
+ tfprof > op - select micros , device - order_by micros <nl>
+ node name | execution time | assigned devices <nl>
+ SoftmaxCrossEntropyWithLogits 1 . 37sec ( 100 . 00 % , 36 . 44 % ) , / job : worker / replica : 0 / task : 0 / cpu : 0 <nl>
+ MatMul 618 . 97ms ( 63 . 56 % , 16 . 51 % ) , | / job : worker / replica : 0 / task : 0 / cpu : 0 | / job : worker / replica : 0 / task : 0 / gpu : 0 | / job : worker / replica : 0 / task : 0 / gpu : 1 | / job : worker / replica : 0 / task : 0 / gpu : 2 | / job : worker / replica : 0 / task : 0 / gpu : 3 <nl>
+ ` ` ` <nl>
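+ <nl>
+ The op view is also reachable from Python . A minimal sketch , assuming a traced <nl>
+ ` run_metadata ` and assuming ` tfprof_cmd = ' op ' ` selects the op view like the <nl>
+ CLI command above : <nl>
+ <nl>
+ ` ` ` python <nl>
+ opts = tf . contrib . tfprof . model_analyzer . PRINT_ALL_TIMING_MEMORY . copy ( ) <nl>
+ opts [ ' select ' ] = [ ' micros ' , ' occurrence ' ] <nl>
+ opts [ ' order_by ' ] = ' micros ' <nl>
+ <nl>
+ tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl>
+ tf . get_default_graph ( ) , <nl>
+ run_meta = run_metadata , <nl>
+ tfprof_cmd = ' op ' , <nl>
+ tfprof_options = opts ) <nl>
+ ` ` ` <nl>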
variable_scope . <nl> + <nl> + ` ` ` python <nl> + tfprof > scope - max_depth 30 - select micros - min_micros 100000 - order_by micros <nl> + node name | execution time <nl> + _TFProfRoot ( - - / 8 . 12sec ) <nl> + tower_3 / gradients / tower_3 / Conv2d_1a_3x3 / convolution_grad / Conv2DBackpropFilter ( 126 . 34ms / 126 . 34ms ) <nl> + tower_1 / gradients / tower_1 / Conv2d_1a_3x3 / convolution_grad / Conv2DBackpropFilter ( 125 . 44ms / 125 . 44ms ) <nl> + tower_2 / gradients / tower_2 / Conv2d_1a_3x3 / convolution_grad / Conv2DBackpropFilter ( 124 . 85ms / 124 . 85ms ) <nl> + tower_0 / gradients / tower_0 / Conv2d_1a_3x3 / convolution_grad / Conv2DBackpropFilter ( 124 . 45ms / 124 . 45ms ) <nl> + ` ` ` <nl> new file mode 100644 <nl> index 0000000000000 . . 581e66baa2916 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / tfprof / g3doc / python_api . md <nl> <nl> + # # Python API Tutorials <nl> + <nl> + * [ Parameters and Shapes ] ( # parameters - and - shapes ) <nl> + * [ Float Operations ] ( # float - operations ) <nl> + * [ Time and Memory ] ( # time - and - memory ) <nl> + * [ Visualize ] ( # visualize ) <nl> + * [ Multi - step Profiling ] ( # multi - step - profiling ) <nl> + <nl> + ` ` ` import tensorflow as tf ` ` ` . <nl> + <nl> + # # # Parameters and Shapes . <nl> + ` ` ` python <nl> + # Print trainable variable parameter statistics to stdout . <nl> + param_stats = tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl> + tf . get_default_graph ( ) , <nl> + tfprof_options = tf . contrib . tfprof . model_analyzer . <nl> + TRAINABLE_VARS_PARAMS_STAT_OPTIONS ) <nl> + <nl> + # Use code view to associate statistics with Python codes . <nl> + opts = tf . contrib . tfprof . model_analyzer . TRAINABLE_VARS_PARAMS_STAT_OPTIONS <nl> + opts [ ' show_name_regexes ' ] = [ ' . * my_code1 . py . * ' , ' . * my_code2 . py . * ' ] <nl> + param_stats = tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl> + tf . get_default_graph ( ) , <nl> + tfprof_cmd = ' code ' <nl> + tfprof_options = opts ) <nl> + <nl> + # param_stats can be tensorflow . tfprof . TFGraphNodeProto or <nl> + # tensorflow . tfprof . TFMultiGraphNodeProto , depending on the view . <nl> + # Let ' s print the root below . <nl> + sys . stdout . write ( ' total_params : % d \ n ' % param_stats . total_parameters ) <nl> + ` ` ` <nl> + <nl> + # # # Float Operations <nl> + <nl> + # # # # Note : See [ Caveats ] ( profile_model_architecture . md # caveats ) in " Profile Model Architecture " Tutorial <nl> + ` ` ` python <nl> + # Print to stdout an analysis of the number of floating point operations in the <nl> + # model broken down by individual operations . <nl> + tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl> + tf . get_default_graph ( ) , <nl> + tfprof_options = tf . contrib . tfprof . model_analyzer . FLOAT_OPS_OPTIONS ) <nl> + ` ` ` <nl> + <nl> + # # # Time and Memory <nl> + You will first need to run the following set up in your model in order to <nl> + compute the memory and timing statistics . <nl> + <nl> + ` ` ` python <nl> + # Generate the RunMetadata that contains the memory and timing information . <nl> + # <nl> + # Note : When run on GPU , a kernel is first scheduled ( enqueued ) and then <nl> + # executed asynchronously . tfprof only tracks the execution time . <nl> + # <nl> + run_metadata = tf . RunMetadata ( ) <nl> + with tf . Session ( ) as sess : <nl> + _ = sess . run ( train_op , <nl> + options = tf . RunOptions ( trace_level = tf . RunOptions . 
FULL_TRACE ) , <nl> + run_metadata = run_metadata ) <nl> + ` ` ` <nl> + <nl> + Finally , you may run ` print_model_analysis ` to explore the timing and memory <nl> + information of the model . <nl> + <nl> + ` ` ` python <nl> + # See model_analyzer_test . py for more examples . <nl> + # <nl> + # Print to stdout an analysis of the memory usage and the timing information <nl> + # broken down by python codes . <nl> + opts = tf . contrib . tfprof . model_analyzer . PRINT_ALL_TIMING_MEMORY . copy ( ) <nl> + opts [ ' show_name_regexes ' ] = [ ' . * my_code . py . * ' ] <nl> + tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl> + tf . get_default_graph ( ) , <nl> + run_meta = run_metadata , <nl> + tfprof_cmd = ' code ' , <nl> + tfprof_options = opts ) <nl> + <nl> + # Print to stdout an analysis of the memory usage and the timing information <nl> + # broken down by operations . <nl> + tf . contrib . tfprof . model_analyzer . print_model_analysis ( <nl> + tf . get_default_graph ( ) , <nl> + run_meta = run_metadata , <nl> + tfprof_options = tf . contrib . tfprof . model_analyzer . PRINT_ALL_TIMING_MEMORY ) <nl> + ` ` ` <nl> + <nl> + # # # Visualize <nl> + <nl> + ` ` ` <nl> + To visualize the result of Python API results : <nl> + Set opts [ ' output ' ] = ' timeline : outfile = < filename > ' to generate a timeline json file . <nl> + Open a Chrome Browser , open URL chrome : / / tracing , and load the json file . <nl> + ` ` ` <nl> + <nl> + Below are 2 examples of graph view and scope view . See code view example in later examples . <nl> + <nl> + < left > <nl> + ! [ CodeTimeline ] ( graph_timeline . png ) <nl> + ! [ CodeTimeline ] ( scope_timeline . png ) <nl> + < / left > <nl> + <nl> + # # # Multi - step Profiling <nl> + <nl> + tfprof allows you to profile statistics across multiple steps . <nl> + <nl> + ` ` ` python <nl> + opts = model_analyzer . PRINT_ALL_TIMING_MEMORY . copy ( ) <nl> + opts [ ' account_type_regexes ' ] = [ ' . * ' ] <nl> + <nl> + with session . Session ( ) as sess : <nl> + r1 , r2 , r3 = lib . BuildSplitableModel ( ) <nl> + sess . run ( variables . global_variables_initializer ( ) ) <nl> + <nl> + # Create a profiler . <nl> + profiler = model_analyzer . Profiler ( sess . graph ) <nl> + # Profile without RunMetadata of any step . <nl> + pb0 = profiler . profile_name_scope ( opts ) <nl> + <nl> + run_meta = config_pb2 . RunMetadata ( ) <nl> + _ = sess . run ( r1 , <nl> + options = config_pb2 . RunOptions ( <nl> + trace_level = config_pb2 . RunOptions . FULL_TRACE ) , <nl> + run_metadata = run_meta ) <nl> + <nl> + # Add run_meta of step 1 . <nl> + profiler . add_step ( 1 , run_meta ) <nl> + pb1 = profiler . profile_name_scope ( opts ) <nl> + <nl> + run_meta2 = config_pb2 . RunMetadata ( ) <nl> + _ = sess . run ( r2 , <nl> + options = config_pb2 . RunOptions ( <nl> + trace_level = config_pb2 . RunOptions . FULL_TRACE ) , <nl> + run_metadata = run_meta2 ) <nl> + # Add run_meta of step 2 . <nl> + profiler . add_step ( 2 , run_meta2 ) <nl> + pb2 = profiler . profile_name_scope ( opts ) <nl> + <nl> + run_meta3 = config_pb2 . RunMetadata ( ) <nl> + _ = sess . run ( r3 , <nl> + options = config_pb2 . RunOptions ( <nl> + trace_level = config_pb2 . RunOptions . FULL_TRACE ) , <nl> + run_metadata = run_meta3 ) <nl> + # Add run_meta of step 3 . <nl> + profiler . add_step ( 3 , run_meta3 ) <nl> + pb3 = profiler . profile_name_scope ( opts ) <nl> + ` ` ` <nl> \ No newline at end of file <nl> mmm a / tensorflow / tools / tfprof / internal / tfprof_code . 
cc <nl> ppp b / tensorflow / tools / tfprof / internal / tfprof_code . cc <nl> std : : vector < CodeNode * > TFCode : : Account ( const std : : vector < CodeNode * > & roots , <nl> node - > ResetTotalStats ( ) ; <nl> std : : vector < CodeNode * > act_cnodes = Account ( node - > children , opts ) ; <nl> node - > account = ReAccount ( node , opts ) ; <nl> - / / LOG ( ERROR ) < < act_cnodes . size ( ) < < " " < < node - > account ; <nl> if ( node - > account | | ! act_cnodes . empty ( ) ) { <nl> - / / LOG ( ERROR ) < < node - > name ( ) ; <nl> node - > show_children . clear ( ) ; <nl> node - > ResetTotalStats ( ) ; <nl> node - > AddSelfToTotalStats ( ) ; <nl> mmm a / tensorflow / tools / tfprof / internal / tfprof_graph . cc <nl> ppp b / tensorflow / tools / tfprof / internal / tfprof_graph . cc <nl> const ShowNode * TFGraph : : ShowInternal ( const Options & opts , Timeline * timeline ) { <nl> / / 1 . Account and aggregate the stats based on the graph structure . <nl> / / Returns a graph consists of accounted nodes . <nl> std : : set < string > visits ; <nl> - std : : vector < GraphNode * > roots = <nl> - Account ( root_ - > children , opts , timeline , & visits ) ; <nl> + std : : vector < GraphNode * > roots = Account ( root_ - > children , opts , & visits ) ; <nl> for ( GraphNode * n : roots ) { <nl> root_ - > AggregateTotalStats ( n ) ; <nl> } <nl> const ShowNode * TFGraph : : ShowInternal ( const Options & opts , Timeline * timeline ) { <nl> Format ( root - > show_children , & root - > formatted_str , root - > mutable_proto ( ) ) ; <nl> <nl> if ( timeline ) { <nl> - timeline - > GenerateGraphTimeline ( root ) ; <nl> + timeline - > GenerateGraphTimeline ( root - > show_children ) ; <nl> } <nl> return root ; <nl> } <nl> std : : vector < GraphNode * > TFGraph : : PrintGraph ( const std : : vector < GraphNode * > roots , <nl> <nl> std : : vector < GraphNode * > TFGraph : : Account ( const std : : vector < GraphNode * > & roots , <nl> const Options & opts , <nl> - Timeline * timeline , <nl> std : : set < string > * visits ) { <nl> std : : vector < GraphNode * > act_nodes ; <nl> for ( GraphNode * node : roots ) { <nl> if ( visits - > find ( node - > name ( ) ) ! = visits - > end ( ) ) continue ; <nl> visits - > insert ( node - > name ( ) ) ; <nl> / / Depth - first . <nl> - std : : vector < GraphNode * > act_cnodes = <nl> - Account ( node - > children , opts , timeline , visits ) ; <nl> + std : : vector < GraphNode * > act_cnodes = Account ( node - > children , opts , visits ) ; <nl> <nl> node - > account = ReAccount ( node , opts ) ; <nl> if ( node - > account ) { <nl> node - > show_children . clear ( ) ; <nl> node - > ResetTotalStats ( ) ; <nl> node - > AddSelfToTotalStats ( ) ; <nl> - if ( timeline ) { <nl> - timeline - > TrackNode ( node ) ; <nl> - } <nl> / / Aggregate its accounted children stats . <nl> for ( GraphNode * c : act_cnodes ) { <nl> - if ( timeline ) { <nl> - timeline - > TrackNodeConnection ( node , c ) ; <nl> - } <nl> node - > AggregateTotalStats ( c ) ; <nl> node - > show_children . push_back ( c ) ; <nl> } <nl> mmm a / tensorflow / tools / tfprof / internal / tfprof_graph . h <nl> ppp b / tensorflow / tools / tfprof / internal / tfprof_graph . 
h <nl> class TFGraph : public TFShow { <nl> int last_ident , std : : set < string > * visits ) ; <nl> <nl> std : : vector < GraphNode * > Account ( const std : : vector < GraphNode * > & roots , <nl> - const Options & opts , Timeline * timeline , <nl> + const Options & opts , <nl> std : : set < string > * visits ) ; <nl> <nl> void Format ( const std : : vector < GraphNode * > roots , string * display_str , <nl> mmm a / tensorflow / tools / tfprof / internal / tfprof_node . cc <nl> ppp b / tensorflow / tools / tfprof / internal / tfprof_node . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / tools / tfprof / internal / tfprof_node . h " <nl> <nl> - # include " tensorflow / core / framework / allocation_description . pb . h " <nl> - # include " tensorflow / core / framework / tensor_description . pb . h " <nl> # include " tensorflow / tools / tfprof / internal / tfprof_utils . h " <nl> <nl> namespace tensorflow { <nl> namespace tfprof { <nl> - / / Notes about start and end time from the NodeExecStats proto . <nl> + / / Notes about start and end time from the NodeExecStats proto : <nl> / / For GPU , there is no difference between op_end_rel_micros and <nl> / / all_end_rel_micros . All are kernel times . <nl> / / For CPU , op_end_rel is the kernel time , while all_end_rel_micros includes <nl> - / / some post - processing . <nl> - / / Here , we only consider kernel time for simplicity . <nl> + / / some post - processing . Besides , currently , there is no way to measure <nl> + / / the execution time of async ops accurately . <nl> + / / <nl> + / / Notes about device : <nl> + / / For ops on gpu : <nl> + / / It will appear in three different devices in RunMetadata : 1 ) gpu : x , <nl> + / / 2 ) gpu : x : stream : all and 3 ) gpu : x : stream : id . 2 ) is used a combined view <nl> + / / of all different 3 ) . 1 ) is the op scheduling , pre - processing and <nl> + / / post processing time . 3 ) is the execution time of GPU kernels on a stream . <nl> + / / For ops on cpu : <nl> + / / It will only appear as cpu : 0 . <nl> + <nl> + void ExecStep : : AddTimeStats ( const string & dev , const NodeExecStats & step_stat ) { <nl> + devices_ . insert ( dev ) ; <nl> + if ( step_stat . all_start_micros ( ) > 0 ) { <nl> + if ( all_start_micros_ > 0 ) { <nl> + all_start_micros_ = std : : min ( <nl> + all_start_micros_ , static_cast < int64 > ( step_stat . all_start_micros ( ) ) ) ; <nl> + } else { <nl> + all_start_micros_ = step_stat . all_start_micros ( ) ; <nl> + } <nl> + int64 op_end_rel_micros = step_stat . op_end_rel_micros ( ) ; <nl> + / / Round quick execution to 1 micro to be semantically robust . <nl> + if ( op_end_rel_micros = = 0 ) { <nl> + + + op_end_rel_micros ; <nl> + } <nl> + latest_end_rel_micros_ = <nl> + std : : max ( latest_end_rel_micros_ , op_end_rel_micros ) ; <nl> + <nl> + op_execs_ [ dev ] . push_back ( <nl> + std : : make_pair ( step_stat . all_start_micros ( ) , op_end_rel_micros ) ) ; <nl> + <nl> + if ( dev . find ( " stream " ) ! = dev . npos & & dev . find ( " stream : all " ) = = dev . npos ) { <nl> + gpu_kernel_execs_ [ dev ] . push_back ( <nl> + std : : make_pair ( step_stat . all_start_micros ( ) , op_end_rel_micros ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void ExecStep : : AddMemoryStats ( const string & dev , <nl> + const NodeExecStats & step_stat ) { <nl> + if ( mem_initiated_ ) { <nl> + return ; <nl> + } <nl> + mem_initiated_ = true ; <nl> + <nl> + for ( const auto & mem : step_stat . memory ( ) ) { <nl> + / / TODO ( xpan ) : Fix this hack . 
Currently the allocator name seems quite <nl> + / / ad - hoc . <nl> + if ( mem . allocator_name ( ) . find ( " GPU " ) = = mem . allocator_name ( ) . npos ) { <nl> + continue ; <nl> + } <nl> + allocator_bytes_in_use_ = <nl> + std : : max ( allocator_bytes_in_use_ , <nl> + static_cast < int64 > ( mem . allocator_bytes_in_use ( ) ) ) ; <nl> + } <nl> + int64 total_output_bytes = 0 ; <nl> + for ( const auto & output : step_stat . output ( ) ) { <nl> + if ( output . has_tensor_description ( ) & & <nl> + output . tensor_description ( ) . has_allocation_description ( ) ) { <nl> + / / TODO ( xpan ) : Maybe allocated_bytes . <nl> + int64 output_bytes = std : : max ( output . tensor_description ( ) <nl> + . allocation_description ( ) <nl> + . allocated_bytes ( ) , <nl> + output . tensor_description ( ) <nl> + . allocation_description ( ) <nl> + . requested_bytes ( ) ) ; <nl> + uint64 output_ptr = <nl> + output . tensor_description ( ) . allocation_description ( ) . ptr ( ) ; <nl> + total_output_bytes + = output_bytes ; <nl> + output_bytes_ [ output . slot ( ) ] = std : : make_pair ( output_bytes , output_ptr ) ; <nl> + } <nl> + } <nl> + if ( step_stat . has_memory_stats ( ) ) { <nl> + host_temp_bytes_ + = step_stat . memory_stats ( ) . host_temp_memory_size ( ) ; <nl> + host_persistent_bytes_ + = <nl> + step_stat . memory_stats ( ) . host_persistent_memory_size ( ) ; <nl> + accelerator_temp_bytes_ + = <nl> + step_stat . memory_stats ( ) . device_temp_memory_size ( ) ; <nl> + accelerator_persistent_bytes_ + = <nl> + step_stat . memory_stats ( ) . device_persistent_memory_size ( ) ; <nl> + } <nl> + requested_bytes_ = total_output_bytes ; <nl> + } <nl> + <nl> void TFGraphNode : : AddStepStat ( int64 step , const string & device , <nl> const NodeExecStats & step_stat ) { <nl> string dev = str_util : : Lowercase ( device ) ; <nl> void TFGraphNode : : AddStepStat ( int64 step , const string & device , <nl> } <nl> } <nl> <nl> - ExecStep & exec = execs_ [ step ] ; <nl> - exec . AddTimeStats ( dev , step_stat ) ; <nl> + auto exec = execs_ . find ( step ) ; <nl> + if ( exec = = execs_ . end ( ) ) { <nl> + execs_ . insert ( std : : pair < int64 , ExecStep > ( step , ExecStep ( this ) ) ) ; <nl> + exec = execs_ . find ( step ) ; <nl> + } <nl> + <nl> + exec - > second . AddTimeStats ( dev , step_stat ) ; <nl> <nl> if ( dev = = canonical_device_ ) { <nl> - exec . AddMemoryStats ( dev , step_stat ) ; <nl> + exec - > second . AddMemoryStats ( dev , step_stat ) ; <nl> + } <nl> + } <nl> + <nl> + int64 ExecStep : : exec_micros ( ) const { <nl> + int64 total = 0 ; <nl> + for ( const auto & execs : gpu_kernel_execs_ ) { <nl> + for ( const auto & exec : execs . second ) { <nl> + total + = exec . second ; <nl> + } <nl> } <nl> + if ( total > 0 ) return total ; <nl> + <nl> + / / If there is no gpu kernel time , fall back to assume it runs on cpu . <nl> + / / TODO ( xpan ) : No way to track CPU async op timing accurately ? <nl> + if ( op_execs_ . size ( ) > 1 ) { <nl> + fprintf ( stderr , " Op : % s has over 1 no - gpu assignment \ n " , <nl> + node - > name ( ) . c_str ( ) ) ; <nl> + } <nl> + for ( const auto & execs : op_execs_ ) { <nl> + for ( const auto & exec : execs . second ) { <nl> + total + = exec . second ; <nl> + } <nl> + } <nl> + return total ; <nl> + } <nl> + <nl> + bool IsCombinedGPUStream ( const string & device ) { <nl> + return device . find ( " stream : all " ) ! = device . npos ; <nl> + } <nl> + <nl> + bool IsCPUDevice ( const string & device ) { <nl> + return device . find ( " cpu : 0 " ) ! = device . 
npos ; <nl> } <nl> } / / namespace tfprof <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / tools / tfprof / internal / tfprof_node . h <nl> ppp b / tensorflow / tools / tfprof / internal / tfprof_node . h <nl> limitations under the License . <nl> namespace tensorflow { <nl> namespace tfprof { <nl> <nl> + class TFGraphNode ; <nl> + <nl> class ExecStep { <nl> public : <nl> - ExecStep ( ) <nl> - : all_start_micros_ ( 0 ) , <nl> + ExecStep ( TFGraphNode * node ) <nl> + : node ( node ) , <nl> + all_start_micros_ ( 0 ) , <nl> latest_end_rel_micros_ ( 0 ) , <nl> mem_initiated_ ( false ) , <nl> requested_bytes_ ( 0 ) , <nl> class ExecStep { <nl> accelerator_persistent_bytes_ ( 0 ) , <nl> allocator_bytes_in_use_ ( 0 ) { } <nl> <nl> - void AddTimeStats ( const string & dev , const NodeExecStats & step_stat ) { <nl> - devices_ . insert ( dev ) ; <nl> - if ( step_stat . all_start_micros ( ) > 0 ) { <nl> - if ( all_start_micros_ > 0 ) { <nl> - all_start_micros_ = <nl> - std : : min ( all_start_micros_ , <nl> - static_cast < int64 > ( step_stat . all_start_micros ( ) ) ) ; <nl> - } else { <nl> - all_start_micros_ = step_stat . all_start_micros ( ) ; <nl> - } <nl> - int64 op_end_rel_micros = step_stat . op_end_rel_micros ( ) ; <nl> - / / Round quick execution to 1 micro to be semantically robust . <nl> - if ( op_end_rel_micros = = 0 ) { <nl> - + + op_end_rel_micros ; <nl> - } <nl> - latest_end_rel_micros_ = <nl> - std : : max ( latest_end_rel_micros_ , op_end_rel_micros ) ; <nl> - <nl> - op_execs_ [ dev ] . push_back ( <nl> - std : : make_pair ( step_stat . all_start_micros ( ) , op_end_rel_micros ) ) ; <nl> - <nl> - if ( dev . find ( " stream " ) ! = dev . npos & & <nl> - dev . find ( " stream : all " ) = = dev . npos ) { <nl> - gpu_kernel_execs_ [ dev ] . push_back ( <nl> - std : : make_pair ( step_stat . all_start_micros ( ) , op_end_rel_micros ) ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void AddMemoryStats ( const string & dev , const NodeExecStats & step_stat ) { <nl> - if ( mem_initiated_ ) { <nl> - / / fprintf ( stderr , " Memory initiated twice on % s " , dev . c_str ( ) ) ; <nl> - return ; <nl> - } <nl> - mem_initiated_ = true ; <nl> - <nl> - for ( const auto & mem : step_stat . memory ( ) ) { <nl> - / / TODO ( xpan ) : Fix this hack . Currently the allocator name seems quite <nl> - / / ad - hoc . <nl> - if ( mem . allocator_name ( ) . find ( " GPU " ) = = mem . allocator_name ( ) . npos ) { <nl> - continue ; <nl> - } <nl> - allocator_bytes_in_use_ = <nl> - std : : max ( allocator_bytes_in_use_ , <nl> - static_cast < int64 > ( mem . allocator_bytes_in_use ( ) ) ) ; <nl> - } <nl> - int64 total_output_bytes = 0 ; <nl> - for ( const auto & output : step_stat . output ( ) ) { <nl> - if ( output . has_tensor_description ( ) & & <nl> - output . tensor_description ( ) . has_allocation_description ( ) ) { <nl> - / / TODO ( xpan ) : Maybe allocated_bytes . <nl> - int64 output_bytes = std : : max ( output . tensor_description ( ) <nl> - . allocation_description ( ) <nl> - . allocated_bytes ( ) , <nl> - output . tensor_description ( ) <nl> - . allocation_description ( ) <nl> - . requested_bytes ( ) ) ; <nl> - uint64 output_ptr = <nl> - output . tensor_description ( ) . allocation_description ( ) . ptr ( ) ; <nl> - total_output_bytes + = output_bytes ; <nl> - output_bytes_ [ output . slot ( ) ] = std : : make_pair ( output_bytes , output_ptr ) ; <nl> - } <nl> - } <nl> - if ( step_stat . has_memory_stats ( ) ) { <nl> - host_temp_bytes_ + = step_stat . memory_stats ( ) . 
host_temp_memory_size ( ) ; <nl> - host_persistent_bytes_ + = <nl> - step_stat . memory_stats ( ) . host_persistent_memory_size ( ) ; <nl> - accelerator_temp_bytes_ + = <nl> - step_stat . memory_stats ( ) . device_temp_memory_size ( ) ; <nl> - accelerator_persistent_bytes_ + = <nl> - step_stat . memory_stats ( ) . device_persistent_memory_size ( ) ; <nl> - } <nl> - requested_bytes_ = total_output_bytes ; <nl> - } <nl> + void AddTimeStats ( const string & dev , const NodeExecStats & step_stat ) ; <nl> <nl> - int64 exec_micros ( ) const { <nl> - int64 total = 0 ; <nl> - for ( const auto & execs : gpu_kernel_execs_ ) { <nl> - for ( const auto & exec : execs . second ) { <nl> - total + = exec . second ; <nl> - } <nl> - } <nl> - if ( total > 0 ) return total ; <nl> + void AddMemoryStats ( const string & dev , const NodeExecStats & step_stat ) ; <nl> <nl> - / / If there is no gpu kernel time , fall back to assume it runs on cpu . <nl> - / / TODO ( xpan ) : No way to track CPU async op timing accurately ? <nl> - for ( const auto & execs : op_execs_ ) { <nl> - for ( const auto & exec : execs . second ) { <nl> - total + = exec . second ; <nl> - } <nl> - } <nl> - return total ; <nl> - } <nl> + int64 exec_micros ( ) const ; <nl> <nl> const std : : map < string , std : : vector < std : : pair < int64 , int64 > > > & op_execs ( ) <nl> const { <nl> class ExecStep { <nl> int64 allocator_bytes_in_use ( ) const { return allocator_bytes_in_use_ ; } <nl> <nl> private : <nl> + TFGraphNode * node ; <nl> / / The earliest / latest time including scheduling and kernel execution . <nl> int64 all_start_micros_ ; <nl> int64 latest_end_rel_micros_ ; <nl> class TFMultiGraphNode { <nl> std : : map < string , const TFGraphNode * > nodes_ ; <nl> std : : map < string , std : : unique_ptr < TFMultiGraphNode > > children_ ; <nl> } ; <nl> + <nl> + bool IsCombinedGPUStream ( const string & device ) ; <nl> + bool IsCPUDevice ( const string & device ) ; <nl> } / / namespace tfprof <nl> } / / namespace tensorflow <nl> <nl> mmm a / tensorflow / tools / tfprof / internal / tfprof_node_show . h <nl> ppp b / tensorflow / tools / tfprof / internal / tfprof_node_show . h <nl> class GraphNode : public ShowNode { <nl> public : <nl> explicit GraphNode ( TFGraphNode * node ) : ShowNode ( node ) { } <nl> <nl> - bool Trackable ( int64 step ) { return node - > trackable ( step ) ; } <nl> + bool Trackable ( int64 step ) const { return node - > trackable ( step ) ; } <nl> <nl> std : : vector < GraphNode * > children ; <nl> std : : vector < GraphNode * > show_children ; <nl> mmm a / tensorflow / tools / tfprof / internal / tfprof_timeline . cc <nl> ppp b / tensorflow / tools / tfprof / internal / tfprof_timeline . cc <nl> string ChromeTraceFormatter : : Format ( ) { <nl> return trace_str ; <nl> } <nl> <nl> - void MemoryTracker : : TrackNode ( int64 step , GraphNode * node ) { <nl> + void MemoryTracker : : TrackNode ( int64 step , const GraphNode * node ) { <nl> if ( ! node - > Trackable ( step ) ) { <nl> return ; <nl> } <nl> void MemoryTracker : : TrackNode ( int64 step , GraphNode * node ) { <nl> } <nl> } <nl> <nl> - void MemoryTracker : : TrackNodeConnection ( int64 step , GraphNode * node , <nl> - GraphNode * src ) { <nl> + void MemoryTracker : : TrackNodeConnection ( int64 step , const GraphNode * node , <nl> + const GraphNode * src ) { <nl> if ( ! node - > Trackable ( step ) | | ! 
src - > Trackable ( step ) ) { <nl> return ; <nl> } <nl> void MemoryTracker : : TrackNodeConnection ( int64 step , GraphNode * node , <nl> } <nl> } <nl> <nl> - void Timeline : : GenerateGraphTimeline ( const GraphNode * gnode ) { <nl> - AddGraphNode ( gnode ) ; <nl> + void Timeline : : AllocateTimeNodes ( GraphNode * gnode ) { <nl> + if ( gnode - > Trackable ( step_ ) ) { <nl> + TrackNode ( gnode ) ; <nl> + const TFGraphNode * node = gnode - > node ; <nl> + for ( const auto & kernel_execs : node - > op_execs ( step_ ) ) { <nl> + const string & device = kernel_execs . first ; <nl> + if ( ! IsCombinedGPUStream ( device ) & & ! IsCPUDevice ( device ) ) { <nl> + continue ; <nl> + } <nl> + <nl> + if ( process_ . find ( device ) = = process_ . end ( ) ) { <nl> + int64 pid = AllocatePID ( ) ; <nl> + process_ [ device ] . reset ( new Process ( device , pid ) ) ; <nl> + chrome_formatter_ . EmitPID ( GetTimeDevName ( device ) , pid ) ; <nl> + } <nl> + Process * p = process_ [ device ] . get ( ) ; <nl> + <nl> + for ( const auto & exec : kernel_execs . second ) { <nl> + int64 start_micros = exec . first ; <nl> + int64 exec_micros = exec . second ; <nl> + / / TODO ( xpan ) : There might be start time duplication here . <nl> + if ( tnodes_ [ device ] . find ( start_micros ) = = tnodes_ [ device ] . end ( ) ) { <nl> + / / TODO ( xpan ) : Give each kernel call a unique_name . <nl> + tnodes_ [ device ] [ start_micros ] . reset ( <nl> + new TimeNode ( p , gnode , start_micros , exec_micros ) ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + for ( GraphNode * n : gnode - > show_children ) { <nl> + AllocateTimeNodes ( n ) ; <nl> + } <nl> + } <nl> + <nl> + void Timeline : : GenerateGraphTimeline ( const std : : vector < GraphNode * > & gnodes ) { <nl> + for ( GraphNode * gnode : gnodes ) { <nl> + AllocateTimeNodes ( gnode ) ; <nl> + } <nl> + for ( auto & process : tnodes_ ) { <nl> + for ( auto & tn : process . second ) { <nl> + TimeNode * tnode = tn . second . get ( ) ; <nl> + for ( GraphNode * inp : tnode - > node - > children ) { <nl> + if ( ! inp - > account | | ! inp - > Trackable ( step_ ) ) { <nl> + continue ; <nl> + } <nl> + TrackNodeConnection ( tnode - > node , inp ) ; <nl> + for ( const auto & kernel_execs : inp - > node - > op_execs ( step_ ) ) { <nl> + if ( process . first = = kernel_execs . first ) { <nl> + / / Not interested in flow within the same device . <nl> + continue ; <nl> + } <nl> + for ( const auto & exec : kernel_execs . second ) { <nl> + int64 start_micros = exec . first ; <nl> + auto cprocess = tnodes_ . find ( kernel_execs . first ) ; <nl> + if ( cprocess = = tnodes_ . end ( ) ) continue ; <nl> + auto ctn = cprocess - > second . find ( start_micros ) ; <nl> + if ( ctn = = cprocess - > second . end ( ) ) continue ; <nl> + ctn - > second - > next_tnodes . push_back ( tnode ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> AllocateLanes ( ) ; <nl> fprintf ( stdout , " generating trace file . \ n " ) ; <nl> - / / int64 flow_id = 1 ; <nl> + int64 flow_id = 1 ; <nl> for ( const auto & process : alloc_nodes_ ) { <nl> for ( const auto & lane : process . second ) { <nl> for ( const auto & node : lane . second ) { <nl> TimeNode * tnode = node . 
second ; <nl> <nl> Json : : Value args ( Json : : objectValue ) ; <nl> - args [ " name " ] = Json : : Value ( tnode - > name ) ; <nl> - args [ " op " ] = Json : : Value ( tnode - > name ) ; <nl> + args [ " name " ] = Json : : Value ( tnode - > name ( ) ) ; <nl> + args [ " op " ] = Json : : Value ( tnode - > name ( ) ) ; <nl> chrome_formatter_ . EmitRegion ( node . first , tnode - > exec_micros , <nl> process . first , lane . first , " Op " , <nl> - tnode - > name , args ) ; <nl> + tnode - > name ( ) , args ) ; <nl> / / Flow is a directed arrow pointing from src to dst . <nl> / / TODO ( xpan ) : Disable flow to reduce json file size for now . Need <nl> / / to think of a better way to make flow interpretable . <nl> - / * <nl> for ( TimeNode * next_tnode : node . second - > next_tnodes ) { <nl> chrome_formatter_ . EmitFlowStart ( <nl> - tnode - > name + " _flow " , tnode - > start_micros + tnode - > exec_micros , <nl> + tnode - > name ( ) + " _flow " , tnode - > start_micros + tnode - > exec_micros , <nl> process . first , lane . first , flow_id ) ; <nl> chrome_formatter_ . EmitFlowEnd ( <nl> - tnode - > name + " _flow " , next_tnode - > start_micros , <nl> + tnode - > name ( ) + " _flow " , next_tnode - > start_micros , <nl> next_tnode - > process - > pid , next_tnode - > tid , flow_id ) ; <nl> flow_id + = 1 ; <nl> } <nl> - * / <nl> } <nl> } <nl> } <nl> void Timeline : : OutputTimeline ( ) { <nl> fflush ( stdout ) ; <nl> } <nl> <nl> - std : : vector < TimeNode * > Timeline : : AddGraphNode ( const GraphNode * gnode ) { <nl> - std : : vector < TimeNode * > tnodes ; <nl> - if ( ! gnode ) return tnodes ; <nl> - <nl> - std : : vector < TimeNode * > shown_cinputs ; <nl> - for ( GraphNode * schild : gnode - > show_children ) { <nl> - std : : vector < TimeNode * > inputs = AddGraphNode ( schild ) ; <nl> - shown_cinputs . insert ( shown_cinputs . end ( ) , inputs . begin ( ) , inputs . end ( ) ) ; <nl> - } <nl> - if ( ! gnode - > node - > trackable ( step_ ) ) { <nl> - return shown_cinputs ; <nl> - } <nl> - <nl> - const TFGraphNode * node = gnode - > node ; <nl> - for ( const auto & kernel_execs : node - > op_execs ( step_ ) ) { <nl> - const string & device = kernel_execs . first ; <nl> - const std : : vector < std : : pair < int64 , int64 > > & execs = kernel_execs . second ; <nl> - <nl> - if ( process_ . find ( device ) = = process_ . end ( ) ) { <nl> - int64 pid = AllocatePID ( ) ; <nl> - process_ [ device ] . reset ( new Process ( pid ) ) ; <nl> - chrome_formatter_ . EmitPID ( GetTimeDevName ( device ) , pid ) ; <nl> - } <nl> - Process * p = process_ [ device ] . get ( ) ; <nl> - <nl> - for ( const auto & exec : execs ) { <nl> - int64 start_micros = exec . first ; <nl> - int64 exec_micros = exec . second ; <nl> - / / TODO ( xpan ) : There might be start time duplication here . <nl> - if ( tnodes_ [ device ] . find ( start_micros ) = = tnodes_ [ device ] . end ( ) ) { <nl> - / / TODO ( xpan ) : Give each kernel call a unique_name . <nl> - tnodes_ [ device ] [ start_micros ] . reset ( <nl> - new TimeNode ( p , node - > name ( ) , start_micros , exec_micros ) ) ; <nl> - } <nl> - TimeNode * tnode_ptr = tnodes_ [ device ] [ start_micros ] . get ( ) ; <nl> - <nl> - for ( int i = 0 ; i < shown_cinputs . size ( ) ; i + + ) { <nl> - shown_cinputs [ i ] - > next_tnodes . push_back ( tnode_ptr ) ; <nl> - } <nl> - tnodes . 
push_back ( tnode_ptr ) ; <nl> - } <nl> - } <nl> - return tnodes ; <nl> - } <nl> - <nl> void Timeline : : AllocateLanes ( ) { <nl> for ( auto & process : tnodes_ ) { <nl> Process * p = process_ [ process . first ] . get ( ) ; <nl> mmm a / tensorflow / tools / tfprof / internal / tfprof_timeline . h <nl> ppp b / tensorflow / tools / tfprof / internal / tfprof_timeline . h <nl> class ChromeTraceFormatter { <nl> <nl> class Process { <nl> public : <nl> - Process ( int64 pid ) : pid ( pid ) { } <nl> + Process ( const string & device , int64 pid ) : device ( device ) , pid ( pid ) { } <nl> <nl> / / Each lane is a map from start_time to end_time . <nl> std : : vector < std : : map < int64 , int64 > > lanes ; <nl> + string device ; <nl> int64 pid ; <nl> } ; <nl> <nl> class TimeNode { <nl> public : <nl> - TimeNode ( Process * process , const string & name , int64 start_micros , <nl> + TimeNode ( Process * process , GraphNode * node , int64 start_micros , <nl> int64 exec_micros ) <nl> : process ( process ) , <nl> - name ( name ) , <nl> + node ( node ) , <nl> start_micros ( start_micros ) , <nl> exec_micros ( exec_micros ) , <nl> tid ( - 1 ) { } <nl> virtual ~ TimeNode ( ) { } <nl> <nl> + const string & name ( ) { return node - > name ( ) ; } <nl> + <nl> Process * process ; <nl> - string name ; <nl> + GraphNode * node ; <nl> int64 start_micros ; <nl> int64 exec_micros ; <nl> int64 tid ; <nl> class MemoryTracker { <nl> std : : map < int64 , int64 > allocator_stats ; <nl> } ; <nl> <nl> - void TrackNode ( int64 step , GraphNode * node ) ; <nl> + void TrackNode ( int64 step , const GraphNode * node ) ; <nl> <nl> - void TrackNodeConnection ( int64 step , GraphNode * node , GraphNode * src ) ; <nl> + void TrackNodeConnection ( int64 step , const GraphNode * node , <nl> + const GraphNode * src ) ; <nl> <nl> const std : : map < string , Device > & devices ( ) const { return devices_ ; } <nl> <nl> class Timeline { <nl> int64 step ( ) const { return step_ ; } <nl> void SetStep ( int64 step ) { step_ = step ; } <nl> <nl> - void GenerateGraphTimeline ( const GraphNode * gnode ) ; <nl> + void GenerateGraphTimeline ( const std : : vector < GraphNode * > & gnodes ) ; <nl> <nl> void GenerateScopeTimeline ( const ScopeNode * node ) ; <nl> <nl> void GenerateCodeTimeline ( const CodeNode * node ) ; <nl> <nl> - void TrackNode ( GraphNode * node ) { mem_tracker_ . TrackNode ( step_ , node ) ; } <nl> + void TrackNode ( const GraphNode * node ) { mem_tracker_ . TrackNode ( step_ , node ) ; } <nl> <nl> void TrackNodeConnection ( GraphNode * node , GraphNode * src ) { <nl> mem_tracker_ . TrackNodeConnection ( step_ , node , src ) ; <nl> class Timeline { <nl> < < " children : " < < total_micros ; <nl> } <nl> <nl> - std : : vector < TimeNode * > AddGraphNode ( const GraphNode * gnode ) ; <nl> + void AllocateTimeNodes ( GraphNode * gnode ) ; <nl> <nl> void AllocateLanes ( ) ; <nl> <nl> mmm a / tensorflow / workspace . bzl <nl> ppp b / tensorflow / workspace . bzl <nl> patched_http_archive = repository_rule ( <nl> <nl> <nl> # If TensorFlow is linked as a submodule . <nl> - # path_prefix and tf_repo_name are no longer used . <nl> + # path_prefix is no longer used . <nl> + # tf_repo_name is thought to be under consideration . 
<nl> def tf_workspace ( path_prefix = " " , tf_repo_name = " " ) : <nl> # We must check the bazel version before trying to parse any other BUILD <nl> # files , in case the parsing of those build files depends on the bazel <nl> def tf_workspace ( path_prefix = " " , tf_repo_name = " " ) : <nl> if path_prefix : <nl> print ( " path_prefix was specified to tf_workspace but is no longer used " + <nl> " and will be removed in the future . " ) <nl> - if tf_repo_name : <nl> - print ( " tf_repo_name was specified to tf_workspace but is no longer used " + <nl> - " and will be removed in the future . " ) <nl> <nl> native . new_http_archive ( <nl> name = " eigen_archive " , <nl> def tf_workspace ( path_prefix = " " , tf_repo_name = " " ) : <nl> ] ) , <nl> ) <nl> <nl> - # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> - # TensorBoard JavaScript Production Dependencies <nl> + native . new_http_archive ( <nl> + name = " io_angular_clutz " , <nl> + build_file = " / / third_party : clutz . BUILD " , <nl> + sha256 = " 2981de41d1ff4774b544423da9a2cd8beb3be649e95aef2ef2fd83957300b3fe " , <nl> + strip_prefix = " clutz - b0db5ade9bb535d387f05292316c422790c9848e " , <nl> + urls = [ <nl> + " http : / / mirror . bazel . build / github . com / angular / clutz / archive / b0db5ade9bb535d387f05292316c422790c9848e . tar . gz " , # 2017 - 05 - 22 <nl> + " https : / / github . com / angular / clutz / archive / b0db5ade9bb535d387f05292316c422790c9848e . tar . gz " , <nl> + ] , <nl> + ) <nl> <nl> filegroup_external ( <nl> name = " com_google_javascript_closure_compiler_externs " , <nl> licenses = [ " notice " ] , # Apache 2 . 0 <nl> sha256_urls_extract = { <nl> - " 0ee7b88ed2955b622eaa038bece283e28d0fb5abebfbb80871fc3d0353f0000b " : [ <nl> - " http : / / mirror . bazel . build / github . com / google / closure - compiler / archive / v20170423 . tar . gz " , <nl> - " https : / / github . com / google / closure - compiler / archive / v20170423 . tar . gz " , <nl> + " 0f515a6ebfa138490b3c5ea9f3591ea1a7e4a930d3074f18b3eca86084ad9b66 " : [ <nl> + " http : / / mirror . bazel . build / github . com / google / closure - compiler / archive / b37e6000001b0a6bf4c0be49024ebda14a8711d9 . tar . gz " , # 2017 - 06 - 02 <nl> + " https : / / github . com / google / closure - compiler / archive / b37e6000001b0a6bf4c0be49024ebda14a8711d9 . tar . gz " , <nl> ] , <nl> } , <nl> - strip_prefix = { " v20170423 . tar . gz " : " closure - compiler - 20170423 / externs " } , <nl> + strip_prefix = { " b37e6000001b0a6bf4c0be49024ebda14a8711d9 . tar . gz " : " closure - compiler - b37e6000001b0a6bf4c0be49024ebda14a8711d9 / externs " } , <nl> ) <nl> <nl> filegroup_external ( <nl> def tf_workspace ( path_prefix = " " , tf_repo_name = " " ) : <nl> licenses = [ " notice " ] , # Apache 2 . 0 <nl> sha256_urls = { <nl> " 23baad9a200a717a821c6df504c84d3a893d7ea9102b14876eb80097e3b94292 " : [ <nl> - " http : / / mirror . bazel . build / raw . githubusercontent . com / google / closure - compiler / 0e8dc5597a295ee259e3fecd98d6535dc621232f / contrib / externs / polymer - 1 . 0 . js " , <nl> + " http : / / mirror . bazel . build / raw . githubusercontent . com / google / closure - compiler / 0e8dc5597a295ee259e3fecd98d6535dc621232f / contrib / externs / polymer - 1 . 0 . js " , # 2017 - 05 - 27 <nl> " https : / / raw . githubusercontent . com / google / closure - compiler / 0e8dc5597a295ee259e3fecd98d6535dc621232f / contrib / externs / polymer - 1 . 0 . 
js " , <nl> ] , <nl> } , <nl> ) <nl> <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # TensorBoard JavaScript Production Dependencies <nl> + <nl> web_library_external ( <nl> name = " com_lodash " , <nl> licenses = [ " notice " ] , # MIT <nl> new file mode 100644 <nl> index 0000000000000 . . 593b70366a3a0 <nl> mmm / dev / null <nl> ppp b / third_party / clutz . BUILD <nl> <nl> + # Description : <nl> + # Build tool for making TypeScript . d . ts files from Closure JavaScript . <nl> + <nl> + package ( default_visibility = [ " / / visibility : public " ] ) <nl> + <nl> + licenses ( [ " notice " ] ) # MIT <nl> + <nl> + exports_files ( [ <nl> + " LICENSE " , <nl> + " src / resources / closure . lib . d . ts " , <nl> + ] ) <nl> + <nl> + JVM_FLAGS = [ <nl> + " - Xss20m " , # JSCompiler needs big stacks for recursive parsing <nl> + " - XX : + UseParallelGC " , # Best GC when app isn ' t latency sensitive <nl> + ] <nl> + <nl> + java_binary ( <nl> + name = " clutz " , <nl> + srcs = glob ( [ " src / main / java / com / google / javascript / clutz / * * / * . java " ] ) , <nl> + jvm_flags = JVM_FLAGS , <nl> + main_class = " com . google . javascript . clutz . DeclarationGenerator " , <nl> + deps = [ <nl> + " @ args4j " , <nl> + " @ com_google_code_findbugs_jsr305 " , <nl> + " @ com_google_code_gson " , <nl> + " @ com_google_guava " , <nl> + " @ com_google_javascript_closure_compiler " , <nl> + ] , <nl> + ) <nl> + <nl> + java_binary ( <nl> + name = " gents " , <nl> + srcs = glob ( [ " src / main / java / com / google / javascript / gents / * * / * . java " ] ) , <nl> + jvm_flags = JVM_FLAGS , <nl> + main_class = " com . google . javascript . gents . TypeScriptGenerator " , <nl> + deps = [ <nl> + " @ args4j " , <nl> + " @ com_google_code_findbugs_jsr305 " , <nl> + " @ com_google_code_gson " , <nl> + " @ com_google_guava " , <nl> + " @ com_google_javascript_closure_compiler " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 0000000000000 . . f273c78c794c6 <nl> mmm / dev / null <nl> ppp b / third_party / clutz . bzl <nl> <nl> + # Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + # <nl> + # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + # you may not use this file except in compliance with the License . <nl> + # You may obtain a copy of the License at <nl> + # <nl> + # http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + # <nl> + # Unless required by applicable law or agreed to in writing , software <nl> + # distributed under the License is distributed on an " AS IS " BASIS , <nl> + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + # See the License for the specific language governing permissions and <nl> + # limitations under the License . <nl> + <nl> + " " " Build definitions for TypeScript from Closure JavaScript libraries . " " " <nl> + <nl> + load ( " @ io_bazel_rules_closure / / closure / private : defs . bzl " , <nl> + " JS_FILE_TYPE " , <nl> + " collect_js " , <nl> + " unfurl " ) <nl> + <nl> + CLUTZ_ATTRIBUTES = { <nl> + " _clutz " : attr . label ( <nl> + default = Label ( " @ io_angular_clutz / / : clutz " ) , <nl> + executable = True , <nl> + cfg = " host " ) , <nl> + " _clutz_externs " : attr . 
label ( <nl> + default = Label ( " @ com_google_javascript_closure_compiler_externs " ) , <nl> + allow_files = True ) , <nl> + } <nl> + <nl> + def extract_dts_from_closure_libraries ( ctx ) : <nl> + " " " Extracts type definitions from closure dependencies . <nl> + <nl> + This just generates one big . d . ts file for all transitive Closure sources , <nl> + and does not pass it down . That means each rule has to duplicate the effort , <nl> + but on the other hand allows transitive dependencies on shared rules without <nl> + causing duplicate definition errors . <nl> + <nl> + Args : <nl> + ctx : A Skylark context . <nl> + Returns : <nl> + The generated Clutz typings file , or None if there were no JS deps . <nl> + " " " <nl> + deps = unfurl ( ctx . attr . deps , provider = " closure_js_library " ) <nl> + js = collect_js ( ctx , deps ) <nl> + if not js . srcs : <nl> + return None <nl> + js_typings = ctx . new_file ( ctx . bin_dir , " % s - js - typings . d . ts " % ctx . label . name ) <nl> + srcs = depset ( JS_FILE_TYPE . filter ( ctx . files . _clutz_externs ) ) + js . srcs <nl> + args = [ " - o " , js_typings . path ] <nl> + for src in srcs : <nl> + args . append ( src . path ) <nl> + if getattr ( ctx . attr , " clutz_entry_points " , None ) : <nl> + args . append ( " - - closure_entry_points " ) <nl> + args . extend ( ctx . attr . clutz_entry_points ) <nl> + ctx . action ( <nl> + inputs = list ( srcs ) , <nl> + outputs = [ js_typings ] , <nl> + executable = ctx . executable . _clutz , <nl> + arguments = args , <nl> + mnemonic = " Clutz " , <nl> + progress_message = " Running Clutz on % d JS files % s " % ( <nl> + len ( srcs ) , ctx . label ) ) <nl> + return js_typings <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # The following definitions are for API compatibility with internal clutz . bzl <nl> + <nl> + CLUTZ_OUTPUTS = { } <nl> + <nl> + def _clutz_aspect_impl ( target , ctx ) : <nl> + return struct ( ) <nl> + <nl> + clutz_aspect = aspect ( <nl> + implementation = _clutz_aspect_impl , <nl> + attr_aspects = [ " exports " ] ) <nl> mmm a / third_party / py / remote . BUILD . tpl <nl> ppp b / third_party / py / remote . BUILD . tpl <nl> package ( default_visibility = [ " / / visibility : public " ] ) <nl> <nl> alias ( <nl> name = " python_headers " , <nl> - actual = " @ % { REMOTE_PYTHON_REPO } / / : python_headers " , <nl> + actual = " % { REMOTE_PYTHON_REPO } : python_headers " , <nl> ) <nl> <nl> alias ( <nl> name = " numpy_headers " , <nl> - actual = " @ % { REMOTE_PYTHON_REPO } / / : numpy_headers " , <nl> + actual = " % { REMOTE_PYTHON_REPO } : numpy_headers " , <nl> ) <nl>
Merge commit for internal changes
tensorflow/tensorflow
85ce5f21eec74e203c35ebdc8452b244cbde24b9
2017-06-07T17:41:23Z
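The tfprof_node.cc comments in the diff above explain that a single GPU op appears under three device names in RunMetadata (gpu:x for scheduling and pre/post-processing, gpu:x:stream:all as a combined view, gpu:x:stream:id for the kernels themselves), and that ExecStep::exec_micros() sums per-stream kernel time first, falling back to op-level records for ops that only ran on CPU. Below is a minimal self-contained C++ sketch of that accounting policy; ExecRecords and Add are invented names for illustration, while the two device predicates mirror IsCombinedGPUStream and IsCPUDevice from the patch.

```cpp
// Standalone sketch (assumed names, not tfprof's real classes) of the
// per-device time accounting described in tfprof_node.cc above.
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

// These two predicates mirror the helpers added in the patch.
bool IsCombinedGPUStream(const std::string& device) {
  return device.find("stream:all") != std::string::npos;
}
bool IsCPUDevice(const std::string& device) {
  return device.find("cpu:0") != std::string::npos;
}

struct ExecRecords {
  // device -> (start_micros, duration_micros) records
  std::map<std::string, std::vector<std::pair<int64_t, int64_t>>> op_execs;
  std::map<std::string, std::vector<std::pair<int64_t, int64_t>>> gpu_kernel_execs;

  // Route a record the way ExecStep::AddTimeStats does: every record goes
  // into op_execs; only per-stream records (not the "stream:all" view)
  // also count as GPU kernel time.
  void Add(const std::string& dev, int64_t start_micros, int64_t duration) {
    op_execs[dev].emplace_back(start_micros, duration);
    if (dev.find("stream") != std::string::npos && !IsCombinedGPUStream(dev)) {
      gpu_kernel_execs[dev].emplace_back(start_micros, duration);
    }
  }

  // Same policy as ExecStep::exec_micros(): prefer summed GPU kernel time,
  // and only fall back to op-level records when no kernel time exists.
  int64_t exec_micros() const {
    int64_t total = 0;
    for (const auto& dev : gpu_kernel_execs)
      for (const auto& exec : dev.second) total += exec.second;
    if (total > 0) return total;
    for (const auto& dev : op_execs)
      for (const auto& exec : dev.second) total += exec.second;
    return total;
  }
};

int main() {
  ExecRecords r;
  r.Add("/gpu:0", 10, 5);             // op scheduling + pre/post-processing
  r.Add("/gpu:0/stream:all", 12, 3);  // combined view, must not double count
  r.Add("/gpu:0/stream:24", 12, 3);   // actual kernel execution on a stream
  std::printf("exec_micros = %lld\n",
              static_cast<long long>(r.exec_micros()));  // prints 3
  return 0;
}
```

A CPU-only op records nothing under any stream device, so it falls through to the second loop; that is exactly the fallback the patched exec_micros() implements.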
mmm a / CMakeModules / FindWasm . cmake <nl> ppp b / CMakeModules / FindWasm . cmake <nl> <nl> # TODO : Check if compiler is able to generate wasm32 <nl> <nl> find_program ( WASM_CLANG clang PATHS $ { WASM_ROOT } / bin NO_DEFAULT_PATH ) <nl> - find_program ( WASM_LLC llc HPATHS $ { WASM_ROOT } / bin NO_DEFAULT_PATH ) <nl> + find_program ( WASM_LLC llc PATHS $ { WASM_ROOT } / bin NO_DEFAULT_PATH ) <nl> find_program ( WASM_LLVM_LINK llvm - link PATHS $ { WASM_ROOT } / bin NO_DEFAULT_PATH ) <nl> <nl> include ( FindPackageHandleStandardArgs ) <nl>
fix small typo in wasm cmake change
EOSIO/eos
f0733d5d21b579f603afd6002b6574b5aff6be7b
2018-02-28T17:02:21Z
mmm a / src / core / hle / service / vi / vi . h <nl> ppp b / src / core / hle / service / vi / vi . h <nl> <nl> # include " core / hle / kernel / event . h " <nl> # include " core / hle / service / service . h " <nl> <nl> + namespace CoreTiming { <nl> + struct EventType ; <nl> + } <nl> + <nl> namespace Service { <nl> namespace VI { <nl> <nl> class NVFlinger { <nl> u32 next_buffer_queue_id = 1 ; <nl> <nl> / / / CoreTiming event that handles screen composition . <nl> - int composition_event ; <nl> + CoreTiming : : EventType * composition_event ; <nl> } ; <nl> <nl> class IApplicationDisplayService final : public ServiceFramework < IApplicationDisplayService > { <nl>
vi : Use new CoreTiming : : EventType
yuzu-emu/yuzu
7e0ff43da07763cc022e304e63bf5d2f1cdb401a
2018-01-11T04:28:33Z
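The yuzu change above is a textbook forward-declaration refactor: vi.h now stores a typed CoreTiming::EventType* instead of an int handle, and because the header only holds a pointer, declaring the struct is enough to compile; the full core timing header stays out of the include graph. A generic sketch of the pattern, with invented names (only CoreTiming::EventType and composition_event come from the diff):

```cpp
// display.h -- illustrative sketch only, not yuzu's actual header.
namespace CoreTiming {
struct EventType;  // forward declaration instead of #include "core_timing.h"
}

class Display {
public:
    void QueueComposition();  // defined in display.cpp, which includes the full header
private:
    /// CoreTiming event that handles screen composition.
    /// A pointer to an incomplete type is legal here; only code that
    /// dereferences it needs EventType's definition.
    CoreTiming::EventType* composition_event = nullptr;
};
```

The payoff is faster incremental builds (touching the timing header no longer recompiles every includer of this one) plus a stronger type than a raw int event id; the constraint is that this header may only use pointers or references to the incomplete type.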
mmm a / Marlin / Marlin_main . cpp <nl> ppp b / Marlin / Marlin_main . cpp <nl> void stop ( ) { <nl> * / <nl> void setup ( ) { <nl> <nl> - # if ENABLED ( DISABLE_JTAG ) <nl> + # ifdef DISABLE_JTAG <nl> / / Disable JTAG on AT90USB chips to free up pins for IO <nl> MCUCR = 0x80 ; <nl> MCUCR = 0x80 ; <nl> mmm a / Marlin / SanityCheck . h <nl> ppp b / Marlin / SanityCheck . h <nl> <nl> # error " You are using an old Configuration_adv . h file , update it before building Marlin . " <nl> # endif <nl> <nl> - / * * <nl> + / * * <nl> * Warnings for old configurations <nl> * / <nl> # if WATCH_TEMP_PERIOD > 500 <nl> <nl> <nl> # endif <nl> <nl> + / * * <nl> + * Homing Bump <nl> + * / <nl> + # if X_HOME_BUMP_MM < 0 | | Y_HOME_BUMP_MM < 0 | | Z_HOME_BUMP_MM < 0 <nl> + # error " [ XYZ ] _HOME_BUMP_MM must be greater than or equal to 0 . " <nl> + # endif <nl> + <nl> / * * <nl> * Make sure Z_SAFE_HOMING point is reachable <nl> * / <nl>
Sanity check homing bumps
MarlinFirmware/Marlin
0951bffcc00c09f5cef4c1b8399fd8cf5adb6d2c
2016-09-23T20:39:02Z
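The Marlin patch above turns an invalid configuration into a build failure instead of a runtime surprise: the preprocessor rejects negative [XYZ]_HOME_BUMP_MM values before any firmware code is compiled. A minimal sketch of the same guard style follows; HOME_BUMP_MM is a stand-in macro invented for this sketch, not Marlin's real per-axis names:

```cpp
// Compile-time configuration validation in the style of Marlin's SanityCheck.h.
// HOME_BUMP_MM is a hypothetical example setting.
#define HOME_BUMP_MM 2

#ifndef HOME_BUMP_MM
  #error "HOME_BUMP_MM must be defined in the configuration."
#elif HOME_BUMP_MM < 0
  #error "HOME_BUMP_MM must be greater than or equal to 0."
#endif

int main() { return 0; }  // builds only when the configuration passes the checks
```

On C++11-capable toolchains the same guard could be written as a static_assert(HOME_BUMP_MM >= 0, "...") in a translation unit, but the #error form also works on older embedded toolchains and fires during preprocessing, before any code is parsed.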
mmm a / core / image . cpp <nl> ppp b / core / image . cpp <nl> void Image : : create ( const char * * p_xpm ) { <nl> <nl> if ( * line_ptr = = ' # ' ) { <nl> line_ptr + + ; <nl> - uint8_t col_r ; <nl> - uint8_t col_g ; <nl> - uint8_t col_b ; <nl> + uint8_t col_r = 0 ; <nl> + uint8_t col_g = 0 ; <nl> + uint8_t col_b = 0 ; <nl> / / uint8_t col_a = 255 ; <nl> <nl> for ( int i = 0 ; i < 6 ; i + + ) { <nl> mmm a / core / io / translation_loader_po . cpp <nl> ppp b / core / io / translation_loader_po . cpp <nl> RES TranslationLoaderPO : : load_translation ( FileAccess * f , Error * r_error , const S <nl> <nl> Ref < Translation > translation = Ref < Translation > ( memnew ( Translation ) ) ; <nl> int line = 1 ; <nl> - bool skip_this ; <nl> - bool skip_next ; <nl> + bool skip_this = false ; <nl> + bool skip_next = false ; <nl> <nl> while ( true ) { <nl> <nl> mmm a / core / math / face3 . cpp <nl> ppp b / core / math / face3 . cpp <nl> void Face3 : : get_support ( const Vector3 & p_normal , const Transform & p_transform , V <nl> / * * FIND SUPPORT VERTEX * * / <nl> <nl> int vert_support_idx = - 1 ; <nl> - real_t support_max ; <nl> + real_t support_max = 0 ; <nl> <nl> for ( int i = 0 ; i < 3 ; i + + ) { <nl> <nl> mmm a / core / math / quick_hull . cpp <nl> ppp b / core / math / quick_hull . cpp <nl> Error QuickHull : : build ( const Vector < Vector3 > & p_points , Geometry : : MeshData & r_me <nl> int simplex [ 4 ] ; <nl> <nl> { <nl> - real_t max , min ; <nl> + real_t max = 0 , min = 0 ; <nl> <nl> for ( int i = 0 ; i < p_points . size ( ) ; i + + ) { <nl> <nl> Error QuickHull : : build ( const Vector < Vector3 > & p_points , Geometry : : MeshData & r_me <nl> / / third vertex is one most further away from the line <nl> <nl> { <nl> - real_t maxd ; <nl> + real_t maxd = 0 ; <nl> Vector3 rel12 = p_points [ simplex [ 0 ] ] - p_points [ simplex [ 1 ] ] ; <nl> <nl> for ( int i = 0 ; i < p_points . size ( ) ; i + + ) { <nl> Error QuickHull : : build ( const Vector < Vector3 > & p_points , Geometry : : MeshData & r_me <nl> / / fourth vertex is the one most further away from the plane <nl> <nl> { <nl> - real_t maxd ; <nl> + real_t maxd = 0 ; <nl> Plane p ( p_points [ simplex [ 0 ] ] , p_points [ simplex [ 1 ] ] , p_points [ simplex [ 2 ] ] ) ; <nl> <nl> for ( int i = 0 ; i < p_points . size ( ) ; i + + ) { <nl> mmm a / core / os / os . cpp <nl> ppp b / core / os / os . cpp <nl> void OS : : debug_break ( ) { <nl> <nl> void OS : : print_error ( const char * p_function , const char * p_file , int p_line , const char * p_code , const char * p_rationale , ErrorType p_type ) { <nl> <nl> - const char * err_type ; <nl> + const char * err_type = " * * ERROR * * " ; <nl> switch ( p_type ) { <nl> case ERR_ERROR : err_type = " * * ERROR * * " ; break ; <nl> case ERR_WARNING : err_type = " * * WARNING * * " ; break ; <nl> case ERR_SCRIPT : err_type = " * * SCRIPT ERROR * * " ; break ; <nl> case ERR_SHADER : err_type = " * * SHADER ERROR * * " ; break ; <nl> + default : ERR_PRINT ( " Unknown error type " ) ; break ; <nl> } <nl> <nl> if ( p_rationale & & * p_rationale ) <nl> mmm a / core / reference . h <nl> ppp b / core / reference . h <nl> class Reference : public Object { <nl> template < class T > <nl> class Ref { <nl> <nl> - T * reference ; <nl> + T * reference = NULL ; <nl> <nl> void ref ( const Ref & p_from ) { <nl> <nl> mmm a / core / ustring . cpp <nl> ppp b / core / ustring . 
cpp <nl> String String : : sprintf ( const Array & values , bool * error ) const { <nl> CharType * self = ( CharType * ) c_str ( ) ; <nl> bool in_format = false ; <nl> int value_index = 0 ; <nl> - int min_chars ; <nl> - int min_decimals ; <nl> - bool in_decimals ; <nl> - bool pad_with_zeroes ; <nl> - bool left_justified ; <nl> - bool show_sign ; <nl> + int min_chars = 0 ; <nl> + int min_decimals = 0 ; <nl> + bool in_decimals = false ; <nl> + bool pad_with_zeroes = false ; <nl> + bool left_justified = false ; <nl> + bool show_sign = false ; <nl> <nl> * error = true ; <nl> <nl> String String : : sprintf ( const Array & values , bool * error ) const { <nl> } <nl> <nl> int64_t value = values [ value_index ] ; <nl> - int base ; <nl> + int base = 16 ; <nl> bool capitalize = false ; <nl> switch ( c ) { <nl> case ' d ' : base = 10 ; break ; <nl> case ' o ' : base = 8 ; break ; <nl> - case ' x ' : base = 16 ; break ; <nl> + case ' x ' : break ; <nl> case ' X ' : <nl> base = 16 ; <nl> capitalize = true ; <nl> String String : : sprintf ( const Array & values , bool * error ) const { <nl> } <nl> break ; <nl> } <nl> - case ' . ' : { / / Float separtor . <nl> + case ' . ' : { / / Float separator . <nl> if ( in_decimals ) { <nl> return " too many decimal points in format " ; <nl> } <nl> String String : : sprintf ( const Array & values , bool * error ) const { <nl> break ; <nl> } <nl> <nl> - case ' * ' : { / / Dyanmic width , based on value . <nl> + case ' * ' : { / / Dynamic width , based on value . <nl> if ( value_index > = values . size ( ) ) { <nl> return " not enough arguments for format string " ; <nl> } <nl> mmm a / core / vmap . h <nl> ppp b / core / vmap . h <nl> class VMap { <nl> <nl> int low = 0 ; <nl> int high = _data . size ( ) - 1 ; <nl> - int middle ; <nl> const _Pair * a = & _data [ 0 ] ; <nl> + int middle = 0 ; <nl> <nl> + # if DEBUG_ENABLED <nl> + if ( low > high ) <nl> + ERR_PRINT ( " low > high , this may be a bug " ) ; <nl> + # endif <nl> while ( low < = high ) { <nl> middle = ( low + high ) / 2 ; <nl> <nl> mmm a / core / vset . h <nl> ppp b / core / vset . h <nl> class VSet { <nl> <nl> int low = 0 ; <nl> int high = _data . size ( ) - 1 ; <nl> - int middle ; <nl> const T * a = & _data [ 0 ] ; <nl> + int middle = 0 ; <nl> + <nl> + # if DEBUG_ENABLED <nl> + if ( low > high ) <nl> + ERR_PRINT ( " low > high , this may be a bug " ) ; <nl> + # endif <nl> <nl> while ( low < = high ) { <nl> middle = ( low + high ) / 2 ; <nl> mmm a / drivers / gles3 / rasterizer_scene_gles3 . cpp <nl> ppp b / drivers / gles3 / rasterizer_scene_gles3 . cpp <nl> bool RasterizerSceneGLES3 : : _shadow_atlas_find_shadow ( ShadowAtlas * shadow_atlas , <nl> <nl> int found_free_idx = - 1 ; / / found a free one <nl> int found_used_idx = - 1 ; / / found existing one , must steal it <nl> - uint64_t min_pass ; / / pass of the existing one , try to use the least recently used one ( LRU fashion ) <nl> + uint64_t min_pass = 0 ; / / pass of the existing one , try to use the least recently used one ( LRU fashion ) <nl> <nl> for ( int j = 0 ; j < sc ; j + + ) { <nl> if ( ! sarr [ j ] . owner . is_valid ( ) ) { <nl> mmm a / drivers / unix / packet_peer_udp_posix . cpp <nl> ppp b / drivers / unix / packet_peer_udp_posix . cpp <nl> Error PacketPeerUDPPosix : : get_packet ( const uint8_t * * r_buffer , int & r_buffer_siz <nl> if ( queue_count = = 0 ) <nl> return ERR_UNAVAILABLE ; <nl> <nl> - uint32_t size ; <nl> - uint8_t type ; <nl> + uint32_t size = 0 ; <nl> + uint8_t type = IP : : TYPE_NONE ; <nl> rb . 
read ( & type , 1 , true ) ; <nl> if ( type = = IP : : TYPE_IPV4 ) { <nl> uint8_t ip [ 4 ] ; <nl> mmm a / drivers / unix / tcp_server_posix . cpp <nl> ppp b / drivers / unix / tcp_server_posix . cpp <nl> Ref < StreamPeerTCP > TCPServerPosix : : take_connection ( ) { <nl> Ref < StreamPeerTCPPosix > conn = memnew ( StreamPeerTCPPosix ) ; <nl> IP_Address ip ; <nl> <nl> - int port ; <nl> + int port = 0 ; <nl> _set_ip_addr_port ( ip , port , & their_addr ) ; <nl> <nl> conn - > set_socket ( fd , ip , port , sock_type ) ; <nl> mmm a / editor / editor_audio_buses . cpp <nl> ppp b / editor / editor_audio_buses . cpp <nl> void EditorAudioBus : : _notification ( int p_what ) { <nl> float real_peak [ 2 ] = { - 100 , - 100 } ; <nl> bool activity_found = false ; <nl> <nl> - int cc ; <nl> + int cc = 0 ; <nl> switch ( AudioServer : : get_singleton ( ) - > get_speaker_mode ( ) ) { <nl> case AudioServer : : SPEAKER_MODE_STEREO : cc = 1 ; break ; <nl> case AudioServer : : SPEAKER_SURROUND_51 : cc = 4 ; break ; <nl> case AudioServer : : SPEAKER_SURROUND_71 : cc = 5 ; break ; <nl> + default : <nl> + ERR_PRINT ( " Unknown speaker_mode " ) ; <nl> + break ; <nl> } <nl> <nl> for ( int i = 0 ; i < cc ; i + + ) { <nl> mmm a / editor / import / resource_importer_wav . cpp <nl> ppp b / editor / import / resource_importer_wav . cpp <nl> Error ResourceImporterWAV : : import ( const String & p_source_file , const String & p_s <nl> int format_freq = 0 ; <nl> int loop_begin = 0 ; <nl> int loop_end = 0 ; <nl> - int frames ; <nl> + int frames = 0 ; <nl> <nl> Vector < float > data ; <nl> <nl> mmm a / editor / plugins / canvas_item_editor_plugin . cpp <nl> ppp b / editor / plugins / canvas_item_editor_plugin . cpp <nl> void CanvasItemEditor : : _viewport_draw ( ) { <nl> if ( snap_show_grid ) { <nl> / / Draw the grid <nl> Size2 s = viewport - > get_size ( ) ; <nl> - int last_cell ; <nl> + int last_cell = 0 ; <nl> Transform2D xform = transform . affine_inverse ( ) ; <nl> <nl> Vector2 grid_offset ; <nl> void CanvasItemEditor : : _notification ( int p_what ) { <nl> anchors [ MARGIN_RIGHT ] = Object : : cast_to < Control > ( canvas_item ) - > get_anchor ( MARGIN_RIGHT ) ; <nl> anchors [ MARGIN_TOP ] = Object : : cast_to < Control > ( canvas_item ) - > get_anchor ( MARGIN_TOP ) ; <nl> anchors [ MARGIN_BOTTOM ] = Object : : cast_to < Control > ( canvas_item ) - > get_anchor ( MARGIN_BOTTOM ) ; <nl> - } <nl> <nl> - if ( r ! = se - > prev_rect | | xform ! = se - > prev_xform | | pivot ! = se - > prev_pivot | | anchors [ MARGIN_LEFT ] ! = se - > prev_anchors [ MARGIN_LEFT ] | | anchors [ MARGIN_RIGHT ] ! = se - > prev_anchors [ MARGIN_RIGHT ] | | anchors [ MARGIN_TOP ] ! = se - > prev_anchors [ MARGIN_TOP ] | | anchors [ MARGIN_BOTTOM ] ! = se - > prev_anchors [ MARGIN_BOTTOM ] ) { <nl> - viewport - > update ( ) ; <nl> - se - > prev_rect = r ; <nl> - se - > prev_xform = xform ; <nl> - se - > prev_pivot = pivot ; <nl> - se - > prev_anchors [ MARGIN_LEFT ] = anchors [ MARGIN_LEFT ] ; <nl> - se - > prev_anchors [ MARGIN_RIGHT ] = anchors [ MARGIN_RIGHT ] ; <nl> - se - > prev_anchors [ MARGIN_TOP ] = anchors [ MARGIN_TOP ] ; <nl> - se - > prev_anchors [ MARGIN_BOTTOM ] = anchors [ MARGIN_BOTTOM ] ; <nl> + if ( r ! = se - > prev_rect | | xform ! = se - > prev_xform | | pivot ! = se - > prev_pivot | | anchors [ MARGIN_LEFT ] ! = se - > prev_anchors [ MARGIN_LEFT ] | | anchors [ MARGIN_RIGHT ] ! = se - > prev_anchors [ MARGIN_RIGHT ] | | anchors [ MARGIN_TOP ] ! = se - > prev_anchors [ MARGIN_TOP ] | | anchors [ MARGIN_BOTTOM ] ! 
= se - > prev_anchors [ MARGIN_BOTTOM ] ) { <nl> + viewport - > update ( ) ; <nl> + se - > prev_rect = r ; <nl> + se - > prev_xform = xform ; <nl> + se - > prev_pivot = pivot ; <nl> + se - > prev_anchors [ MARGIN_LEFT ] = anchors [ MARGIN_LEFT ] ; <nl> + se - > prev_anchors [ MARGIN_RIGHT ] = anchors [ MARGIN_RIGHT ] ; <nl> + se - > prev_anchors [ MARGIN_TOP ] = anchors [ MARGIN_TOP ] ; <nl> + se - > prev_anchors [ MARGIN_BOTTOM ] = anchors [ MARGIN_BOTTOM ] ; <nl> + } <nl> } <nl> } <nl> <nl> mmm a / editor / plugins / polygon_2d_editor_plugin . cpp <nl> ppp b / editor / plugins / polygon_2d_editor_plugin . cpp <nl> void Polygon2DEditor : : _uv_draw ( ) { <nl> <nl> if ( snap_show_grid ) { <nl> Size2 s = uv_edit_draw - > get_size ( ) ; <nl> - int last_cell ; <nl> + int last_cell = 0 ; <nl> <nl> if ( snap_step . x ! = 0 ) { <nl> for ( int i = 0 ; i < s . width ; i + + ) { <nl> mmm a / editor / plugins / texture_region_editor_plugin . cpp <nl> ppp b / editor / plugins / texture_region_editor_plugin . cpp <nl> void TextureRegionEditor : : _region_draw ( ) { <nl> <nl> if ( snap_mode = = SNAP_GRID ) { <nl> Size2 s = edit_draw - > get_size ( ) ; <nl> - int last_cell ; <nl> + int last_cell = 0 ; <nl> <nl> if ( snap_step . x ! = 0 ) { <nl> if ( snap_separation . x = = 0 ) <nl> void TextureRegionEditor : : _region_input ( const Ref < InputEvent > & p_input ) { <nl> } else if ( drag ) { <nl> <nl> if ( edited_margin > = 0 ) { <nl> - float new_margin ; <nl> + float new_margin = 0 ; <nl> if ( edited_margin = = 0 ) <nl> new_margin = prev_margin + ( mm - > get_position ( ) . y - drag_from . y ) / draw_zoom ; <nl> else if ( edited_margin = = 1 ) <nl> void TextureRegionEditor : : _region_input ( const Ref < InputEvent > & p_input ) { <nl> new_margin = prev_margin + ( mm - > get_position ( ) . x - drag_from . x ) / draw_zoom ; <nl> else if ( edited_margin = = 3 ) <nl> new_margin = prev_margin - ( mm - > get_position ( ) . x - drag_from . x ) / draw_zoom ; <nl> + else <nl> + ERR_PRINT ( " Unexpected edited_margin " ) ; <nl> + <nl> if ( new_margin < 0 ) <nl> new_margin = 0 ; <nl> static Margin m [ 4 ] = { MARGIN_TOP , MARGIN_BOTTOM , MARGIN_LEFT , MARGIN_RIGHT } ; <nl> mmm a / modules / gdscript / gd_function . cpp <nl> ppp b / modules / gdscript / gd_function . cpp <nl> Variant GDFunction : : call ( GDInstance * p_instance , const Variant * * p_args , int p_a <nl> <nl> # ifdef DEBUG_ENABLED <nl> <nl> - uint64_t function_start_time ; <nl> - uint64_t function_call_time ; <nl> + uint64_t function_start_time = 0 ; <nl> + uint64_t function_call_time = 0 ; <nl> <nl> if ( GDScriptLanguage : : get_singleton ( ) - > profiling ) { <nl> function_start_time = OS : : get_singleton ( ) - > get_ticks_usec ( ) ; <nl> Variant GDFunction : : call ( GDInstance * p_instance , const Variant * * p_args , int p_a <nl> } <nl> <nl> # ifdef DEBUG_ENABLED <nl> - uint64_t call_time ; <nl> + uint64_t call_time = 0 ; <nl> <nl> if ( GDScriptLanguage : : get_singleton ( ) - > profiling ) { <nl> call_time = OS : : get_singleton ( ) - > get_ticks_usec ( ) ; <nl> mmm a / modules / squish / image_compress_squish . cpp <nl> ppp b / modules / squish / image_compress_squish . 
cpp <nl> void image_compress_squish ( Image * p_image , Image : : CompressSource p_source ) { <nl> if ( p_image - > get_format ( ) < = Image : : FORMAT_RGBA8 ) { <nl> <nl> int squish_comp = squish : : kColourRangeFit ; <nl> - Image : : Format target_format ; <nl> + Image : : Format target_format = Image : : FORMAT_RGBA8 ; <nl> <nl> Image : : DetectChannels dc = p_image - > get_detected_channels ( ) ; <nl> <nl> void image_compress_squish ( Image * p_image , Image : : CompressSource p_source ) { <nl> squish_comp | = squish : : kDxt5 ; <nl> <nl> } break ; <nl> + default : { <nl> + ERR_PRINT ( " Unknown image format , defaulting to RGBA8 " ) ; <nl> + break ; <nl> + } <nl> } <nl> <nl> PoolVector < uint8_t > data ; <nl> mmm a / modules / stb_vorbis / audio_stream_ogg_vorbis . cpp <nl> ppp b / modules / stb_vorbis / audio_stream_ogg_vorbis . cpp <nl> <nl> <nl> # include " os / file_access . h " <nl> <nl> + # pragma GCC diagnostic ignored " - Wmaybe - uninitialized " <nl> # include " thirdparty / misc / stb_vorbis . c " <nl> + # pragma GCC diagnostic pop <nl> <nl> void AudioStreamPlaybackOGGVorbis : : _mix_internal ( AudioFrame * p_buffer , int p_frames ) { <nl> <nl> mmm a / modules / stb_vorbis / audio_stream_ogg_vorbis . h <nl> ppp b / modules / stb_vorbis / audio_stream_ogg_vorbis . h <nl> <nl> # include " servers / audio / audio_stream . h " <nl> <nl> # define STB_VORBIS_HEADER_ONLY <nl> + # pragma GCC diagnostic ignored " - Wmaybe - uninitialized " <nl> # include " thirdparty / misc / stb_vorbis . c " <nl> + # pragma GCC diagnostic pop <nl> # undef STB_VORBIS_HEADER_ONLY <nl> <nl> class AudioStreamOGGVorbis ; <nl> mmm a / modules / visual_script / visual_script_yield_nodes . cpp <nl> ppp b / modules / visual_script / visual_script_yield_nodes . cpp <nl> class VisualScriptNodeInstanceYieldSignal : public VisualScriptNodeInstance { <nl> } else { <nl> / / yield <nl> <nl> - Object * object ; <nl> + Object * object = NULL ; <nl> <nl> switch ( call_mode ) { <nl> <nl> mmm a / platform / android / export / export . cpp <nl> ppp b / platform / android / export / export . cpp <nl> class EditorExportAndroid : public EditorExportPlatform { <nl> <nl> / / print_line ( " FILESIZE : " + itos ( filesize ) + " ACTUAL : " + itos ( p_manifest . size ( ) ) ) ; <nl> <nl> - uint32_t string_count ; <nl> - uint32_t styles_count ; <nl> - uint32_t string_flags ; <nl> - uint32_t string_data_offset ; <nl> - <nl> - uint32_t styles_offset ; <nl> - uint32_t string_table_begins ; <nl> - uint32_t string_table_ends ; <nl> + uint32_t string_count = 0 ; <nl> + uint32_t styles_count = 0 ; <nl> + uint32_t string_flags = 0 ; <nl> + uint32_t string_data_offset = 0 ; <nl> + <nl> + uint32_t styles_offset = 0 ; <nl> + uint32_t string_table_begins = 0 ; <nl> + uint32_t string_table_ends = 0 ; <nl> Vector < uint8_t > stable_extra ; <nl> <nl> String version_name = p_preset - > get ( " version / name " ) ; <nl> mmm a / platform / uwp / export / export . cpp <nl> ppp b / platform / uwp / export / export . cpp <nl> class EditorExportUWP : public EditorExportPlatform { <nl> Vector < uint8_t > _get_image_data ( const Ref < EditorExportPreset > & p_preset , const String & p_path ) { <nl> <nl> Vector < uint8_t > data ; <nl> - StreamTexture * image ; <nl> + StreamTexture * image = NULL ; <nl> <nl> if ( p_path . find ( " StoreLogo " ) ! = - 1 ) { <nl> image = p_preset - > get ( " images / store_logo " ) . is_zero ( ) ? 
NULL : Object : : cast_to < StreamTexture > ( ( ( Object * ) p_preset - > get ( " images / store_logo " ) ) ) ; <nl> class EditorExportUWP : public EditorExportPlatform { <nl> image = p_preset - > get ( " images / wide310x150_logo " ) . is_zero ( ) ? NULL : Object : : cast_to < StreamTexture > ( ( ( Object * ) p_preset - > get ( " images / wide310x150_logo " ) ) ) ; <nl> } else if ( p_path . find ( " SplashScreen " ) ! = - 1 ) { <nl> image = p_preset - > get ( " images / splash_screen " ) . is_zero ( ) ? NULL : Object : : cast_to < StreamTexture > ( ( ( Object * ) p_preset - > get ( " images / splash_screen " ) ) ) ; <nl> + } else { <nl> + ERR_PRINT ( " Unable to load logo " ) ; <nl> } <nl> <nl> if ( ! image ) return data ; <nl> mmm a / scene / 2d / line_builder . cpp <nl> ppp b / scene / 2d / line_builder . cpp <nl> void LineBuilder : : build ( ) { <nl> <nl> float current_distance0 = 0 . f ; <nl> float current_distance1 = 0 . f ; <nl> - float total_distance ; <nl> + float total_distance = 0 . f ; <nl> _interpolate_color = gradient ! = NULL ; <nl> bool distance_required = _interpolate_color | | texture_mode = = LINE_TEXTURE_TILE ; <nl> if ( distance_required ) <nl> mmm a / scene / 3d / audio_stream_player_3d . cpp <nl> ppp b / scene / 3d / audio_stream_player_3d . cpp <nl> void AudioStreamPlayer3D : : _mix_audio ( ) { <nl> <nl> float AudioStreamPlayer3D : : _get_attenuation_db ( float p_distance ) const { <nl> <nl> - float att ; <nl> + float att = 0 ; <nl> switch ( attenuation_model ) { <nl> case ATTENUATION_INVERSE_DISTANCE : { <nl> att = Math : : linear2db ( 1 . 0 / ( ( p_distance / unit_size ) + 000001 ) ) ; <nl> float AudioStreamPlayer3D : : _get_attenuation_db ( float p_distance ) const { <nl> case ATTENUATION_LOGARITHMIC : { <nl> att = - 20 * Math : : log ( p_distance / unit_size + 000001 ) ; <nl> } break ; <nl> + default : { <nl> + ERR_PRINT ( " Unknown attenuation type " ) ; <nl> + break ; <nl> + } <nl> } <nl> <nl> att + = unit_db ; <nl> mmm a / scene / 3d / gi_probe . cpp <nl> ppp b / scene / 3d / gi_probe . cpp <nl> void GIProbe : : _plot_face ( int p_idx , int p_level , int p_x , int p_y , int p_z , cons <nl> / / plot the face by guessing it ' s albedo and emission value <nl> <nl> / / find best axis to map to , for scanning values <nl> - int closest_axis ; <nl> - float closest_dot ; <nl> + int closest_axis = 0 ; <nl> + float closest_dot = 0 ; <nl> <nl> Plane plane = Plane ( p_vtx [ 0 ] , p_vtx [ 1 ] , p_vtx [ 2 ] ) ; <nl> Vector3 normal = plane . normal ; <nl> mmm a / scene / gui / label . cpp <nl> ppp b / scene / gui / label . cpp <nl> void Label : : regenerate_word_cache ( ) { <nl> bool separatable = ( current > = 0x2E08 & & current < = 0xFAFF ) | | ( current > = 0xFE30 & & current < = 0xFE4F ) ; <nl> / / current > = 33 & & ( current < 65 | | current > 90 ) & & ( current < 97 | | current > 122 ) & & ( current < 48 | | current > 57 ) ; <nl> bool insert_newline = false ; <nl> - int char_width ; <nl> + int char_width = 0 ; <nl> <nl> if ( current < 33 ) { <nl> <nl> mmm a / scene / gui / text_edit . cpp <nl> ppp b / scene / gui / text_edit . cpp <nl> String TextEdit : : get_word_at_pos ( const Vector2 & p_pos ) const { <nl> bool symbol = beg < s . length ( ) & & _is_symbol ( s [ beg ] ) ; / / not sure if right but most editors behave like this <nl> <nl> bool inside_quotes = false ; <nl> - int qbegin , qend ; <nl> + int qbegin = 0 , qend = 0 ; <nl> for ( int i = 0 ; i < s . 
length ( ) ; i + + ) { <nl> if ( s [ i ] = = ' " ' ) { <nl> if ( inside_quotes ) { <nl> mmm a / scene / main / viewport . cpp <nl> ppp b / scene / main / viewport . cpp <nl> void Viewport : : _notification ( int p_what ) { <nl> if ( physics_object_picking & & ( to_screen_rect = = Rect2 ( ) | | Input : : get_singleton ( ) - > get_mouse_mode ( ) ! = Input : : MOUSE_MODE_CAPTURED ) ) { <nl> <nl> Vector2 last_pos ( 1e20 , 1e20 ) ; <nl> - CollisionObject * last_object ; <nl> + CollisionObject * last_object = NULL ; <nl> ObjectID last_id = 0 ; <nl> PhysicsDirectSpaceState : : RayResult result ; <nl> Physics2DDirectSpaceState * ss2d = Physics2DServer : : get_singleton ( ) - > space_get_direct_state ( find_world_2d ( ) - > get_space ( ) ) ; <nl> void Viewport : : _notification ( int p_what ) { <nl> } else if ( pos = = last_pos ) { <nl> <nl> if ( last_id ) { <nl> - if ( ObjectDB : : get_instance ( last_id ) ) { <nl> + if ( ObjectDB : : get_instance ( last_id ) & & last_object ) { <nl> / / good , exists <nl> last_object - > _input_event ( camera , ev , result . position , result . normal , result . shape ) ; <nl> if ( last_object - > get_capture_input_on_drag ( ) & & mb . is_valid ( ) & & mb - > get_button_index ( ) = = 1 & & mb - > is_pressed ( ) ) { <nl> mmm a / scene / resources / animation . cpp <nl> ppp b / scene / resources / animation . cpp <nl> int Animation : : _find ( const Vector < K > & p_keys , float p_time ) const { <nl> <nl> int low = 0 ; <nl> int high = len - 1 ; <nl> - int middle ; <nl> + int middle = 0 ; <nl> + <nl> + # if DEBUG_ENABLED <nl> + if ( low > high ) <nl> + ERR_PRINT ( " low > high , this may be a bug " ) ; <nl> + # endif <nl> <nl> const K * keys = & p_keys [ 0 ] ; <nl> <nl> Error Animation : : transform_track_interpolate ( int p_track , float p_time , Vector3 <nl> <nl> TransformTrack * tt = static_cast < TransformTrack * > ( t ) ; <nl> <nl> - bool ok ; <nl> + bool ok = false ; <nl> <nl> TransformKey tk = _interpolate ( tt - > transforms , p_time , tt - > interpolation , tt - > loop_wrap , & ok ) ; <nl> <nl> Variant Animation : : value_track_interpolate ( int p_track , float p_time ) const { <nl> ERR_FAIL_COND_V ( t - > type ! = TYPE_VALUE , Variant ( ) ) ; <nl> ValueTrack * vt = static_cast < ValueTrack * > ( t ) ; <nl> <nl> - bool ok ; <nl> + bool ok = false ; <nl> <nl> Variant res = _interpolate ( vt - > values , p_time , vt - > update_mode = = UPDATE_CONTINUOUS ? vt - > interpolation : INTERPOLATION_NEAREST , vt - > loop_wrap , & ok ) ; <nl> <nl> mmm a / scene / resources / color_ramp . h <nl> ppp b / scene / resources / color_ramp . h <nl> class Gradient : public Resource { <nl> / / binary search <nl> int low = 0 ; <nl> int high = points . size ( ) - 1 ; <nl> - int middle ; <nl> + int middle = 0 ; <nl> + <nl> + # if DEBUG_ENABLED <nl> + if ( low > high ) <nl> + ERR_PRINT ( " low > high , this may be a bug " ) ; <nl> + # endif <nl> <nl> while ( low < = high ) { <nl> middle = ( low + high ) / 2 ; <nl> mmm a / scene / resources / font . cpp <nl> ppp b / scene / resources / font . cpp <nl> void Font : : draw_halign ( RID p_canvas_item , const Point2 & p_pos , HAlign p_align , f <nl> return ; <nl> } <nl> <nl> - float ofs ; <nl> + float ofs = 0 . 
f ; <nl> switch ( p_align ) { <nl> case HALIGN_LEFT : { <nl> ofs = 0 ; <nl> void Font : : draw_halign ( RID p_canvas_item , const Point2 & p_pos , HAlign p_align , f <nl> case HALIGN_RIGHT : { <nl> ofs = p_width - length ; <nl> } break ; <nl> + default : { <nl> + ERR_PRINT ( " Unknown halignment type " ) ; <nl> + } break ; <nl> } <nl> draw ( p_canvas_item , p_pos + Point2 ( ofs , 0 ) , p_text , p_modulate , p_width ) ; <nl> } <nl> mmm a / servers / physics / collision_solver_sw . cpp <nl> ppp b / servers / physics / collision_solver_sw . cpp <nl> bool CollisionSolverSW : : solve_distance_plane ( const ShapeSW * p_shape_A , const Tra <nl> <nl> bool collided = false ; <nl> Vector3 closest ; <nl> - real_t closest_d ; <nl> + real_t closest_d = 0 ; <nl> <nl> for ( int i = 0 ; i < support_count ; i + + ) { <nl> <nl> mmm a / servers / physics / gjk_epa . cpp <nl> ppp b / servers / physics / gjk_epa . cpp <nl> struct GJK <nl> if ( l > GJK_SIMPLEX3_EPS ) <nl> { <nl> real_t mindist = - 1 ; <nl> - real_t subw [ 2 ] ; <nl> - U subm ; <nl> + real_t subw [ 2 ] = { 0 , 0 } ; <nl> + U subm = 0 ; <nl> for ( U i = 0 ; i < 3 ; + + i ) <nl> { <nl> if ( vec3_dot ( * vt [ i ] , vec3_cross ( dl [ i ] , n ) ) > 0 ) <nl> struct GJK <nl> { <nl> real_t mindist = - 1 ; <nl> real_t subw [ 3 ] ; <nl> - U subm ; <nl> + U subm = 0 ; <nl> for ( U i = 0 ; i < 3 ; + + i ) <nl> { <nl> const U j = imd3 [ i ] ; <nl> mmm a / servers / physics / shape_sw . cpp <nl> ppp b / servers / physics / shape_sw . cpp <nl> Vector3 ConvexPolygonShapeSW : : get_support ( const Vector3 & p_normal ) const { <nl> Vector3 n = p_normal ; <nl> <nl> int vert_support_idx = - 1 ; <nl> - real_t support_max ; <nl> + real_t support_max = 0 ; <nl> <nl> int vertex_count = mesh . vertices . size ( ) ; <nl> if ( vertex_count = = 0 ) <nl> void ConvexPolygonShapeSW : : get_supports ( const Vector3 & p_normal , int p_max , Vect <nl> int vc = mesh . vertices . size ( ) ; <nl> <nl> / / find vertex first <nl> - real_t max ; <nl> - int vtx ; <nl> + real_t max = 0 ; <nl> + int vtx = 0 ; <nl> <nl> for ( int i = 0 ; i < vc ; i + + ) { <nl> <nl> void FaceShapeSW : : project_range ( const Vector3 & p_normal , const Transform & p_tran <nl> Vector3 FaceShapeSW : : get_support ( const Vector3 & p_normal ) const { <nl> <nl> int vert_support_idx = - 1 ; <nl> - real_t support_max ; <nl> + real_t support_max = 0 ; <nl> <nl> for ( int i = 0 ; i < 3 ; i + + ) { <nl> <nl> Vector3 ConcavePolygonShapeSW : : get_support ( const Vector3 & p_normal ) const { <nl> Vector3 n = p_normal ; <nl> <nl> int vert_support_idx = - 1 ; <nl> - real_t support_max ; <nl> + real_t support_max = 0 ; <nl> <nl> for ( int i = 0 ; i < count ; i + + ) { <nl> <nl> mmm a / servers / visual / shader_language . cpp <nl> ppp b / servers / visual / shader_language . cpp <nl> ShaderLanguage : : Node * ShaderLanguage : : _parse_expression ( BlockNode * p_block , cons <nl> } <nl> <nl> bool index_valid = false ; <nl> - DataType member_type ; <nl> + DataType member_type = TYPE_VOID ; <nl> <nl> switch ( expr - > get_datatype ( ) ) { <nl> case TYPE_BVEC2 : <nl> mmm a / servers / visual / visual_server_scene . cpp <nl> ppp b / servers / visual / visual_server_scene . cpp <nl> void VisualServerScene : : _light_instance_update_shadow ( Instance * p_instance , cons <nl> Vector3 z_vec = transform . basis . get_axis ( Vector3 : : AXIS_Z ) . 
normalized ( ) ; <nl> / / z_vec points agsint the camera , like in default opengl <nl> <nl> - float x_min , x_max ; <nl> - float y_min , y_max ; <nl> - float z_min , z_max ; <nl> + float x_min = 0 . f , x_max = 0 . f ; <nl> + float y_min = 0 . f , y_max = 0 . f ; <nl> + float z_min = 0 . f , z_max = 0 . f ; <nl> <nl> - float x_min_cam , x_max_cam ; <nl> - float y_min_cam , y_max_cam ; <nl> - float z_min_cam , z_max_cam ; <nl> + float x_min_cam = 0 . f , x_max_cam = 0 . f ; <nl> + float y_min_cam = 0 . f , y_max_cam = 0 . f ; <nl> + float z_min_cam = 0 . f , z_max_cam = 0 . f ; <nl> <nl> float bias_scale = 1 . 0 ; <nl> <nl> void VisualServerScene : : _render_scene ( const Transform p_cam_transform , const Cam <nl> <nl> InstanceLightData * light = static_cast < InstanceLightData * > ( ins - > base_data ) ; <nl> <nl> - float coverage ; <nl> + float coverage = 0 . f ; <nl> <nl> { / / compute coverage <nl> <nl>
Fix use of uninitialized variables
godotengine/godot
9c63ab99f0a505b0f60079bb30cc453b4415fddc
2017-09-01T23:59:26Z
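The record above bundles dozens of instances of one pattern: locals that were only assigned on some switch/branch paths are now default-initialized at their declaration, and non-exhaustive switches gain a default branch that logs instead of leaving the value undefined. Below is a minimal, self-contained C++ sketch of that pattern; the names are illustrative, modeled on the EditorAudioBus hunk rather than taken from Godot's actual headers.

#include <cstdio>

enum SpeakerMode { SPEAKER_MODE_STEREO, SPEAKER_SURROUND_51, SPEAKER_SURROUND_71, SPEAKER_MODE_UNKNOWN };

// Before the fix, this read `int cc;` with no default case, so calling it
// with an unhandled enum value returned an uninitialized (undefined) value.
static int channel_count(SpeakerMode mode) {
    int cc = 0; // initialized up front: every path now yields a defined value
    switch (mode) {
        case SPEAKER_MODE_STEREO: cc = 1; break;
        case SPEAKER_SURROUND_51: cc = 4; break;
        case SPEAKER_SURROUND_71: cc = 5; break;
        default:
            std::fprintf(stderr, "Unknown speaker_mode\n"); // loud, not undefined
            break;
    }
    return cc;
}

int main() {
    std::printf("%d %d\n", channel_count(SPEAKER_SURROUND_51), channel_count(SPEAKER_MODE_UNKNOWN));
    return 0;
}

Initializing at the declaration keeps the read defined even for enum values added later, and the default branch makes the unexpected case visible, matching the ERR_PRINT calls the commit adds in the same places.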
mmm a / src / compiler / wasm - compiler . cc <nl> ppp b / src / compiler / wasm - compiler . cc <nl> Handle < Code > CompileJSToWasmWrapper ( Isolate * isolate , wasm : : ModuleEnv * module , <nl> <nl> Handle < Code > CompileWasmToJSWrapper ( Isolate * isolate , Handle < JSReceiver > target , <nl> wasm : : FunctionSig * sig , uint32_t index , <nl> - Handle < String > import_module , <nl> - MaybeHandle < String > import_function ) { <nl> + Handle < String > module_name , <nl> + MaybeHandle < String > import_name ) { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> / / Create the Graph <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> Handle < Code > CompileWasmToJSWrapper ( Isolate * isolate , Handle < JSReceiver > target , <nl> if ( isolate - > logger ( ) - > is_logging_code_events ( ) | | isolate - > is_profiling ( ) ) { <nl> const char * function_name = nullptr ; <nl> int function_name_size = 0 ; <nl> - if ( ! import_function . is_null ( ) ) { <nl> - Handle < String > handle = import_function . ToHandleChecked ( ) ; <nl> + if ( ! import_name . is_null ( ) ) { <nl> + Handle < String > handle = import_name . ToHandleChecked ( ) ; <nl> function_name = handle - > ToCString ( ) . get ( ) ; <nl> function_name_size = handle - > length ( ) ; <nl> } <nl> RecordFunctionCompilation ( <nl> CodeEventListener : : FUNCTION_TAG , isolate , code , " wasm - to - js " , index , <nl> - { import_module - > ToCString ( ) . get ( ) , import_module - > length ( ) } , <nl> + { module_name - > ToCString ( ) . get ( ) , module_name - > length ( ) } , <nl> { function_name , function_name_size } ) ; <nl> } <nl> <nl> mmm a / src / compiler / wasm - compiler . h <nl> ppp b / src / compiler / wasm - compiler . h <nl> class WasmCompilationUnit final { <nl> / / Wraps a JS function , producing a code object that can be called from WASM . <nl> Handle < Code > CompileWasmToJSWrapper ( Isolate * isolate , Handle < JSReceiver > target , <nl> wasm : : FunctionSig * sig , uint32_t index , <nl> - Handle < String > import_module , <nl> - MaybeHandle < String > import_function ) ; <nl> + Handle < String > module_name , <nl> + MaybeHandle < String > import_name ) ; <nl> <nl> / / Wraps a given wasm code object , producing a code object . <nl> Handle < Code > CompileJSToWasmWrapper ( Isolate * isolate , wasm : : ModuleEnv * module , <nl> mmm a / src / wasm / module - decoder . cc <nl> ppp b / src / wasm / module - decoder . cc <nl> class ModuleDecoder : public Decoder { <nl> / / = = = = = Imported global = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> import - > index = static_cast < uint32_t > ( module - > globals . size ( ) ) ; <nl> module - > globals . push_back ( <nl> - { kAstStmt , false , NO_INIT , 0 , true , false } ) ; <nl> + { kAstStmt , false , WasmInitExpr ( ) , 0 , true , false } ) ; <nl> WasmGlobal * global = & module - > globals . back ( ) ; <nl> global - > type = consume_value_type ( ) ; <nl> global - > mutability = consume_u8 ( " mutability " ) ! = 0 ; <nl> class ModuleDecoder : public Decoder { <nl> TRACE ( " DecodeGlobal [ % d ] module + % d \ n " , i , <nl> static_cast < int > ( pc_ - start_ ) ) ; <nl> / / Add an uninitialized global and pass a pointer to it . <nl> - module - > globals . push_back ( { kAstStmt , false , NO_INIT , 0 , false , false } ) ; <nl> + module - > globals . 
push_back ( <nl> + { kAstStmt , false , WasmInitExpr ( ) , 0 , false , false } ) ; <nl> WasmGlobal * global = & module - > globals . back ( ) ; <nl> DecodeGlobalInModule ( module , i , global ) ; <nl> } <nl> class ModuleDecoder : public Decoder { <nl> TRACE ( " DecodeDataSegment [ % d ] module + % d \ n " , i , <nl> static_cast < int > ( pc_ - start_ ) ) ; <nl> module - > data_segments . push_back ( { <nl> - NO_INIT , / / dest_addr <nl> - 0 , / / source_offset <nl> - 0 / / source_size <nl> + WasmInitExpr ( ) , / / dest_addr <nl> + 0 , / / source_offset <nl> + 0 / / source_size <nl> } ) ; <nl> WasmDataSegment * segment = & module - > data_segments . back ( ) ; <nl> DecodeDataSegmentInModule ( module , segment ) ; <nl> class ModuleDecoder : public Decoder { <nl> const byte * pos = pc ( ) ; <nl> global - > init = consume_init_expr ( module , kAstStmt ) ; <nl> switch ( global - > init . kind ) { <nl> - case WasmInitExpr : : kGlobalIndex : <nl> - if ( global - > init . val . global_index > = index ) { <nl> + case WasmInitExpr : : kGlobalIndex : { <nl> + uint32_t other_index = global - > init . val . global_index ; <nl> + if ( other_index > = index ) { <nl> error ( " invalid global index in init expression " ) ; <nl> - } else if ( module - > globals [ index ] . type ! = global - > type ) { <nl> - error ( " type mismatch in global initialization " ) ; <nl> + } else if ( module - > globals [ other_index ] . type ! = global - > type ) { <nl> + error ( pos , pos , <nl> + " type mismatch in global initialization " <nl> + " ( from global # % u ) , expected % s , got % s " , <nl> + other_index , WasmOpcodes : : TypeName ( global - > type ) , <nl> + WasmOpcodes : : TypeName ( module - > globals [ other_index ] . type ) ) ; <nl> } <nl> break ; <nl> + } <nl> default : <nl> if ( global - > type ! = TypeOf ( module , global - > init ) ) { <nl> error ( pos , pos , <nl> mmm a / src / wasm / wasm - module - builder . cc <nl> ppp b / src / wasm / wasm - module - builder . cc <nl> void WasmModuleBuilder : : MarkStartFunction ( WasmFunctionBuilder * function ) { <nl> } <nl> <nl> uint32_t WasmModuleBuilder : : AddGlobal ( LocalType type , bool exported , <nl> - bool mutability ) { <nl> - globals_ . push_back ( { type , exported , mutability } ) ; <nl> + bool mutability , <nl> + const WasmInitExpr & init ) { <nl> + globals_ . push_back ( { type , exported , mutability , init } ) ; <nl> return static_cast < uint32_t > ( globals_ . size ( ) - 1 ) ; <nl> } <nl> <nl> void WasmModuleBuilder : : WriteTo ( ZoneBuffer & buffer ) const { <nl> for ( auto global : globals_ ) { <nl> buffer . write_u8 ( WasmOpcodes : : LocalTypeCodeFor ( global . type ) ) ; <nl> buffer . write_u8 ( global . mutability ? 1 : 0 ) ; <nl> - switch ( global . type ) { <nl> - case kAstI32 : { <nl> - static const byte code [ ] = { WASM_I32V_1 ( 0 ) } ; <nl> + switch ( global . init . kind ) { <nl> + case WasmInitExpr : : kI32Const : { <nl> + DCHECK_EQ ( kAstI32 , global . type ) ; <nl> + const byte code [ ] = { WASM_I32V_5 ( global . init . val . i32_const ) } ; <nl> buffer . write ( code , sizeof ( code ) ) ; <nl> break ; <nl> } <nl> - case kAstF32 : { <nl> - static const byte code [ ] = { WASM_F32 ( 0 ) } ; <nl> + case WasmInitExpr : : kI64Const : { <nl> + DCHECK_EQ ( kAstI64 , global . type ) ; <nl> + const byte code [ ] = { WASM_I64V_10 ( global . init . val . i64_const ) } ; <nl> buffer . 
write ( code , sizeof ( code ) ) ; <nl> break ; <nl> } <nl> - case kAstI64 : { <nl> - static const byte code [ ] = { WASM_I64V_1 ( 0 ) } ; <nl> + case WasmInitExpr : : kF32Const : { <nl> + DCHECK_EQ ( kAstF32 , global . type ) ; <nl> + const byte code [ ] = { WASM_F32 ( global . init . val . f32_const ) } ; <nl> buffer . write ( code , sizeof ( code ) ) ; <nl> break ; <nl> } <nl> - case kAstF64 : { <nl> - static const byte code [ ] = { WASM_F64 ( 0 . 0 ) } ; <nl> + case WasmInitExpr : : kF64Const : { <nl> + DCHECK_EQ ( kAstF64 , global . type ) ; <nl> + const byte code [ ] = { WASM_F64 ( global . init . val . f64_const ) } ; <nl> buffer . write ( code , sizeof ( code ) ) ; <nl> break ; <nl> } <nl> - default : <nl> - UNREACHABLE ( ) ; <nl> + case WasmInitExpr : : kGlobalIndex : { <nl> + const byte code [ ] = { kExprGetGlobal , <nl> + U32V_5 ( global . init . val . global_index ) } ; <nl> + buffer . write ( code , sizeof ( code ) ) ; <nl> + break ; <nl> + } <nl> + default : { <nl> + / / No initializer , emit a default value . <nl> + switch ( global . type ) { <nl> + case kAstI32 : { <nl> + const byte code [ ] = { WASM_I32V_1 ( 0 ) } ; <nl> + buffer . write ( code , sizeof ( code ) ) ; <nl> + break ; <nl> + } <nl> + case kAstI64 : { <nl> + const byte code [ ] = { WASM_I64V_1 ( 0 ) } ; <nl> + buffer . write ( code , sizeof ( code ) ) ; <nl> + break ; <nl> + } <nl> + case kAstF32 : { <nl> + const byte code [ ] = { WASM_F32 ( 0 . 0 ) } ; <nl> + buffer . write ( code , sizeof ( code ) ) ; <nl> + break ; <nl> + } <nl> + case kAstF64 : { <nl> + const byte code [ ] = { WASM_F64 ( 0 . 0 ) } ; <nl> + buffer . write ( code , sizeof ( code ) ) ; <nl> + break ; <nl> + } <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> } <nl> buffer . write_u8 ( kExprEnd ) ; <nl> } <nl> mmm a / src / wasm / wasm - module - builder . h <nl> ppp b / src / wasm / wasm - module - builder . h <nl> class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject { <nl> imports_ [ index ] . name_length = name_length ; <nl> } <nl> WasmFunctionBuilder * AddFunction ( FunctionSig * sig = nullptr ) ; <nl> - uint32_t AddGlobal ( LocalType type , bool exported , bool mutability = true ) ; <nl> + uint32_t AddGlobal ( LocalType type , bool exported , bool mutability = true , <nl> + const WasmInitExpr & init = WasmInitExpr ( ) ) ; <nl> void AddDataSegment ( const byte * data , uint32_t size , uint32_t dest ) ; <nl> uint32_t AddSignature ( FunctionSig * sig ) ; <nl> void AddIndirectFunction ( uint32_t index ) ; <nl> class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject { <nl> LocalType type ; <nl> bool exported ; <nl> bool mutability ; <nl> + WasmInitExpr init ; <nl> } ; <nl> <nl> struct WasmDataSegment { <nl> mmm a / src / wasm / wasm - module . cc <nl> ppp b / src / wasm / wasm - module . cc <nl> enum WasmInstanceObjectFields { <nl> } ; <nl> <nl> enum WasmImportData { <nl> + kImportKind , / / Smi . an ExternalKind <nl> + kImportGlobalType , / / Smi . Type for globals . <nl> + kImportIndex , / / Smi . index for the import . <nl> kModuleName , / / String <nl> kFunctionName , / / maybe String <nl> kOutputCount , / / Smi . an uint32_t <nl> enum WasmImportData { <nl> } ; <nl> <nl> enum WasmExportData { <nl> - kExportName , / / String <nl> - kExportArity , / / Smi , an int <nl> - kExportedFunctionIndex , / / Smi , an uint32_t <nl> - kExportedSignature , / / ByteArray . A copy of the data in FunctionSig <nl> - kWasmExportDataSize / / Sentinel value . <nl> + kExportKind , / / Smi . 
an ExternalKind <nl> + kExportGlobalType , / / Smi . Type for globals . <nl> + kExportName , / / String <nl> + kExportArity , / / Smi , an int <nl> + kExportIndex , / / Smi , an uint32_t <nl> + kExportedSignature , / / ByteArray . A copy of the data in FunctionSig <nl> + kWasmExportDataSize / / Sentinel value . <nl> + } ; <nl> + <nl> + enum WasmGlobalInitData { <nl> + kGlobalInitKind , / / 0 = constant , 1 = global index <nl> + kGlobalInitType , / / Smi . Type for globals . <nl> + kGlobalInitIndex , / / Smi , an uint32_t <nl> + kGlobalInitValue , / / Number . <nl> + kWasmGlobalInitDataSize <nl> } ; <nl> <nl> enum WasmSegmentInfo { <nl> - kDestAddr , / / Smi . an uint32_t <nl> + kDestInitKind , / / 0 = constant , 1 = global index <nl> + kDestAddrValue , / / Smi . an uint32_t <nl> kSourceSize , / / Smi . an uint32_t <nl> kWasmSegmentInfoSize / / Sentinel value . <nl> } ; <nl> enum WasmIndirectFunctionTableData { <nl> kWasmIndirectFunctionTableDataSize / / Sentinel value . <nl> } ; <nl> <nl> - uint32_t GetMinModuleMemSize ( const WasmModule * module ) { <nl> - return WasmModule : : kPageSize * module - > min_mem_pages ; <nl> + byte * raw_buffer_ptr ( MaybeHandle < JSArrayBuffer > buffer , int offset ) { <nl> + return static_cast < byte * > ( buffer . ToHandleChecked ( ) - > backing_store ( ) ) + offset ; <nl> } <nl> <nl> - void LoadDataSegments ( Handle < WasmCompiledModule > compiled_module , <nl> - Address mem_addr , size_t mem_size ) { <nl> - CHECK ( compiled_module - > has_data_segments ( ) = = <nl> - compiled_module - > has_data_segments_info ( ) ) ; <nl> - <nl> - / / If we have neither , we ' re done . <nl> - if ( ! compiled_module - > has_data_segments ( ) ) return ; <nl> - <nl> - Handle < ByteArray > data = compiled_module - > data_segments ( ) ; <nl> - Handle < FixedArray > segments = compiled_module - > data_segments_info ( ) ; <nl> - <nl> - uint32_t last_extraction_pos = 0 ; <nl> - for ( int i = 0 ; i < segments - > length ( ) ; + + i ) { <nl> - Handle < ByteArray > segment = <nl> - Handle < ByteArray > ( ByteArray : : cast ( segments - > get ( i ) ) ) ; <nl> - uint32_t dest_addr = static_cast < uint32_t > ( segment - > get_int ( kDestAddr ) ) ; <nl> - uint32_t source_size = static_cast < uint32_t > ( segment - > get_int ( kSourceSize ) ) ; <nl> - CHECK_LT ( dest_addr , mem_size ) ; <nl> - CHECK_LE ( source_size , mem_size ) ; <nl> - CHECK_LE ( dest_addr , mem_size - source_size ) ; <nl> - byte * addr = mem_addr + dest_addr ; <nl> - data - > copy_out ( last_extraction_pos , addr , source_size ) ; <nl> - last_extraction_pos + = source_size ; <nl> - } <nl> + uint32_t GetMinModuleMemSize ( const WasmModule * module ) { <nl> + return WasmModule : : kPageSize * module - > min_mem_pages ; <nl> } <nl> <nl> void SaveDataSegmentInfo ( Factory * factory , const WasmModule * module , <nl> void SaveDataSegmentInfo ( Factory * factory , const WasmModule * module , <nl> factory - > NewByteArray ( kWasmSegmentInfoSize * sizeof ( uint32_t ) , TENURED ) ; <nl> / / TODO ( titzer ) : add support for global offsets for dest_addr <nl> CHECK_EQ ( WasmInitExpr : : kI32Const , segment . dest_addr . kind ) ; <nl> - js_segment - > set_int ( kDestAddr , segment . dest_addr . val . i32_const ) ; <nl> + js_segment - > set_int ( kDestAddrValue , segment . dest_addr . val . i32_const ) ; <nl> js_segment - > set_int ( kSourceSize , segment . 
source_size ) ; <nl> segments - > set ( i , * js_segment ) ; <nl> data - > copy_in ( last_insertion_pos , <nl> void RelocateInstanceCode ( Handle < JSObject > instance , Address old_start , <nl> } <nl> } <nl> <nl> - / / Allocate memory for a module instance as a new JSArrayBuffer . <nl> - Handle < JSArrayBuffer > AllocateMemory ( ErrorThrower * thrower , Isolate * isolate , <nl> - uint32_t min_mem_pages ) { <nl> - if ( min_mem_pages > WasmModule : : kMaxMemPages ) { <nl> - thrower - > Error ( " Out of memory : wasm memory too large " ) ; <nl> - return Handle < JSArrayBuffer > : : null ( ) ; <nl> - } <nl> - Handle < JSArrayBuffer > mem_buffer = <nl> - NewArrayBuffer ( isolate , min_mem_pages * WasmModule : : kPageSize ) ; <nl> - <nl> - if ( mem_buffer . is_null ( ) ) { <nl> - thrower - > Error ( " Out of memory : wasm memory " ) ; <nl> - } <nl> - return mem_buffer ; <nl> - } <nl> - <nl> void RelocateGlobals ( Handle < JSObject > instance , Address old_start , <nl> Address globals_start ) { <nl> Handle < FixedArray > functions = Handle < FixedArray > ( <nl> Address GetGlobalStartAddressFromCodeTemplate ( Object * undefined , <nl> return old_address ; <nl> } <nl> <nl> - Handle < FixedArray > GetImportsData ( Factory * factory , const WasmModule * module ) { <nl> + Handle < FixedArray > EncodeImports ( Factory * factory , const WasmModule * module ) { <nl> Handle < FixedArray > ret = factory - > NewFixedArray ( <nl> static_cast < int > ( module - > import_table . size ( ) ) , TENURED ) ; <nl> + <nl> for ( size_t i = 0 ; i < module - > import_table . size ( ) ; + + i ) { <nl> const WasmImport & import = module - > import_table [ i ] ; <nl> - if ( import . kind ! = kExternalFunction ) continue ; <nl> + Handle < FixedArray > encoded_import = <nl> + factory - > NewFixedArray ( kWasmImportDataSize , TENURED ) ; <nl> + encoded_import - > set ( kImportKind , Smi : : FromInt ( import . kind ) ) ; <nl> + encoded_import - > set ( kImportIndex , Smi : : FromInt ( import . index ) ) ; <nl> + <nl> + / / Add the module and function name . <nl> WasmName module_name = module - > GetNameOrNull ( import . module_name_offset , <nl> import . module_name_length ) ; <nl> WasmName function_name = module - > GetNameOrNull ( import . field_name_offset , <nl> Handle < FixedArray > GetImportsData ( Factory * factory , const WasmModule * module ) { <nl> <nl> Handle < String > module_name_string = <nl> factory - > InternalizeUtf8String ( module_name ) ; <nl> - Handle < String > function_name_string = <nl> - function_name . is_empty ( ) <nl> - ? Handle < String > : : null ( ) <nl> - : factory - > InternalizeUtf8String ( function_name ) ; <nl> - FunctionSig * fsig = module - > functions [ import . index ] . sig ; <nl> - Handle < ByteArray > sig = factory - > NewByteArray ( <nl> - static_cast < int > ( fsig - > parameter_count ( ) + fsig - > return_count ( ) ) , <nl> - TENURED ) ; <nl> - sig - > copy_in ( 0 , reinterpret_cast < const byte * > ( fsig - > raw_data ( ) ) , <nl> - sig - > length ( ) ) ; <nl> - Handle < FixedArray > encoded_import = <nl> - factory - > NewFixedArray ( kWasmImportDataSize , TENURED ) ; <nl> encoded_import - > set ( kModuleName , * module_name_string ) ; <nl> - if ( ! function_name_string . is_null ( ) ) { <nl> + if ( ! function_name . 
is_empty ( ) ) { <nl> + Handle < String > function_name_string = <nl> + factory - > InternalizeUtf8String ( function_name ) ; <nl> encoded_import - > set ( kFunctionName , * function_name_string ) ; <nl> } <nl> - encoded_import - > set ( kOutputCount , <nl> - Smi : : FromInt ( static_cast < int > ( fsig - > return_count ( ) ) ) ) ; <nl> - encoded_import - > set ( kSignature , * sig ) ; <nl> - ret - > set ( static_cast < int > ( i ) , * encoded_import ) ; <nl> - } <nl> - return ret ; <nl> - } <nl> - <nl> - static MaybeHandle < JSFunction > ReportFFIError ( <nl> - ErrorThrower * thrower , const char * error , uint32_t index , <nl> - Handle < String > module_name , MaybeHandle < String > function_name ) { <nl> - Handle < String > function_name_handle ; <nl> - if ( function_name . ToHandle ( & function_name_handle ) ) { <nl> - thrower - > Error ( " Import # % d module = \ " % . * s \ " function = \ " % . * s \ " error : % s " , <nl> - index , module_name - > length ( ) , module_name - > ToCString ( ) . get ( ) , <nl> - function_name_handle - > length ( ) , <nl> - function_name_handle - > ToCString ( ) . get ( ) , error ) ; <nl> - } else { <nl> - thrower - > Error ( " Import # % d module = \ " % . * s \ " error : % s " , index , <nl> - module_name - > length ( ) , module_name - > ToCString ( ) . get ( ) , <nl> - error ) ; <nl> - } <nl> - thrower - > Error ( " Import " ) ; <nl> - return MaybeHandle < JSFunction > ( ) ; <nl> - } <nl> - <nl> - static MaybeHandle < JSReceiver > LookupFunction ( <nl> - ErrorThrower * thrower , Factory * factory , Handle < JSReceiver > ffi , <nl> - uint32_t index , Handle < String > module_name , <nl> - MaybeHandle < String > function_name ) { <nl> - if ( ffi . is_null ( ) ) { <nl> - return ReportFFIError ( thrower , " FFI is not an object " , index , module_name , <nl> - function_name ) ; <nl> - } <nl> - <nl> - / / Look up the module first . <nl> - MaybeHandle < Object > result = Object : : GetProperty ( ffi , module_name ) ; <nl> - if ( result . is_null ( ) ) { <nl> - return ReportFFIError ( thrower , " module not found " , index , module_name , <nl> - function_name ) ; <nl> - } <nl> - <nl> - Handle < Object > module = result . ToHandleChecked ( ) ; <nl> - <nl> - if ( ! module - > IsJSReceiver ( ) ) { <nl> - return ReportFFIError ( thrower , " module is not an object or function " , index , <nl> - module_name , function_name ) ; <nl> - } <nl> - <nl> - Handle < Object > function ; <nl> - if ( ! function_name . is_null ( ) ) { <nl> - / / Look up the function in the module . <nl> - MaybeHandle < Object > result = <nl> - Object : : GetProperty ( module , function_name . ToHandleChecked ( ) ) ; <nl> - if ( result . is_null ( ) ) { <nl> - return ReportFFIError ( thrower , " function not found " , index , module_name , <nl> - function_name ) ; <nl> - } <nl> - function = result . ToHandleChecked ( ) ; <nl> - } else { <nl> - / / No function specified . Use the " default export " . <nl> - function = module ; <nl> - } <nl> - <nl> - if ( ! 
function - > IsCallable ( ) ) { <nl> - return ReportFFIError ( thrower , " not a callable " , index , module_name , <nl> - function_name ) ; <nl> - } <nl> <nl> - return Handle < JSReceiver > : : cast ( function ) ; <nl> - } <nl> - <nl> - Handle < Code > CompileImportWrapper ( Isolate * isolate , <nl> - const Handle < JSReceiver > ffi , int index , <nl> - Handle < FixedArray > import_data , <nl> - ErrorThrower * thrower ) { <nl> - Handle < FixedArray > data = <nl> - import_data - > GetValueChecked < FixedArray > ( isolate , index ) ; <nl> - Handle < String > module_name = <nl> - data - > GetValueChecked < String > ( isolate , kModuleName ) ; <nl> - MaybeHandle < String > function_name = <nl> - data - > GetValue < String > ( isolate , kFunctionName ) ; <nl> - <nl> - / / TODO ( mtrofin ) : this is an uint32_t , actually . We should rationalize <nl> - / / it when we rationalize signed / unsigned stuff . <nl> - int ret_count = Smi : : cast ( data - > get ( kOutputCount ) ) - > value ( ) ; <nl> - CHECK_GE ( ret_count , 0 ) ; <nl> - Handle < ByteArray > sig_data = <nl> - data - > GetValueChecked < ByteArray > ( isolate , kSignature ) ; <nl> - int sig_data_size = sig_data - > length ( ) ; <nl> - int param_count = sig_data_size - ret_count ; <nl> - CHECK ( param_count > = 0 ) ; <nl> - <nl> - MaybeHandle < JSReceiver > function = LookupFunction ( <nl> - thrower , isolate - > factory ( ) , ffi , index , module_name , function_name ) ; <nl> - if ( function . is_null ( ) ) return Handle < Code > : : null ( ) ; <nl> - Handle < Code > code ; <nl> - Handle < JSReceiver > target = function . ToHandleChecked ( ) ; <nl> - bool isMatch = false ; <nl> - Handle < Code > export_wrapper_code ; <nl> - if ( target - > IsJSFunction ( ) ) { <nl> - Handle < JSFunction > func = Handle < JSFunction > : : cast ( target ) ; <nl> - export_wrapper_code = handle ( func - > code ( ) ) ; <nl> - if ( export_wrapper_code - > kind ( ) = = Code : : JS_TO_WASM_FUNCTION ) { <nl> - int exported_param_count = <nl> - Smi : : cast ( func - > GetInternalField ( kInternalArity ) ) - > value ( ) ; <nl> - Handle < ByteArray > exportedSig = Handle < ByteArray > ( <nl> - ByteArray : : cast ( func - > GetInternalField ( kInternalSignature ) ) ) ; <nl> - if ( exported_param_count = = param_count & & <nl> - exportedSig - > length ( ) = = sig_data - > length ( ) & & <nl> - memcmp ( exportedSig - > GetDataStartAddress ( ) , <nl> - sig_data - > GetDataStartAddress ( ) , exportedSig - > length ( ) ) = = 0 ) { <nl> - isMatch = true ; <nl> + switch ( import . kind ) { <nl> + case kExternalFunction : { <nl> + / / Encode the signature into the import . <nl> + FunctionSig * fsig = module - > functions [ import . index ] . sig ; <nl> + Handle < ByteArray > sig = factory - > NewByteArray ( <nl> + static_cast < int > ( fsig - > parameter_count ( ) + fsig - > return_count ( ) ) , <nl> + TENURED ) ; <nl> + sig - > copy_in ( 0 , reinterpret_cast < const byte * > ( fsig - > raw_data ( ) ) , <nl> + sig - > length ( ) ) ; <nl> + encoded_import - > set ( <nl> + kOutputCount , Smi : : FromInt ( static_cast < int > ( fsig - > return_count ( ) ) ) ) ; <nl> + encoded_import - > set ( kSignature , * sig ) ; <nl> + break ; <nl> } <nl> - } <nl> - } <nl> - if ( isMatch ) { <nl> - int wasm_count = 0 ; <nl> - int const mask = RelocInfo : : ModeMask ( RelocInfo : : CODE_TARGET ) ; <nl> - for ( RelocIterator it ( * export_wrapper_code , mask ) ; ! it . done ( ) ; it . next ( ) ) { <nl> - RelocInfo * rinfo = it . 
rinfo ( ) ; <nl> - Address target_address = rinfo - > target_address ( ) ; <nl> - Code * target = Code : : GetCodeFromTargetAddress ( target_address ) ; <nl> - if ( target - > kind ( ) = = Code : : WASM_FUNCTION ) { <nl> - + + wasm_count ; <nl> - code = handle ( target ) ; <nl> + case kExternalTable : <nl> + / / Nothing extra required for imported tables . <nl> + break ; <nl> + case kExternalMemory : <nl> + / / Nothing extra required for imported memories . <nl> + break ; <nl> + case kExternalGlobal : { <nl> + / / Encode the offset and the global type into the import . <nl> + const WasmGlobal & global = module - > globals [ import . index ] ; <nl> + TRACE ( " import [ % zu ] . type = % s \ n " , i , WasmOpcodes : : TypeName ( global . type ) ) ; <nl> + encoded_import - > set ( <nl> + kImportGlobalType , <nl> + Smi : : FromInt ( WasmOpcodes : : LocalTypeCodeFor ( global . type ) ) ) ; <nl> + encoded_import - > set ( kImportIndex , Smi : : FromInt ( global . offset ) ) ; <nl> + break ; <nl> } <nl> } <nl> - DCHECK ( wasm_count = = 1 ) ; <nl> - return code ; <nl> - } else { <nl> - / / Copy the signature to avoid a raw pointer into a heap object when <nl> - / / GC can happen . <nl> - Zone zone ( isolate - > allocator ( ) ) ; <nl> - MachineRepresentation * reps = <nl> - zone . NewArray < MachineRepresentation > ( sig_data_size ) ; <nl> - memcpy ( reps , sig_data - > GetDataStartAddress ( ) , <nl> - sizeof ( MachineRepresentation ) * sig_data_size ) ; <nl> - FunctionSig sig ( ret_count , param_count , reps ) ; <nl> - <nl> - return compiler : : CompileWasmToJSWrapper ( isolate , target , & sig , index , <nl> - module_name , function_name ) ; <nl> + ret - > set ( static_cast < int > ( i ) , * encoded_import ) ; <nl> } <nl> + return ret ; <nl> } <nl> <nl> void InitializeParallelCompilation ( <nl> WasmModule : : WasmModule ( byte * module_start ) <nl> num_exported_functions ( 0 ) , <nl> pending_tasks ( new base : : Semaphore ( 0 ) ) { } <nl> <nl> + void EncodeInit ( const WasmModule * module , Factory * factory , <nl> + Handle < FixedArray > entry , int kind_index , int value_index , <nl> + const WasmInitExpr & expr ) { <nl> + entry - > set ( kind_index , Smi : : FromInt ( 0 ) ) ; <nl> + <nl> + Handle < Object > value ; <nl> + switch ( expr . kind ) { <nl> + case WasmInitExpr : : kGlobalIndex : { <nl> + TRACE ( " kind = 1 , global index % u \ n " , expr . val . global_index ) ; <nl> + entry - > set ( kind_index , Smi : : FromInt ( 1 ) ) ; <nl> + uint32_t offset = module - > globals [ expr . val . global_index ] . offset ; <nl> + entry - > set ( value_index , Smi : : FromInt ( offset ) ) ; <nl> + return ; <nl> + } <nl> + case WasmInitExpr : : kI32Const : <nl> + TRACE ( " kind = 0 , i32 = % d \ n " , expr . val . i32_const ) ; <nl> + value = factory - > NewNumber ( expr . val . i32_const ) ; <nl> + break ; <nl> + case WasmInitExpr : : kI64Const : <nl> + / / TODO ( titzer ) : implement initializers for i64 globals . <nl> + UNREACHABLE ( ) ; <nl> + break ; <nl> + case WasmInitExpr : : kF32Const : <nl> + TRACE ( " kind = 0 , f32 = % f \ n " , expr . val . f32_const ) ; <nl> + value = factory - > NewNumber ( expr . val . f32_const ) ; <nl> + break ; <nl> + case WasmInitExpr : : kF64Const : <nl> + TRACE ( " kind = 0 , f64 = % lf \ n " , expr . val . f64_const ) ; <nl> + value = factory - > NewNumber ( expr . val . 
f64_const ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + entry - > set ( value_index , * value ) ; <nl> + } <nl> + <nl> MaybeHandle < WasmCompiledModule > WasmModule : : CompileFunctions ( <nl> Isolate * isolate , ErrorThrower * thrower ) const { <nl> Factory * factory = isolate - > factory ( ) ; <nl> MaybeHandle < WasmCompiledModule > WasmModule : : CompileFunctions ( <nl> factory - > NewFixedArray ( static_cast < int > ( code_table_size ) , TENURED ) ; <nl> <nl> / / Initialize the code table with placeholders . <nl> - for ( uint32_t i = 0 ; i < functions . size ( ) ; i + + ) { <nl> + for ( uint32_t i = 0 ; i < functions . size ( ) ; + + i ) { <nl> Code : : Kind kind = Code : : WASM_FUNCTION ; <nl> if ( i < num_imported_functions ) kind = Code : : WASM_TO_JS_FUNCTION ; <nl> Handle < Code > placeholder = CreatePlaceholder ( factory , i , kind ) ; <nl> MaybeHandle < WasmCompiledModule > WasmModule : : CompileFunctions ( <nl> / / Avoid a race condition by collecting results into a second vector . <nl> std : : vector < Handle < Code > > results ; <nl> results . reserve ( temp_instance . function_code . size ( ) ) ; <nl> - for ( size_t i = 0 ; i < temp_instance . function_code . size ( ) ; i + + ) { <nl> + for ( size_t i = 0 ; i < temp_instance . function_code . size ( ) ; + + i ) { <nl> results . push_back ( temp_instance . function_code [ i ] ) ; <nl> } <nl> CompileInParallel ( isolate , this , results , thrower , & module_env ) ; <nl> <nl> - for ( size_t i = 0 ; i < results . size ( ) ; i + + ) { <nl> + for ( size_t i = 0 ; i < results . size ( ) ; + + i ) { <nl> temp_instance . function_code [ i ] = results [ i ] ; <nl> } <nl> } else { <nl> MaybeHandle < WasmCompiledModule > WasmModule : : CompileFunctions ( <nl> / / and information needed at instantiation time . This object needs to be <nl> / / serializable . Instantiation may occur off a deserialized version of this <nl> / / object . <nl> - Handle < WasmCompiledModule > ret = WasmCompiledModule : : New ( <nl> - isolate , min_mem_pages , globals_size , mem_export , origin ) ; <nl> + Handle < WasmCompiledModule > ret = <nl> + WasmCompiledModule : : New ( isolate , min_mem_pages , globals_size , origin ) ; <nl> ret - > set_code_table ( code_table ) ; <nl> if ( ! indirect_table . is_null ( ) ) { <nl> ret - > set_indirect_function_tables ( indirect_table . ToHandleChecked ( ) ) ; <nl> } <nl> - Handle < FixedArray > import_data = GetImportsData ( factory , this ) ; <nl> - ret - > set_import_data ( import_data ) ; <nl> <nl> - / / Compile exported function wrappers . <nl> - int export_size = static_cast < int > ( num_exported_functions ) ; <nl> + / / Create and set import data . <nl> + ret - > set_imports ( EncodeImports ( factory , this ) ) ; <nl> + <nl> + / / Create and set export data . <nl> + int export_size = static_cast < int > ( export_table . size ( ) ) ; <nl> if ( export_size > 0 ) { <nl> Handle < FixedArray > exports = factory - > NewFixedArray ( export_size , TENURED ) ; <nl> - int index = - 1 ; <nl> + int index = 0 ; <nl> + int func_index = 0 ; <nl> <nl> for ( const WasmExport & exp : export_table ) { <nl> - if ( exp . kind ! = kExternalFunction ) <nl> - continue ; / / skip non - function exports . <nl> - index + + ; <nl> - Handle < FixedArray > export_data = <nl> + if ( thrower - > error ( ) ) return nothing ; <nl> + Handle < FixedArray > encoded_export = <nl> factory - > NewFixedArray ( kWasmExportDataSize , TENURED ) ; <nl> - FunctionSig * funcSig = functions [ exp . index ] . 
sig ; <nl> - Handle < ByteArray > exportedSig = <nl> - factory - > NewByteArray ( static_cast < int > ( funcSig - > parameter_count ( ) + <nl> - funcSig - > return_count ( ) ) , <nl> - TENURED ) ; <nl> - exportedSig - > copy_in ( 0 , <nl> - reinterpret_cast < const byte * > ( funcSig - > raw_data ( ) ) , <nl> - exportedSig - > length ( ) ) ; <nl> - export_data - > set ( kExportedSignature , * exportedSig ) ; <nl> WasmName str = GetName ( exp . name_offset , exp . name_length ) ; <nl> Handle < String > name = factory - > InternalizeUtf8String ( str ) ; <nl> - Handle < Code > code = code_table - > GetValueChecked < Code > ( isolate , exp . index ) ; <nl> - Handle < Code > export_code = compiler : : CompileJSToWasmWrapper ( <nl> - isolate , & module_env , code , exp . index ) ; <nl> - if ( thrower - > error ( ) ) return nothing ; <nl> - export_data - > set ( kExportName , * name ) ; <nl> - export_data - > set ( kExportArity , <nl> - Smi : : FromInt ( static_cast < int > ( <nl> - functions [ exp . index ] . sig - > parameter_count ( ) ) ) ) ; <nl> - export_data - > set ( kExportedFunctionIndex , <nl> - Smi : : FromInt ( static_cast < int > ( exp . index ) ) ) ; <nl> - exports - > set ( index , * export_data ) ; <nl> - code_table - > set ( static_cast < int > ( functions . size ( ) + index ) , * export_code ) ; <nl> + encoded_export - > set ( kExportKind , Smi : : FromInt ( exp . kind ) ) ; <nl> + encoded_export - > set ( kExportName , * name ) ; <nl> + encoded_export - > set ( kExportIndex , <nl> + Smi : : FromInt ( static_cast < int > ( exp . index ) ) ) ; <nl> + exports - > set ( index , * encoded_export ) ; <nl> + <nl> + switch ( exp . kind ) { <nl> + case kExternalFunction : { <nl> + / / Copy the signature and arity . <nl> + FunctionSig * funcSig = functions [ exp . index ] . sig ; <nl> + Handle < ByteArray > exportedSig = factory - > NewByteArray ( <nl> + static_cast < int > ( funcSig - > parameter_count ( ) + <nl> + funcSig - > return_count ( ) ) , <nl> + TENURED ) ; <nl> + exportedSig - > copy_in ( <nl> + 0 , reinterpret_cast < const byte * > ( funcSig - > raw_data ( ) ) , <nl> + exportedSig - > length ( ) ) ; <nl> + encoded_export - > set ( kExportedSignature , * exportedSig ) ; <nl> + encoded_export - > set ( <nl> + kExportArity , <nl> + Smi : : FromInt ( static_cast < int > ( funcSig - > parameter_count ( ) ) ) ) ; <nl> + <nl> + / / Compile a wrapper for an exported function . <nl> + Handle < Code > code = <nl> + code_table - > GetValueChecked < Code > ( isolate , exp . index ) ; <nl> + Handle < Code > export_code = compiler : : CompileJSToWasmWrapper ( <nl> + isolate , & module_env , code , exp . index ) ; <nl> + int code_table_index = <nl> + static_cast < int > ( functions . size ( ) + func_index ) ; <nl> + code_table - > set ( code_table_index , * export_code ) ; <nl> + encoded_export - > set ( kExportIndex , Smi : : FromInt ( code_table_index ) ) ; <nl> + + + func_index ; <nl> + } <nl> + case kExternalTable : <nl> + / / Nothing special about exported tables . <nl> + break ; <nl> + case kExternalMemory : <nl> + / / Nothing special about exported tables . <nl> + break ; <nl> + case kExternalGlobal : { <nl> + / / Encode the global type and the global offset . <nl> + const WasmGlobal & global = globals [ exp . index ] ; <nl> + encoded_export - > set ( <nl> + kExportGlobalType , <nl> + Smi : : FromInt ( WasmOpcodes : : LocalTypeCodeFor ( global . type ) ) ) ; <nl> + encoded_export - > set ( kExportIndex , Smi : : FromInt ( global . 
offset ) ) ; <nl> + break ; <nl> + } <nl> + } <nl> + + + index ; <nl> } <nl> ret - > set_exports ( exports ) ; <nl> } <nl> <nl> + / / Create and set init data . <nl> + int init_size = static_cast < int > ( globals . size ( ) ) ; <nl> + if ( init_size > 0 ) { <nl> + Handle < FixedArray > inits = factory - > NewFixedArray ( init_size , TENURED ) ; <nl> + int index = 0 ; <nl> + for ( const WasmGlobal & global : globals ) { <nl> + / / Skip globals that have no initializer ( e . g . imported ones ) . <nl> + if ( global . init . kind = = WasmInitExpr : : kNone ) continue ; <nl> + <nl> + Handle < FixedArray > encoded_init = <nl> + factory - > NewFixedArray ( kWasmGlobalInitDataSize , TENURED ) ; <nl> + inits - > set ( index , * encoded_init ) ; <nl> + TRACE ( " init [ % d ] . type = % s \ n " , index , WasmOpcodes : : TypeName ( global . type ) ) ; <nl> + <nl> + encoded_init - > set ( <nl> + kGlobalInitType , <nl> + Smi : : FromInt ( WasmOpcodes : : LocalTypeCodeFor ( global . type ) ) ) ; <nl> + encoded_init - > set ( kGlobalInitIndex , Smi : : FromInt ( global . offset ) ) ; <nl> + EncodeInit ( this , factory , encoded_init , kGlobalInitKind , kGlobalInitValue , <nl> + global . init ) ; <nl> + + + index ; <nl> + } <nl> + inits - > Shrink ( index ) ; <nl> + ret - > set_inits ( inits ) ; <nl> + } <nl> + <nl> / / Record data for startup function . <nl> if ( start_function_index > = 0 ) { <nl> HandleScope scope ( isolate ) ; <nl> Handle < FixedArray > startup_data = <nl> factory - > NewFixedArray ( kWasmExportDataSize , TENURED ) ; <nl> startup_data - > set ( kExportArity , Smi : : FromInt ( 0 ) ) ; <nl> - startup_data - > set ( kExportedFunctionIndex , <nl> - Smi : : FromInt ( start_function_index ) ) ; <nl> + startup_data - > set ( kExportIndex , Smi : : FromInt ( start_function_index ) ) ; <nl> ret - > set_startup_function ( startup_data ) ; <nl> } <nl> <nl> MaybeHandle < WasmCompiledModule > WasmModule : : CompileFunctions ( <nl> return ret ; <nl> } <nl> <nl> - / / Instantiates a WASM module , creating a WebAssembly . Instance from a <nl> - / / WebAssembly . Module . <nl> - MaybeHandle < JSObject > WasmModule : : Instantiate ( Isolate * isolate , <nl> - ErrorThrower * thrower , <nl> - Handle < JSObject > module_object , <nl> - Handle < JSReceiver > ffi , <nl> - Handle < JSArrayBuffer > memory ) { <nl> - MaybeHandle < JSObject > nothing ; <nl> - HistogramTimerScope wasm_instantiate_module_time_scope ( <nl> - isolate - > counters ( ) - > wasm_instantiate_module_time ( ) ) ; <nl> - Factory * factory = isolate - > factory ( ) ; <nl> + / / A helper class to simplify instantiating a module from a compiled module . <nl> + / / It closes over the { Isolate } , the { ErrorThrower } , the { WasmCompiledModule } , <nl> + / / etc . <nl> + class WasmInstanceBuilder { <nl> + public : <nl> + WasmInstanceBuilder ( Isolate * isolate , ErrorThrower * thrower , <nl> + Handle < JSObject > module_object , Handle < JSReceiver > ffi , <nl> + Handle < JSArrayBuffer > memory ) <nl> + : isolate_ ( isolate ) , <nl> + thrower_ ( thrower ) , <nl> + module_object_ ( module_object ) , <nl> + ffi_ ( ffi ) , <nl> + memory_ ( memory ) { } <nl> + <nl> + / / Build an instance , in all of its glory . 
<nl> + MaybeHandle < JSObject > Build ( ) { <nl> + MaybeHandle < JSObject > nothing ; <nl> + HistogramTimerScope wasm_instantiate_module_time_scope ( <nl> + isolate_ - > counters ( ) - > wasm_instantiate_module_time ( ) ) ; <nl> + Factory * factory = isolate_ - > factory ( ) ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Reuse the compiled module ( if no owner ) , otherwise clone . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + Handle < FixedArray > code_table ; <nl> + Handle < FixedArray > old_code_table ; <nl> + Handle < JSObject > owner ; <nl> + / / If we don ' t clone , this will be null ( ) . Otherwise , this will <nl> + / / be a weak link to the original . If we lose the original to GC , <nl> + / / this will be a cleared . We ' ll link the instances chain last . <nl> + MaybeHandle < WeakCell > link_to_original ; <nl> + <nl> + TRACE ( " Starting new module instantiation \ n " ) ; <nl> + { <nl> + Handle < WasmCompiledModule > original ( <nl> + WasmCompiledModule : : cast ( module_object_ - > GetInternalField ( 0 ) ) , <nl> + isolate_ ) ; <nl> + / / Always make a new copy of the code_table , since the old_code_table <nl> + / / may still have placeholders for imports . <nl> + old_code_table = original - > code_table ( ) ; <nl> + code_table = factory - > CopyFixedArray ( old_code_table ) ; <nl> + <nl> + if ( original - > has_weak_owning_instance ( ) ) { <nl> + WeakCell * tmp = original - > ptr_to_weak_owning_instance ( ) ; <nl> + DCHECK ( ! tmp - > cleared ( ) ) ; <nl> + / / There is already an owner , clone everything . <nl> + owner = Handle < JSObject > ( JSObject : : cast ( tmp - > value ( ) ) , isolate_ ) ; <nl> + / / Insert the latest clone in front . <nl> + TRACE ( " Cloning from % d \ n " , original - > instance_id ( ) ) ; <nl> + compiled_module_ = WasmCompiledModule : : Clone ( isolate_ , original ) ; <nl> + / / Replace the strong reference to point to the new instance here . <nl> + / / This allows any of the other instances , including the original , <nl> + / / to be collected . <nl> + module_object_ - > SetInternalField ( 0 , * compiled_module_ ) ; <nl> + compiled_module_ - > set_weak_module_object ( <nl> + original - > weak_module_object ( ) ) ; <nl> + link_to_original = factory - > NewWeakCell ( original ) ; <nl> + / / Don ' t link to original here . We remember the original <nl> + / / as a weak link . If that link isn ' t clear by the time we finish <nl> + / / instantiating this instance , then we link it at that time . <nl> + compiled_module_ - > reset_weak_next_instance ( ) ; <nl> + <nl> + / / Clone the code for WASM functions and exports . <nl> + for ( int i = 0 ; i < code_table - > length ( ) ; + + i ) { <nl> + Handle < Code > orig_code = <nl> + code_table - > GetValueChecked < Code > ( isolate_ , i ) ; <nl> + switch ( orig_code - > kind ( ) ) { <nl> + case Code : : WASM_TO_JS_FUNCTION : <nl> + / / Imports will be overwritten with newly compiled wrappers . <nl> + break ; <nl> + case Code : : JS_TO_WASM_FUNCTION : <nl> + case Code : : WASM_FUNCTION : { <nl> + Handle < Code > code = factory - > CopyCode ( orig_code ) ; <nl> + code_table - > set ( i , * code ) ; <nl> + break ; <nl> + } <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + } <nl> + RecordStats ( isolate_ , code_table ) ; <nl> + } else { <nl> + / / There was no owner , so we can reuse the original . 
<nl> + compiled_module_ = original ; <nl> + TRACE ( " Reusing existing instance % d \ n " , <nl> + compiled_module_ - > instance_id ( ) ) ; <nl> + } <nl> + compiled_module_ - > set_code_table ( code_table ) ; <nl> + } <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Reuse the compiled module ( if no owner ) , otherwise clone . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - Handle < WasmCompiledModule > compiled_module ; <nl> - Handle < FixedArray > code_table ; <nl> - Handle < FixedArray > old_code_table ; <nl> - Handle < JSObject > owner ; <nl> - / / If we don ' t clone , this will be null ( ) . Otherwise , this will <nl> - / / be a weak link to the original . If we lose the original to GC , <nl> - / / this will be a cleared . We ' ll link the instances chain last . <nl> - MaybeHandle < WeakCell > link_to_original ; <nl> - <nl> - TRACE ( " Starting new module instantiation \ n " ) ; <nl> - { <nl> - Handle < WasmCompiledModule > original ( <nl> - WasmCompiledModule : : cast ( module_object - > GetInternalField ( 0 ) ) , isolate ) ; <nl> - / / Always make a new copy of the code_table , since the old_code_table <nl> - / / may still have placeholders for imports . <nl> - old_code_table = original - > code_table ( ) ; <nl> - code_table = factory - > CopyFixedArray ( old_code_table ) ; <nl> - <nl> - if ( original - > has_weak_owning_instance ( ) ) { <nl> - WeakCell * tmp = original - > ptr_to_weak_owning_instance ( ) ; <nl> - DCHECK ( ! tmp - > cleared ( ) ) ; <nl> - / / There is already an owner , clone everything . <nl> - owner = Handle < JSObject > ( JSObject : : cast ( tmp - > value ( ) ) , isolate ) ; <nl> - / / Insert the latest clone in front . <nl> - TRACE ( " Cloning from % d \ n " , original - > instance_id ( ) ) ; <nl> - compiled_module = WasmCompiledModule : : Clone ( isolate , original ) ; <nl> - / / Replace the strong reference to point to the new instance here . <nl> - / / This allows any of the other instances , including the original , <nl> - / / to be collected . <nl> - module_object - > SetInternalField ( 0 , * compiled_module ) ; <nl> - compiled_module - > set_weak_module_object ( original - > weak_module_object ( ) ) ; <nl> - link_to_original = factory - > NewWeakCell ( original ) ; <nl> - / / Don ' t link to original here . We remember the original <nl> - / / as a weak link . If that link isn ' t clear by the time we finish <nl> - / / instantiating this instance , then we link it at that time . <nl> - compiled_module - > reset_weak_next_instance ( ) ; <nl> - <nl> - / / Clone the code for WASM functions and exports . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Allocate the instance object . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + Handle < Map > map = factory - > NewMap ( <nl> + JS_OBJECT_TYPE , <nl> + JSObject : : kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize ) ; <nl> + Handle < JSObject > instance = factory - > NewJSObjectFromMap ( map , TENURED ) ; <nl> + instance - > SetInternalField ( kWasmModuleCodeTable , * code_table ) ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Set up the memory for the new instance . 
<nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + MaybeHandle < JSArrayBuffer > old_memory ; <nl> + / / TODO ( titzer ) : handle imported memory properly . <nl> + <nl> + uint32_t min_mem_pages = compiled_module_ - > min_memory_pages ( ) ; <nl> + isolate_ - > counters ( ) - > wasm_min_mem_pages_count ( ) - > AddSample ( min_mem_pages ) ; <nl> + / / TODO ( wasm ) : re - enable counter for max_mem_pages when we use that field . <nl> + <nl> + if ( memory_ . is_null ( ) & & min_mem_pages > 0 ) { <nl> + memory_ = AllocateMemory ( min_mem_pages ) ; <nl> + if ( memory_ . is_null ( ) ) return nothing ; / / failed to allocate memory <nl> + } <nl> + <nl> + if ( ! memory_ . is_null ( ) ) { <nl> + instance - > SetInternalField ( kWasmMemArrayBuffer , * memory_ ) ; <nl> + Address mem_start = static_cast < Address > ( memory_ - > backing_store ( ) ) ; <nl> + uint32_t mem_size = <nl> + static_cast < uint32_t > ( memory_ - > byte_length ( ) - > Number ( ) ) ; <nl> + LoadDataSegments ( mem_start , mem_size ) ; <nl> + <nl> + uint32_t old_mem_size = compiled_module_ - > has_heap ( ) <nl> + ? compiled_module_ - > mem_size ( ) <nl> + : compiled_module_ - > default_mem_size ( ) ; <nl> + Address old_mem_start = <nl> + compiled_module_ - > has_heap ( ) <nl> + ? static_cast < Address > ( compiled_module_ - > heap ( ) - > backing_store ( ) ) <nl> + : nullptr ; <nl> + RelocateInstanceCode ( instance , old_mem_start , mem_start , old_mem_size , <nl> + mem_size ) ; <nl> + compiled_module_ - > set_heap ( memory_ ) ; <nl> + } <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Set up the globals for the new instance . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + MaybeHandle < JSArrayBuffer > old_globals ; <nl> + MaybeHandle < JSArrayBuffer > globals ; <nl> + uint32_t globals_size = compiled_module_ - > globals_size ( ) ; <nl> + if ( globals_size > 0 ) { <nl> + Handle < JSArrayBuffer > global_buffer = <nl> + NewArrayBuffer ( isolate_ , globals_size ) ; <nl> + globals = global_buffer ; <nl> + if ( globals . is_null ( ) ) { <nl> + thrower_ - > Error ( " Out of memory : wasm globals " ) ; <nl> + return nothing ; <nl> + } <nl> + Address old_address = <nl> + owner . is_null ( ) ? nullptr : GetGlobalStartAddressFromCodeTemplate ( <nl> + * factory - > undefined_value ( ) , <nl> + JSObject : : cast ( * owner ) ) ; <nl> + RelocateGlobals ( instance , old_address , <nl> + static_cast < Address > ( global_buffer - > backing_store ( ) ) ) ; <nl> + instance - > SetInternalField ( kWasmGlobalsArrayBuffer , * global_buffer ) ; <nl> + } <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Process the imports for the module . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + int num_imported_functions = ProcessImports ( globals , code_table ) ; <nl> + if ( num_imported_functions < 0 ) return nothing ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Process the initialization for the module ' s globals . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + ProcessInits ( globals ) ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Set up the debug support for the new instance . 
<nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / TODO ( wasm ) : avoid referencing this stuff from the instance , use it off <nl> + / / the compiled module instead . See the following 3 assignments : <nl> + if ( compiled_module_ - > has_module_bytes ( ) ) { <nl> + instance - > SetInternalField ( kWasmModuleBytesString , <nl> + compiled_module_ - > ptr_to_module_bytes ( ) ) ; <nl> + } <nl> + <nl> + if ( compiled_module_ - > has_function_names ( ) ) { <nl> + instance - > SetInternalField ( kWasmFunctionNamesArray , <nl> + compiled_module_ - > ptr_to_function_names ( ) ) ; <nl> + } <nl> + <nl> + { <nl> + Handle < Object > handle = factory - > NewNumber ( num_imported_functions ) ; <nl> + instance - > SetInternalField ( kWasmNumImportedFunctions , * handle ) ; <nl> + } <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Set up the runtime support for the new instance . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + Handle < WeakCell > weak_link = factory - > NewWeakCell ( instance ) ; <nl> + <nl> + for ( int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs ; <nl> + i < code_table - > length ( ) ; + + i ) { <nl> + Handle < Code > code = code_table - > GetValueChecked < Code > ( isolate_ , i ) ; <nl> + if ( code - > kind ( ) = = Code : : WASM_FUNCTION ) { <nl> + Handle < FixedArray > deopt_data = factory - > NewFixedArray ( 2 , TENURED ) ; <nl> + deopt_data - > set ( 0 , * weak_link ) ; <nl> + deopt_data - > set ( 1 , Smi : : FromInt ( static_cast < int > ( i ) ) ) ; <nl> + deopt_data - > set_length ( 2 ) ; <nl> + code - > set_deoptimization_data ( * deopt_data ) ; <nl> + } <nl> + } <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Set up the indirect function tables for the new instance . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + { <nl> + std : : vector < Handle < Code > > functions ( <nl> + static_cast < size_t > ( code_table - > length ( ) ) ) ; <nl> for ( int i = 0 ; i < code_table - > length ( ) ; + + i ) { <nl> - Handle < Code > orig_code = code_table - > GetValueChecked < Code > ( isolate , i ) ; <nl> - switch ( orig_code - > kind ( ) ) { <nl> - case Code : : WASM_TO_JS_FUNCTION : <nl> - / / Imports will be overwritten with newly compiled wrappers . <nl> - break ; <nl> - case Code : : JS_TO_WASM_FUNCTION : <nl> - case Code : : WASM_FUNCTION : { <nl> - Handle < Code > code = factory - > CopyCode ( orig_code ) ; <nl> - code_table - > set ( i , * code ) ; <nl> - break ; <nl> - } <nl> - default : <nl> - UNREACHABLE ( ) ; <nl> + functions [ i ] = code_table - > GetValueChecked < Code > ( isolate_ , i ) ; <nl> + } <nl> + <nl> + if ( compiled_module_ - > has_indirect_function_tables ( ) ) { <nl> + Handle < FixedArray > indirect_tables_template = <nl> + compiled_module_ - > indirect_function_tables ( ) ; <nl> + Handle < FixedArray > to_replace = <nl> + owner . is_null ( ) ? 
indirect_tables_template <nl> + : handle ( FixedArray : : cast ( owner - > GetInternalField ( <nl> + kWasmModuleFunctionTable ) ) ) ; <nl> + Handle < FixedArray > indirect_tables = SetupIndirectFunctionTable ( <nl> + isolate_ , code_table , indirect_tables_template , to_replace ) ; <nl> + for ( int i = 0 ; i < indirect_tables - > length ( ) ; + + i ) { <nl> + Handle < FixedArray > metadata = <nl> + indirect_tables - > GetValueChecked < FixedArray > ( isolate_ , i ) ; <nl> + uint32_t size = Smi : : cast ( metadata - > get ( kSize ) ) - > value ( ) ; <nl> + Handle < FixedArray > table = <nl> + metadata - > GetValueChecked < FixedArray > ( isolate_ , kTable ) ; <nl> + PopulateFunctionTable ( table , size , & functions ) ; <nl> } <nl> + instance - > SetInternalField ( kWasmModuleFunctionTable , * indirect_tables ) ; <nl> } <nl> - RecordStats ( isolate , code_table ) ; <nl> - } else { <nl> - / / There was no owner , so we can reuse the original . <nl> - compiled_module = original ; <nl> - TRACE ( " Reusing existing instance % d \ n " , compiled_module - > instance_id ( ) ) ; <nl> } <nl> - compiled_module - > set_code_table ( code_table ) ; <nl> - } <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Allocate the instance object . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - Handle < Map > map = factory - > NewMap ( <nl> - JS_OBJECT_TYPE , <nl> - JSObject : : kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize ) ; <nl> - Handle < JSObject > instance = factory - > NewJSObjectFromMap ( map , TENURED ) ; <nl> - instance - > SetInternalField ( kWasmModuleCodeTable , * code_table ) ; <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Set up the memory for the new instance . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - MaybeHandle < JSArrayBuffer > old_memory ; <nl> - / / TODO ( titzer ) : handle imported memory properly . <nl> - <nl> - uint32_t min_mem_pages = compiled_module - > min_memory_pages ( ) ; <nl> - isolate - > counters ( ) - > wasm_min_mem_pages_count ( ) - > AddSample ( min_mem_pages ) ; <nl> - / / TODO ( wasm ) : re - enable counter for max_mem_pages when we use that field . <nl> - <nl> - if ( memory . is_null ( ) & & min_mem_pages > 0 ) { <nl> - memory = AllocateMemory ( thrower , isolate , min_mem_pages ) ; <nl> - if ( memory . is_null ( ) ) return nothing ; / / failed to allocate memory <nl> - } <nl> - <nl> - if ( ! memory . is_null ( ) ) { <nl> - instance - > SetInternalField ( kWasmMemArrayBuffer , * memory ) ; <nl> - Address mem_start = static_cast < Address > ( memory - > backing_store ( ) ) ; <nl> - uint32_t mem_size = static_cast < uint32_t > ( memory - > byte_length ( ) - > Number ( ) ) ; <nl> - LoadDataSegments ( compiled_module , mem_start , mem_size ) ; <nl> - <nl> - uint32_t old_mem_size = compiled_module - > has_heap ( ) <nl> - ? compiled_module - > mem_size ( ) <nl> - : compiled_module - > default_mem_size ( ) ; <nl> - Address old_mem_start = <nl> - compiled_module - > has_heap ( ) <nl> - ? 
static_cast < Address > ( compiled_module - > heap ( ) - > backing_store ( ) ) <nl> - : nullptr ; <nl> - RelocateInstanceCode ( instance , old_mem_start , mem_start , old_mem_size , <nl> - mem_size ) ; <nl> - compiled_module - > set_heap ( memory ) ; <nl> - } <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Set up the globals for the new instance . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - MaybeHandle < JSArrayBuffer > old_globals ; <nl> - MaybeHandle < JSArrayBuffer > globals ; <nl> - uint32_t globals_size = compiled_module - > globals_size ( ) ; <nl> - if ( globals_size > 0 ) { <nl> - Handle < JSArrayBuffer > global_buffer = NewArrayBuffer ( isolate , globals_size ) ; <nl> - globals = global_buffer ; <nl> - if ( globals . is_null ( ) ) { <nl> - thrower - > Error ( " Out of memory : wasm globals " ) ; <nl> - return nothing ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Set up the exports object for the new instance . <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + ProcessExports ( globals , code_table , instance ) ; <nl> + <nl> + if ( num_imported_functions > 0 | | ! owner . is_null ( ) ) { <nl> + / / If the code was cloned , or new imports were compiled , patch . <nl> + PatchDirectCalls ( old_code_table , code_table , num_imported_functions ) ; <nl> } <nl> - Address old_address = <nl> - owner . is_null ( ) ? nullptr : GetGlobalStartAddressFromCodeTemplate ( <nl> - * isolate - > factory ( ) - > undefined_value ( ) , <nl> - JSObject : : cast ( * owner ) ) ; <nl> - RelocateGlobals ( instance , old_address , <nl> - static_cast < Address > ( global_buffer - > backing_store ( ) ) ) ; <nl> - instance - > SetInternalField ( kWasmGlobalsArrayBuffer , * global_buffer ) ; <nl> - } <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Compile the import wrappers for the new instance . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / TODO ( titzer ) : handle imported globals and function tables . <nl> - int num_imported_functions = 0 ; <nl> - if ( compiled_module - > has_import_data ( ) ) { <nl> - Handle < FixedArray > import_data = compiled_module - > import_data ( ) ; <nl> - num_imported_functions = import_data - > length ( ) ; <nl> - for ( int index = 0 ; index < num_imported_functions ; index + + ) { <nl> - Handle < Code > import_wrapper = <nl> - CompileImportWrapper ( isolate , ffi , index , import_data , thrower ) ; <nl> - if ( thrower - > error ( ) ) return nothing ; <nl> - code_table - > set ( index , * import_wrapper ) ; <nl> - RecordStats ( isolate , * import_wrapper ) ; <nl> + <nl> + FlushICache ( isolate_ , code_table ) ; <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / Run the start function if one was specified . 
<nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + if ( compiled_module_ - > has_startup_function ( ) ) { <nl> + Handle < FixedArray > startup_data = compiled_module_ - > startup_function ( ) ; <nl> + HandleScope scope ( isolate_ ) ; <nl> + int32_t start_index = <nl> + startup_data - > GetValueChecked < Smi > ( isolate_ , kExportIndex ) - > value ( ) ; <nl> + Handle < Code > startup_code = <nl> + code_table - > GetValueChecked < Code > ( isolate_ , start_index ) ; <nl> + int arity = Smi : : cast ( startup_data - > get ( kExportArity ) ) - > value ( ) ; <nl> + MaybeHandle < ByteArray > startup_signature = <nl> + startup_data - > GetValue < ByteArray > ( isolate_ , kExportedSignature ) ; <nl> + Handle < JSFunction > startup_fct = WrapExportCodeAsJSFunction ( <nl> + isolate_ , startup_code , factory - > InternalizeUtf8String ( " start " ) , <nl> + arity , startup_signature , instance ) ; <nl> + RecordStats ( isolate_ , * startup_code ) ; <nl> + / / Call the JS function . <nl> + Handle < Object > undefined = factory - > undefined_value ( ) ; <nl> + MaybeHandle < Object > retval = <nl> + Execution : : Call ( isolate_ , startup_fct , undefined , 0 , nullptr ) ; <nl> + <nl> + if ( retval . is_null ( ) ) { <nl> + thrower_ - > Error ( " WASM . instantiateModule ( ) : start function failed " ) ; <nl> + return nothing ; <nl> + } <nl> } <nl> - } <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Set up the debug support for the new instance . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / TODO ( wasm ) : avoid referencing this stuff from the instance , use it off <nl> - / / the compiled module instead . See the following 3 assignments : <nl> - if ( compiled_module - > has_module_bytes ( ) ) { <nl> - instance - > SetInternalField ( kWasmModuleBytesString , <nl> - compiled_module - > ptr_to_module_bytes ( ) ) ; <nl> - } <nl> + DCHECK ( wasm : : IsWasmObject ( * instance ) ) ; <nl> <nl> - if ( compiled_module - > has_function_names ( ) ) { <nl> - instance - > SetInternalField ( kWasmFunctionNamesArray , <nl> - compiled_module - > ptr_to_function_names ( ) ) ; <nl> + { <nl> + Handle < WeakCell > link_to_owner = factory - > NewWeakCell ( instance ) ; <nl> + <nl> + Handle < Object > global_handle = <nl> + isolate_ - > global_handles ( ) - > Create ( * instance ) ; <nl> + Handle < WeakCell > link_to_clone = factory - > NewWeakCell ( compiled_module_ ) ; <nl> + { <nl> + DisallowHeapAllocation no_gc ; <nl> + compiled_module_ - > set_weak_owning_instance ( link_to_owner ) ; <nl> + Handle < WeakCell > next ; <nl> + if ( link_to_original . ToHandle ( & next ) & & ! next - > cleared ( ) ) { <nl> + WasmCompiledModule * original = <nl> + WasmCompiledModule : : cast ( next - > value ( ) ) ; <nl> + DCHECK ( original - > has_weak_owning_instance ( ) ) ; <nl> + DCHECK ( ! original - > weak_owning_instance ( ) - > cleared ( ) ) ; <nl> + compiled_module_ - > set_weak_next_instance ( next ) ; <nl> + original - > set_weak_prev_instance ( link_to_clone ) ; <nl> + } <nl> + <nl> + compiled_module_ - > set_weak_owning_instance ( link_to_owner ) ; <nl> + instance - > SetInternalField ( kWasmCompiledModule , * compiled_module_ ) ; <nl> + GlobalHandles : : MakeWeak ( global_handle . location ( ) , <nl> + global_handle . 
location ( ) , & InstanceFinalizer , <nl> + v8 : : WeakCallbackType : : kFinalizer ) ; <nl> + } <nl> + } <nl> + TRACE ( " Finishing instance % d \ n " , compiled_module_ - > instance_id ( ) ) ; <nl> + TRACE_CHAIN ( WasmCompiledModule : : cast ( module_object_ - > GetInternalField ( 0 ) ) ) ; <nl> + return instance ; <nl> } <nl> <nl> - { <nl> - Handle < Object > handle = factory - > NewNumber ( num_imported_functions ) ; <nl> - instance - > SetInternalField ( kWasmNumImportedFunctions , * handle ) ; <nl> - } <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Set up the runtime support for the new instance . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - Handle < WeakCell > weak_link = isolate - > factory ( ) - > NewWeakCell ( instance ) ; <nl> - <nl> - for ( int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs ; <nl> - i < code_table - > length ( ) ; + + i ) { <nl> - Handle < Code > code = code_table - > GetValueChecked < Code > ( isolate , i ) ; <nl> - if ( code - > kind ( ) = = Code : : WASM_FUNCTION ) { <nl> - Handle < FixedArray > deopt_data = <nl> - isolate - > factory ( ) - > NewFixedArray ( 2 , TENURED ) ; <nl> - deopt_data - > set ( 0 , * weak_link ) ; <nl> - deopt_data - > set ( 1 , Smi : : FromInt ( static_cast < int > ( i ) ) ) ; <nl> - deopt_data - > set_length ( 2 ) ; <nl> - code - > set_deoptimization_data ( * deopt_data ) ; <nl> + private : <nl> + Isolate * isolate_ ; <nl> + ErrorThrower * thrower_ ; <nl> + Handle < JSObject > module_object_ ; <nl> + Handle < JSReceiver > ffi_ ; <nl> + Handle < JSArrayBuffer > memory_ ; <nl> + Handle < WasmCompiledModule > compiled_module_ ; <nl> + <nl> + / / Helper routine to print out errors with imports ( FFI ) . <nl> + MaybeHandle < JSFunction > ReportFFIError ( const char * error , uint32_t index , <nl> + Handle < String > module_name , <nl> + MaybeHandle < String > function_name ) { <nl> + Handle < String > function_name_handle ; <nl> + if ( function_name . ToHandle ( & function_name_handle ) ) { <nl> + thrower_ - > Error ( " Import # % d module = \ " % . * s \ " function = \ " % . * s \ " error : % s " , <nl> + index , module_name - > length ( ) , <nl> + module_name - > ToCString ( ) . get ( ) , <nl> + function_name_handle - > length ( ) , <nl> + function_name_handle - > ToCString ( ) . get ( ) , error ) ; <nl> + } else { <nl> + thrower_ - > Error ( " Import # % d module = \ " % . * s \ " error : % s " , index , <nl> + module_name - > length ( ) , module_name - > ToCString ( ) . get ( ) , <nl> + error ) ; <nl> } <nl> + thrower_ - > Error ( " Import " ) ; <nl> + return MaybeHandle < JSFunction > ( ) ; <nl> } <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Set up the indirect function tables for the new instance . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - { <nl> - std : : vector < Handle < Code > > functions ( <nl> - static_cast < size_t > ( code_table - > length ( ) ) ) ; <nl> - for ( int i = 0 ; i < code_table - > length ( ) ; + + i ) { <nl> - functions [ i ] = code_table - > GetValueChecked < Code > ( isolate , i ) ; <nl> + / / Look up an import value in the { ffi_ } object . <nl> + MaybeHandle < Object > LookupImport ( uint32_t index , Handle < String > module_name , <nl> + MaybeHandle < String > import_name ) { <nl> + if ( ffi_ . 
is_null ( ) ) { <nl> + return ReportFFIError ( " FFI is not an object " , index , module_name , <nl> + import_name ) ; <nl> } <nl> <nl> - if ( compiled_module - > has_indirect_function_tables ( ) ) { <nl> - Handle < FixedArray > indirect_tables_template = <nl> - compiled_module - > indirect_function_tables ( ) ; <nl> - Handle < FixedArray > to_replace = <nl> - owner . is_null ( ) ? indirect_tables_template <nl> - : handle ( FixedArray : : cast ( owner - > GetInternalField ( <nl> - kWasmModuleFunctionTable ) ) ) ; <nl> - Handle < FixedArray > indirect_tables = SetupIndirectFunctionTable ( <nl> - isolate , code_table , indirect_tables_template , to_replace ) ; <nl> - for ( int i = 0 ; i < indirect_tables - > length ( ) ; + + i ) { <nl> - Handle < FixedArray > metadata = <nl> - indirect_tables - > GetValueChecked < FixedArray > ( isolate , i ) ; <nl> - uint32_t size = Smi : : cast ( metadata - > get ( kSize ) ) - > value ( ) ; <nl> - Handle < FixedArray > table = <nl> - metadata - > GetValueChecked < FixedArray > ( isolate , kTable ) ; <nl> - PopulateFunctionTable ( table , size , & functions ) ; <nl> - } <nl> - instance - > SetInternalField ( kWasmModuleFunctionTable , * indirect_tables ) ; <nl> + / / Look up the module first . <nl> + MaybeHandle < Object > result = Object : : GetProperty ( ffi_ , module_name ) ; <nl> + if ( result . is_null ( ) ) { <nl> + return ReportFFIError ( " module not found " , index , module_name , <nl> + import_name ) ; <nl> } <nl> - } <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Set up the exports object for the new instance . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - bool mem_export = compiled_module - > export_memory ( ) ; <nl> - ModuleOrigin origin = compiled_module - > origin ( ) ; <nl> + Handle < Object > module = result . ToHandleChecked ( ) ; <nl> <nl> - if ( compiled_module - > has_exports ( ) | | mem_export ) { <nl> - PropertyDescriptor desc ; <nl> - desc . set_writable ( false ) ; <nl> + if ( ! import_name . is_null ( ) ) { <nl> + / / Look up the value in the module . <nl> + if ( ! module - > IsJSReceiver ( ) ) { <nl> + return ReportFFIError ( " module is not an object or function " , index , <nl> + module_name , import_name ) ; <nl> + } <nl> <nl> - Handle < JSObject > exports_object = instance ; <nl> - if ( origin = = kWasmOrigin ) { <nl> - / / Create the " exports " object . <nl> - Handle < JSFunction > object_function = Handle < JSFunction > ( <nl> - isolate - > native_context ( ) - > object_function ( ) , isolate ) ; <nl> - exports_object = factory - > NewJSObject ( object_function , TENURED ) ; <nl> - Handle < String > exports_name = factory - > InternalizeUtf8String ( " exports " ) ; <nl> - JSObject : : AddProperty ( instance , exports_name , exports_object , READ_ONLY ) ; <nl> + result = Object : : GetProperty ( module , import_name . ToHandleChecked ( ) ) ; <nl> + if ( result . is_null ( ) ) { <nl> + return ReportFFIError ( " import not found " , index , module_name , <nl> + import_name ) ; <nl> + } <nl> + } else { <nl> + / / No function specified . Use the " default export " . <nl> + result = module ; <nl> } <nl> - int first_export = - 1 ; <nl> - / / TODO ( wasm ) : another iteration over the code objects . 
<nl> - for ( int i = 0 ; i < code_table - > length ( ) ; i + + ) { <nl> - Handle < Code > code = code_table - > GetValueChecked < Code > ( isolate , i ) ; <nl> - if ( code - > kind ( ) = = Code : : JS_TO_WASM_FUNCTION ) { <nl> - first_export = i ; <nl> - break ; <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + / / Load data segments into the memory . <nl> + void LoadDataSegments ( Address mem_addr , size_t mem_size ) { <nl> + CHECK ( compiled_module_ - > has_data_segments ( ) = = <nl> + compiled_module_ - > has_data_segments_info ( ) ) ; <nl> + <nl> + / / If we have neither , we ' re done . <nl> + if ( ! compiled_module_ - > has_data_segments ( ) ) return ; <nl> + <nl> + Handle < ByteArray > data = compiled_module_ - > data_segments ( ) ; <nl> + Handle < FixedArray > segments = compiled_module_ - > data_segments_info ( ) ; <nl> + <nl> + uint32_t last_extraction_pos = 0 ; <nl> + for ( int i = 0 ; i < segments - > length ( ) ; + + i ) { <nl> + Handle < ByteArray > segment = <nl> + Handle < ByteArray > ( ByteArray : : cast ( segments - > get ( i ) ) ) ; <nl> + uint32_t dest_addr = <nl> + static_cast < uint32_t > ( segment - > get_int ( kDestAddrValue ) ) ; <nl> + uint32_t source_size = <nl> + static_cast < uint32_t > ( segment - > get_int ( kSourceSize ) ) ; <nl> + CHECK_LT ( dest_addr , mem_size ) ; <nl> + CHECK_LE ( source_size , mem_size ) ; <nl> + CHECK_LE ( dest_addr , mem_size - source_size ) ; <nl> + byte * addr = mem_addr + dest_addr ; <nl> + data - > copy_out ( last_extraction_pos , addr , source_size ) ; <nl> + last_extraction_pos + = source_size ; <nl> + } <nl> + } <nl> + <nl> + Handle < Code > CompileImportWrapper ( int index , Handle < FixedArray > data , <nl> + Handle < JSReceiver > target , <nl> + Handle < String > module_name , <nl> + MaybeHandle < String > import_name ) { <nl> + / / TODO ( mtrofin ) : this is an uint32_t , actually . We should rationalize <nl> + / / it when we rationalize signed / unsigned stuff . 
<nl> + int ret_count = Smi : : cast ( data - > get ( kOutputCount ) ) - > value ( ) ; <nl> + CHECK_GE ( ret_count , 0 ) ; <nl> + Handle < ByteArray > sig_data = <nl> + data - > GetValueChecked < ByteArray > ( isolate_ , kSignature ) ; <nl> + int sig_data_size = sig_data - > length ( ) ; <nl> + int param_count = sig_data_size - ret_count ; <nl> + CHECK ( param_count > = 0 ) ; <nl> + <nl> + Handle < Code > code ; <nl> + bool isMatch = false ; <nl> + Handle < Code > export_wrapper_code ; <nl> + if ( target - > IsJSFunction ( ) ) { <nl> + Handle < JSFunction > func = Handle < JSFunction > : : cast ( target ) ; <nl> + export_wrapper_code = handle ( func - > code ( ) ) ; <nl> + if ( export_wrapper_code - > kind ( ) = = Code : : JS_TO_WASM_FUNCTION ) { <nl> + int exported_param_count = <nl> + Smi : : cast ( func - > GetInternalField ( kInternalArity ) ) - > value ( ) ; <nl> + Handle < ByteArray > exportedSig = Handle < ByteArray > ( <nl> + ByteArray : : cast ( func - > GetInternalField ( kInternalSignature ) ) ) ; <nl> + if ( exported_param_count = = param_count & & <nl> + exportedSig - > length ( ) = = sig_data - > length ( ) & & <nl> + memcmp ( exportedSig - > GetDataStartAddress ( ) , <nl> + sig_data - > GetDataStartAddress ( ) , <nl> + exportedSig - > length ( ) ) = = 0 ) { <nl> + isMatch = true ; <nl> + } <nl> } <nl> } <nl> - if ( compiled_module - > has_exports ( ) ) { <nl> - Handle < FixedArray > exports = compiled_module - > exports ( ) ; <nl> - int export_size = exports - > length ( ) ; <nl> - for ( int i = 0 ; i < export_size ; + + i ) { <nl> - Handle < FixedArray > export_data = <nl> - exports - > GetValueChecked < FixedArray > ( isolate , i ) ; <nl> - Handle < String > name = <nl> - export_data - > GetValueChecked < String > ( isolate , kExportName ) ; <nl> - int arity = Smi : : cast ( export_data - > get ( kExportArity ) ) - > value ( ) ; <nl> - MaybeHandle < ByteArray > signature = <nl> - export_data - > GetValue < ByteArray > ( isolate , kExportedSignature ) ; <nl> - Handle < Code > export_code = <nl> - code_table - > GetValueChecked < Code > ( isolate , first_export + i ) ; <nl> - Handle < JSFunction > function = WrapExportCodeAsJSFunction ( <nl> - isolate , export_code , name , arity , signature , instance ) ; <nl> - desc . set_value ( function ) ; <nl> - Maybe < bool > status = JSReceiver : : DefineOwnProperty ( <nl> - isolate , exports_object , name , & desc , Object : : THROW_ON_ERROR ) ; <nl> - if ( ! status . IsJust ( ) ) { <nl> - thrower - > Error ( " export of % . * s failed . " , name - > length ( ) , <nl> - name - > ToCString ( ) . get ( ) ) ; <nl> - return nothing ; <nl> + if ( isMatch ) { <nl> + int wasm_count = 0 ; <nl> + int const mask = RelocInfo : : ModeMask ( RelocInfo : : CODE_TARGET ) ; <nl> + for ( RelocIterator it ( * export_wrapper_code , mask ) ; ! it . done ( ) ; <nl> + it . next ( ) ) { <nl> + RelocInfo * rinfo = it . rinfo ( ) ; <nl> + Address target_address = rinfo - > target_address ( ) ; <nl> + Code * target = Code : : GetCodeFromTargetAddress ( target_address ) ; <nl> + if ( target - > kind ( ) = = Code : : WASM_FUNCTION ) { <nl> + + + wasm_count ; <nl> + code = handle ( target ) ; <nl> } <nl> } <nl> + DCHECK ( wasm_count = = 1 ) ; <nl> + return code ; <nl> + } else { <nl> + / / Copy the signature to avoid a raw pointer into a heap object when <nl> + / / GC can happen . <nl> + Zone zone ( isolate_ - > allocator ( ) ) ; <nl> + MachineRepresentation * reps = <nl> + zone . 
NewArray < MachineRepresentation > ( sig_data_size ) ; <nl> + memcpy ( reps , sig_data - > GetDataStartAddress ( ) , <nl> + sizeof ( MachineRepresentation ) * sig_data_size ) ; <nl> + FunctionSig sig ( ret_count , param_count , reps ) ; <nl> + <nl> + return compiler : : CompileWasmToJSWrapper ( isolate_ , target , & sig , index , <nl> + module_name , import_name ) ; <nl> } <nl> - if ( mem_export ) { <nl> - / / Export the memory as a named property . <nl> - Handle < JSArrayBuffer > buffer = Handle < JSArrayBuffer > ( <nl> - JSArrayBuffer : : cast ( instance - > GetInternalField ( kWasmMemArrayBuffer ) ) ) ; <nl> - Handle < Object > memory_object = <nl> - WasmJs : : CreateWasmMemoryObject ( isolate , buffer , false , 0 ) ; <nl> - / / TODO ( titzer ) : export the memory with the correct name . <nl> - Handle < String > name = factory - > InternalizeUtf8String ( " memory " ) ; <nl> - JSObject : : AddProperty ( exports_object , name , memory_object , READ_ONLY ) ; <nl> + } <nl> + <nl> + void WriteGlobalValue ( MaybeHandle < JSArrayBuffer > globals , uint32_t offset , <nl> + Handle < Object > value , int type ) { <nl> + double num = 0 ; <nl> + if ( value - > IsSmi ( ) ) { <nl> + num = Smi : : cast ( * value ) - > value ( ) ; <nl> + } else if ( value - > IsHeapNumber ( ) ) { <nl> + num = HeapNumber : : cast ( * value ) - > value ( ) ; <nl> + } else { <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + TRACE ( " init [ globals + % u ] = % lf , type = % d \ n " , offset , num , type ) ; <nl> + byte * ptr = raw_buffer_ptr ( globals , offset ) ; <nl> + switch ( type ) { <nl> + case kLocalI32 : <nl> + * reinterpret_cast < int32_t * > ( ptr ) = static_cast < int32_t > ( num ) ; <nl> + break ; <nl> + case kLocalI64 : <nl> + / / TODO ( titzer ) : initialization of imported i64 globals . <nl> + UNREACHABLE ( ) ; <nl> + break ; <nl> + case kLocalF32 : <nl> + * reinterpret_cast < float * > ( ptr ) = static_cast < float > ( num ) ; <nl> + break ; <nl> + case kLocalF64 : <nl> + * reinterpret_cast < double * > ( ptr ) = num ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> } <nl> } <nl> <nl> - if ( num_imported_functions > 0 | | ! owner . is_null ( ) ) { <nl> - / / If the code was cloned , or new imports were compiled , patch . <nl> - PatchDirectCalls ( old_code_table , code_table , num_imported_functions ) ; <nl> + / / Process the imports , including functions , tables , globals , and memory , in <nl> + / / order , loading them from the { ffi_ } object . Returns the number of imported <nl> + / / functions . <nl> + int ProcessImports ( MaybeHandle < JSArrayBuffer > globals , <nl> + Handle < FixedArray > code_table ) { <nl> + int num_imported_functions = 0 ; <nl> + if ( ! 
compiled_module_ - > has_imports ( ) ) return num_imported_functions ; <nl> + <nl> + Handle < FixedArray > imports = compiled_module_ - > imports ( ) ; <nl> + for ( int index = 0 ; index < imports - > length ( ) ; + + index ) { <nl> + Handle < FixedArray > data = <nl> + imports - > GetValueChecked < FixedArray > ( isolate_ , index ) ; <nl> + <nl> + Handle < String > module_name = <nl> + data - > GetValueChecked < String > ( isolate_ , kModuleName ) ; <nl> + MaybeHandle < String > function_name = <nl> + data - > GetValue < String > ( isolate_ , kFunctionName ) ; <nl> + <nl> + MaybeHandle < Object > result = <nl> + LookupImport ( index , module_name , function_name ) ; <nl> + if ( thrower_ - > error ( ) ) return - 1 ; <nl> + <nl> + WasmExternalKind kind = static_cast < WasmExternalKind > ( <nl> + Smi : : cast ( data - > get ( kImportKind ) ) - > value ( ) ) ; <nl> + switch ( kind ) { <nl> + case kExternalFunction : { <nl> + / / Function imports must be callable . <nl> + Handle < Object > function = result . ToHandleChecked ( ) ; <nl> + if ( ! function - > IsCallable ( ) ) { <nl> + ReportFFIError ( " function import requires a callable " , index , <nl> + module_name , function_name ) ; <nl> + return - 1 ; <nl> + } <nl> + <nl> + Handle < Code > import_wrapper = CompileImportWrapper ( <nl> + index , data , Handle < JSReceiver > : : cast ( function ) , module_name , <nl> + function_name ) ; <nl> + int func_index = Smi : : cast ( data - > get ( kImportIndex ) ) - > value ( ) ; <nl> + code_table - > set ( func_index , * import_wrapper ) ; <nl> + RecordStats ( isolate_ , * import_wrapper ) ; <nl> + num_imported_functions + + ; <nl> + break ; <nl> + } <nl> + case kExternalTable : <nl> + / / TODO ( titzer ) : Table imports must be a WebAssembly . Table . <nl> + break ; <nl> + case kExternalMemory : <nl> + / / TODO ( titzer ) : Memory imports must be a WebAssembly . Memory . <nl> + break ; <nl> + case kExternalGlobal : { <nl> + / / Global imports are converted to numbers and written into the <nl> + / / { globals } array buffer . <nl> + Handle < Object > object = result . ToHandleChecked ( ) ; <nl> + MaybeHandle < Object > number = Object : : ToNumber ( object ) ; <nl> + if ( number . is_null ( ) ) { <nl> + ReportFFIError ( " global import could not be converted to number " , <nl> + index , module_name , function_name ) ; <nl> + return - 1 ; <nl> + } <nl> + Handle < Object > val = number . ToHandleChecked ( ) ; <nl> + int offset = Smi : : cast ( data - > get ( kImportIndex ) ) - > value ( ) ; <nl> + int type = Smi : : cast ( data - > get ( kImportGlobalType ) ) - > value ( ) ; <nl> + WriteGlobalValue ( globals , offset , val , type ) ; <nl> + break ; <nl> + } <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + break ; <nl> + } <nl> + } <nl> + return num_imported_functions ; <nl> } <nl> <nl> - FlushICache ( isolate , code_table ) ; <nl> + / / Process initialization of globals . <nl> + void ProcessInits ( MaybeHandle < JSArrayBuffer > globals ) { <nl> + if ( ! compiled_module_ - > has_inits ( ) ) return ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / Run the start function if one was specified . 
<nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - if ( compiled_module - > has_startup_function ( ) ) { <nl> - Handle < FixedArray > startup_data = compiled_module - > startup_function ( ) ; <nl> - HandleScope scope ( isolate ) ; <nl> - int32_t start_index = <nl> - startup_data - > GetValueChecked < Smi > ( isolate , kExportedFunctionIndex ) <nl> - - > value ( ) ; <nl> - Handle < Code > startup_code = <nl> - code_table - > GetValueChecked < Code > ( isolate , start_index ) ; <nl> - int arity = Smi : : cast ( startup_data - > get ( kExportArity ) ) - > value ( ) ; <nl> - MaybeHandle < ByteArray > startup_signature = <nl> - startup_data - > GetValue < ByteArray > ( isolate , kExportedSignature ) ; <nl> - Handle < JSFunction > startup_fct = WrapExportCodeAsJSFunction ( <nl> - isolate , startup_code , factory - > InternalizeUtf8String ( " start " ) , arity , <nl> - startup_signature , instance ) ; <nl> - RecordStats ( isolate , * startup_code ) ; <nl> - / / Call the JS function . <nl> - Handle < Object > undefined = isolate - > factory ( ) - > undefined_value ( ) ; <nl> - MaybeHandle < Object > retval = <nl> - Execution : : Call ( isolate , startup_fct , undefined , 0 , nullptr ) ; <nl> - <nl> - if ( retval . is_null ( ) ) { <nl> - thrower - > Error ( " WASM . instantiateModule ( ) : start function failed " ) ; <nl> - return nothing ; <nl> + Handle < FixedArray > inits = compiled_module_ - > inits ( ) ; <nl> + for ( int index = 0 ; index < inits - > length ( ) ; + + index ) { <nl> + Handle < FixedArray > data = <nl> + inits - > GetValueChecked < FixedArray > ( isolate_ , index ) ; <nl> + <nl> + int offset = Smi : : cast ( data - > get ( kGlobalInitIndex ) ) - > value ( ) ; <nl> + Handle < Object > val ( data - > get ( kGlobalInitValue ) , isolate_ ) ; <nl> + int type = Smi : : cast ( data - > get ( kGlobalInitType ) ) - > value ( ) ; <nl> + if ( Smi : : cast ( data - > get ( kGlobalInitKind ) ) - > value ( ) = = 0 ) { <nl> + / / Initialize with a constant . <nl> + WriteGlobalValue ( globals , offset , val , type ) ; <nl> + } else { <nl> + / / Initialize with another global . <nl> + int old_offset = Smi : : cast ( * val ) - > value ( ) ; <nl> + TRACE ( " init [ globals + % u ] = [ globals + % d ] \ n " , offset , old_offset ) ; <nl> + int size = sizeof ( int32_t ) ; <nl> + if ( type = = kLocalI64 | | type = = kLocalF64 ) size = sizeof ( double ) ; <nl> + memcpy ( raw_buffer_ptr ( globals , offset ) , <nl> + raw_buffer_ptr ( globals , old_offset ) , size ) ; <nl> + } <nl> } <nl> } <nl> <nl> - DCHECK ( wasm : : IsWasmObject ( * instance ) ) ; <nl> + / / Allocate memory for a module instance as a new JSArrayBuffer . <nl> + Handle < JSArrayBuffer > AllocateMemory ( uint32_t min_mem_pages ) { <nl> + if ( min_mem_pages > WasmModule : : kMaxMemPages ) { <nl> + thrower_ - > Error ( " Out of memory : wasm memory too large " ) ; <nl> + return Handle < JSArrayBuffer > : : null ( ) ; <nl> + } <nl> + Handle < JSArrayBuffer > mem_buffer = <nl> + NewArrayBuffer ( isolate_ , min_mem_pages * WasmModule : : kPageSize ) ; <nl> <nl> - { <nl> - Handle < WeakCell > link_to_owner = factory - > NewWeakCell ( instance ) ; <nl> + if ( mem_buffer . 
is_null ( ) ) { <nl> + thrower_ - > Error ( " Out of memory : wasm memory " ) ; <nl> + } <nl> + return mem_buffer ; <nl> + } <nl> <nl> - Handle < Object > global_handle = isolate - > global_handles ( ) - > Create ( * instance ) ; <nl> - Handle < WeakCell > link_to_clone = factory - > NewWeakCell ( compiled_module ) ; <nl> - { <nl> - DisallowHeapAllocation no_gc ; <nl> - compiled_module - > set_weak_owning_instance ( link_to_owner ) ; <nl> - Handle < WeakCell > next ; <nl> - if ( link_to_original . ToHandle ( & next ) & & ! next - > cleared ( ) ) { <nl> - WasmCompiledModule * original = WasmCompiledModule : : cast ( next - > value ( ) ) ; <nl> - DCHECK ( original - > has_weak_owning_instance ( ) ) ; <nl> - DCHECK ( ! original - > weak_owning_instance ( ) - > cleared ( ) ) ; <nl> - compiled_module - > set_weak_next_instance ( next ) ; <nl> - original - > set_weak_prev_instance ( link_to_clone ) ; <nl> + / / Process the exports , creating wrappers for functions , tables , memories , <nl> + / / and globals . <nl> + void ProcessExports ( MaybeHandle < JSArrayBuffer > globals , <nl> + Handle < FixedArray > code_table , <nl> + Handle < JSObject > instance ) { <nl> + if ( ! compiled_module_ - > has_exports ( ) ) return ; <nl> + <nl> + Handle < JSObject > exports_object = instance ; <nl> + if ( compiled_module_ - > origin ( ) = = kWasmOrigin ) { <nl> + / / Create the " exports " object . <nl> + Handle < JSFunction > object_function = Handle < JSFunction > ( <nl> + isolate_ - > native_context ( ) - > object_function ( ) , isolate_ ) ; <nl> + exports_object = <nl> + isolate_ - > factory ( ) - > NewJSObject ( object_function , TENURED ) ; <nl> + Handle < String > exports_name = <nl> + isolate_ - > factory ( ) - > InternalizeUtf8String ( " exports " ) ; <nl> + JSObject : : AddProperty ( instance , exports_name , exports_object , READ_ONLY ) ; <nl> + } <nl> + <nl> + PropertyDescriptor desc ; <nl> + desc . set_writable ( false ) ; <nl> + <nl> + Handle < FixedArray > exports = compiled_module_ - > exports ( ) ; <nl> + <nl> + for ( int i = 0 ; i < exports - > length ( ) ; + + i ) { <nl> + Handle < FixedArray > export_data = <nl> + exports - > GetValueChecked < FixedArray > ( isolate_ , i ) ; <nl> + Handle < String > name = <nl> + export_data - > GetValueChecked < String > ( isolate_ , kExportName ) ; <nl> + WasmExternalKind kind = static_cast < WasmExternalKind > ( <nl> + Smi : : cast ( export_data - > get ( kExportKind ) ) - > value ( ) ) ; <nl> + switch ( kind ) { <nl> + case kExternalFunction : { <nl> + / / Wrap and export the code as a JSFunction . <nl> + int code_table_index = <nl> + Smi : : cast ( export_data - > get ( kExportIndex ) ) - > value ( ) ; <nl> + Handle < Code > export_code = <nl> + code_table - > GetValueChecked < Code > ( isolate_ , code_table_index ) ; <nl> + int arity = Smi : : cast ( export_data - > get ( kExportArity ) ) - > value ( ) ; <nl> + MaybeHandle < ByteArray > signature = <nl> + export_data - > GetValue < ByteArray > ( isolate_ , kExportedSignature ) ; <nl> + desc . set_value ( WrapExportCodeAsJSFunction ( <nl> + isolate_ , export_code , name , arity , signature , instance ) ) ; <nl> + break ; <nl> + } <nl> + case kExternalTable : <nl> + / / TODO ( titzer ) : create a WebAssembly . Table instance . <nl> + / / TODO ( titzer ) : should it have the same identity as an import ? <nl> + break ; <nl> + case kExternalMemory : { <nl> + / / TODO ( titzer ) : should memory have the same identity as an <nl> + / / import ? 
<nl> + Handle < JSArrayBuffer > buffer = <nl> + Handle < JSArrayBuffer > ( JSArrayBuffer : : cast ( <nl> + instance - > GetInternalField ( kWasmMemArrayBuffer ) ) ) ; <nl> + desc . set_value ( <nl> + WasmJs : : CreateWasmMemoryObject ( isolate_ , buffer , false , 0 ) ) ; <nl> + break ; <nl> + } <nl> + case kExternalGlobal : { <nl> + / / Export the value of the global variable as a number . <nl> + int offset = Smi : : cast ( export_data - > get ( kExportIndex ) ) - > value ( ) ; <nl> + byte * ptr = raw_buffer_ptr ( globals , offset ) ; <nl> + double num = 0 ; <nl> + switch ( Smi : : cast ( export_data - > get ( kExportGlobalType ) ) - > value ( ) ) { <nl> + case kLocalI32 : <nl> + num = * reinterpret_cast < int32_t * > ( ptr ) ; <nl> + break ; <nl> + case kLocalF32 : <nl> + num = * reinterpret_cast < float * > ( ptr ) ; <nl> + break ; <nl> + case kLocalF64 : <nl> + num = * reinterpret_cast < double * > ( ptr ) ; <nl> + break ; <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + desc . set_value ( isolate_ - > factory ( ) - > NewNumber ( num ) ) ; <nl> + break ; <nl> + } <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + break ; <nl> } <nl> <nl> - compiled_module - > set_weak_owning_instance ( link_to_owner ) ; <nl> - instance - > SetInternalField ( kWasmCompiledModule , * compiled_module ) ; <nl> - GlobalHandles : : MakeWeak ( global_handle . location ( ) , <nl> - global_handle . location ( ) , & InstanceFinalizer , <nl> - v8 : : WeakCallbackType : : kFinalizer ) ; <nl> + Maybe < bool > status = JSReceiver : : DefineOwnProperty ( <nl> + isolate_ , exports_object , name , & desc , Object : : THROW_ON_ERROR ) ; <nl> + if ( ! status . IsJust ( ) ) { <nl> + thrower_ - > Error ( " export of % . * s failed . " , name - > length ( ) , <nl> + name - > ToCString ( ) . get ( ) ) ; <nl> + return ; <nl> + } <nl> } <nl> } <nl> - TRACE ( " Finishing instance % d \ n " , compiled_module - > instance_id ( ) ) ; <nl> - TRACE_CHAIN ( WasmCompiledModule : : cast ( module_object - > GetInternalField ( 0 ) ) ) ; <nl> - return instance ; <nl> - } <nl> + } ; <nl> <nl> - # if DEBUG <nl> - uint32_t WasmCompiledModule : : instance_id_counter_ = 0 ; <nl> - # endif <nl> + / / Instantiates a WASM module , creating a WebAssembly . Instance from a <nl> + / / WebAssembly . Module . <nl> + MaybeHandle < JSObject > WasmModule : : Instantiate ( Isolate * isolate , <nl> + ErrorThrower * thrower , <nl> + Handle < JSObject > module_object , <nl> + Handle < JSReceiver > ffi , <nl> + Handle < JSArrayBuffer > memory ) { <nl> + WasmInstanceBuilder builder ( isolate , thrower , module_object , ffi , memory ) ; <nl> + return builder . 
Build();
+}

Handle<WasmCompiledModule> WasmCompiledModule::New(Isolate* isolate,
                                                   uint32_t min_memory_pages,
                                                   uint32_t globals_size,
-                                                   bool export_memory,
                                                   ModuleOrigin origin) {
  Handle<FixedArray> ret =
      isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
Handle<WasmCompiledModule> WasmCompiledModule::New(Isolate* isolate,
  ret->set(kID_min_memory_pages,
           Smi::FromInt(static_cast<int>(min_memory_pages)));
  ret->set(kID_globals_size, Smi::FromInt(static_cast<int>(globals_size)));
-  ret->set(kID_export_memory, Smi::FromInt(static_cast<int>(export_memory)));
  ret->set(kID_origin, Smi::FromInt(static_cast<int>(origin)));
  WasmCompiledModule::cast(*ret)->Init();
  return handle(WasmCompiledModule::cast(*ret));
Handle<WasmCompiledModule> WasmCompiledModule::New(Isolate* isolate,

void WasmCompiledModule::Init() {
#if DEBUG
-  set(kID_instance_id, Smi::FromInt(instance_id_counter_++));
+  static uint32_t instance_id_counter = 0;
+  set(kID_instance_id, Smi::FromInt(instance_id_counter++));
  TRACE("New compiled module id: %d\n", instance_id());
#endif
}
bool UpdateWasmModuleMemory(Handle<JSObject> object, Address old_start,

  // Iterate through the code objects in the code table and update relocation
  // information
-  for (int i = 0; i < code_table->length(); i++) {
+  for (int i = 0; i < code_table->length(); ++i) {
    obj = code_table->get(i);
    Handle<Code> code(Code::cast(obj));

Handle<FixedArray> BuildFunctionTable(Isolate* isolate, uint32_t index,
  // platforms, it is possible to have the top bits of "undefined" take
  // small integer values (or zero), which are more likely to be equal to
  // the signature index we check against.
-  for (uint32_t i = table->size; i < table->max_size; i++) {
+  for (uint32_t i = table->size; i < table->max_size; ++i) {
    values->set(i, Smi::FromInt(-1));
  }
  return values;
--- a/src/wasm/wasm-module.h
+++ b/src/wasm/wasm-module.h
struct WasmInitExpr {
    double f64_const;
    uint32_t global_index;
  } val;
-};

-#define NO_INIT \
-  {             \
-    WasmInitExpr::kNone, { 0u } \
-  }
+  WasmInitExpr() : kind(kNone) {}
+  explicit WasmInitExpr(int32_t v) : kind(kI32Const) { val.i32_const = v; }
+  explicit WasmInitExpr(int64_t v) : kind(kI64Const) { val.i64_const = v; }
+  explicit WasmInitExpr(float v) : kind(kF32Const) { val.f32_const = v; }
+  explicit WasmInitExpr(double v) : kind(kF64Const) { val.f64_const = v; }
+  WasmInitExpr(WasmInitKind kind, uint32_t global_index) : kind(kGlobalIndex) {
+    val.global_index = global_index;
+  }
+};
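
The constructors above replace the old NO_INIT macro with typed initializers. Typical uses, with values borrowed from the tests added later in this patch (note that the two-argument overload sets kind to kGlobalIndex regardless of the kind argument it receives):

WasmInitExpr none;                // kind == kNone; e.g. a global with no initializer
WasmInitExpr i32_init(777777);    // kI32Const
WasmInitExpr f64_init(86374.25);  // kF64Const
WasmInitExpr from_global(WasmInitExpr::kGlobalIndex, 1);  // copy global #1's value
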
<nl> struct WasmFunction { <nl> class WasmCompiledModule : public FixedArray { <nl> <nl> # define CORE_WCM_PROPERTY_TABLE ( MACRO ) \ <nl> MACRO ( OBJECT , FixedArray , code_table ) \ <nl> - MACRO ( OBJECT , FixedArray , import_data ) \ <nl> + MACRO ( OBJECT , FixedArray , imports ) \ <nl> MACRO ( OBJECT , FixedArray , exports ) \ <nl> + MACRO ( OBJECT , FixedArray , inits ) \ <nl> MACRO ( OBJECT , FixedArray , startup_function ) \ <nl> MACRO ( OBJECT , FixedArray , indirect_function_tables ) \ <nl> MACRO ( OBJECT , String , module_bytes ) \ <nl> class WasmCompiledModule : public FixedArray { <nl> MACRO ( OBJECT , ByteArray , data_segments ) \ <nl> MACRO ( SMALL_NUMBER , uint32_t , globals_size ) \ <nl> MACRO ( OBJECT , JSArrayBuffer , heap ) \ <nl> - MACRO ( SMALL_NUMBER , bool , export_memory ) \ <nl> MACRO ( SMALL_NUMBER , ModuleOrigin , origin ) \ <nl> MACRO ( WEAK_LINK , WasmCompiledModule , next_instance ) \ <nl> MACRO ( WEAK_LINK , WasmCompiledModule , prev_instance ) \ <nl> class WasmCompiledModule : public FixedArray { <nl> static Handle < WasmCompiledModule > New ( Isolate * isolate , <nl> uint32_t min_memory_pages , <nl> uint32_t globals_size , <nl> - bool export_memory , <nl> ModuleOrigin origin ) ; <nl> <nl> static Handle < WasmCompiledModule > Clone ( Isolate * isolate , <nl> class WasmCompiledModule : public FixedArray { <nl> void PrintInstancesChain ( ) ; <nl> <nl> private : <nl> - # if DEBUG <nl> - static uint32_t instance_id_counter_ ; <nl> - # endif <nl> void Init ( ) ; <nl> <nl> DISALLOW_IMPLICIT_CONSTRUCTORS ( WasmCompiledModule ) ; <nl> mmm a / test / cctest / wasm / test - run - wasm - module . cc <nl> ppp b / test / cctest / wasm / test - run - wasm - module . cc <nl> TEST ( Run_WasmModule_GrowMemOobVariableIndex ) { <nl> CHECK ( try_catch . HasCaught ( ) ) ; <nl> isolate - > clear_pending_exception ( ) ; <nl> } <nl> + <nl> + TEST ( Run_WasmModule_Global_init ) { <nl> + v8 : : internal : : AccountingAllocator allocator ; <nl> + Zone zone ( & allocator ) ; <nl> + TestSignatures sigs ; <nl> + <nl> + WasmModuleBuilder * builder = new ( & zone ) WasmModuleBuilder ( & zone ) ; <nl> + uint32_t global1 = <nl> + builder - > AddGlobal ( kAstI32 , false , false , WasmInitExpr ( 777777 ) ) ; <nl> + uint32_t global2 = <nl> + builder - > AddGlobal ( kAstI32 , false , false , WasmInitExpr ( 222222 ) ) ; <nl> + WasmFunctionBuilder * f1 = builder - > AddFunction ( sigs . 
i_v());
+  byte code[] = {
+      WASM_I32_ADD(WASM_GET_GLOBAL(global1), WASM_GET_GLOBAL(global2))};
+  f1->EmitCode(code, sizeof(code));
+  ExportAsMain(f1);
+  TestModule(&zone, builder, 999999);
+}
+
+template <typename CType>
+static void RunWasmModuleGlobalInitTest(LocalType type, CType expected) {
+  v8::internal::AccountingAllocator allocator;
+  Zone zone(&allocator);
+  TestSignatures sigs;
+
+  LocalType types[] = {type};
+  FunctionSig sig(1, 0, types);
+
+  for (int padding = 0; padding < 5; padding++) {
+    // Test with a simple initializer
+    WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+
+    for (int i = 0; i < padding; i++) {  // pad global before
+      builder->AddGlobal(kAstI32, false, false, WasmInitExpr(i + 20000));
+    }
+    uint32_t global =
+        builder->AddGlobal(type, false, false, WasmInitExpr(expected));
+    for (int i = 0; i < padding; i++) {  // pad global after
+      builder->AddGlobal(kAstI32, false, false, WasmInitExpr(i + 30000));
+    }
+
+    WasmFunctionBuilder* f1 = builder->AddFunction(&sig);
+    byte code[] = {WASM_GET_GLOBAL(global)};
+    f1->EmitCode(code, sizeof(code));
+    ExportAsMain(f1);
+    TestModule(&zone, builder, expected);
+  }
+
+  for (int padding = 0; padding < 5; padding++) {
+    // Test with a global index
+    WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+    for (int i = 0; i < padding; i++) {  // pad global before
+      builder->AddGlobal(kAstI32, false, false, WasmInitExpr(i + 40000));
+    }
+
+    uint32_t global1 =
+        builder->AddGlobal(type, false, false, WasmInitExpr(expected));
+
+    for (int i = 0; i < padding; i++) {  // pad global middle
+      builder->AddGlobal(kAstI32, false, false, WasmInitExpr(i + 50000));
+    }
+
+    uint32_t global2 = builder->AddGlobal(
+        type, false, false, WasmInitExpr(WasmInitExpr::kGlobalIndex, global1));
+
+    for (int i = 0; i < padding; i++) {  // pad global after
+      builder->AddGlobal(kAstI32, false, false, WasmInitExpr(i + 60000));
+    }
+
+    WasmFunctionBuilder* f1 = builder->AddFunction(&sig);
+    byte code[] = {WASM_GET_GLOBAL(global2)};
+    f1->EmitCode(code, sizeof(code));
+    ExportAsMain(f1);
+    TestModule(&zone, builder, expected);
+  }
+}
+
+TEST(Run_WasmModule_Global_i32) {
+  RunWasmModuleGlobalInitTest<int32_t>(kAstI32, -983489);
+  RunWasmModuleGlobalInitTest<int32_t>(kAstI32, 11223344);
+}
+
+TEST(Run_WasmModule_Global_f32) {
+  RunWasmModuleGlobalInitTest<float>(kAstF32, -983.9f);
+  RunWasmModuleGlobalInitTest<float>(kAstF32, 1122.99f);
+}
+
+TEST(Run_WasmModule_Global_f64) {
+  RunWasmModuleGlobalInitTest<double>(kAstF64, -833.9);
+  RunWasmModuleGlobalInitTest<double>(kAstF64, 86374.25);
+}
--- a/test/cctest/wasm/wasm-run-utils.h
+++ b/test/cctest/wasm/wasm-run-utils.
h
class TestingModule : public ModuleEnv {
  byte size = WasmOpcodes::MemSize(WasmOpcodes::MachineTypeFor(type));
  global_offset = (global_offset + size - 1) & ~(size - 1);  // align
  module_.globals.push_back(
-      {type, true, NO_INIT, global_offset, false, false});
+      {type, true, WasmInitExpr(), global_offset, false, false});
  global_offset += size;
  // limit number of globals.
  CHECK_LT(global_offset, kMaxGlobalsSize);
--- a/test/mjsunit/wasm/compiled-module-management.js
+++ b/test/mjsunit/wasm/compiled-module-management.js
load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();

builder.addMemory(1, 1, true);
-builder.addImport("getValue", kSig_i);
-builder.addFunction("f", kSig_i)
+builder.addImport("getValue", kSig_i_v);
+builder.addFunction("f", kSig_i_v)
  .addBody([
    kExprCallFunction, 0
  ]).exportFunc();
--- a/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/test/mjsunit/wasm/compiled-module-serialization.js
load("test/mjsunit/wasm/wasm-module-builder.js");
  builder.addMemory(1, 1, true);
  var kSig_v_i = makeSig([kAstI32], []);
  var signature = builder.addType(kSig_v_i);
-  builder.addImport("some_value", kSig_i);
+  builder.addImport("some_value", kSig_i_v);
  builder.addImport("writer", signature);

  builder.addFunction("main", kSig_i_i)
load("test/mjsunit/wasm/wasm-module-builder.js");

(function RelationBetweenModuleAndClone() {
  let builder = new WasmModuleBuilder();
-  builder.addFunction("main", kSig_i)
+  builder.addFunction("main", kSig_i_v)
    .addBody([kExprI8Const, 42])
    .exportFunc();

load("test/mjsunit/wasm/wasm-module-builder.js");

(function SerializeAfterInstantiation() {
  let builder = new WasmModuleBuilder();
-  builder.addFunction("main", kSig_i)
+  builder.addFunction("main", kSig_i_v)
    .addBody([kExprI8Const, 42])
    .exportFunc();

--- a/test/mjsunit/wasm/export-table.js
+++ b/test/mjsunit/wasm/export-table.js
load("test/mjsunit/wasm/wasm-module-builder.js");
var kReturnValue = 88;
var builder = new WasmModuleBuilder();

-builder.addFunction("main", kSig_i)
+builder.addFunction("main", kSig_i_v)
  .addBody([
    kExprI8Const,
    kReturnValue,
load("test/mjsunit/wasm/wasm-module-builder.js");

var builder = new WasmModuleBuilder();

-builder.addFunction("main", kSig_i)
+builder.addFunction("main", kSig_i_v)
  .addBody([
    kExprI8Const,
    kReturnValue,
load("test/mjsunit/wasm/wasm-module-builder.js");

var builder = new WasmModuleBuilder();

-builder.addFunction("main", kSig_i)
+builder.addFunction("main", kSig_i_v)
  .addBody([
    kExprI8Const,
    kReturnValue,
--- a/test/mjsunit/wasm/ffi.js
+++ b/test/mjsunit/wasm/ffi.
js <nl> print ( " Native function " ) ; <nl> <nl> var builder = new WasmModuleBuilder ( ) ; <nl> <nl> - var sig_index = builder . addType ( kSig_d ) ; <nl> + var sig_index = builder . addType ( kSig_d_v ) ; <nl> builder . addImport ( " func " , sig_index ) ; <nl> builder . addFunction ( " main " , sig_index ) <nl> . addBody ( [ <nl> mmm a / test / mjsunit / wasm / function - prototype . js <nl> ppp b / test / mjsunit / wasm / function - prototype . js <nl> load ( " test / mjsunit / wasm / wasm - module - builder . js " ) ; <nl> ( function TestFunctionPrototype ( ) { <nl> var builder = new WasmModuleBuilder ( ) ; <nl> <nl> - builder . addFunction ( " nine " , kSig_i ) <nl> + builder . addFunction ( " nine " , kSig_i_v ) <nl> . addBody ( [ kExprI8Const , 9 ] ) <nl> . exportFunc ( ) ; <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 4b976dcef2b <nl> mmm / dev / null <nl> ppp b / test / mjsunit / wasm / globals . js <nl> <nl> + / / Copyright 2016 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - expose - wasm <nl> + <nl> + load ( " test / mjsunit / wasm / wasm - constants . js " ) ; <nl> + load ( " test / mjsunit / wasm / wasm - module - builder . js " ) ; <nl> + <nl> + function TestImported ( type , val , expected ) { <nl> + print ( " TestImported " + type + " ( " + val + " ) " + " = " + expected ) ; <nl> + var builder = new WasmModuleBuilder ( ) ; <nl> + var sig = makeSig ( [ ] , [ type ] ) ; <nl> + var g = builder . addImportedGlobal ( " foo " , undefined , type ) ; <nl> + builder . addFunction ( " main " , sig ) <nl> + . addBody ( [ kExprGetGlobal , g . index ] ) <nl> + . exportAs ( " main " ) ; <nl> + builder . addGlobal ( kAstI32 ) ; / / pad <nl> + <nl> + var instance = builder . instantiate ( { foo : val } ) ; <nl> + assertEquals ( expected , instance . exports . main ( ) ) ; <nl> + } <nl> + <nl> + TestImported ( kAstI32 , 300 . 1 , 300 ) ; <nl> + TestImported ( kAstF32 , 87234 . 87238 , Math . fround ( 87234 . 87238 ) ) ; <nl> + TestImported ( kAstF64 , 77777 . 88888 , 77777 . 88888 ) ; <nl> + TestImported ( kAstF64 , " 89 " , 89 ) ; <nl> + <nl> + <nl> + function TestExported ( type , val , expected ) { <nl> + print ( " TestExported " + type + " ( " + val + " ) " + " = " + expected ) ; <nl> + var builder = new WasmModuleBuilder ( ) ; <nl> + var sig = makeSig ( [ type ] , [ ] ) ; <nl> + builder . addGlobal ( kAstI32 ) ; / / pad <nl> + var g = builder . addGlobal ( type , false ) <nl> + . exportAs ( " foo " ) ; <nl> + g . init = val ; <nl> + builder . addGlobal ( kAstI32 ) ; / / pad <nl> + <nl> + var instance = builder . instantiate ( ) ; <nl> + assertEquals ( expected , instance . exports . foo ) ; <nl> + } <nl> + <nl> + TestExported ( kAstI32 , 455 . 5 , 455 ) ; <nl> + TestExported ( kAstF32 , - 999 . 34343 , Math . fround ( - 999 . 34343 ) ) ; <nl> + TestExported ( kAstF64 , 87347 . 66666 , 87347 . 66666 ) ; <nl> + <nl> + <nl> + function TestImportedExported ( type , val , expected ) { <nl> + print ( " TestImportedExported " + type + " ( " + val + " ) " + " = " + expected ) ; <nl> + var builder = new WasmModuleBuilder ( ) ; <nl> + var sig = makeSig ( [ type ] , [ ] ) ; <nl> + var i = builder . addImportedGlobal ( " foo " , undefined , type ) ; <nl> + builder . addGlobal ( kAstI32 ) ; / / pad <nl> + var o = builder . addGlobal ( type , false ) <nl> + . exportAs ( " bar " ) ; <nl> + o . init_index = i ; <nl> + builder . 
addGlobal ( kAstI32 ) ; / / pad <nl> + <nl> + var instance = builder . instantiate ( { foo : val } ) ; <nl> + assertEquals ( expected , instance . exports . bar ) ; <nl> + } <nl> + <nl> + TestImportedExported ( kAstI32 , 415 . 5 , 415 ) ; <nl> + TestImportedExported ( kAstF32 , - 979 . 34343 , Math . fround ( - 979 . 34343 ) ) ; <nl> + TestImportedExported ( kAstF64 , 81347 . 66666 , 81347 . 66666 ) ; <nl> mmm a / test / mjsunit / wasm / import - table . js <nl> ppp b / test / mjsunit / wasm / import - table . js <nl> testCallPrint ( ) ; <nl> function testCallImport2 ( foo , bar , expected ) { <nl> var builder = new WasmModuleBuilder ( ) ; <nl> <nl> - builder . addImport ( " foo " , kSig_i ) ; <nl> - builder . addImport ( " bar " , kSig_i ) ; <nl> - builder . addFunction ( " main " , kSig_i ) <nl> + builder . addImport ( " foo " , kSig_i_v ) ; <nl> + builder . addImport ( " bar " , kSig_i_v ) ; <nl> + builder . addFunction ( " main " , kSig_i_v ) <nl> . addBody ( [ <nl> kExprCallFunction , 0 , / / - - <nl> kExprCallFunction , 1 , / / - - <nl> mmm a / test / mjsunit / wasm / instance - gc . js <nl> ppp b / test / mjsunit / wasm / instance - gc . js <nl> let nogc = ( ) = > { } ; <nl> function newModule ( ) { <nl> let builder = new WasmModuleBuilder ( ) ; <nl> builder . addMemory ( 1 , 1 , true ) ; <nl> - builder . addFunction ( " main " , kSig_i ) <nl> + builder . addFunction ( " main " , kSig_i_v ) <nl> . addBody ( [ kExprI32Const , 0 , kExprI32LoadMem , 0 , 0 ] ) <nl> . exportFunc ( ) ; <nl> <nl> mmm a / test / mjsunit / wasm / instantiate - module - basic . js <nl> ppp b / test / mjsunit / wasm / instantiate - module - basic . js <nl> let kReturnValue = 117 ; <nl> let buffer = ( ( ) = > { <nl> let builder = new WasmModuleBuilder ( ) ; <nl> builder . addMemory ( 1 , 1 , true ) ; <nl> - builder . addFunction ( " main " , kSig_i ) <nl> + builder . addFunction ( " main " , kSig_i_v ) <nl> . addBody ( [ kExprI8Const , kReturnValue ] ) <nl> . exportFunc ( ) ; <nl> <nl> assertFalse ( WebAssembly . validate ( bytes ( 88 , 88 , 88 , 88 , 88 , 88 , 88 , 88 ) ) ) ; <nl> builder . addMemory ( 1 , 1 , true ) ; <nl> var kSig_v_i = makeSig ( [ kAstI32 ] , [ ] ) ; <nl> var signature = builder . addType ( kSig_v_i ) ; <nl> - builder . addImport ( " some_value " , kSig_i ) ; <nl> + builder . addImport ( " some_value " , kSig_i_v ) ; <nl> builder . addImport ( " writer " , signature ) ; <nl> <nl> builder . addFunction ( " main " , kSig_i_i ) <nl> assertFalse ( WebAssembly . validate ( bytes ( 88 , 88 , 88 , 88 , 88 , 88 , 88 , 88 ) ) ) ; <nl> ( function GlobalsArePrivateToTheInstance ( ) { <nl> print ( " GlobalsArePrivateToTheInstance . . . " ) ; <nl> var builder = new WasmModuleBuilder ( ) ; <nl> - builder . addGlobal ( kAstI32 ) ; <nl> + builder . addGlobal ( kAstI32 , true ) ; <nl> builder . addFunction ( " read " , kSig_i_v ) <nl> . addBody ( [ <nl> kExprGetGlobal , 0 ] ) <nl> assertFalse ( WebAssembly . validate ( bytes ( 88 , 88 , 88 , 88 , 88 , 88 , 88 , 88 ) ) ) ; <nl> var builder = new WasmModuleBuilder ( ) ; <nl> builder . addMemory ( 1 , 1 , true ) ; <nl> <nl> - builder . addFunction ( " f " , kSig_i ) <nl> + builder . addFunction ( " f " , kSig_i_v ) <nl> . addBody ( [ <nl> kExprI32Const , 0 , <nl> kExprI32LoadMem , 0 , 0 <nl> mmm a / test / mjsunit / wasm / start - function . js <nl> ppp b / test / mjsunit / wasm / start - function . 
js <nl> function assertVerifies ( sig , body ) { <nl> } <nl> <nl> assertVerifies ( kSig_v_v , [ kExprNop ] ) ; <nl> - assertVerifies ( kSig_i , [ kExprI8Const , 0 ] ) ; <nl> + assertVerifies ( kSig_i_v , [ kExprI8Const , 0 ] ) ; <nl> <nl> / / Arguments aren ' t allow to start functions . <nl> assertFails ( kSig_i_i , [ kExprGetLocal , 0 ] ) ; <nl> mmm a / test / mjsunit / wasm / test - wasm - module - builder . js <nl> ppp b / test / mjsunit / wasm / test - wasm - module - builder . js <nl> var debug = true ; <nl> ( function BasicTest ( ) { <nl> var module = new WasmModuleBuilder ( ) ; <nl> module . addMemory ( 1 , 2 , false ) ; <nl> - module . addFunction ( " foo " , kSig_i ) <nl> + module . addFunction ( " foo " , kSig_i_v ) <nl> . addBody ( [ kExprI8Const , 11 ] ) <nl> . exportAs ( " blarg " ) ; <nl> <nl> var debug = true ; <nl> ( function BasicTestWithUint8Array ( ) { <nl> var module = new WasmModuleBuilder ( ) ; <nl> module . addMemory ( 1 , 2 , false ) ; <nl> - module . addFunction ( " foo " , kSig_i ) <nl> + module . addFunction ( " foo " , kSig_i_v ) <nl> . addBody ( [ kExprI8Const , 17 ] ) <nl> . exportAs ( " blarg " ) ; <nl> <nl> mmm a / test / mjsunit / wasm / wasm - constants . js <nl> ppp b / test / mjsunit / wasm / wasm - constants . js <nl> var kExternalMemory = 2 ; <nl> var kExternalGlobal = 3 ; <nl> <nl> / / Useful signatures <nl> - var kSig_i = makeSig ( [ ] , [ kAstI32 ] ) ; <nl> - var kSig_d = makeSig ( [ ] , [ kAstF64 ] ) ; <nl> var kSig_i_i = makeSig ( [ kAstI32 ] , [ kAstI32 ] ) ; <nl> var kSig_i_l = makeSig ( [ kAstI64 ] , [ kAstI32 ] ) ; <nl> var kSig_i_ii = makeSig ( [ kAstI32 , kAstI32 ] , [ kAstI32 ] ) ; <nl> var kSig_l_ll = makeSig ( [ kAstI64 , kAstI64 ] , [ kAstI64 ] ) ; <nl> var kSig_i_dd = makeSig ( [ kAstF64 , kAstF64 ] , [ kAstI32 ] ) ; <nl> var kSig_v_v = makeSig ( [ ] , [ ] ) ; <nl> var kSig_i_v = makeSig ( [ ] , [ kAstI32 ] ) ; <nl> + var kSig_f_v = makeSig ( [ ] , [ kAstF64 ] ) ; <nl> + var kSig_d_v = makeSig ( [ ] , [ kAstF64 ] ) ; <nl> var kSig_v_i = makeSig ( [ kAstI32 ] , [ ] ) ; <nl> var kSig_v_ii = makeSig ( [ kAstI32 , kAstI32 ] , [ ] ) ; <nl> var kSig_v_iii = makeSig ( [ kAstI32 , kAstI32 , kAstI32 ] , [ ] ) ; <nl> mmm a / test / mjsunit / wasm / wasm - module - builder . js <nl> ppp b / test / mjsunit / wasm / wasm - module - builder . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> + / / Used for encoding f32 and double constants to bits . <nl> + let __buffer = new ArrayBuffer ( 8 ) ; <nl> + let byte_view = new Int8Array ( __buffer ) ; <nl> + let f32_view = new Float32Array ( __buffer ) ; <nl> + let f64_view = new Float64Array ( __buffer ) ; <nl> + <nl> class Binary extends Array { <nl> emit_u8 ( val ) { <nl> this . push ( val ) ; <nl> class Binary extends Array { <nl> this . push ( ( val > > 24 ) & 0xff ) ; <nl> } <nl> <nl> - emit_varint ( val ) { <nl> + emit_u32v ( val ) { <nl> while ( true ) { <nl> let v = val & 0xff ; <nl> val = val > > > 7 ; <nl> class Binary extends Array { <nl> emit_string ( string ) { <nl> / / When testing illegal names , we pass a byte array directly . <nl> if ( string instanceof Array ) { <nl> - this . emit_varint ( string . length ) ; <nl> + this . emit_u32v ( string . length ) ; <nl> this . emit_bytes ( string ) ; <nl> return ; <nl> } <nl> class Binary extends Array { <nl> / / This is the hacky way to convert a JavaScript string to a UTF8 encoded <nl> / / string only containing single - byte characters . 
<nl> let string_utf8 = unescape ( encodeURIComponent ( string ) ) ; <nl> - this . emit_varint ( string_utf8 . length ) ; <nl> + this . emit_u32v ( string_utf8 . length ) ; <nl> for ( let i = 0 ; i < string_utf8 . length ; i + + ) { <nl> this . emit_u8 ( string_utf8 . charCodeAt ( i ) ) ; <nl> } <nl> class Binary extends Array { <nl> let section = new Binary ; <nl> content_generator ( section ) ; <nl> / / Emit section length . <nl> - this . emit_varint ( section . length ) ; <nl> + this . emit_u32v ( section . length ) ; <nl> / / Copy the temporary buffer . <nl> this . push ( . . . section ) ; <nl> } <nl> } <nl> <nl> class WasmFunctionBuilder { <nl> - constructor ( name , type_index ) { <nl> + constructor ( module , name , type_index ) { <nl> + this . module = module ; <nl> this . name = name ; <nl> this . type_index = type_index ; <nl> - this . exports = [ ] ; <nl> } <nl> <nl> exportAs ( name ) { <nl> - this . exports . push ( name ) ; <nl> + this . module . exports . push ( { name : name , kind : kExternalFunction , index : this . index } ) ; <nl> return this ; <nl> } <nl> <nl> exportFunc ( ) { <nl> - this . exports . push ( this . name ) ; <nl> + this . exportAs ( this . name ) ; <nl> return this ; <nl> } <nl> <nl> class WasmFunctionBuilder { <nl> } <nl> } <nl> <nl> + class WasmGlobalBuilder { <nl> + constructor ( module , type , mutable ) { <nl> + this . module = module ; <nl> + this . type = type ; <nl> + this . mutable = mutable ; <nl> + this . init = 0 ; <nl> + } <nl> + <nl> + exportAs ( name ) { <nl> + this . module . exports . push ( { name : name , kind : kExternalGlobal , index : this . index } ) ; <nl> + return this ; <nl> + } <nl> + } <nl> + <nl> class WasmModuleBuilder { <nl> constructor ( ) { <nl> this . types = [ ] ; <nl> this . imports = [ ] ; <nl> + this . exports = [ ] ; <nl> this . globals = [ ] ; <nl> this . functions = [ ] ; <nl> - this . exports = [ ] ; <nl> this . table = [ ] ; <nl> this . segments = [ ] ; <nl> this . explicit = [ ] ; <nl> this . pad = null ; <nl> + this . num_imported_funcs = 0 ; <nl> + this . num_imported_globals = 0 ; <nl> return this ; <nl> } <nl> <nl> class WasmModuleBuilder { <nl> return this . types . length - 1 ; <nl> } <nl> <nl> - addGlobal ( local_type ) { <nl> - this . globals . push ( local_type ) ; <nl> - return this . globals . length - 1 ; <nl> + addGlobal ( local_type , mutable ) { <nl> + let glob = new WasmGlobalBuilder ( this , local_type , mutable ) ; <nl> + glob . index = this . globals . length + this . num_imported_globals ; <nl> + this . globals . push ( glob ) ; <nl> + return glob ; <nl> } <nl> <nl> addFunction ( name , type ) { <nl> let type_index = ( typeof type ) = = " number " ? type : this . addType ( type ) ; <nl> - let func = new WasmFunctionBuilder ( name , type_index ) ; <nl> - func . index = this . functions . length + this . imports . length ; <nl> + let func = new WasmFunctionBuilder ( this , name , type_index ) ; <nl> + func . index = this . functions . length + this . num_imported_funcs ; <nl> this . functions . push ( func ) ; <nl> return func ; <nl> } <nl> <nl> addImportWithModule ( module , name , type ) { <nl> let type_index = ( typeof type ) = = " number " ? type : this . addType ( type ) ; <nl> - this . imports . push ( { module : module , name : name , type : type_index } ) ; <nl> - return this . imports . length - 1 ; <nl> + this . imports . push ( { module : module , name : name , kind : kExternalFunction , <nl> + type : type_index } ) ; <nl> + return this . 
num_imported_funcs + + ; <nl> } <nl> <nl> addImport ( name , type ) { <nl> return this . addImportWithModule ( name , undefined , type ) ; <nl> } <nl> <nl> + addImportedGlobal ( module , name , type ) { <nl> + let o = { module : module , name : name , kind : kExternalGlobal , type : type , <nl> + mutable : false } <nl> + this . imports . push ( o ) ; <nl> + return this . num_imported_globals + + ; <nl> + } <nl> + <nl> addDataSegment ( addr , data , init ) { <nl> this . segments . push ( { addr : addr , data : data , init : init } ) ; <nl> return this . segments . length - 1 ; <nl> class WasmModuleBuilder { <nl> if ( wasm . types . length > 0 ) { <nl> if ( debug ) print ( " emitting types @ " + binary . length ) ; <nl> binary . emit_section ( kTypeSectionCode , section = > { <nl> - section . emit_varint ( wasm . types . length ) ; <nl> + section . emit_u32v ( wasm . types . length ) ; <nl> for ( let type of wasm . types ) { <nl> section . emit_u8 ( kWasmFunctionTypeForm ) ; <nl> - section . emit_varint ( type . params . length ) ; <nl> + section . emit_u32v ( type . params . length ) ; <nl> for ( let param of type . params ) { <nl> section . emit_u8 ( param ) ; <nl> } <nl> - section . emit_varint ( type . results . length ) ; <nl> + section . emit_u32v ( type . results . length ) ; <nl> for ( let result of type . results ) { <nl> section . emit_u8 ( result ) ; <nl> } <nl> class WasmModuleBuilder { <nl> if ( wasm . imports . length > 0 ) { <nl> if ( debug ) print ( " emitting imports @ " + binary . length ) ; <nl> binary . emit_section ( kImportSectionCode , section = > { <nl> - section . emit_varint ( wasm . imports . length ) ; <nl> + section . emit_u32v ( wasm . imports . length ) ; <nl> for ( let imp of wasm . imports ) { <nl> section . emit_string ( imp . module ) ; <nl> section . emit_string ( imp . name | | ' ' ) ; <nl> - section . emit_u8 ( kExternalFunction ) ; <nl> - section . emit_varint ( imp . type ) ; <nl> + section . emit_u8 ( imp . kind ) ; <nl> + if ( imp . kind = = kExternalFunction ) { <nl> + section . emit_u32v ( imp . type ) ; <nl> + } else if ( imp . kind = = kExternalGlobal ) { <nl> + section . emit_u32v ( imp . type ) ; <nl> + section . emit_u8 ( imp . mutable ) ; <nl> + } else { <nl> + throw new Error ( " unknown / unsupported import kind " + imp . kind ) ; <nl> + } <nl> } <nl> } ) ; <nl> } <nl> class WasmModuleBuilder { <nl> / / Add functions declarations <nl> let has_names = false ; <nl> let names = false ; <nl> - let exports = 0 ; <nl> if ( wasm . functions . length > 0 ) { <nl> if ( debug ) print ( " emitting function decls @ " + binary . length ) ; <nl> binary . emit_section ( kFunctionSectionCode , section = > { <nl> - section . emit_varint ( wasm . functions . length ) ; <nl> + section . emit_u32v ( wasm . functions . length ) ; <nl> for ( let func of wasm . functions ) { <nl> has_names = has_names | | ( func . name ! = undefined & & <nl> func . name . length > 0 ) ; <nl> - exports + = func . exports . length ; <nl> - section . emit_varint ( func . type_index ) ; <nl> + section . emit_u32v ( func . type_index ) ; <nl> } <nl> } ) ; <nl> } <nl> class WasmModuleBuilder { <nl> section . emit_u8 ( 1 ) ; / / one table entry <nl> section . emit_u8 ( kWasmAnyFunctionTypeForm ) ; <nl> section . emit_u8 ( 1 ) ; <nl> - section . emit_varint ( wasm . table . length ) ; <nl> - section . emit_varint ( wasm . table . length ) ; <nl> + section . emit_u32v ( wasm . table . length ) ; <nl> + section . emit_u32v ( wasm . table . 
length ) ; <nl> } ) ; <nl> } <nl> <nl> class WasmModuleBuilder { <nl> if ( debug ) print ( " emitting memory @ " + binary . length ) ; <nl> binary . emit_section ( kMemorySectionCode , section = > { <nl> section . emit_u8 ( 1 ) ; / / one memory entry <nl> - section . emit_varint ( kResizableMaximumFlag ) ; <nl> - section . emit_varint ( wasm . memory . min ) ; <nl> - section . emit_varint ( wasm . memory . max ) ; <nl> + section . emit_u32v ( kResizableMaximumFlag ) ; <nl> + section . emit_u32v ( wasm . memory . min ) ; <nl> + section . emit_u32v ( wasm . memory . max ) ; <nl> } ) ; <nl> } <nl> <nl> class WasmModuleBuilder { <nl> if ( wasm . globals . length > 0 ) { <nl> if ( debug ) print ( " emitting globals @ " + binary . length ) ; <nl> binary . emit_section ( kGlobalSectionCode , section = > { <nl> - section . emit_varint ( wasm . globals . length ) ; <nl> - for ( let global_type of wasm . globals ) { <nl> - section . emit_u8 ( global_type ) ; <nl> - section . emit_u8 ( true ) ; / / mutable <nl> - switch ( global_type ) { <nl> + section . emit_u32v ( wasm . globals . length ) ; <nl> + for ( let global of wasm . globals ) { <nl> + section . emit_u8 ( global . type ) ; <nl> + section . emit_u8 ( global . mutable ) ; <nl> + if ( ( typeof global . init_index ) = = " undefined " ) { <nl> + / / Emit a constant initializer . <nl> + switch ( global . type ) { <nl> case kAstI32 : <nl> section . emit_u8 ( kExprI32Const ) ; <nl> - section . emit_u8 ( 0 ) ; <nl> + section . emit_u32v ( global . init ) ; <nl> break ; <nl> case kAstI64 : <nl> section . emit_u8 ( kExprI64Const ) ; <nl> - section . emit_u8 ( 0 ) ; <nl> + section . emit_u8 ( global . init ) ; <nl> break ; <nl> case kAstF32 : <nl> section . emit_u8 ( kExprF32Const ) ; <nl> - section . emit_u32 ( 0 ) ; <nl> + f32_view [ 0 ] = global . init ; <nl> + section . emit_u8 ( byte_view [ 0 ] ) ; <nl> + section . emit_u8 ( byte_view [ 1 ] ) ; <nl> + section . emit_u8 ( byte_view [ 2 ] ) ; <nl> + section . emit_u8 ( byte_view [ 3 ] ) ; <nl> break ; <nl> case kAstF64 : <nl> - section . emit_u8 ( kExprI32Const ) ; <nl> - section . emit_u32 ( 0 ) ; <nl> - section . emit_u32 ( 0 ) ; <nl> + section . emit_u8 ( kExprF64Const ) ; <nl> + f64_view [ 0 ] = global . init ; <nl> + section . emit_u8 ( byte_view [ 0 ] ) ; <nl> + section . emit_u8 ( byte_view [ 1 ] ) ; <nl> + section . emit_u8 ( byte_view [ 2 ] ) ; <nl> + section . emit_u8 ( byte_view [ 3 ] ) ; <nl> + section . emit_u8 ( byte_view [ 4 ] ) ; <nl> + section . emit_u8 ( byte_view [ 5 ] ) ; <nl> + section . emit_u8 ( byte_view [ 6 ] ) ; <nl> + section . emit_u8 ( byte_view [ 7 ] ) ; <nl> break ; <nl> + } <nl> + } else { <nl> + / / Emit a global - index initializer . <nl> + section . emit_u8 ( kExprGetGlobal ) ; <nl> + section . emit_u32v ( global . init_index ) ; <nl> } <nl> section . emit_u8 ( kExprEnd ) ; / / end of init expression <nl> } <nl> class WasmModuleBuilder { <nl> <nl> / / Add export table . <nl> var mem_export = ( wasm . memory ! = undefined & & wasm . memory . exp ) ; <nl> - if ( exports > 0 | | mem_export ) { <nl> + var exports_count = wasm . exports . length + ( mem_export ? 1 : 0 ) ; <nl> + if ( exports_count > 0 ) { <nl> if ( debug ) print ( " emitting exports @ " + binary . length ) ; <nl> binary . emit_section ( kExportSectionCode , section = > { <nl> - section . emit_varint ( exports + ( mem_export ? 1 : 0 ) ) ; <nl> - for ( let func of wasm . functions ) { <nl> - for ( let exp of func . exports ) { <nl> - section . emit_string ( exp ) ; <nl> - section . 
emit_u8 ( kExternalFunction ) ; <nl> - section . emit_varint ( func . index ) ; <nl> - } <nl> + section . emit_u32v ( exports_count ) ; <nl> + for ( let exp of wasm . exports ) { <nl> + section . emit_string ( exp . name ) ; <nl> + section . emit_u8 ( exp . kind ) ; <nl> + section . emit_u32v ( exp . index ) ; <nl> } <nl> if ( mem_export ) { <nl> section . emit_string ( " memory " ) ; <nl> class WasmModuleBuilder { <nl> if ( wasm . start_index ! = undefined ) { <nl> if ( debug ) print ( " emitting start function @ " + binary . length ) ; <nl> binary . emit_section ( kStartSectionCode , section = > { <nl> - section . emit_varint ( wasm . start_index ) ; <nl> + section . emit_u32v ( wasm . start_index ) ; <nl> } ) ; <nl> } <nl> <nl> class WasmModuleBuilder { <nl> section . emit_u8 ( kExprI32Const ) ; <nl> section . emit_u8 ( 0 ) ; <nl> section . emit_u8 ( kExprEnd ) ; <nl> - section . emit_varint ( wasm . table . length ) ; <nl> + section . emit_u32v ( wasm . table . length ) ; <nl> for ( let index of wasm . table ) { <nl> - section . emit_varint ( index ) ; <nl> + section . emit_u32v ( index ) ; <nl> } <nl> } ) ; <nl> } <nl> class WasmModuleBuilder { <nl> / / emit function bodies <nl> if ( debug ) print ( " emitting code @ " + binary . length ) ; <nl> binary . emit_section ( kCodeSectionCode , section = > { <nl> - section . emit_varint ( wasm . functions . length ) ; <nl> + section . emit_u32v ( wasm . functions . length ) ; <nl> for ( let func of wasm . functions ) { <nl> / / Function body length will be patched later . <nl> let local_decls = [ ] ; <nl> class WasmModuleBuilder { <nl> } <nl> <nl> let header = new Binary ; <nl> - header . emit_varint ( local_decls . length ) ; <nl> + header . emit_u32v ( local_decls . length ) ; <nl> for ( let decl of local_decls ) { <nl> - header . emit_varint ( decl . count ) ; <nl> + header . emit_u32v ( decl . count ) ; <nl> header . emit_u8 ( decl . type ) ; <nl> } <nl> <nl> - section . emit_varint ( header . length + func . body . length ) ; <nl> + section . emit_u32v ( header . length + func . body . length ) ; <nl> section . emit_bytes ( header ) ; <nl> section . emit_bytes ( func . body ) ; <nl> } <nl> class WasmModuleBuilder { <nl> if ( wasm . segments . length > 0 ) { <nl> if ( debug ) print ( " emitting data segments @ " + binary . length ) ; <nl> binary . emit_section ( kDataSectionCode , section = > { <nl> - section . emit_varint ( wasm . segments . length ) ; <nl> + section . emit_u32v ( wasm . segments . length ) ; <nl> for ( let seg of wasm . segments ) { <nl> section . emit_u8 ( 0 ) ; / / linear memory index 0 <nl> section . emit_u8 ( kExprI32Const ) ; <nl> - section . emit_varint ( seg . addr ) ; <nl> + section . emit_u32v ( seg . addr ) ; <nl> section . emit_u8 ( kExprEnd ) ; <nl> - section . emit_varint ( seg . data . length ) ; <nl> + section . emit_u32v ( seg . data . length ) ; <nl> section . emit_bytes ( seg . data ) ; <nl> } <nl> } ) ; <nl> class WasmModuleBuilder { <nl> if ( debug ) print ( " emitting names @ " + binary . length ) ; <nl> binary . emit_section ( kUnknownSectionCode , section = > { <nl> section . emit_string ( " name " ) ; <nl> - section . emit_varint ( wasm . functions . length ) ; <nl> + section . emit_u32v ( wasm . functions . length ) ; <nl> for ( let func of wasm . functions ) { <nl> var name = func . name = = undefined ? " " : func . name ; <nl> section . emit_string ( name ) ; <nl> mmm a / test / unittests / wasm / ast - decoder - unittest . cc <nl> ppp b / test / unittests / wasm / ast - decoder - unittest . 
cc <nl> class TestModuleEnv : public ModuleEnv { <nl> module = & mod ; <nl> } <nl> byte AddGlobal ( LocalType type , bool mutability = true ) { <nl> - mod . globals . push_back ( { type , mutability , NO_INIT , 0 , false , false } ) ; <nl> + mod . globals . push_back ( { type , mutability , WasmInitExpr ( ) , 0 , false , false } ) ; <nl> CHECK ( mod . globals . size ( ) < = 127 ) ; <nl> return static_cast < byte > ( mod . globals . size ( ) - 1 ) ; <nl> } <nl>
[wasm] Refactor import handling for 0xC.
v8/v8
599f8a83420346d9cba5ff97bd2a7520468207b6
2016-10-06T12:30:50Z
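The `emit_varint` → `emit_u32v` rename in the diff above reflects that the builder writes WebAssembly's unsigned LEB128 varints. A minimal standalone sketch of that encoding, assuming the standard LEB128 scheme (the function name `encodeU32v` is illustrative, not part of the builder):

```javascript
// Unsigned LEB128: emit 7 payload bits per byte, low bits first;
// set the high bit (0x80) on every byte except the last.
function encodeU32v(val) {
  const bytes = [];
  do {
    let b = val & 0x7f;        // low 7 bits of the remaining value
    val = val >>> 7;           // unsigned shift, as in the builder
    if (val !== 0) b |= 0x80;  // continuation bit: more bytes follow
    bytes.push(b);
  } while (val !== 0);
  return bytes;
}

// Example: 300 = 0b10_0101100 encodes as two bytes.
console.log(encodeU32v(300)); // [0xac, 0x02] i.e. [172, 2]
```

The same shared-`ArrayBuffer` trick the diff adds (`f32_view`/`f64_view` over `byte_view`) is how the global initializers get their IEEE-754 bytes: write the float into the typed view, then emit the aliased bytes one by one.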
mmm a / docs / WindowsBuild . md <nl> ppp b / docs / WindowsBuild . md <nl> set llvm_bin_dir = " % swift_source_dir % / build / Ninja - RelWithDebInfoAssert / llvm - windo <nl> ` ` ` <nl> <nl> # # # 7 . Build Swift <nl> - - This must be done from within a developer command prompt and could take up to <nl> - two hours depending on your system . <nl> + - This must be done from within a developer command prompt and could take hours <nl> + depending on your system . <nl> - You may need to adjust the ` SWIFT_WINDOWS_LIB_DIRECTORY ` parameter depending on <nl> your target platform or Windows SDK version . <nl> ` ` ` cmd <nl> cmake - G " Visual Studio 15 " " % swift_source_dir % / swift " ^ <nl> ` ` ` <nl> <nl> # # # 8 . Build lldb <nl> - - This must be done from within a developer command prompt and could take up to <nl> - two hours depending on your system . <nl> + - This must be done from within a developer command prompt and could take hours <nl> + depending on your system . <nl> ` ` ` cmd <nl> mkdir " % swift_source_dir % / build / Ninja - RelWithDebInfoAssert / lldb - windows - amd64 " <nl> pushd " % swift_source_dir % / build / Ninja - RelWithDebInfoAssert / lldb - windows - amd64 " <nl>
Merge remote-tracking branch 'origin/master' into master-next
apple/swift
5f01a5c914c80da5f67d79d75c903fb5a5687c20
2018-04-27T19:29:10Z
mmm a / version . txt <nl> ppp b / version . txt <nl> @ @ - 1 + 1 @ @ <nl> - 3 . 1 . 1 <nl> + 3 . 1 . 2 - pre - <nl>
post 3.1.1
mongodb/mongo
8edf5dc38a704eb85dea8e6d64db1362873097b2
2015-04-06T21:51:21Z
mmm a / src / library_idbfs . js <nl> ppp b / src / library_idbfs . js <nl> mergeInto ( LibraryManager . library , { <nl> } catch ( e ) { <nl> return callback ( e ) ; <nl> } <nl> + if ( ! req ) { <nl> + return callback ( " Unable to connect to IndexedDB " ) ; <nl> + } <nl> req . onupgradeneeded = function ( e ) { <nl> var db = e . target . result ; <nl> var transaction = e . target . transaction ; <nl>
Handle missing connection to IndexedDB ()
emscripten-core/emscripten
0941e0187b4ae203a7d93d45b6aaf58f737b9614
2016-05-31T19:26:30Z
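The one-line emscripten fix above guards against `indexedDB.open()` yielding no request object (e.g. when IndexedDB is unavailable in the host environment). A minimal sketch of the same defensive pattern outside the library; the helper name `openDB` and its callback shape are illustrative assumptions, not emscripten API:

```javascript
// Open a database defensively: open() can throw in some environments,
// and (as the patch handles) the returned request can be falsy.
function openDB(name, version, callback) {
  let req;
  try {
    req = indexedDB.open(name, version);
  } catch (e) {
    return callback(e);
  }
  if (!req) {
    return callback("Unable to connect to IndexedDB");
  }
  req.onsuccess = (e) => callback(null, e.target.result);
  req.onerror = () => callback(req.error);
}
```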
mmm a / lib / Sema / MiscDiagnostics . cpp <nl> ppp b / lib / Sema / MiscDiagnostics . cpp <nl> bool AvailabilityWalker : : diagnoseMemoryLayoutMigration ( const ValueDecl * D , <nl> . Case ( " sizeof " , { " size " , false } ) <nl> . Case ( " alignof " , { " alignment " , false } ) <nl> . Case ( " strideof " , { " stride " , false } ) <nl> - . Case ( " sizeofValue " , { " size " , true } ) <nl> - . Case ( " alignofValue " , { " alignment " , true } ) <nl> - . Case ( " strideofValue " , { " stride " , true } ) <nl> . Default ( { } ) ; <nl> <nl> if ( KindValue . first . empty ( ) ) <nl> bool AvailabilityWalker : : diagnoseMemoryLayoutMigration ( const ValueDecl * D , <nl> <nl> auto subject = args - > getSubExpr ( ) ; <nl> if ( ! isValue ) { <nl> - / / sizeof ( x . dynamicType ) is equivalent to sizeofValue ( x ) <nl> + / / sizeof ( type ( of : x ) ) is equivalent to sizeofValue ( x ) <nl> if ( auto DTE = dyn_cast < DynamicTypeExpr > ( subject ) ) { <nl> subject = DTE - > getBase ( ) ; <nl> isValue = true ; <nl> bool AvailabilityWalker : : diagnoseMemoryLayoutMigration ( const ValueDecl * D , <nl> if ( isValue ) { <nl> auto valueType = subject - > getType ( ) - > getRValueType ( ) ; <nl> if ( ! valueType | | valueType - > is < ErrorType > ( ) ) { <nl> - / / If we dont have good argument , We cannot emit fix - it . <nl> - return true ; <nl> + / / If we don ' t have a suitable argument , we cannot emit a fix - it . <nl> + return true ; <nl> } <nl> <nl> / / NOTE : We are destructively replacing the source text here . <nl> - / / For instance , ` sizeof ( x . doSomethig ( ) ) ` = > ` MemoryLayout < T > . size ` where <nl> - / / T is return type of ` doSomething ( ) ` . If that function have any <nl> - / / side effects , it will break the source . <nl> + / / ` sizeof ( type ( of : doSomething ( ) ) ) ` = > ` MemoryLayout < T > . size ` , where T is <nl> + / / the return type of ` doSomething ( ) ` . If that function has any side <nl> + / / effects , this replacement will break the source . <nl> diag . fixItReplace ( call - > getSourceRange ( ) , <nl> ( Prefix + valueType - > getString ( ) + Suffix + Kind ) . str ( ) ) ; <nl> } else { <nl> mmm a / stdlib / private / SwiftPrivateLibcExtras / Subprocess . swift <nl> ppp b / stdlib / private / SwiftPrivateLibcExtras / Subprocess . swift <nl> public func spawnChild ( _ args : [ String ] ) <nl> <nl> / / If execve ( ) encountered an error , we write the errno encountered to the <nl> / / parent write pipe . <nl> - let errnoSize = MemoryLayout . _ofInstance ( errno ) . size <nl> + let errnoSize = MemoryLayout . size ( ofValue : errno ) <nl> var execveErrno = errno <nl> let writtenBytes = withUnsafePointer ( to : & execveErrno ) { <nl> write ( childToParentPipe . writeFD , UnsafePointer ( $ 0 ) , errnoSize ) <nl> mmm a / stdlib / public / SDK / Foundation / Data . swift <nl> ppp b / stdlib / public / SDK / Foundation / Data . swift <nl> public struct Data : ReferenceConvertible , Equatable , Hashable , RandomAccessColl <nl> public mutating func next ( ) - > UInt8 ? { <nl> guard _idx < _endIdx else { return nil } <nl> defer { _idx + = 1 } <nl> - let bufferSize = MemoryLayout . _ofInstance ( _buffer ) . size <nl> + let bufferSize = MemoryLayout . size ( ofValue : _buffer ) <nl> return withUnsafeMutablePointer ( to : & _buffer ) { ptr_ in <nl> let ptr = UnsafeMutableRawPointer ( ptr_ ) . assumingMemoryBound ( to : UInt8 . self ) <nl> let bufferIdx = _idx % bufferSize <nl> mmm a / stdlib / public / core / BridgeObjectiveC . 
swift <nl> ppp b / stdlib / public / core / BridgeObjectiveC . swift <nl> internal struct _CocoaFastEnumerationStackBuf { <nl> _item14 = _item0 <nl> _item15 = _item0 <nl> <nl> - _sanityCheck ( MemoryLayout . _ofInstance ( self ) . size > = <nl> + _sanityCheck ( MemoryLayout . size ( ofValue : self ) > = <nl> MemoryLayout < Optional < UnsafeRawPointer > > . size * count ) <nl> } <nl> } <nl> mmm a / stdlib / public / core / Builtin . swift <nl> ppp b / stdlib / public / core / Builtin . swift <nl> public func sizeof < T > ( _ : T . Type ) - > Int { <nl> Builtin . unreachable ( ) <nl> } <nl> <nl> - @ available ( * , unavailable , message : " use MemoryLayout < T > . size instead . " ) <nl> + @ available ( * , unavailable , renamed : " MemoryLayout . size ( ofValue : ) " ) <nl> public func sizeofValue < T > ( _ : T ) - > Int { <nl> Builtin . unreachable ( ) <nl> } <nl> public func alignof < T > ( _ : T . Type ) - > Int { <nl> Builtin . unreachable ( ) <nl> } <nl> <nl> - @ available ( * , unavailable , message : " use MemoryLayout < T > . alignment instead . " ) <nl> + @ available ( * , unavailable , renamed : " MemoryLayout . alignment ( ofValue : ) " ) <nl> public func alignofValue < T > ( _ : T ) - > Int { <nl> Builtin . unreachable ( ) <nl> } <nl> public func strideof < T > ( _ : T . Type ) - > Int { <nl> Builtin . unreachable ( ) <nl> } <nl> <nl> - @ available ( * , unavailable , message : " use MemoryLayout < T > . stride instead . " ) <nl> + @ available ( * , unavailable , renamed : " MemoryLayout . stride ( ofValue : ) " ) <nl> public func strideofValue < T > ( _ : T ) - > Int { <nl> Builtin . unreachable ( ) <nl> } <nl> mmm a / stdlib / public / core / Character . swift <nl> ppp b / stdlib / public / core / Character . swift <nl> public struct Character : <nl> let ( count , initialUTF8 ) = s . _core . _encodeSomeUTF8 ( from : 0 ) <nl> / / Notice that the result of sizeof ( ) is a small non - zero number and can ' t <nl> / / overflow when multiplied by 8 . <nl> - let bits = MemoryLayout . _ofInstance ( initialUTF8 ) . size & * 8 & - 1 <nl> + let bits = MemoryLayout . size ( ofValue : initialUTF8 ) & * 8 & - 1 <nl> if _fastPath ( <nl> count = = s . _core . count & & ( initialUTF8 & ( 1 < < numericCast ( bits ) ) ) ! = 0 ) { <nl> _representation = . small ( Builtin . trunc_Int64_Int63 ( initialUTF8 . _value ) ) <nl> mmm a / stdlib / public / core / MemoryLayout . swift <nl> ppp b / stdlib / public / core / MemoryLayout . swift <nl> <nl> <nl> / / / The memory layout of a type , describing its size , stride , and alignment . <nl> public enum MemoryLayout < T > { <nl> - <nl> - / / / The contiguous memory footprint of the type . <nl> + / / / The contiguous memory footprint of ` T ` . <nl> / / / <nl> - / / / The ` size ` property for a type ` T ` does not include any <nl> - / / / dynamically - allocated or " remote " storage . In particular , <nl> - / / / ` MemoryLayout < T > . size ` , when ` T ` is a class type , is the same regardless <nl> - / / / of how many stored properties ` T ` has . <nl> + / / / Does not include any dynamically - allocated or " remote " storage . In <nl> + / / / particular , ` MemoryLayout < T > . size ` , when ` T ` is a class type , is the same <nl> + / / / regardless of how many stored properties ` T ` has . <nl> @ _transparent <nl> public static var size : Int { <nl> return Int ( Builtin . sizeof ( T . self ) ) <nl> } <nl> <nl> - / / / The number of bytes from the start of one instance to the start of the <nl> - / / / next , when stored in a contiguous array . 
<nl> + / / / The number of bytes from the start of one instance of ` T ` to the start of <nl> + / / / the next in an ` Array < T > ` . <nl> / / / <nl> / / / This is the same as the number of bytes moved when an ` UnsafePointer < T > ` <nl> - / / / is incremented . The type may have a lower minimal alignment that trades <nl> - / / / runtime performance for space efficiency . The result is always positive . <nl> + / / / is incremented . ` T ` may have a lower minimal alignment that trades runtime <nl> + / / / performance for space efficiency . The result is always positive . <nl> @ _transparent <nl> public static var stride : Int { <nl> return Int ( Builtin . strideof_nonzero ( T . self ) ) <nl> } <nl> <nl> - / / / The default memory alignment of the type . <nl> + / / / The default memory alignment of ` T ` . <nl> @ _transparent <nl> public static var alignment : Int { <nl> return Int ( Builtin . alignof ( T . self ) ) <nl> public enum MemoryLayout < T > { <nl> } <nl> <nl> extension MemoryLayout { <nl> + / / / Returns the contiguous memory footprint of ` T ` . <nl> + / / / <nl> + / / / Does not include any dynamically - allocated or " remote " storage . In <nl> + / / / particular , ` MemoryLayout . size ( ofValue : x ) ` , when ` x ` is a class instance , <nl> + / / / is the same regardless of how many stored properties ` T ` has . <nl> + @ _transparent <nl> + public static func size ( ofValue _ : T ) - > Int { <nl> + return MemoryLayout . size <nl> + } <nl> + <nl> + / / / Returns the number of bytes from the start of one instance of ` T ` to the <nl> + / / / start of the next in an ` Array < T > ` . <nl> + / / / <nl> + / / / This is the same as the number of bytes moved when an ` UnsafePointer < T > ` <nl> + / / / is incremented . ` T ` may have a lower minimal alignment that trades runtime <nl> + / / / performance for space efficiency . The result is always positive . <nl> + @ _transparent <nl> + public static func stride ( ofValue _ : T ) - > Int { <nl> + return MemoryLayout . stride <nl> + } <nl> + <nl> + / / / Returns the default memory alignment of ` T ` . <nl> @ _transparent <nl> - public / / @ testable <nl> - static func _ofInstance ( _ : @ autoclosure ( ) - > T ) - > MemoryLayout < T > . Type { <nl> - return MemoryLayout < T > . self <nl> + public static func alignment ( ofValue _ : T ) - > Int { <nl> + return MemoryLayout . alignment <nl> } <nl> } <nl> mmm a / stdlib / public / core / Unicode . swift <nl> ppp b / stdlib / public / core / Unicode . swift <nl> internal func _transcodeSomeUTF16AsUTF8 < Input > ( <nl> nextIndex = input . index ( nextIndex , offsetBy : utf16Length ) <nl> } <nl> / / FIXME : Annoying check , courtesy of < rdar : / / problem / 16740169 > <nl> - if utf8Count < MemoryLayout . _ofInstance ( result ) . size { <nl> + if utf8Count < MemoryLayout . size ( ofValue : result ) { <nl> result | = ~ 0 < < numericCast ( utf8Count * 8 ) <nl> } <nl> return ( nextIndex , result ) <nl> mmm a / stdlib / public / core / VarArgs . swift <nl> ppp b / stdlib / public / core / VarArgs . swift <nl> extension Int64 : CVarArg , _CVarArgAligned { <nl> / / / the value returned by ` _cVarArgEncoding ` . <nl> public var _cVarArgAlignment : Int { <nl> / / FIXME : alignof differs from the ABI alignment on some architectures <nl> - return MemoryLayout . _ofInstance ( self ) . alignment <nl> + return MemoryLayout . alignment ( ofValue : self ) <nl> } <nl> } <nl> <nl> extension UInt64 : CVarArg , _CVarArgAligned { <nl> / / / the value returned by ` _cVarArgEncoding ` . 
<nl> public var _cVarArgAlignment : Int { <nl> / / FIXME : alignof differs from the ABI alignment on some architectures <nl> - return MemoryLayout . _ofInstance ( self ) . alignment <nl> + return MemoryLayout . alignment ( ofValue : self ) <nl> } <nl> } <nl> <nl> extension Float : _CVarArgPassedAsDouble , _CVarArgAligned { <nl> / / / the value returned by ` _cVarArgEncoding ` . <nl> public var _cVarArgAlignment : Int { <nl> / / FIXME : alignof differs from the ABI alignment on some architectures <nl> - return MemoryLayout . _ofInstance ( Double ( self ) ) . alignment <nl> + return MemoryLayout . alignment ( ofValue : Double ( self ) ) <nl> } <nl> } <nl> <nl> extension Double : _CVarArgPassedAsDouble , _CVarArgAligned { <nl> / / / the value returned by ` _cVarArgEncoding ` . <nl> public var _cVarArgAlignment : Int { <nl> / / FIXME : alignof differs from the ABI alignment on some architectures <nl> - return MemoryLayout . _ofInstance ( self ) . alignment <nl> + return MemoryLayout . alignment ( ofValue : self ) <nl> } <nl> } <nl> <nl> mmm a / test / 1_stdlib / Character . swift <nl> ppp b / test / 1_stdlib / Character . swift <nl> CharacterTests . test ( " sizeof " ) { <nl> expectTrue ( size1 = = 8 | | size1 = = 9 ) <nl> <nl> var a : Character = " a " <nl> - let size2 = MemoryLayout . _ofInstance ( a ) . size <nl> + let size2 = MemoryLayout . size ( ofValue : a ) <nl> expectTrue ( size2 = = 8 | | size2 = = 9 ) <nl> <nl> expectEqual ( size1 , size2 ) <nl> mmm a / test / 1_stdlib / Renames . swift <nl> ppp b / test / 1_stdlib / Renames . swift <nl> func _MemoryLayout < T > ( t : T ) { <nl> _ = sizeof ( T . self ) / / expected - error { { ' sizeof ' is unavailable : use MemoryLayout < T > . size instead . } } { { 7 - 14 = MemoryLayout < } } { { 15 - 21 = > . size } } { { none } } <nl> _ = alignof ( T . self ) / / expected - error { { ' alignof ' is unavailable : use MemoryLayout < T > . alignment instead . } } { { 7 - 15 = MemoryLayout < } } { { 16 - 22 = > . alignment } } { { none } } <nl> _ = strideof ( T . self ) / / expected - error { { ' strideof ' is unavailable : use MemoryLayout < T > . stride instead . } } { { 7 - 16 = MemoryLayout < } } { { 17 - 23 = > . stride } } { { none } } <nl> - _ = sizeofValue ( t ) / / expected - error { { ' sizeofValue ' is unavailable : use MemoryLayout < T > . size instead . } } { { 7 - 21 = MemoryLayout < T > . size } } { { none } } <nl> - _ = alignofValue ( t ) / / expected - error { { ' alignofValue ' is unavailable : use MemoryLayout < T > . alignment instead . } } { { 7 - 22 = MemoryLayout < T > . alignment } } { { none } } <nl> - _ = strideofValue ( t ) / / expected - error { { ' strideofValue ' is unavailable : use MemoryLayout < T > . stride instead . } } { { 7 - 23 = MemoryLayout < T > . stride } } { { none } } <nl> + _ = sizeofValue ( t ) / / expected - error { { ' sizeofValue ' has been replaced by ' MemoryLayout . size ( ofValue : ) ' } } { { 7 - 18 = MemoryLayout . size } } { { 19 - 19 = ofValue : } } { { none } } <nl> + _ = alignofValue ( t ) / / expected - error { { ' alignofValue ' has been replaced by ' MemoryLayout . alignment ( ofValue : ) ' } } { { 7 - 19 = MemoryLayout . alignment } } { { 20 - 20 = ofValue : } } { { none } } <nl> + _ = strideofValue ( t ) / / expected - error { { ' strideofValue ' has been replaced by ' MemoryLayout . stride ( ofValue : ) ' } } { { 7 - 20 = MemoryLayout . stride } } { { 21 - 21 = ofValue : } } { { none } } <nl> } <nl> <nl> func _Mirror ( ) { <nl> mmm a / test / Interpreter / SDK / c_pointers . 
swift <nl> ppp b / test / Interpreter / SDK / c_pointers . swift <nl> puts ( s ) <nl> / / <nl> <nl> var unsorted = [ 3 , 14 , 15 , 9 , 2 , 6 , 5 ] <nl> - qsort ( & unsorted , unsorted . count , MemoryLayout . _ofInstance ( unsorted [ 0 ] ) . size ) { a , b in <nl> + qsort ( & unsorted , unsorted . count , MemoryLayout . size ( ofValue : unsorted [ 0 ] ) ) { a , b in <nl> return Int32 ( a ! . load ( as : Int . self ) - b ! . load ( as : Int . self ) ) <nl> } <nl> / / CHECK - NEXT : [ 2 , 3 , 5 , 6 , 9 , 14 , 15 ] <nl> mmm a / test / Interpreter / enum . swift <nl> ppp b / test / Interpreter / enum . swift <nl> struct OptionalTuple < T > { <nl> } <nl> } <nl> func test_optional_generic_tuple < T > ( _ a : OptionalTuple < T > ) - > T { <nl> - print ( " optional pair is same size as pair : \ ( MemoryLayout . _ofInstance ( a ) . size = = MemoryLayout < T > . size * 2 ) " ) <nl> + print ( " optional pair is same size as pair : \ ( MemoryLayout . size ( ofValue : a ) = = MemoryLayout < T > . size * 2 ) " ) <nl> return a . value ! . 0 <nl> } <nl> print ( " Int result : \ ( test_optional_generic_tuple ( OptionalTuple < Int > ( ( 5 , 6 ) ) ) ) " ) <nl> mmm a / test / expr / expressions . swift <nl> ppp b / test / expr / expressions . swift <nl> func se0101 < P : Pse0101 > ( x : Cse0101 < P > ) { <nl> _ = sizeof ( Cse0101 < P > . self ) / / expected - error { { ' sizeof ' is unavailable : use MemoryLayout < T > . size instead . } } { { 7 - 14 = MemoryLayout < } } { { 24 - 30 = > . size } } { { none } } <nl> _ = alignof ( Cse0101 < P > . T . self ) / / expected - error { { ' alignof ' is unavailable : use MemoryLayout < T > . alignment instead . } } { { 7 - 15 = MemoryLayout < } } { { 27 - 33 = > . alignment } } { { none } } <nl> _ = strideof ( P . Type . self ) / / expected - error { { ' strideof ' is unavailable : use MemoryLayout < T > . stride instead . } } { { 7 - 16 = MemoryLayout < } } { { 22 - 28 = > . stride } } { { none } } <nl> - _ = sizeof ( type ( of : x ) ) / / expected - error { { ' sizeof ' is unavailable : use MemoryLayout < T > . size instead . } } { { 7 - 26 = MemoryLayout < Cse0101 < P > > . size } } { { none } } / <nl> - <nl> - _ = sizeofValue ( x ) / / expected - error { { ' sizeofValue ' is unavailable : use MemoryLayout < T > . size instead . } } { { 7 - 21 = MemoryLayout < Cse0101 < P > > . size } } { { none } } <nl> - _ = alignofValue ( x . val ) / / expected - error { { ' alignofValue ' is unavailable : use MemoryLayout < T > . alignment instead . } } { { 7 - 26 = MemoryLayout < P > . alignment } } { { none } } <nl> - _ = strideofValue ( x . val . getIt ( ) ) / / expected - error { { ' strideofValue ' is unavailable : use MemoryLayout < T > . stride instead . } } { { 7 - 35 = MemoryLayout < P . Value > . stride } } { { none } } <nl> + _ = sizeof ( type ( of : x ) ) / / expected - error { { ' sizeof ' is unavailable : use MemoryLayout < T > . size instead . } } { { 7 - 26 = MemoryLayout < Cse0101 < P > > . size } } { { none } } <nl> } <nl> mmm a / validation - test / stdlib / Arrays . swift . gyb <nl> ppp b / validation - test / stdlib / Arrays . swift . gyb <nl> var ArrayTestSuite = TestSuite ( " Array " ) <nl> ArrayTestSuite . test ( " sizeof " ) { <nl> var a = [ 10 , 20 , 30 ] <nl> # if arch ( i386 ) | | arch ( arm ) <nl> - expectEqual ( 4 , MemoryLayout . _ofInstance ( a ) . size ) <nl> + expectEqual ( 4 , MemoryLayout . size ( ofValue : a ) ) <nl> # else <nl> - expectEqual ( 8 , MemoryLayout . _ofInstance ( a ) . size ) <nl> + expectEqual ( 8 , MemoryLayout . 
size ( ofValue : a ) ) <nl> # endif <nl> } <nl> <nl> mmm a / validation - test / stdlib / Dictionary . swift <nl> ppp b / validation - test / stdlib / Dictionary . swift <nl> DictionaryTestSuite . test ( " AssociatedTypes " ) { <nl> DictionaryTestSuite . test ( " sizeof " ) { <nl> var dict = [ 1 : " meow " , 2 : " meow " ] <nl> # if arch ( i386 ) | | arch ( arm ) <nl> - expectEqual ( 4 , MemoryLayout . _ofInstance ( dict ) . size ) <nl> + expectEqual ( 4 , MemoryLayout . size ( ofValue : dict ) ) <nl> # else <nl> - expectEqual ( 8 , MemoryLayout . _ofInstance ( dict ) . size ) <nl> + expectEqual ( 8 , MemoryLayout . size ( ofValue : dict ) ) <nl> # endif <nl> } <nl> <nl> mmm a / validation - test / stdlib / NSNumberBridging . swift . gyb <nl> ppp b / validation - test / stdlib / NSNumberBridging . swift . gyb <nl> extension $ { Self } { <nl> func toNSNumberByteArray ( ) - > [ UInt8 ] { <nl> var v = self . bitPattern <nl> var result : [ UInt8 ] = [ ] <nl> - for _ in 0 . . < MemoryLayout . _ofInstance ( v ) . size { <nl> + for _ in 0 . . < MemoryLayout . size ( ofValue : v ) { <nl> result . append ( UInt8 ( v & 0xff ) ) <nl> v = v > > 8 <nl> } <nl> mmm a / validation - test / stdlib / OpenCLSDKOverlay . swift <nl> ppp b / validation - test / stdlib / OpenCLSDKOverlay . swift <nl> tests . test ( " clSetKernelArgsListAPPLE " ) { <nl> kernel ! , 3 , <nl> 0 , MemoryLayout < cl_mem > . size , inputPtr , <nl> 1 , MemoryLayout < cl_mem > . size , outputPtr , <nl> - 2 , MemoryLayout . _ofInstance ( count ) . size , countPtr ) <nl> + 2 , MemoryLayout . size ( ofValue : count ) , countPtr ) <nl> } <nl> } <nl> } <nl> tests . test ( " clSetKernelArgsListAPPLE " ) { <nl> <nl> / / Get the maximum work group size for executing the kernel on the device <nl> / / <nl> - err = clGetKernelWorkGroupInfo ( kernel , device_id , cl_kernel_work_group_info ( CL_KERNEL_WORK_GROUP_SIZE ) , MemoryLayout . _ofInstance ( local ) . size , & local , nil ) <nl> + err = clGetKernelWorkGroupInfo ( kernel , device_id , cl_kernel_work_group_info ( CL_KERNEL_WORK_GROUP_SIZE ) , MemoryLayout . size ( ofValue : local ) , & local , nil ) <nl> if ( err ! = CL_SUCCESS ) <nl> { <nl> print ( " Error : Failed to retrieve kernel work group info ! \ ( err ) " ) <nl> mmm a / validation - test / stdlib / Set . swift <nl> ppp b / validation - test / stdlib / Set . swift <nl> SetTestSuite . test ( " AssociatedTypes " ) { <nl> SetTestSuite . test ( " sizeof " ) { <nl> var s = Set ( [ " Hello " , " world " ] ) <nl> # if arch ( i386 ) | | arch ( arm ) <nl> - expectEqual ( 4 , MemoryLayout . _ofInstance ( s ) . size ) <nl> + expectEqual ( 4 , MemoryLayout . size ( ofValue : s ) ) <nl> # else <nl> - expectEqual ( 8 , MemoryLayout . _ofInstance ( s ) . size ) <nl> + expectEqual ( 8 , MemoryLayout . size ( ofValue : s ) ) <nl> # endif <nl> } <nl> <nl>
Merge pull request from xwu/memory-layout-of-value
apple/swift
e6dec58cc5e08a59028310aac14d1e8fbc46c2d4
2016-08-12T18:09:17Z
new file mode 100755 <nl> index 00000000000 . . a2cdd53896b <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / append_prepend . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + <nl> + op = memcached_workload_common . option_parser_for_memcache ( ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_memcache_connection ( opts ) as mc : <nl> + <nl> + print " Testing append " <nl> + if mc . set ( " a " , " aaa " ) = = 0 : <nl> + raise ValueError , " Set failed " <nl> + mc . append ( " a " , " bbb " ) <nl> + if mc . get ( " a " ) ! = " aaabbb " : <nl> + raise ValueError ( " Append failed , expected % r , got % r " , " aaabbb " , mc . get ( " a " ) ) <nl> + <nl> + print " Testing prepend " <nl> + if mc . set ( " a " , " aaa " ) = = 0 : <nl> + raise ValueError , " Set failed " <nl> + mc . prepend ( " a " , " bbb " ) <nl> + if mc . get ( " a " ) ! = " bbbaaa " : <nl> + raise ValueError ( " Append failed , expected % r , got % r " , " bbbaaa " , mc . get ( " a " ) ) <nl> + <nl> + print " Done " <nl> new file mode 100755 <nl> index 00000000000 . . 21ef14b35ac <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / append_stress . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import sys , socket , random , time , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + from vcoptparse import * <nl> + <nl> + # A hacky function that reads a response from a socket of an expected size . <nl> + def read_response_of_expected_size ( s , n ) : <nl> + data = " " <nl> + while ( len ( data ) < n ) : <nl> + data + = s . recv ( n - len ( data ) ) <nl> + # Stop if we get a shorter than expected response . <nl> + if ( data . count ( " \ r \ n " ) > = 3 ) : <nl> + return data <nl> + return data <nl> + <nl> + op = memcached_workload_common . option_parser_for_socket ( ) <nl> + op [ " n_appends " ] = IntFlag ( " - - num - appends " , 20000 ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_socket_connection ( opts ) as s : <nl> + <nl> + def send ( x ) : <nl> + # print str <nl> + s . sendall ( x ) <nl> + <nl> + key = ' fizz ' <nl> + val_chunks = [ ' buzzBUZZZ ' , ' baazBAAZ ' , ' bozoBOZO ' ] <nl> + <nl> + send ( " set % s 0 0 % d noreply \ r \ n % s \ r \ n " % ( key , len ( val_chunks [ 0 ] ) , val_chunks [ 0 ] ) ) <nl> + <nl> + # All commands have noreply except the last . <nl> + for i in xrange ( 1 , opts [ " n_appends " ] ) : <nl> + time . sleep ( . 001 ) <nl> + send ( " append % s 0 0 % d % s \ r \ n % s \ r \ n " % ( key , len ( val_chunks [ i % 3 ] ) , " " if i = = opts [ " n_appends " ] - 1 else " noreply " , val_chunks [ i % 3 ] ) ) <nl> + <nl> + # Read the reply from the last command . <nl> + expected_stored = " STORED \ r \ n " <nl> + stored_reply = read_response_of_expected_size ( s , len ( expected_stored ) ) <nl> + if ( expected_stored ! = stored_reply ) : <nl> + raise ValueError ( " Expecting STORED reply . " ) <nl> + <nl> + val = ' ' . 
join ( [ val_chunks [ i % 3 ] for i in xrange ( opts [ " n_appends " ] ) ] ) <nl> + <nl> + send ( " get % s \ r \ n " % key ) <nl> + expected_res = " VALUE % s 0 % d \ r \ n % s \ r \ nEND \ r \ n " % ( key , len ( val ) , val ) <nl> + actual_val = read_response_of_expected_size ( s , len ( expected_res ) ) <nl> + if ( expected_res ! = actual_val ) : <nl> + print " Expected val : % s " % expected_res <nl> + print " Incorrect val ( len = % d ) : % s " % ( len ( actual_val ) , actual_val ) <nl> + raise ValueError ( " Incorrect value . " ) <nl> new file mode 100755 <nl> index 00000000000 . . 607c9c702ef <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / big_values . py <nl> <nl> + # ! / usr / bin / env python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + <nl> + def readline ( s ) : <nl> + buf = " " <nl> + while not buf . endswith ( " \ r \ n " ) : <nl> + buf + = s . recv ( 1 ) <nl> + return buf <nl> + <nl> + def abbreviate ( s ) : <nl> + if len ( s ) < 50 : return repr ( s ) <nl> + else : return repr ( s [ : 25 ] ) + " . . . " + repr ( s [ - 25 : ] ) <nl> + <nl> + def expect ( s , string ) : <nl> + msg = " " <nl> + while len ( msg ) < len ( string ) : <nl> + msg + = s . recv ( len ( string ) - len ( msg ) ) <nl> + if msg ! = string : <nl> + raise ValueError ( " Didn ' t get what we expected : expected % s , got % s " % ( abbreviate ( string ) , abbreviate ( msg ) ) ) <nl> + <nl> + def expect_get_response ( s , value ) : <nl> + expect ( s , " VALUE x 0 % d \ r \ n " % len ( value ) ) <nl> + print " value decl . . . " , <nl> + expect ( s , value + " \ r \ n " ) <nl> + print " content . . . " , <nl> + expect ( s , " END \ r \ n " ) <nl> + print " ok . " <nl> + <nl> + def test_sizes_one_way ( ap , x , y , s ) : <nl> + print " Sending a % d - byte value . . . " % x , <nl> + s . send ( ( " set x 0 0 % d \ r \ n " % x ) + ( " a " * x ) + " \ r \ n " ) <nl> + <nl> + max_legal_value_size = 1024 * 1024 <nl> + <nl> + if x < = max_legal_value_size : <nl> + expect ( s , " STORED \ r \ n " ) <nl> + print " getting . . . " , <nl> + s . send ( " get x \ r \ n " ) <nl> + print " sent get . . . " , <nl> + expect_get_response ( s , " a " * x ) <nl> + <nl> + print " Now % sing upto length % d . . . " % ( ap , y ) , <nl> + s . send ( ( " % s x 0 0 % d \ r \ n " % ( ap , y - x ) ) + ( " b " * ( y - x ) ) + " \ r \ n " ) <nl> + <nl> + if y < = max_legal_value_size : <nl> + expect ( s , " STORED \ r \ n " ) <nl> + print " getting . . . " , <nl> + s . send ( " get x \ r \ n " ) <nl> + print " sent get . . . " , <nl> + expect_get_response ( s , ( " a " * x + " b " * ( y - x ) if ap = = " append " else " b " * ( y - x ) + " a " * x ) ) <nl> + else : <nl> + expect ( s , " SERVER_ERROR object too large for cache \ r \ n " ) <nl> + print " too big . . . ok . " <nl> + else : <nl> + expect ( s , " SERVER_ERROR object too large for cache \ r \ n " ) <nl> + print " too big . . . ok . " <nl> + <nl> + def test_sizes_another_way ( ap , x , y , s ) : <nl> + test_sizes_one_way ( ap , x , y , s ) <nl> + <nl> + def test_sizes ( x , y , s ) : <nl> + test_sizes_one_way ( " append " , x , y , s ) <nl> + test_sizes_another_way ( " prepend " , x , y , s ) <nl> + <nl> + op = memcached_workload_common . option_parser_for_socket ( ) <nl> + opts = op . parse ( sys . 
argv ) <nl> + <nl> + with memcached_workload_common . make_socket_connection ( opts ) as s : <nl> + <nl> + # 250 - the maximum small value <nl> + # 251 - the minimum large buf ( in a leaf node ) <nl> + # 4080 - the size of a large buf block ( the largest large buf that uses a single block ) <nl> + # 8160 - twice the size of a large buf block <nl> + # 65536 - 16 - bit rollover <nl> + # 73710 - netrecord causes some kind of weird failure at this point sometimes <nl> + # ( 234 / 4 ) * 4080 - the biggest large value that uses one level <nl> + # 10 * 1048576 - the maximum legal value size <nl> + <nl> + sizes = [ 250 , 4079 , 4080 , 4081 , 8160 , 8161 , ( 232 / 4 ) * 4080 - 1 , ( 232 / 4 ) * 4080 , ( 232 / 4 ) * 4080 + 1 , 1048576 , 10 * 1048577 ] <nl> + <nl> + for x in sizes : <nl> + for y in sizes : <nl> + if x < y : <nl> + test_sizes ( x , y , s ) <nl> + <nl> + s . send ( " quit \ r \ n " ) <nl> new file mode 100755 <nl> index 00000000000 . . c7fc8532a10 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / cas . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + from random import shuffle <nl> + import sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + from vcoptparse import * <nl> + <nl> + op = memcached_workload_common . option_parser_for_memcache ( ) <nl> + del op [ " mclib " ] # No longer optional ; we only work with memcache . <nl> + op [ " num_ints " ] = IntFlag ( " - - num - ints " , 10 ) <nl> + opts = op . parse ( sys . argv ) <nl> + opts [ " mclib " ] = " memcache " <nl> + <nl> + with memcached_workload_common . make_memcache_connection ( opts ) as mc : <nl> + print " Shuffling numbers " <nl> + ints = range ( 0 , opts [ " num_ints " ] ) <nl> + shuffle ( ints ) <nl> + <nl> + print " Checking cas on numbers " <nl> + <nl> + for i in ints : <nl> + print " Inserting % d " % i <nl> + if ( 0 = = mc . set ( str ( i ) , str ( i ) ) ) : <nl> + raise ValueError ( " Insert of % d failed " % i ) <nl> + <nl> + print " Getting % d " % i <nl> + value , _ , cas_id = mc . explicit_gets ( str ( i ) ) <nl> + if ( value ! = str ( i ) ) : <nl> + raise ValueError ( " get failed , should be % d = > % d , was % s " % ( i , i , value ) ) <nl> + <nl> + print " ' cas ' - ing % d " % i <nl> + if ( 0 = = mc . explicit_cas ( str ( i ) , str ( i + 1 ) , cas_id ) ) : <nl> + raise ValueError ( " cas of % d failed " % i ) <nl> + <nl> + print " Verifying cas % d " % i <nl> + value , _ , cas_id = mc . explicit_gets ( str ( i ) ) <nl> + if ( value ! = str ( i + 1 ) ) : <nl> + raise ValueError ( " get for cas failed , should be % d = > % d , was % s " % ( i , i + 1 , value ) ) <nl> + <nl> + print " Modifying % d again " % i <nl> + if ( 0 = = mc . set ( str ( i ) , str ( i + 10 ) ) ) : <nl> + raise ValueError ( " Modify of % d failed " % i ) <nl> + <nl> + print " ' cas ' - ing % d again " % i <nl> + if ( 0 ! = mc . explicit_cas ( str ( i ) , str ( i + 20 ) , cas_id ) ) : <nl> + raise ValueError ( " cas of % d should have failed , item has been modified " % i ) <nl> new file mode 100755 <nl> index 00000000000 . . c5b27b43837 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / check_many_keys . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import random , sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . 
join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + from vcoptparse import * <nl> + <nl> + op = memcached_workload_common . option_parser_for_memcache ( ) <nl> + op [ " num_keys " ] = IntFlag ( " - - num - keys " , 5000 ) <nl> + op [ " sequential " ] = BoolFlag ( " - - sequential " ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_memcache_connection ( opts ) as mc : <nl> + keys = [ str ( x ) for x in xrange ( opts [ " num_keys " ] ) ] <nl> + # Verify everything <nl> + print " Verifying " <nl> + i = 0 <nl> + values = mc . get_multi ( keys [ i : i + 16 ] ) <nl> + for key in keys : <nl> + if i % 16 = = 0 : <nl> + values = mc . get_multi ( keys [ i : i + 16 ] ) <nl> + <nl> + value = values [ key ] <nl> + if value ! = key : <nl> + raise ValueError ( " Key % r is set to % r , expected % r " % ( key , value , key ) ) <nl> + i + = 1 <nl> + <nl> + print " Success " <nl> new file mode 100755 <nl> index 00000000000 . . e3f5e935119 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / deletion . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + from vcoptparse import * <nl> + <nl> + def mid ( a ) : <nl> + half = len ( a ) / 2 <nl> + for j in xrange ( half ) : <nl> + yield a [ half - j - 1 ] <nl> + yield a [ half + j ] <nl> + <nl> + reorder_funs = { <nl> + ' fwd ' : lambda x : x , <nl> + ' rev ' : reversed , <nl> + ' mid ' : mid , <nl> + ' midrev ' : lambda x : mid ( list ( reversed ( x ) ) ) <nl> + } <nl> + <nl> + # This test is written somewhat oddly because I was trying to reproduce my <nl> + # earlier manual tests exactly . <nl> + # The general goal is to trigger all the various edge cases of the leveling <nl> + # code . <nl> + <nl> + op = memcached_workload_common . option_parser_for_memcache ( ) <nl> + op [ ' max_key ' ] = IntFlag ( " - - max - key " , 1000 ) <nl> + op [ ' key_len ' ] = IntFlag ( " - - key - len " , 4 ) <nl> + op [ ' val_len ' ] = IntFlag ( " - - val - len " , 45 ) <nl> + op [ ' pattern ' ] = ChoiceFlag ( " - - pattern " , reorder_funs . keys ( ) , " fwd " ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_memcache_connection ( opts ) as mc : <nl> + keys = [ ( " % 0 " + str ( opts [ ' key_len ' ] ) + " d " ) % ( key , ) for key in xrange ( opts [ ' max_key ' ] + 1 ) ] <nl> + val = ' Q ' * opts [ ' val_len ' ] <nl> + <nl> + print " Inserting " <nl> + for k in keys : <nl> + mc . set ( k , val ) <nl> + <nl> + print " Deleting " <nl> + for k in reorder_funs [ opts [ ' pattern ' ] ] ( keys ) : <nl> + mc . delete ( k ) <nl> + <nl> + print " Done " <nl> new file mode 100755 <nl> index 00000000000 . . 8b13cdc863f <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / expiration . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import time , sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . 
pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + <nl> + def expect ( b , msg ) : <nl> + if ( not b ) : <nl> + raise ValueError , msg <nl> + <nl> + def typical_test ( mc , k , t , first_sleep , second_sleep ) : <nl> + expect ( mc . set ( k , " aaa " , time = t ) ! = 0 , " Set failed " ) <nl> + print " Make sure we can get the element back after a short sleep " <nl> + time . sleep ( first_sleep ) <nl> + expect ( mc . get ( k ) = = " aaa " , <nl> + " Failure : value can ' t be found but it ' s supposed to be " ) <nl> + <nl> + print " Make sure the element eventually expires " <nl> + time . sleep ( second_sleep ) <nl> + expect ( mc . get ( k ) = = None , <nl> + " Failure : value should have expired but it didn ' t " ) <nl> + <nl> + # Not really an exptime test . <nl> + def zero_test ( mc ) : <nl> + print " zero_test . . . " <nl> + expect ( mc . set ( " z " , " aaa " , time = 0 ) ! = 0 , " Set failed " ) <nl> + print " Make sure we can get the element back . " <nl> + expect ( mc . get ( " z " ) = = " aaa " , " Value can ' t be found , hey we didn ' t even set the exptime ! " ) <nl> + print " Done zero_test . " <nl> + <nl> + # Tests an absolute timestamp 5 seconds from now . <nl> + def absolute_test ( mc ) : <nl> + print " absolute_test . . . " <nl> + t = int ( time . time ( ) ) + 5 <nl> + typical_test ( mc , " b " , t , 1 , 4 ) <nl> + print " Done absolute_test . " <nl> + <nl> + # Tests a relative timestamp 5 seconds from now . <nl> + def basic_test ( mc ) : <nl> + print " basic_test . . . " <nl> + typical_test ( mc , " a " , 5 , 1 , 4 ) <nl> + print " Done basic_test . " <nl> + <nl> + # Tests an expiration time that ' s already in the past . <nl> + def past_test ( mc ) : <nl> + print " past_test . . . " <nl> + expect ( mc . set ( " p " , " aaa " , time = int ( time . time ( ) ) - 5 ) = = 1 , " Set failed " ) <nl> + print " Make sure we can ' t get the element back . " <nl> + expect ( mc . get ( " p " ) = = None , " Wait , we got a value ? ! " ) <nl> + print " Done past_test . " <nl> + <nl> + op = memcached_workload_common . option_parser_for_memcache ( ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_memcache_connection ( opts ) as mc : <nl> + zero_test ( mc ) <nl> + basic_test ( mc ) <nl> + absolute_test ( mc ) <nl> + past_test ( mc ) <nl> + <nl> + print " Done " <nl> new file mode 100755 <nl> index 00000000000 . . cccff1836dd <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / flags . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + from random import shuffle <nl> + import sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + from vcoptparse import * <nl> + <nl> + op = memcached_workload_common . option_parser_for_memcache ( ) <nl> + del op [ " mclib " ] # No longer optional ; we only work with memcache . <nl> + op [ " num_ints " ] = IntFlag ( " - - num - ints " , 10 ) <nl> + opts = op . parse ( sys . argv ) <nl> + opts [ " mclib " ] = " memcache " <nl> + <nl> + with memcached_workload_common . 
make_memcache_connection ( opts ) as mc : <nl> + <nl> + print " Shuffling numbers " <nl> + ints = range ( 0 , opts [ " num_ints " ] ) <nl> + shuffle ( ints ) <nl> + <nl> + # flag values are : <nl> + # - 0 for strings <nl> + # - 2 for ints <nl> + <nl> + print " Testing with flags " <nl> + for i in ints : <nl> + print " Inserting % d " % i <nl> + if i % 2 : val = str ( i ) <nl> + else : val = i <nl> + <nl> + if ( 0 = = mc . set ( str ( i ) , val ) ) : <nl> + raise ValueError ( " Insert of % d failed " % i ) <nl> + <nl> + print " Getting % d " % i <nl> + value , flags , _ = mc . explicit_gets ( str ( i ) ) <nl> + if ( value ! = val ) : <nl> + raise ValueError ( " get failed , should be % d = > % d , was % s " % ( i , val , value ) ) <nl> + <nl> + print " Checking flag for % d " % i <nl> + if i % 2 : <nl> + if flags ! = 0 : <nl> + raise ValueError ( " flag set failed , should be % d , was % d " % ( 0 , flags ) ) <nl> + else : <nl> + if flags ! = 2 : <nl> + raise ValueError ( " flag set failed , should be % d , was % d " % ( 2 , flags ) ) <nl> new file mode 100755 <nl> index 00000000000 . . 6fffa5abb78 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / fuzz . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import sys , random , time , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + from vcoptparse import * <nl> + <nl> + # " I am a string " - > [ " I a " , " m a s " , " trin " , " g " ] <nl> + def rand_split ( string , nsub_strings ) : <nl> + cutoffs = random . sample ( range ( len ( string ) ) , nsub_strings ) ; <nl> + cutoffs . sort ( ) ; <nl> + cutoffs . insert ( 0 , 0 ) <nl> + cutoffs . append ( len ( string ) ) <nl> + strings = [ ] <nl> + for ( start , end ) in zip ( cutoffs [ 0 : len ( cutoffs ) - 1 ] , cutoffs [ 1 : len ( cutoffs ) ] ) : <nl> + strings . append ( string [ start : end ] ) <nl> + <nl> + return strings <nl> + <nl> + meaningful_words = [ ' set ' , ' get ' , ' delete ' , ' gets ' , ' add ' , ' replace ' , ' append ' , ' prepend ' , ' cas ' , ' incr ' , ' decr ' , ' stat ' , ' rethinkdbctl ' , ' noreply ' ] <nl> + <nl> + def word ( ) : <nl> + return random . choice ( meaningful_words ) <nl> + <nl> + delims = [ ' \ t ' , ' ' , ' \ n ' , ' \ r ' ] <nl> + <nl> + def delim ( ) : <nl> + return random . choice ( delims ) <nl> + <nl> + def number ( ) : <nl> + return str ( random . randint ( 0 , 999999999999 ) ) <nl> + <nl> + def garbage ( ) : <nl> + chars = [ ' a ' , ' b ' , ' c ' , ' d ' , ' e ' , ' f ' , ' g ' , ' h ' , ' i ' , ' j ' , ' k ' , ' l ' , ' m ' , ' n ' , ' o ' , ' p ' , ' q ' , ' r ' , ' s ' , ' t ' , ' u ' , ' v ' , ' w ' , ' x ' , ' y ' , ' z ' , ' A ' , ' B ' , ' C ' , ' D ' , ' E ' , ' F ' , ' G ' , ' H ' , ' I ' , ' J ' , ' K ' , ' L ' , ' M ' , ' N ' , ' O ' , ' P ' , ' Q ' , ' R ' , ' S ' , ' T ' , ' U ' , ' V ' , ' W ' , ' X ' , ' Y ' , ' Z ' , ' 1 ' , ' 2 ' , ' 3 ' , ' 4 ' , ' 5 ' , ' 6 ' , ' 7 ' , ' 8 ' , ' 9 ' , ' 0 ' , ' ! ' , ' @ ' , ' # ' , ' $ ' , ' % ' , ' ^ ' , ' & ' , ' * ' , ' ( ' , ' ) ' , ' _ ' , ' + ' , ' , ' , ' . ' , ' / ' , ' ; ' , ' [ ' , ' ] ' , ' < ' , ' > ' , ' : ' , ' { ' , ' } ' , ' ? ' , ' \ r \ n ' , ' \ r ' , ' \ n ' , ' \ t ' , ' ' ] <nl> + len = random . randint ( 1 , 1000 ) <nl> + res = ' ' <nl> + for i in range ( len ) : <nl> + res + = random . 
choice ( chars ) <nl> + return res <nl> + <nl> + def funny ( ) : <nl> + return " Yo dawg " <nl> + <nl> + op = memcached_workload_common . option_parser_for_socket ( ) <nl> + op [ " duration " ] = IntFlag ( " - - duration " , 1000 ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_socket_connection ( opts ) as s : <nl> + sent_log = open ( ' fuzz_sent ' , ' w ' ) <nl> + recv_log = open ( ' fuzz_recv ' , ' w ' ) <nl> + <nl> + start_time = time . time ( ) <nl> + <nl> + time . sleep ( 2 ) <nl> + <nl> + while ( time . time ( ) - start_time < opts [ " duration " ] ) : <nl> + time . sleep ( . 05 ) <nl> + string = ' ' <nl> + for i in range ( 20 ) : <nl> + choice = random . random ( ) <nl> + if choice < . 5 : <nl> + string + = word ( ) <nl> + elif choice < . 75 : <nl> + string + = number ( ) <nl> + elif choice < . 85 : <nl> + string + = garbage ( ) <nl> + elif choice < . 99 : <nl> + string + = delim ( ) <nl> + else : <nl> + string + = funny ( ) <nl> + <nl> + for substr in rand_split ( string , random . randint ( 10 , 40 ) ) : <nl> + s . send ( substr ) <nl> + sent_log . write ( substr ) <nl> + <nl> + s . settimeout ( 0 ) <nl> + try : <nl> + server_str = s . recv ( 100000 ) <nl> + recv_log . write ( server_str ) <nl> + except : <nl> + pass <nl> + <nl> + s . send ( ' quit \ r \ n ' ) <nl> new file mode 100755 <nl> index 00000000000 . . ef244b1ccea <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / incr_decr . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + <nl> + op = memcached_workload_common . option_parser_for_memcache ( ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_memcache_connection ( opts ) as mc : <nl> + <nl> + print " Testing increment " <nl> + if mc . set ( str ( 1 ) , str ( 1 ) ) = = 0 : <nl> + raise ValueError , " Set failed " <nl> + mc . incr ( str ( 1 ) , 10 ) <nl> + if mc . get ( str ( 1 ) ) ! = str ( 11 ) : <nl> + raise ValueError ( " simple increment fails , should have been 11 , was % s " % mc . get ( str ( 1 ) ) ) <nl> + <nl> + # TODO : Problem with Python not being able to handle large unsigned values in the call of incr ? <nl> + # if mc . set ( str ( 1 ) , str ( 1 ) ) = = 0 : <nl> + # raise ValueError , " Set failed " <nl> + # mc . incr ( str ( 1 ) , 9223372036854775808 ) <nl> + # if mc . get ( str ( 1 ) ) ! = str ( 9223372036854775808 ) : <nl> + # raise ValueError ( " large number increment fails , should have been 9223372036854775808 , was % s " % mc . get ( str ( 1 ) ) ) <nl> + <nl> + # if mc . set ( str ( 1 ) , str ( 9223372036854775807 ) ) = = 0 : <nl> + # raise ValueError , " Set failed " <nl> + # mc . incr ( str ( 1 ) , 2 ) <nl> + # mc . incr ( str ( 1 ) , 9223372036854775807 ) <nl> + # if mc . get ( str ( 1 ) ) ! = str ( 0 ) : <nl> + # raise ValueError ( " overflow increment fails , should have been 0 , was % s " % mc . get ( str ( 1 ) ) ) <nl> + <nl> + # TODO : Figure out a way to test negative increments and incrementing by a very large value . <nl> + # memcache doesn ' t allow either . <nl> + <nl> + print " Testing decrement " <nl> + if mc . set ( str ( 1 ) , str ( 50 ) ) = = 0 : <nl> + raise ValueError , " Set failed " <nl> + mc . decr ( str ( 1 ) , 10 ) <nl> + if mc . get ( str ( 1 ) ) ! 
= str ( 40 ) : <nl> + raise ValueError ( " simple decrement fails , should have been 40 , was % s " % mc . get ( str ( 1 ) ) ) <nl> + <nl> + # TODO : Problem with Python not being able to handle large unsigned values in the call of decr ? <nl> + # if mc . set ( str ( 1 ) , str ( 9223372036854775809 ) ) = = 0 : <nl> + # raise ValueError , " Set failed " <nl> + # mc . decr ( str ( 1 ) , 9223372036854775808 ) <nl> + # if mc . get ( str ( 1 ) ) ! = str ( 1 ) : <nl> + # raise ValueError ( " large number decrement fails , should have been 1 , was % s " % mc . get ( str ( 1 ) ) ) <nl> + <nl> + if mc . set ( str ( 1 ) , str ( 51 ) ) = = 0 : <nl> + raise ValueError , " Set failed " <nl> + mc . decr ( str ( 1 ) , 52 ) <nl> + if mc . get ( str ( 1 ) ) ! = str ( 0 ) : <nl> + raise ValueError ( " underflow decrement fails , should have been 0 , was % s " % mc . get ( str ( 1 ) ) ) <nl> + <nl> + # TODO : Figure out a way to test negative decrements and decrementing by a very large value . <nl> + # memcache doesn ' t allow either . <nl> new file mode 100755 <nl> index 00000000000 . . a2ed8faf6ef <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / insert_many_keys . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import random , sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + from vcoptparse import * <nl> + <nl> + op = memcached_workload_common . option_parser_for_memcache ( ) <nl> + op [ " num_keys " ] = IntFlag ( " - - num - keys " , 5000 ) <nl> + op [ " sequential " ] = BoolFlag ( " - - sequential " ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_memcache_connection ( opts ) as mc : <nl> + <nl> + print " Inserting " <nl> + keys = [ str ( x ) for x in xrange ( opts [ " num_keys " ] ) ] <nl> + if not opts [ " sequential " ] : <nl> + random . shuffle ( keys ) <nl> + <nl> + i = 0 <nl> + for key in keys : <nl> + # if ( i % 500 = = 0 or ( i < 500 and ( i & ( i - 1 ) ) = = 0 ) ) : <nl> + # print i <nl> + ok = mc . set ( key , key ) <nl> + if ok = = 0 : <nl> + print " Failed to set a value " <nl> + raise ValueError ( " Could not set % r " % key ) <nl> + i + = 1 <nl> new file mode 100755 <nl> index 00000000000 . . eb81e52da84 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / many_keys . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import random , sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + from vcoptparse import * <nl> + <nl> + op = memcached_workload_common . option_parser_for_memcache ( ) <nl> + op [ " num_keys " ] = IntFlag ( " - - num - keys " , 5000 ) <nl> + op [ " sequential " ] = BoolFlag ( " - - sequential " ) <nl> + op [ " phase " ] = ChoiceFlag ( " - - phase " , [ " w " , " r " , " wr " ] , " wr " ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_memcache_connection ( opts ) as mc : <nl> + <nl> + if " w " in opts [ " phase " ] : <nl> + print " Inserting " <nl> + keys = [ str ( x ) for x in xrange ( opts [ " num_keys " ] ) ] <nl> + if not opts [ " sequential " ] : <nl> + random . 
shuffle ( keys ) <nl> + i = 0 <nl> + for key in keys : <nl> + # if ( i % 500 = = 0 or ( i < 500 and ( i & ( i - 1 ) ) = = 0 ) ) : <nl> + # print i <nl> + ok = mc . set ( key , key ) <nl> + if ok = = 0 : <nl> + raise ValueError ( " Could not set % r " % key ) <nl> + i + = 1 <nl> + if " r " not in opts [ " phase " ] : <nl> + print " Dumping chosen keys to disk " <nl> + with open ( " keys " , " w " ) as keys_file : <nl> + keys_file . write ( " " . join ( keys ) ) <nl> + <nl> + if " r " in opts [ " phase " ] : <nl> + if " w " not in opts [ " phase " ] : <nl> + print " Loading chosen keys from disk " <nl> + with open ( " keys " , " r " ) as keys_file : <nl> + keys = keys_file . read ( ) . split ( " " ) <nl> + # Verify everything <nl> + print " Verifying " <nl> + i = 0 <nl> + for key in keys : <nl> + if i % 16 = = 0 : <nl> + values = mc . get_multi ( keys [ i : i + 16 ] ) <nl> + value = values [ key ] <nl> + if value ! = key : <nl> + raise ValueError ( " Key % r is set to % r , expected % r " % ( key , value , key ) ) <nl> + i + = 1 <nl> + <nl> + print " Success " <nl> new file mode 100755 <nl> index 00000000000 . . 88f30400a16 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import os , sys <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import subprocess , memcached_workload_common <nl> + from vcoptparse import * <nl> + <nl> + op = memcached_workload_common . option_parser_for_socket ( ) <nl> + op [ " suite - test " ] = PositionalArg ( ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + # Figure out where the memcached scripts are located <nl> + memcached_suite_dir = os . path . join ( os . path . dirname ( __file__ ) , " memcached_suite " ) <nl> + <nl> + # The memcached test scripts now get the port as an environment variable <nl> + # ( instead of running the server themselves ) . <nl> + assert opts [ " address " ] [ 0 ] in [ " localhost " , " 127 . 0 . 0 . 1 " ] <nl> + os . environ [ " RUN_PORT " ] = str ( opts [ " address " ] [ 1 ] ) <nl> + os . environ [ " PERLLIB " ] = os . path . join ( memcached_suite_dir , " lib " ) + " : " + os . getenv ( " PERLLIB " , " " ) <nl> + <nl> + subprocess . check_call ( os . path . join ( memcached_suite_dir , opts [ " suite - test " ] ) ) <nl> new file mode 100755 <nl> index 00000000000 . . aaae6cd6ff8 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / bogus - commands . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 1 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + print $ sock " boguscommand slkdsldkfjsd \ r \ n " ; <nl> + is ( scalar < $ sock > , " ERROR \ r \ n " , " got error back " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . 8797ebf2d18 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / cas . t <nl> <nl> + # ! 
/ usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 43 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + my $ sock2 = $ server - > new_sock ; <nl> + <nl> + my @ result ; <nl> + my @ result2 ; <nl> + <nl> + ok ( $ sock ! = $ sock2 , " have two different connections open " ) ; <nl> + <nl> + sub check_args { <nl> + my ( $ line , $ name ) = @ _ ; <nl> + <nl> + my $ svr = new_memcached ( ) ; <nl> + my $ s = $ svr - > sock ; <nl> + <nl> + print $ s $ line ; <nl> + is ( scalar < $ s > , " CLIENT_ERROR bad command line format \ r \ n " , $ name ) ; <nl> + undef $ svr ; <nl> + } <nl> + <nl> + check_args " cas bad blah 0 0 0 \ r \ n \ r \ n " , " bad flags " ; <nl> + check_args " cas bad 0 blah 0 0 \ r \ n \ r \ n " , " bad exp " ; <nl> + check_args " cas bad 0 0 blah 0 \ r \ n \ r \ n " , " bad cas " ; <nl> + check_args " cas bad 0 0 0 blah \ r \ n \ r \ n " , " bad size " ; <nl> + <nl> + # gets foo ( should not exist ) <nl> + print $ sock " gets foo \ r \ n " ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " gets failed " ) ; <nl> + <nl> + # set foo <nl> + print $ sock " set foo 0 0 6 \ r \ nbarval \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored barval " ) ; <nl> + <nl> + # gets foo and verify identifier exists <nl> + @ result = mem_gets ( $ sock , " foo " ) ; <nl> + mem_gets_is ( $ sock , $ result [ 0 ] , " foo " , " barval " ) ; <nl> + <nl> + # cas fail <nl> + print $ sock " cas foo 0 0 6 123 \ r \ nbarva2 \ r \ n " ; <nl> + is ( scalar < $ sock > , " EXISTS \ r \ n " , " cas failed for foo " ) ; <nl> + <nl> + # gets foo - success <nl> + @ result = mem_gets ( $ sock , " foo " ) ; <nl> + mem_gets_is ( $ sock , $ result [ 0 ] , " foo " , " barval " ) ; <nl> + <nl> + # cas success <nl> + print $ sock " cas foo 0 0 6 $ result [ 0 ] \ r \ nbarva2 \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " cas success , set foo " ) ; <nl> + <nl> + # cas failure ( reusing the same key ) <nl> + print $ sock " cas foo 0 0 6 $ result [ 0 ] \ r \ nbarva2 \ r \ n " ; <nl> + is ( scalar < $ sock > , " EXISTS \ r \ n " , " reusing a CAS ID " ) ; <nl> + <nl> + # delete foo <nl> + print $ sock " delete foo \ r \ n " ; <nl> + is ( scalar < $ sock > , " DELETED \ r \ n " , " deleted foo " ) ; <nl> + <nl> + # cas missing <nl> + print $ sock " cas foo 0 0 6 $ result [ 0 ] \ r \ nbarva2 \ r \ n " ; <nl> + is ( scalar < $ sock > , " NOT_FOUND \ r \ n " , " cas failed , foo does not exist " ) ; <nl> + <nl> + # cas empty <nl> + print $ sock " cas foo 0 0 6 \ r \ nbarva2 \ r \ n " ; <nl> + is ( scalar < $ sock > , " ERROR \ r \ n " , " cas empty , throw error " ) ; <nl> + # cant parse barval2 \ r \ n <nl> + is ( scalar < $ sock > , " ERROR \ r \ n " , " error out on barval2 parsing " ) ; <nl> + <nl> + # set foo1 <nl> + print $ sock " set foo1 0 0 1 \ r \ n1 \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " set foo1 " ) ; <nl> + # set foo2 <nl> + print $ sock " set foo2 0 0 1 \ r \ n2 \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " set foo2 " ) ; <nl> + <nl> + # gets foo1 check <nl> + print $ sock " gets foo1 \ r \ n " ; <nl> + ok ( scalar < $ sock > = ~ / VALUE foo1 0 1 ( \ d + ) \ r \ n / , " gets foo1 regexp success " ) ; <nl> + my $ foo1_cas = $ 1 ; <nl> + is ( scalar < $ sock > , " 1 \ r \ n " , " gets foo1 data is 1 " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , 
" gets foo1 END " ) ; <nl> + <nl> + # gets foo2 check <nl> + print $ sock " gets foo2 \ r \ n " ; <nl> + ok ( scalar < $ sock > = ~ / VALUE foo2 0 1 ( \ d + ) \ r \ n / , " gets foo2 regexp success " ) ; <nl> + my $ foo2_cas = $ 1 ; <nl> + is ( scalar < $ sock > , " 2 \ r \ n " , " gets foo2 data is 2 " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " gets foo2 END " ) ; <nl> + <nl> + # validate foo1 ! = foo2 <nl> + ok ( $ foo1_cas ! = $ foo2_cas , " foo1 ! = foo2 single - gets success " ) ; <nl> + <nl> + # multi - gets <nl> + print $ sock " gets foo1 foo2 \ r \ n " ; <nl> + ok ( scalar < $ sock > = ~ / VALUE foo1 0 1 ( \ d + ) \ r \ n / , " validating first set of data is foo1 " ) ; <nl> + $ foo1_cas = $ 1 ; <nl> + is ( scalar < $ sock > , " 1 \ r \ n " , " validating foo1 set of data is 1 " ) ; <nl> + ok ( scalar < $ sock > = ~ / VALUE foo2 0 1 ( \ d + ) \ r \ n / , " validating second set of data is foo2 " ) ; <nl> + $ foo2_cas = $ 1 ; <nl> + is ( scalar < $ sock > , " 2 \ r \ n " , " validating foo2 set of data is 2 " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " validating foo1 , foo2 gets is over - END " ) ; <nl> + <nl> + # validate foo1 ! = foo2 <nl> + ok ( $ foo1_cas ! = $ foo2_cas , " foo1 ! = foo2 multi - gets success " ) ; <nl> + <nl> + # # # simulate race condition with cas <nl> + <nl> + # gets foo1 - success <nl> + @ result = mem_gets ( $ sock , " foo1 " ) ; <nl> + ok ( $ result [ 0 ] ! = " " , " sock - gets foo1 is not empty " ) ; <nl> + <nl> + # gets foo2 - success <nl> + @ result2 = mem_gets ( $ sock2 , " foo1 " ) ; <nl> + ok ( $ result2 [ 0 ] ! = " " , " sock2 - gets foo1 is not empty " ) ; <nl> + <nl> + print $ sock " cas foo1 0 0 6 $ result [ 0 ] \ r \ nbarva2 \ r \ n " ; <nl> + print $ sock2 " cas foo1 0 0 5 $ result2 [ 0 ] \ r \ napple \ r \ n " ; <nl> + <nl> + my $ res1 = < $ sock > ; <nl> + my $ res2 = < $ sock2 > ; <nl> + <nl> + ok ( ( $ res1 eq " STORED \ r \ n " & & $ res2 eq " EXISTS \ r \ n " ) | | <nl> + ( $ res1 eq " EXISTS \ r \ n " & & $ res2 eq " STORED \ r \ n " ) , <nl> + " cas on same item from two sockets " ) ; <nl> + <nl> + # # # bug 15 : http : / / code . google . com / p / memcached / issues / detail ? id = 15 <nl> + <nl> + # set foo <nl> + print $ sock " set bug15 0 0 1 \ r \ n0 \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored 0 " ) ; <nl> + <nl> + # Check out the first gets . <nl> + print $ sock " gets bug15 \ r \ n " ; <nl> + ok ( scalar < $ sock > = ~ / VALUE bug15 0 1 ( \ d + ) \ r \ n / , " gets bug15 regexp success " ) ; <nl> + my $ bug15_cas = $ 1 ; <nl> + is ( scalar < $ sock > , " 0 \ r \ n " , " gets bug15 data is 0 " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " gets bug15 END " ) ; <nl> + <nl> + # Increment <nl> + print $ sock " incr bug15 1 \ r \ n " ; <nl> + is ( scalar < $ sock > , " 1 \ r \ n " , " incr worked " ) ; <nl> + <nl> + # Validate a changed CAS <nl> + print $ sock " gets bug15 \ r \ n " ; <nl> + ok ( scalar < $ sock > = ~ / VALUE bug15 0 1 ( \ d + ) \ r \ n / , " gets bug15 regexp success " ) ; <nl> + my $ next_bug15_cas = $ 1 ; <nl> + is ( scalar < $ sock > , " 1 \ r \ n " , " gets bug15 data is 0 " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " gets bug15 END " ) ; <nl> + <nl> + ok ( $ bug15_cas ! = $ next_bug15_cas , " CAS changed " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . 26967d0d235 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / 00 - startup . t <nl> <nl> + # ! 
/ usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 18 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + eval { <nl> + my $ server = new_memcached ( ) ; <nl> + ok ( $ server , " started the server " ) ; <nl> + } ; <nl> + is ( $ @ , ' ' , ' Basic startup works ' ) ; <nl> + <nl> + eval { <nl> + my $ server = new_memcached ( " - l fooble " ) ; <nl> + } ; <nl> + ok ( $ @ , " Died with illegal - l args " ) ; <nl> + <nl> + eval { <nl> + my $ server = new_memcached ( " - l 127 . 0 . 0 . 1 " ) ; <nl> + } ; <nl> + is ( $ @ , ' ' , " - l 127 . 0 . 0 . 1 works " ) ; <nl> + <nl> + eval { <nl> + my $ server = new_memcached ( ' - C ' ) ; <nl> + my $ stats = mem_stats ( $ server - > sock , ' settings ' ) ; <nl> + is ( ' no ' , $ stats - > { ' cas_enabled ' } ) ; <nl> + } ; <nl> + is ( $ @ , ' ' , " - C works " ) ; <nl> + <nl> + eval { <nl> + my $ server = new_memcached ( ' - b 8675 ' ) ; <nl> + my $ stats = mem_stats ( $ server - > sock , ' settings ' ) ; <nl> + is ( ' 8675 ' , $ stats - > { ' tcp_backlog ' } ) ; <nl> + } ; <nl> + is ( $ @ , ' ' , " - b works " ) ; <nl> + <nl> + foreach my $ val ( ' auto ' , ' ascii ' ) { <nl> + eval { <nl> + my $ server = new_memcached ( " - B $ val " ) ; <nl> + my $ stats = mem_stats ( $ server - > sock , ' settings ' ) ; <nl> + ok ( $ stats - > { ' binding_protocol ' } = ~ / $ val / , " $ val works " ) ; <nl> + } ; <nl> + is ( $ @ , ' ' , " $ val works " ) ; <nl> + } <nl> + <nl> + # For the binary test , we just verify it starts since we don ' t have an easy bin client . <nl> + eval { <nl> + my $ server = new_memcached ( " - B binary " ) ; <nl> + } ; <nl> + is ( $ @ , ' ' , " binary works " ) ; <nl> + <nl> + eval { <nl> + my $ server = new_memcached ( " - vv - B auto " ) ; <nl> + } ; <nl> + is ( $ @ , ' ' , " auto works " ) ; <nl> + <nl> + eval { <nl> + my $ server = new_memcached ( " - vv - B ascii " ) ; <nl> + } ; <nl> + is ( $ @ , ' ' , " ascii works " ) ; <nl> + <nl> + <nl> + # For the binary test , we just verify it starts since we don ' t have an easy bin client . <nl> + eval { <nl> + my $ server = new_memcached ( " - vv - B binary " ) ; <nl> + } ; <nl> + is ( $ @ , ' ' , " binary works " ) ; <nl> + <nl> + <nl> + # Should blow up with something invalid . <nl> + eval { <nl> + my $ server = new_memcached ( " - B http " ) ; <nl> + } ; <nl> + ok ( $ @ , " Died with illegal - B arg . " ) ; <nl> + <nl> + # Should not allow - t 0 <nl> + eval { <nl> + my $ server = new_memcached ( " - t 0 " ) ; <nl> + } ; <nl> + ok ( $ @ , " Died with illegal 0 thread count " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . 425ff8aa344 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / 64bit . t <nl> <nl> + # ! 
/ usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + $ ENV { T_MEMD_INITIAL_MALLOC } = " 4294967328 " ; # 2 * * 32 + 32 , just over 4GB <nl> + $ ENV { T_MEMD_SLABS_ALLOC } = 0 ; # don ' t preallocate slabs <nl> + <nl> + my $ server = new_memcached ( " - m 4098 - M " ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + my ( $ stats , $ slabs ) = @ _ ; <nl> + <nl> + $ stats = mem_stats ( $ sock ) ; <nl> + <nl> + if ( $ stats - > { ' pointer_size ' } eq " 32 " ) { <nl> + plan skip_all = > ' Skipping 64 - bit tests on 32 - bit build ' ; <nl> + exit 0 ; <nl> + } else { <nl> + plan tests = > 6 ; <nl> + } <nl> + <nl> + is ( $ stats - > { ' pointer_size ' } , 64 , " is 64 bit " ) ; <nl> + is ( $ stats - > { ' limit_maxbytes ' } , " 4297064448 " , " max bytes is 4098 MB " ) ; <nl> + <nl> + $ slabs = mem_stats ( $ sock , ' slabs ' ) ; <nl> + is ( $ slabs - > { ' total_malloced ' } , " 4294967328 " , " expected ( faked ) value of total_malloced " ) ; <nl> + is ( $ slabs - > { ' active_slabs ' } , 0 , " no active slabs " ) ; <nl> + <nl> + my $ hit_limit = 0 ; <nl> + for ( 1 . . 5 ) { <nl> + my $ size = 400 * 1024 ; <nl> + my $ data = " a " x $ size ; <nl> + print $ sock " set big $ _ 0 0 $ size \ r \ n $ data \ r \ n " ; <nl> + my $ res = < $ sock > ; <nl> + $ hit_limit = 1 if $ res ne " STORED \ r \ n " ; <nl> + } <nl> + ok ( $ hit_limit , " hit size limit " ) ; <nl> + <nl> + $ slabs = mem_stats ( $ sock , ' slabs ' ) ; <nl> + is ( $ slabs - > { ' active_slabs ' } , 1 , " 1 active slab " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . f57a8a34d88 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / binary - get . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 8 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + my $ count = 1 ; <nl> + <nl> + foreach my $ blob ( " mooo \ 0 " , " mumble \ 0 \ 0 \ 0 \ 0 \ r \ rblarg " , " \ 0 " , " \ r " ) { <nl> + my $ key = " foo $ count " ; <nl> + my $ len = length ( $ blob ) ; <nl> + print " len is $ len \ n " ; <nl> + print $ sock " set $ key 0 0 $ len \ r \ n $ blob \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored $ key " ) ; <nl> + mem_get_is ( $ sock , $ key , $ blob ) ; <nl> + $ count + + ; <nl> + } <nl> + <nl> new file mode 100755 <nl> index 00000000000 . . 56d05dcc4fa <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / binary - sasl . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use warnings ; <nl> + use Cwd ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ supports_sasl = supports_sasl ( ) ; <nl> + <nl> + use Test : : More ; <nl> + <nl> + if ( supports_sasl ( ) ) { <nl> + plan tests = > 25 ; <nl> + } else { <nl> + plan tests = > 1 ; <nl> + eval { <nl> + my $ server = new_memcached ( " - S " ) ; <nl> + } ; <nl> + ok ( $ @ , " Died with illegal - S args when SASL is not supported . 
" ) ; <nl> + exit 0 ; <nl> + } <nl> + <nl> + eval { <nl> + my $ server = new_memcached ( " - S - B auto " ) ; <nl> + } ; <nl> + ok ( $ @ , " SASL shouldn ' t be used with protocol auto negotiate " ) ; <nl> + <nl> + eval { <nl> + my $ server = new_memcached ( " - S - B ascii " ) ; <nl> + } ; <nl> + ok ( $ @ , " SASL isn ' t implemented in the ascii protocol " ) ; <nl> + <nl> + eval { <nl> + my $ server = new_memcached ( " - S - B binary - B ascii " ) ; <nl> + } ; <nl> + ok ( $ @ , " SASL isn ' t implemented in the ascii protocol " ) ; <nl> + <nl> + # Based almost 100 % off testClient . py which is : <nl> + # Copyright ( c ) 2007 Dustin Sallings < dustin @ spy . net > <nl> + <nl> + # Command constants <nl> + use constant CMD_GET = > 0x00 ; <nl> + use constant CMD_SET = > 0x01 ; <nl> + use constant CMD_ADD = > 0x02 ; <nl> + use constant CMD_REPLACE = > 0x03 ; <nl> + use constant CMD_DELETE = > 0x04 ; <nl> + use constant CMD_INCR = > 0x05 ; <nl> + use constant CMD_DECR = > 0x06 ; <nl> + use constant CMD_QUIT = > 0x07 ; <nl> + use constant CMD_FLUSH = > 0x08 ; <nl> + use constant CMD_GETQ = > 0x09 ; <nl> + use constant CMD_NOOP = > 0x0A ; <nl> + use constant CMD_VERSION = > 0x0B ; <nl> + use constant CMD_GETK = > 0x0C ; <nl> + use constant CMD_GETKQ = > 0x0D ; <nl> + use constant CMD_APPEND = > 0x0E ; <nl> + use constant CMD_PREPEND = > 0x0F ; <nl> + use constant CMD_STAT = > 0x10 ; <nl> + use constant CMD_SETQ = > 0x11 ; <nl> + use constant CMD_ADDQ = > 0x12 ; <nl> + use constant CMD_REPLACEQ = > 0x13 ; <nl> + use constant CMD_DELETEQ = > 0x14 ; <nl> + use constant CMD_INCREMENTQ = > 0x15 ; <nl> + use constant CMD_DECREMENTQ = > 0x16 ; <nl> + use constant CMD_QUITQ = > 0x17 ; <nl> + use constant CMD_FLUSHQ = > 0x18 ; <nl> + use constant CMD_APPENDQ = > 0x19 ; <nl> + use constant CMD_PREPENDQ = > 0x1A ; <nl> + <nl> + use constant CMD_SASL_LIST_MECHS = > 0x20 ; <nl> + use constant CMD_SASL_AUTH = > 0x21 ; <nl> + use constant CMD_SASL_STEP = > 0x22 ; <nl> + use constant ERR_AUTH_ERROR = > 0x20 ; <nl> + <nl> + <nl> + # REQ and RES formats are divided even though they currently share <nl> + # the same format , since they _could_ differ in the future . <nl> + use constant REQ_PKT_FMT = > " CCnCCnNNNN " ; <nl> + use constant RES_PKT_FMT = > " CCnCCnNNNN " ; <nl> + use constant INCRDECR_PKT_FMT = > " NNNNN " ; <nl> + use constant MIN_RECV_BYTES = > length ( pack ( RES_PKT_FMT ) ) ; <nl> + use constant REQ_MAGIC = > 0x80 ; <nl> + use constant RES_MAGIC = > 0x81 ; <nl> + <nl> + my $ pwd = getcwd ; <nl> + $ ENV { ' SASL_CONF_PATH ' } = " $ pwd / t / sasl " ; <nl> + <nl> + my $ server = new_memcached ( ' - B binary - S ' ) ; <nl> + <nl> + my $ mc = MC : : Client - > new ; <nl> + <nl> + my $ check = sub { <nl> + my ( $ key , $ orig_val ) = @ _ ; <nl> + my ( $ status , $ val , $ cas ) = $ mc - > get ( $ key ) ; <nl> + <nl> + if ( $ val = ~ / ^ \ d + $ / ) { <nl> + cmp_ok ( $ val , ' = = ' , $ orig_val , " $ val = $ orig_val " ) ; <nl> + } <nl> + else { <nl> + cmp_ok ( $ val , ' eq ' , $ orig_val , " $ val = $ orig_val " ) ; <nl> + } <nl> + } ; <nl> + <nl> + my $ set = sub { <nl> + my ( $ key , $ orig_value , $ exp ) = @ _ ; <nl> + $ exp = defined $ exp ? 
$ exp : 0 ; <nl> + my ( $ status , $ rv ) = $ mc - > set ( $ key , $ orig_value , $ exp ) ; <nl> + $ check - > ( $ key , $ orig_value ) ; <nl> + } ; <nl> + <nl> + my $ empty = sub { <nl> + my $ key = shift ; <nl> + my ( $ status , $ rv ) = ( ) = eval { $ mc - > get ( $ key ) } ; <nl> + # if ( $ status = = ERR_AUTH_ERROR ) { <nl> + # ok ( $ @ - > auth_error , " Not authorized to connect " ) ; <nl> + # } <nl> + # else { <nl> + # ok ( $ @ - > not_found , " We got a not found error when we expected one " ) ; <nl> + # } <nl> + if ( $ status ) { <nl> + ok ( $ @ - > not_found , " We got a not found error when we expected one " ) ; <nl> + } <nl> + } ; <nl> + <nl> + my $ delete = sub { <nl> + my ( $ key , $ when ) = @ _ ; <nl> + $ mc - > delete ( $ key , $ when ) ; <nl> + $ empty - > ( $ key ) ; <nl> + } ; <nl> + <nl> + # BEGIN THE TEST <nl> + ok ( $ server , " started the server " ) ; <nl> + <nl> + my $ v = $ mc - > version ; <nl> + ok ( defined $ v & & length ( $ v ) , " Proper version : $ v " ) ; <nl> + <nl> + # list mechs <nl> + my $ mechs = $ mc - > list_mechs ( ) ; <nl> + Test : : More : : cmp_ok ( $ mechs , ' eq ' , ' CRAM - MD5 PLAIN ' , " list_mechs $ mechs " ) ; <nl> + <nl> + # this should fail , not authenticated <nl> + { <nl> + my ( $ status , $ val ) = $ mc - > set ( ' x ' , " somevalue " ) ; <nl> + ok ( $ status , " this fails to authenticate " ) ; <nl> + cmp_ok ( $ status , ' = = ' , ERR_AUTH_ERROR , " error code matches " ) ; <nl> + } <nl> + $ empty - > ( ' x ' ) ; <nl> + { <nl> + my $ mc = MC : : Client - > new ; <nl> + my ( $ status , $ val ) = $ mc - > delete ( ' x ' ) ; <nl> + ok ( $ status , " this fails to authenticate " ) ; <nl> + cmp_ok ( $ status , ' = = ' , ERR_AUTH_ERROR , " error code matches " ) ; <nl> + } <nl> + $ empty - > ( ' x ' ) ; <nl> + { <nl> + my $ mc = MC : : Client - > new ; <nl> + my ( $ status , $ val ) = $ mc - > set ( ' x ' , " somevalue " ) ; <nl> + ok ( $ status , " this fails to authenticate " ) ; <nl> + cmp_ok ( $ status , ' = = ' , ERR_AUTH_ERROR , " error code matches " ) ; <nl> + } <nl> + $ empty - > ( ' x ' ) ; <nl> + { <nl> + my $ mc = MC : : Client - > new ; <nl> + my ( $ status , $ val ) = $ mc - > flush ( ' x ' ) ; <nl> + ok ( $ status , " this fails to authenticate " ) ; <nl> + cmp_ok ( $ status , ' = = ' , ERR_AUTH_ERROR , " error code matches " ) ; <nl> + } <nl> + $ empty - > ( ' x ' ) ; <nl> + <nl> + # Build the auth DB for testing . <nl> + my $ sasldb = ' / tmp / test - memcached . sasldb ' ; <nl> + unlink $ sasldb ; <nl> + system ( " echo testpass | saslpasswd2 - a memcached - c - p testuser " ) ; <nl> + <nl> + $ mc = MC : : Client - > new ; <nl> + <nl> + # Attempt a bad auth mech . <nl> + is ( $ mc - > authenticate ( ' testuser ' , ' testpass ' , " X " x 40 ) , 0x4 , " bad mech " ) ; <nl> + <nl> + # Attempt bad authentication . <nl> + is ( $ mc - > authenticate ( ' testuser ' , ' wrongpassword ' ) , 0x20 , " bad auth " ) ; <nl> + <nl> + # Now try good authentication and make the tests work . <nl> + is ( $ mc - > authenticate ( ' testuser ' , ' testpass ' ) , 0 , " authenticated " ) ; <nl> + # these should work <nl> + { <nl> + my ( $ status , $ val ) = $ mc - > set ( ' x ' , " somevalue " ) ; <nl> + ok ( ! $ status ) ; <nl> + } <nl> + $ check - > ( ' x ' , ' somevalue ' ) ; <nl> + <nl> + { <nl> + my ( $ status , $ val ) = $ mc - > delete ( ' x ' ) ; <nl> + ok ( ! $ status ) ; <nl> + } <nl> + $ empty - > ( ' x ' ) ; <nl> + <nl> + { <nl> + my ( $ status , $ val ) = $ mc - > set ( ' x ' , " somevalue " ) ; <nl> + ok ( ! 
$ status ) ; <nl> + } <nl> + $ check - > ( ' x ' , ' somevalue ' ) ; <nl> + <nl> + { <nl> + my ( $ status , $ val ) = $ mc - > flush ( ' x ' ) ; <nl> + ok ( ! $ status ) ; <nl> + } <nl> + $ empty - > ( ' x ' ) ; <nl> + <nl> + # check the SASL stats , make sure they track things correctly <nl> + # note : the enabled or not is presence checked in stats . t <nl> + <nl> + # while authenticated , get current counter <nl> + # <nl> + # My initial approach was going to be to get current counts , reauthenticate <nl> + # and fail , followed by a reauth successfully so I ' d know what happened . <nl> + # Reauthentication is currently unsupported , so it doesn ' t work that way at the <nl> + # moment . Adding tests may break this . <nl> + <nl> + { <nl> + my % stats = $ mc - > stats ( ' ' ) ; <nl> + is ( $ stats { ' auth_cmds ' } , 2 , " auth commands counted " ) ; <nl> + is ( $ stats { ' auth_errors ' } , 1 , " auth errors correct " ) ; <nl> + } <nl> + <nl> + <nl> + # Along with the assertion added to the code to verify we ' re staying <nl> + # within bounds when we do a stats detail dump ( detail turned on at <nl> + # the top ) . <nl> + # my % stats = $ mc - > stats ( ' detail dump ' ) ; <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # Test ends around here . <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + package MC : : Client ; <nl> + <nl> + use strict ; <nl> + use warnings ; <nl> + use fields qw ( socket ) ; <nl> + use IO : : Socket : : INET ; <nl> + <nl> + use constant ERR_AUTH_ERROR = > 0x20 ; <nl> + <nl> + sub new { <nl> + my $ self = shift ; <nl> + my ( $ s ) = @ _ ; <nl> + $ s = $ server unless defined $ s ; <nl> + my $ sock = $ s - > sock ; <nl> + $ self = fields : : new ( $ self ) ; <nl> + $ self - > { socket } = $ sock ; <nl> + return $ self ; <nl> + } <nl> + <nl> + sub authenticate { <nl> + my ( $ self , $ user , $ pass , $ mech ) = @ _ ; <nl> + $ mech | | = ' PLAIN ' ; <nl> + my $ buf = sprintf ( " % c % s % c % s " , 0 , $ user , 0 , $ pass ) ; <nl> + my ( $ status , $ rv , undef ) = $ self - > _do_command ( : : CMD_SASL_AUTH , $ mech , $ buf , ' ' ) ; <nl> + return $ status ; <nl> + } <nl> + sub list_mechs { <nl> + my ( $ self ) = @ _ ; <nl> + my ( $ status , $ rv , undef ) = $ self - > _do_command ( : : CMD_SASL_LIST_MECHS , ' ' , ' ' , ' ' ) ; <nl> + return join ( " " , sort ( split ( / \ s + / , $ rv ) ) ) ; <nl> + } <nl> + <nl> + sub build_command { <nl> + my $ self = shift ; <nl> + die " Not enough args to send_command " unless @ _ > = 4 ; <nl> + my ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) = @ _ ; <nl> + <nl> + $ extra_header = ' ' unless defined $ extra_header ; <nl> + my $ keylen = length ( $ key ) ; <nl> + my $ vallen = length ( $ val ) ; <nl> + my $ extralen = length ( $ extra_header ) ; <nl> + my $ datatype = 0 ; # field for future use <nl> + my $ reserved = 0 ; # field for future use <nl> + my $ totallen = $ keylen + $ vallen + $ extralen ; <nl> + my $ ident_hi = 0 ; <nl> + my $ ident_lo = 0 ; <nl> + <nl> + if ( $ cas ) { <nl> + $ ident_hi = int ( $ cas / 2 * * 32 ) ; <nl> + $ ident_lo = int ( $ cas % 2 * * 32 ) ; <nl> + } <nl> + <nl> + my $ msg = pack ( : : REQ_PKT_FMT , : : REQ_MAGIC , $ cmd , $ keylen , $ extralen , <nl> + $ datatype , $ reserved , $ totallen , $ opaque , $ ident_hi , <nl> + $ ident_lo ) ; <nl> + my $ 
full_msg = $ msg . $ extra_header . $ key . $ val ; <nl> + return $ full_msg ; <nl> + } <nl> + <nl> + sub send_command { <nl> + my $ self = shift ; <nl> + die " Not enough args to send_command " unless @ _ > = 4 ; <nl> + my ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) = @ _ ; <nl> + <nl> + my $ full_msg = $ self - > build_command ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) ; <nl> + <nl> + my $ sent = $ self - > { socket } - > send ( $ full_msg ) ; <nl> + die ( " Send failed : $ ! " ) unless $ sent ; <nl> + if ( $ sent ! = length ( $ full_msg ) ) { <nl> + die ( " only sent $ sent of " . length ( $ full_msg ) . " bytes " ) ; <nl> + } <nl> + } <nl> + <nl> + sub flush_socket { <nl> + my $ self = shift ; <nl> + $ self - > { socket } - > flush ; <nl> + } <nl> + <nl> + # Send a silent command and ensure it doesn ' t respond . <nl> + sub send_silent { <nl> + my $ self = shift ; <nl> + die " Not enough args to send_silent " unless @ _ > = 4 ; <nl> + my ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) = @ _ ; <nl> + <nl> + $ self - > send_command ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) ; <nl> + $ self - > send_command ( : : CMD_NOOP , ' ' , ' ' , $ opaque + 1 ) ; <nl> + <nl> + my ( $ ropaque , $ status , $ data ) = $ self - > _handle_single_response ; <nl> + Test : : More : : is ( $ ropaque , $ opaque + 1 ) ; <nl> + } <nl> + <nl> + sub silent_mutation { <nl> + my $ self = shift ; <nl> + my ( $ cmd , $ key , $ value ) = @ _ ; <nl> + <nl> + $ empty - > ( $ key ) ; <nl> + my $ extra = pack " NN " , 82 , 0 ; <nl> + $ mc - > send_silent ( $ cmd , $ key , $ value , 7278552 , $ extra , 0 ) ; <nl> + $ check - > ( $ key , $ value ) ; <nl> + } <nl> + <nl> + sub _handle_single_response { <nl> + my $ self = shift ; <nl> + my $ myopaque = shift ; <nl> + <nl> + $ self - > { socket } - > recv ( my $ response , : : MIN_RECV_BYTES ) ; <nl> + <nl> + my ( $ magic , $ cmd , $ keylen , $ extralen , $ datatype , $ status , $ remaining , <nl> + $ opaque , $ ident_hi , $ ident_lo ) = unpack ( : : RES_PKT_FMT , $ response ) ; <nl> + <nl> + return ( $ opaque , ' ' , ' ' , ' ' , 0 ) if not defined $ remaining ; <nl> + return ( $ opaque , ' ' , ' ' , ' ' , 0 ) if ( $ remaining = = 0 ) ; <nl> + <nl> + # fetch the value <nl> + my $ rv = " " ; <nl> + while ( $ remaining - length ( $ rv ) > 0 ) { <nl> + $ self - > { socket } - > recv ( my $ buf , $ remaining - length ( $ rv ) ) ; <nl> + $ rv . = $ buf ; <nl> + } <nl> + if ( length ( $ rv ) ! 
= $ remaining ) { <nl> + my $ found = length ( $ rv ) ; <nl> + die ( " Expected $ remaining bytes , got $ found " ) ; <nl> + } <nl> + <nl> + my $ cas = ( $ ident_hi * 2 * * 32 ) + $ ident_lo ; <nl> + <nl> + # if ( $ status ) { <nl> + # die MC : : Error - > new ( $ status , $ rv ) ; <nl> + # } <nl> + <nl> + return ( $ opaque , $ status , $ rv , $ cas , $ keylen ) ; <nl> + } <nl> + <nl> + sub _do_command { <nl> + my $ self = shift ; <nl> + die unless @ _ > = 3 ; <nl> + my ( $ cmd , $ key , $ val , $ extra_header , $ cas ) = @ _ ; <nl> + <nl> + $ extra_header = ' ' unless defined $ extra_header ; <nl> + my $ opaque = int ( rand ( 2 * * 32 ) ) ; <nl> + $ self - > send_command ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) ; <nl> + my ( undef , $ status , $ rv , $ rcas ) = $ self - > _handle_single_response ( $ opaque ) ; <nl> + return ( $ status , $ rv , $ rcas ) ; <nl> + } <nl> + <nl> + sub _incrdecr_header { <nl> + my $ self = shift ; <nl> + my ( $ amt , $ init , $ exp ) = @ _ ; <nl> + <nl> + my $ amt_hi = int ( $ amt / 2 * * 32 ) ; <nl> + my $ amt_lo = int ( $ amt % 2 * * 32 ) ; <nl> + <nl> + my $ init_hi = int ( $ init / 2 * * 32 ) ; <nl> + my $ init_lo = int ( $ init % 2 * * 32 ) ; <nl> + <nl> + my $ extra_header = pack ( : : INCRDECR_PKT_FMT , $ amt_hi , $ amt_lo , $ init_hi , <nl> + $ init_lo , $ exp ) ; <nl> + <nl> + return $ extra_header ; <nl> + } <nl> + <nl> + sub _incrdecr { <nl> + my $ self = shift ; <nl> + my ( $ cmd , $ key , $ amt , $ init , $ exp ) = @ _ ; <nl> + <nl> + my ( $ status , $ data , undef ) = $ self - > _do_command ( $ cmd , $ key , ' ' , <nl> + $ self - > _incrdecr_header ( $ amt , $ init , $ exp ) ) ; <nl> + <nl> + my $ header = substr $ data , 0 , 8 , ' ' ; <nl> + my ( $ resp_hi , $ resp_lo ) = unpack " NN " , $ header ; <nl> + my $ resp = ( $ resp_hi * 2 * * 32 ) + $ resp_lo ; <nl> + <nl> + return $ resp ; <nl> + } <nl> + <nl> + sub silent_incrdecr { <nl> + my $ self = shift ; <nl> + my ( $ cmd , $ key , $ amt , $ init , $ exp ) = @ _ ; <nl> + my $ opaque = 8275753 ; <nl> + <nl> + $ mc - > send_silent ( $ cmd , $ key , ' ' , $ opaque , <nl> + $ mc - > _incrdecr_header ( $ amt , $ init , $ exp ) ) ; <nl> + } <nl> + <nl> + sub stats { <nl> + my $ self = shift ; <nl> + my $ key = shift ; <nl> + my $ cas = 0 ; <nl> + my $ opaque = int ( rand ( 2 * * 32 ) ) ; <nl> + $ self - > send_command ( : : CMD_STAT , $ key , ' ' , $ opaque , ' ' , $ cas ) ; <nl> + <nl> + my % rv = ( ) ; <nl> + my $ found_key = ' ' ; <nl> + my $ found_val = ' ' ; <nl> + my $ status = 0 ; <nl> + do { <nl> + my ( $ op , $ status , $ data , $ cas , $ keylen ) = $ self - > _handle_single_response ( $ opaque ) ; <nl> + if ( $ keylen > 0 ) { <nl> + $ found_key = substr ( $ data , 0 , $ keylen ) ; <nl> + $ found_val = substr ( $ data , $ keylen ) ; <nl> + $ rv { $ found_key } = $ found_val ; <nl> + } else { <nl> + $ found_key = ' ' ; <nl> + } <nl> + } while ( $ found_key ne ' ' ) ; <nl> + return % rv ; <nl> + } <nl> + <nl> + sub get { <nl> + my $ self = shift ; <nl> + my $ key = shift ; <nl> + my ( $ status , $ rv , $ cas ) = $ self - > _do_command ( : : CMD_GET , $ key , ' ' , ' ' ) ; <nl> + <nl> + my $ header = substr $ rv , 0 , 4 , ' ' ; <nl> + my $ flags = unpack ( " N " , $ header ) ; <nl> + <nl> + return ( $ status , $ rv ) ; <nl> + } <nl> + <nl> + sub get_multi { <nl> + my $ self = shift ; <nl> + my @ keys = @ _ ; <nl> + <nl> + for ( my $ i = 0 ; $ i < @ keys ; $ i + + ) { <nl> + $ self - > send_command ( : : CMD_GETQ , $ keys [ $ i ] , ' ' , $ i , ' ' , 0 ) ; <nl> + } <nl> + 
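# Each GETQ is a " quiet " get : the server answers only hits , tagging each <nl> + # response with the opaque from the request ( here , the key ' s index in <nl> + # @ keys ) . The NOOP sent below carries a sentinel opaque past any valid <nl> + # index , so its response marks the end of the batch . <nl> + 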
<nl> + my $ terminal = @ keys + 10 ; <nl> + $ self - > send_command ( : : CMD_NOOP , ' ' , ' ' , $ terminal ) ; <nl> + <nl> + my % return ; <nl> + my $ status = 0 ; <nl> + while ( 1 ) { <nl> + my ( $ opaque , $ status , $ data ) = $ self - > _handle_single_response ; <nl> + last if $ opaque = = $ terminal ; <nl> + <nl> + my $ header = substr $ data , 0 , 4 , ' ' ; <nl> + my $ flags = unpack ( " N " , $ header ) ; <nl> + <nl> + $ return { $ keys [ $ opaque ] } = [ $ flags , $ data ] ; <nl> + } <nl> + <nl> + return % return if wantarray ; <nl> + return \ % return ; <nl> + } <nl> + <nl> + sub version { <nl> + my $ self = shift ; <nl> + return $ self - > _do_command ( : : CMD_VERSION , ' ' , ' ' ) ; <nl> + } <nl> + <nl> + sub flush { <nl> + my $ self = shift ; <nl> + return $ self - > _do_command ( : : CMD_FLUSH , ' ' , ' ' ) ; <nl> + } <nl> + <nl> + sub add { <nl> + my $ self = shift ; <nl> + my ( $ key , $ val , $ flags , $ expire ) = @ _ ; <nl> + my $ extra_header = pack " NN " , $ flags , $ expire ; <nl> + my $ cas = 0 ; <nl> + return $ self - > _do_command ( : : CMD_ADD , $ key , $ val , $ extra_header , $ cas ) ; <nl> + } <nl> + <nl> + sub set { <nl> + my $ self = shift ; <nl> + my $ flags = 0 ; <nl> + my $ cas = 0 ; <nl> + my ( $ key , $ val , $ expire ) = @ _ ; <nl> + $ expire = defined $ expire ? $ expire : 0 ; <nl> + my $ extra_header = pack " NN " , $ flags , $ expire ; <nl> + return $ self - > _do_command ( : : CMD_SET , $ key , $ val , $ extra_header , $ cas ) ; <nl> + } <nl> + <nl> + sub _append_prepend { <nl> + my $ self = shift ; <nl> + my ( $ cmd , $ key , $ val , $ cas ) = @ _ ; <nl> + return $ self - > _do_command ( $ cmd , $ key , $ val , ' ' , $ cas ) ; <nl> + } <nl> + <nl> + sub replace { <nl> + my $ self = shift ; <nl> + my ( $ key , $ val , $ flags , $ expire ) = @ _ ; <nl> + my $ extra_header = pack " NN " , $ flags , $ expire ; <nl> + my $ cas = 0 ; <nl> + return $ self - > _do_command ( : : CMD_REPLACE , $ key , $ val , $ extra_header , $ cas ) ; <nl> + } <nl> + <nl> + sub delete { <nl> + my $ self = shift ; <nl> + my ( $ key ) = @ _ ; <nl> + return $ self - > _do_command ( : : CMD_DELETE , $ key , ' ' ) ; <nl> + } <nl> + <nl> + sub incr { <nl> + my $ self = shift ; <nl> + my ( $ key , $ amt , $ init , $ exp ) = @ _ ; <nl> + $ amt = 1 unless defined $ amt ; <nl> + $ init = 0 unless defined $ init ; <nl> + $ exp = 0 unless defined $ exp ; <nl> + <nl> + return $ self - > _incrdecr ( : : CMD_INCR , $ key , $ amt , $ init , $ exp ) ; <nl> + } <nl> + <nl> + sub decr { <nl> + my $ self = shift ; <nl> + my ( $ key , $ amt , $ init , $ exp ) = @ _ ; <nl> + $ amt = 1 unless defined $ amt ; <nl> + $ init = 0 unless defined $ init ; <nl> + $ exp = 0 unless defined $ exp ; <nl> + <nl> + return $ self - > _incrdecr ( : : CMD_DECR , $ key , $ amt , $ init , $ exp ) ; <nl> + } <nl> + <nl> + sub noop { <nl> + my $ self = shift ; <nl> + return $ self - > _do_command ( : : CMD_NOOP , ' ' , ' ' ) ; <nl> + } <nl> + <nl> + package MC : : Error ; <nl> + <nl> + use strict ; <nl> + use warnings ; <nl> + <nl> + use constant ERR_UNKNOWN_CMD = > 0x81 ; <nl> + use constant ERR_NOT_FOUND = > 0x1 ; <nl> + use constant ERR_EXISTS = > 0x2 ; <nl> + use constant ERR_TOO_BIG = > 0x3 ; <nl> + use constant ERR_EINVAL = > 0x4 ; <nl> + use constant ERR_NOT_STORED = > 0x5 ; <nl> + use constant ERR_DELTA_BADVAL = > 0x6 ; <nl> + use constant ERR_AUTH_ERROR = > 0x20 ; <nl> + <nl> + use overload ' " " ' = > sub { <nl> + my $ self = shift ; <nl> + return " Memcache Error ( $ self - > [ 0 ] ) : $ self - > [ 1 ] 
" ; <nl> + } ; <nl> + <nl> + sub new { <nl> + my $ class = shift ; <nl> + my $ error = [ @ _ ] ; <nl> + my $ self = bless $ error , ( ref $ class | | $ class ) ; <nl> + <nl> + return $ self ; <nl> + } <nl> + <nl> + sub not_found { <nl> + my $ self = shift ; <nl> + return $ self - > [ 0 ] = = ERR_NOT_FOUND ; <nl> + } <nl> + <nl> + sub exists { <nl> + my $ self = shift ; <nl> + return $ self - > [ 0 ] = = ERR_EXISTS ; <nl> + } <nl> + <nl> + sub too_big { <nl> + my $ self = shift ; <nl> + return $ self - > [ 0 ] = = ERR_TOO_BIG ; <nl> + } <nl> + <nl> + sub delta_badval { <nl> + my $ self = shift ; <nl> + return $ self - > [ 0 ] = = ERR_DELTA_BADVAL ; <nl> + } <nl> + <nl> + sub auth_error { <nl> + my $ self = shift ; <nl> + return $ self - > [ 0 ] = = ERR_AUTH_ERROR ; <nl> + } <nl> + <nl> + unlink $ sasldb ; <nl> + <nl> + # vim : filetype = perl <nl> + <nl> new file mode 100755 <nl> index 00000000000 . . 1108b8ee653 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / binary . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use warnings ; <nl> + use Test : : More tests = > 3361 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + ok ( $ server , " started the server " ) ; <nl> + <nl> + # Based almost 100 % off testClient . py which is : <nl> + # Copyright ( c ) 2007 Dustin Sallings < dustin @ spy . net > <nl> + <nl> + # Command constants <nl> + use constant CMD_GET = > 0x00 ; <nl> + use constant CMD_SET = > 0x01 ; <nl> + use constant CMD_ADD = > 0x02 ; <nl> + use constant CMD_REPLACE = > 0x03 ; <nl> + use constant CMD_DELETE = > 0x04 ; <nl> + use constant CMD_INCR = > 0x05 ; <nl> + use constant CMD_DECR = > 0x06 ; <nl> + use constant CMD_QUIT = > 0x07 ; <nl> + use constant CMD_FLUSH = > 0x08 ; <nl> + use constant CMD_GETQ = > 0x09 ; <nl> + use constant CMD_NOOP = > 0x0A ; <nl> + use constant CMD_VERSION = > 0x0B ; <nl> + use constant CMD_GETK = > 0x0C ; <nl> + use constant CMD_GETKQ = > 0x0D ; <nl> + use constant CMD_APPEND = > 0x0E ; <nl> + use constant CMD_PREPEND = > 0x0F ; <nl> + use constant CMD_STAT = > 0x10 ; <nl> + use constant CMD_SETQ = > 0x11 ; <nl> + use constant CMD_ADDQ = > 0x12 ; <nl> + use constant CMD_REPLACEQ = > 0x13 ; <nl> + use constant CMD_DELETEQ = > 0x14 ; <nl> + use constant CMD_INCREMENTQ = > 0x15 ; <nl> + use constant CMD_DECREMENTQ = > 0x16 ; <nl> + use constant CMD_QUITQ = > 0x17 ; <nl> + use constant CMD_FLUSHQ = > 0x18 ; <nl> + use constant CMD_APPENDQ = > 0x19 ; <nl> + use constant CMD_PREPENDQ = > 0x1A ; <nl> + <nl> + # REQ and RES formats are divided even though they currently share <nl> + # the same format , since they _could_ differ in the future . <nl> + use constant REQ_PKT_FMT = > " CCnCCnNNNN " ; <nl> + use constant RES_PKT_FMT = > " CCnCCnNNNN " ; <nl> + use constant INCRDECR_PKT_FMT = > " NNNNN " ; <nl> + use constant MIN_RECV_BYTES = > length ( pack ( RES_PKT_FMT ) ) ; <nl> + use constant REQ_MAGIC = > 0x80 ; <nl> + use constant RES_MAGIC = > 0x81 ; <nl> + <nl> + my $ mc = MC : : Client - > new ; <nl> + <nl> + # Let ' s turn on detail stats for all this stuff <nl> + <nl> + $ mc - > stats ( ' detail on ' ) ; <nl> + <nl> + my $ check = sub { <nl> + my ( $ key , $ orig_flags , $ orig_val ) = @ _ ; <nl> + my ( $ flags , $ val , $ cas ) = $ mc - > get ( $ key ) ; <nl> + is ( $ flags , $ orig_flags , " Flags is set properly " ) ; <nl> + ok ( $ val eq $ orig_val | | $ val = = $ orig_val , $ val . 
" = " . $ orig_val ) ; <nl> + } ; <nl> + <nl> + my $ set = sub { <nl> + my ( $ key , $ exp , $ orig_flags , $ orig_value ) = @ _ ; <nl> + $ mc - > set ( $ key , $ orig_value , $ orig_flags , $ exp ) ; <nl> + $ check - > ( $ key , $ orig_flags , $ orig_value ) ; <nl> + } ; <nl> + <nl> + my $ empty = sub { <nl> + my $ key = shift ; <nl> + my $ rv = ( ) = eval { $ mc - > get ( $ key ) } ; <nl> + is ( $ rv , 0 , " Didn ' t get a result from get " ) ; <nl> + ok ( $ @ - > not_found , " We got a not found error when we expected one " ) ; <nl> + } ; <nl> + <nl> + my $ delete = sub { <nl> + my ( $ key , $ when ) = @ _ ; <nl> + $ mc - > delete ( $ key , $ when ) ; <nl> + $ empty - > ( $ key ) ; <nl> + } ; <nl> + <nl> + # diag " Test Version " ; <nl> + my $ v = $ mc - > version ; <nl> + ok ( defined $ v & & length ( $ v ) , " Proper version : $ v " ) ; <nl> + <nl> + # Bug 71 <nl> + { <nl> + my % stats1 = $ mc - > stats ( ' ' ) ; <nl> + $ mc - > flush ; <nl> + my % stats2 = $ mc - > stats ( ' ' ) ; <nl> + <nl> + is ( $ stats2 { ' cmd_flush ' } , $ stats1 { ' cmd_flush ' } + 1 , <nl> + " Stats not updated on a binary flush " ) ; <nl> + } <nl> + <nl> + # diag " Flushing . . . " ; <nl> + $ mc - > flush ; <nl> + <nl> + # diag " Noop " ; <nl> + $ mc - > noop ; <nl> + <nl> + # diag " Simple set / get " ; <nl> + $ set - > ( ' x ' , 5 , 19 , " somevalue " ) ; <nl> + <nl> + # diag " Delete " ; <nl> + $ delete - > ( ' x ' ) ; <nl> + <nl> + # diag " Flush " ; <nl> + $ set - > ( ' x ' , 5 , 19 , " somevaluex " ) ; <nl> + $ set - > ( ' y ' , 5 , 17 , " somevaluey " ) ; <nl> + $ mc - > flush ; <nl> + $ empty - > ( ' x ' ) ; <nl> + $ empty - > ( ' y ' ) ; <nl> + <nl> + { <nl> + # diag " Add " ; <nl> + $ empty - > ( ' i ' ) ; <nl> + $ mc - > add ( ' i ' , ' ex ' , 5 , 10 ) ; <nl> + $ check - > ( ' i ' , 5 , " ex " ) ; <nl> + <nl> + my $ rv = ( ) = eval { $ mc - > add ( ' i ' , " ex2 " , 10 , 5 ) } ; <nl> + is ( $ rv , 0 , " Add didn ' t return anything " ) ; <nl> + ok ( $ @ - > exists , " Expected exists error received " ) ; <nl> + $ check - > ( ' i ' , 5 , " ex " ) ; <nl> + } <nl> + <nl> + { <nl> + # diag " Too big . " ; <nl> + $ empty - > ( ' toobig ' ) ; <nl> + $ mc - > set ( ' toobig ' , ' not too big ' , 10 , 10 ) ; <nl> + eval { <nl> + my $ bigval = ( " x " x ( 1024 * 1024 ) ) . " x " ; <nl> + $ mc - > set ( ' toobig ' , $ bigval , 10 , 10 ) ; <nl> + } ; <nl> + ok ( $ @ - > too_big , " Was too big " ) ; <nl> + $ empty - > ( ' toobig ' ) ; <nl> + } <nl> + <nl> + { <nl> + # diag " Replace " ; <nl> + $ empty - > ( ' j ' ) ; <nl> + <nl> + my $ rv = ( ) = eval { $ mc - > replace ( ' j ' , " ex " , 19 , 5 ) } ; <nl> + is ( $ rv , 0 , " Replace didn ' t return anything " ) ; <nl> + ok ( $ @ - > not_found , " Expected not_found error received " ) ; <nl> + $ empty - > ( ' j ' ) ; <nl> + $ mc - > add ( ' j ' , " ex2 " , 14 , 5 ) ; <nl> + $ check - > ( ' j ' , 14 , " ex2 " ) ; <nl> + $ mc - > replace ( ' j ' , " ex3 " , 24 , 5 ) ; <nl> + $ check - > ( ' j ' , 24 , " ex3 " ) ; <nl> + } <nl> + <nl> + { <nl> + # diag " MultiGet " ; <nl> + $ mc - > add ( ' xx ' , " ex " , 1 , 5 ) ; <nl> + $ mc - > add ( ' wye ' , " why " , 2 , 5 ) ; <nl> + my $ rv = $ mc - > get_multi ( qw ( xx wye zed ) ) ; <nl> + <nl> + # CAS is returned with all gets . 
<nl> + $ rv - > { xx } - > [ 2 ] = 0 ; <nl> + $ rv - > { wye } - > [ 2 ] = 0 ; <nl> + is_deeply ( $ rv - > { xx } , [ 1 , ' ex ' , 0 ] , " X is correct " ) ; <nl> + is_deeply ( $ rv - > { wye } , [ 2 , ' why ' , 0 ] , " Y is correct " ) ; <nl> + is ( keys ( % $ rv ) , 2 , " Got only two answers like we expect " ) ; <nl> + } <nl> + <nl> + # diag " Test increment " ; <nl> + $ mc - > flush ; <nl> + is ( $ mc - > incr ( " x " ) , 0 , " First incr call is zero " ) ; <nl> + is ( $ mc - > incr ( " x " ) , 1 , " Second incr call is one " ) ; <nl> + is ( $ mc - > incr ( " x " , 211 ) , 212 , " Adding 211 gives you 212 " ) ; <nl> + is ( $ mc - > incr ( " x " , 2 * * 33 ) , 8589934804 , " Blast the 32bit border " ) ; <nl> + <nl> + # diag " Issue 48 - incrementing plain text . " ; <nl> + { <nl> + $ mc - > set ( " issue48 " , " text " , 0 , 0 ) ; <nl> + my $ rv = ( ) = eval { $ mc - > incr ( ' issue48 ' ) ; } ; <nl> + ok ( $ @ & & $ @ - > delta_badval , " Expected invalid value when incrementing text . " ) ; <nl> + $ check - > ( ' issue48 ' , 0 , " text " ) ; <nl> + <nl> + $ rv = ( ) = eval { $ mc - > decr ( ' issue48 ' ) ; } ; <nl> + ok ( $ @ & & $ @ - > delta_badval , " Expected invalid value when decrementing text . " ) ; <nl> + $ check - > ( ' issue48 ' , 0 , " text " ) ; <nl> + } <nl> + <nl> + <nl> + # diag " Test decrement " ; <nl> + $ mc - > flush ; <nl> + is ( $ mc - > incr ( " x " , undef , 5 ) , 5 , " Initial value " ) ; <nl> + is ( $ mc - > decr ( " x " ) , 4 , " Decrease by one " ) ; <nl> + is ( $ mc - > decr ( " x " , 211 ) , 0 , " Floor is zero " ) ; <nl> + <nl> + { <nl> + # diag " bug21 " ; <nl> + $ mc - > add ( " bug21 " , " 9223372036854775807 " , 0 , 0 ) ; <nl> + is ( $ mc - > incr ( " bug21 " ) , 9223372036854775808 , " First incr for bug21 . " ) ; <nl> + is ( $ mc - > incr ( " bug21 " ) , 9223372036854775809 , " Second incr for bug21 . " ) ; <nl> + is ( $ mc - > decr ( " bug21 " ) , 9223372036854775808 , " Decr for bug21 . " ) ; <nl> + } <nl> + <nl> + { <nl> + # diag " CAS " ; <nl> + $ mc - > flush ; <nl> + <nl> + { <nl> + my $ rv = ( ) = eval { $ mc - > set ( " x " , " bad value " , 19 , 5 , 0x7FFFFFF ) } ; <nl> + is ( $ rv , 0 , " Empty return on expected failure " ) ; <nl> + ok ( $ @ - > not_found , " Error was ' not found ' as expected " ) ; <nl> + } <nl> + <nl> + my ( $ r , $ rcas ) = $ mc - > add ( " x " , " original value " , 5 , 19 ) ; <nl> + <nl> + my ( $ flags , $ val , $ i ) = $ mc - > get ( " x " ) ; <nl> + is ( $ val , " original value " , " - > gets returned proper value " ) ; <nl> + is ( $ rcas , $ i , " Add CAS matched . " ) ; <nl> + <nl> + { <nl> + my $ rv = ( ) = eval { $ mc - > set ( " x " , " broken value " , 19 , 5 , $ i + 1 ) } ; <nl> + is ( $ rv , 0 , " Empty return on expected failure ( 1 ) " ) ; <nl> + ok ( $ @ - > exists , " Expected error state of ' exists ' ( 1 ) " ) ; <nl> + } <nl> + <nl> + ( $ r , $ rcas ) = $ mc - > set ( " x " , " new value " , 19 , 5 , $ i ) ; <nl> + <nl> + my ( $ newflags , $ newval , $ newi ) = $ mc - > get ( " x " ) ; <nl> + is ( $ newval , " new value " , " CAS properly overwrote value " ) ; <nl> + is ( $ rcas , $ newi , " Get CAS matched . " ) ; <nl> + <nl> + { <nl> + my $ rv = ( ) = eval { $ mc - > set ( " x " , " replay value " , 19 , 5 , $ i ) } ; <nl> + is ( $ rv , 0 , " Empty return on expected failure ( 2 ) " ) ; <nl> + ok ( $ @ - > exists , " Expected error state of ' exists ' ( 2 ) " ) ; <nl> + } <nl> + } <nl> + <nl> + # diag " Silent set . 
" ; <nl> + $ mc - > silent_mutation ( : : CMD_SETQ , ' silentset ' , ' silentsetval ' ) ; <nl> + <nl> + # diag " Silent add . " ; <nl> + $ mc - > silent_mutation ( : : CMD_ADDQ , ' silentadd ' , ' silentaddval ' ) ; <nl> + <nl> + # diag " Silent replace . " ; <nl> + { <nl> + my $ key = " silentreplace " ; <nl> + my $ extra = pack " NN " , 829 , 0 ; <nl> + $ empty - > ( $ key ) ; <nl> + # $ mc - > send_silent ( : : CMD_REPLACEQ , $ key , ' somevalue ' , 7278552 , $ extra , 0 ) ; <nl> + # $ empty - > ( $ key ) ; <nl> + <nl> + $ mc - > add ( $ key , " xval " , 831 , 0 ) ; <nl> + $ check - > ( $ key , 831 , ' xval ' ) ; <nl> + <nl> + $ mc - > send_silent ( : : CMD_REPLACEQ , $ key , ' somevalue ' , 7278552 , $ extra , 0 ) ; <nl> + $ check - > ( $ key , 829 , ' somevalue ' ) ; <nl> + } <nl> + <nl> + # diag " Silent delete " ; <nl> + { <nl> + my $ key = " silentdelete " ; <nl> + $ empty - > ( $ key ) ; <nl> + $ mc - > set ( $ key , " some val " , 19 , 0 ) ; <nl> + $ mc - > send_silent ( : : CMD_DELETEQ , $ key , ' ' , 772 ) ; <nl> + $ empty - > ( $ key ) ; <nl> + } <nl> + <nl> + # diag " Silent increment " ; <nl> + { <nl> + my $ key = " silentincr " ; <nl> + my $ opaque = 98428747 ; <nl> + $ empty - > ( $ key ) ; <nl> + $ mc - > silent_incrdecr ( : : CMD_INCREMENTQ , $ key , 0 , 0 , 0 ) ; <nl> + is ( $ mc - > incr ( $ key , 0 ) , 0 , " First call is 0 " ) ; <nl> + <nl> + $ mc - > silent_incrdecr ( : : CMD_INCREMENTQ , $ key , 8 , 0 , 0 ) ; <nl> + is ( $ mc - > incr ( $ key , 0 ) , 8 ) ; <nl> + } <nl> + <nl> + # diag " Silent decrement " ; <nl> + { <nl> + my $ key = " silentdecr " ; <nl> + my $ opaque = 98428147 ; <nl> + $ empty - > ( $ key ) ; <nl> + $ mc - > silent_incrdecr ( : : CMD_DECREMENTQ , $ key , 0 , 185 , 0 ) ; <nl> + is ( $ mc - > incr ( $ key , 0 ) , 185 ) ; <nl> + <nl> + $ mc - > silent_incrdecr ( : : CMD_DECREMENTQ , $ key , 8 , 0 , 0 ) ; <nl> + is ( $ mc - > incr ( $ key , 0 ) , 177 ) ; <nl> + } <nl> + <nl> + # diag " Silent flush " ; <nl> + { <nl> + my % stats1 = $ mc - > stats ( ' ' ) ; <nl> + <nl> + $ set - > ( ' x ' , 5 , 19 , " somevaluex " ) ; <nl> + $ set - > ( ' y ' , 5 , 17 , " somevaluey " ) ; <nl> + $ mc - > send_silent ( : : CMD_FLUSHQ , ' ' , ' ' , 2775256 ) ; <nl> + $ empty - > ( ' x ' ) ; <nl> + $ empty - > ( ' y ' ) ; <nl> + <nl> + my % stats2 = $ mc - > stats ( ' ' ) ; <nl> + is ( $ stats2 { ' cmd_flush ' } , $ stats1 { ' cmd_flush ' } + 1 , <nl> + " Stats not updated on a binary quiet flush " ) ; <nl> + } <nl> + <nl> + # diag " Append " ; <nl> + { <nl> + my $ key = " appendkey " ; <nl> + my $ value = " some value " ; <nl> + $ set - > ( $ key , 8 , 19 , $ value ) ; <nl> + $ mc - > _append_prepend ( : : CMD_APPEND , $ key , " more " ) ; <nl> + $ check - > ( $ key , 19 , $ value . " more " ) ; <nl> + } <nl> + <nl> + # diag " Prepend " ; <nl> + { <nl> + my $ key = " prependkey " ; <nl> + my $ value = " some value " ; <nl> + $ set - > ( $ key , 8 , 19 , $ value ) ; <nl> + $ mc - > _append_prepend ( : : CMD_PREPEND , $ key , " prefixed " ) ; <nl> + $ check - > ( $ key , 19 , " prefixed " . $ value ) ; <nl> + } <nl> + <nl> + # diag " Silent append " ; <nl> + { <nl> + my $ key = " appendqkey " ; <nl> + my $ value = " some value " ; <nl> + $ set - > ( $ key , 8 , 19 , $ value ) ; <nl> + $ mc - > send_silent ( : : CMD_APPENDQ , $ key , " more " , 7284492 ) ; <nl> + $ check - > ( $ key , 19 , $ value . 
" more " ) ; <nl> + } <nl> + <nl> + # diag " Silent prepend " ; <nl> + { <nl> + my $ key = " prependqkey " ; <nl> + my $ value = " some value " ; <nl> + $ set - > ( $ key , 8 , 19 , $ value ) ; <nl> + $ mc - > send_silent ( : : CMD_PREPENDQ , $ key , " prefixed " , 7284492 ) ; <nl> + $ check - > ( $ key , 19 , " prefixed " . $ value ) ; <nl> + } <nl> + <nl> + # diag " Leaky binary get test . " ; <nl> + # # http : / / code . google . com / p / memcached / issues / detail ? id = 16 <nl> + { <nl> + # Get a new socket so we can speak text to it . <nl> + my $ sock = $ server - > new_sock ; <nl> + my $ max = 1024 * 1024 ; <nl> + my $ big = " a big value that ' s > . 5M and < 1M . " ; <nl> + while ( length ( $ big ) * 2 < $ max ) { <nl> + $ big = $ big . $ big ; <nl> + } <nl> + my $ biglen = length ( $ big ) ; <nl> + <nl> + for ( 1 . . 100 ) { <nl> + my $ key = " some_key_ $ _ " ; <nl> + # print STDERR " Key is $ key \ n " ; <nl> + # print $ sock " set $ key 0 0 $ vallen \ r \ n $ value \ r \ n " ; <nl> + print $ sock " set $ key 0 0 $ biglen \ r \ n $ big \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored big " ) ; <nl> + my ( $ f , $ v , $ c ) = $ mc - > get ( $ key ) ; <nl> + } <nl> + } <nl> + <nl> + # diag " Test stats settings . " <nl> + { <nl> + my % stats = $ mc - > stats ( ' settings ' ) ; <nl> + <nl> + is ( 1024 , $ stats { ' maxconns ' } ) ; <nl> + is ( ' NULL ' , $ stats { ' domain_socket ' } ) ; <nl> + is ( ' on ' , $ stats { ' evictions ' } ) ; <nl> + is ( ' yes ' , $ stats { ' cas_enabled ' } ) ; <nl> + } <nl> + <nl> + # diag " Test quit commands . " ; <nl> + { <nl> + my $ s2 = new_memcached ( ) ; <nl> + my $ mc2 = MC : : Client - > new ( $ s2 ) ; <nl> + $ mc2 - > send_command ( CMD_QUITQ , ' ' , ' ' , 0 , ' ' , 0 ) ; <nl> + <nl> + # Five seconds ought to be enough to get hung up on . <nl> + my $ oldalarmt = alarm ( 5 ) ; <nl> + <nl> + # Verify we can ' t read anything . <nl> + my $ bytesread = - 1 ; <nl> + eval { <nl> + local $ SIG { ' ALRM ' } = sub { die " timeout " } ; <nl> + my $ data = " " ; <nl> + $ bytesread = sysread ( $ mc2 - > { socket } , $ data , 24 ) , <nl> + } ; <nl> + is ( $ bytesread , 0 , " Read after quit . " ) ; <nl> + <nl> + # Restore signal stuff . <nl> + alarm ( $ oldalarmt ) ; <nl> + } <nl> + <nl> + # diag " Test protocol boundary overruns " ; <nl> + { <nl> + use List : : Util qw [ min ] ; <nl> + # Attempting some protocol overruns by toying around with the edge <nl> + # of the data buffer at a few different sizes . This assumes the <nl> + # boundary is at or around 2048 bytes . <nl> + for ( my $ i = 1900 ; $ i < 2100 ; $ i + + ) { <nl> + my $ k = " test_key_ $ i " ; <nl> + my $ v = ' x ' x $ i ; <nl> + # diag " Trying $ i $ k " ; <nl> + my $ extra = pack " NN " , 82 , 0 ; <nl> + my $ data = $ mc - > build_command ( : : CMD_SETQ , $ k , $ v , 0 , $ extra , 0 ) ; <nl> + $ data . = $ mc - > build_command ( : : CMD_SETQ , " alt_ $ k " , " blah " , 0 , $ extra , 0 ) ; <nl> + if ( length ( $ data ) > 2024 ) { <nl> + for ( my $ j = 2024 ; $ j < min ( 2096 , length ( $ data ) ) ; $ j + + ) { <nl> + $ mc - > { socket } - > send ( substr ( $ data , 0 , $ j ) ) ; <nl> + $ mc - > flush_socket ; <nl> + sleep ( 0 . 
001 ) ; <nl> + $ mc - > { socket } - > send ( substr ( $ data , $ j ) ) ; <nl> + $ mc - > flush_socket ; <nl> + } <nl> + } else { <nl> + $ mc - > { socket } - > send ( $ data ) ; <nl> + } <nl> + $ mc - > flush_socket ; <nl> + $ check - > ( $ k , 82 , $ v ) ; <nl> + $ check - > ( " alt_ $ k " , 82 , " blah " ) ; <nl> + } <nl> + } <nl> + <nl> + # Along with the assertion added to the code to verify we ' re staying <nl> + # within bounds when we do a stats detail dump ( detail turned on at <nl> + # the top ) . <nl> + my % stats = $ mc - > stats ( ' detail dump ' ) ; <nl> + <nl> + # This test causes a disconnection . <nl> + { <nl> + # diag " Key too large . " ; <nl> + my $ key = " x " x 365 ; <nl> + eval { <nl> + $ mc - > get ( $ key , ' should die ' , 10 , 10 ) ; <nl> + } ; <nl> + ok ( $ @ - > einval , " Invalid key length " ) ; <nl> + } <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # Test ends around here . <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + package MC : : Client ; <nl> + <nl> + use strict ; <nl> + use warnings ; <nl> + use fields qw ( socket ) ; <nl> + use IO : : Socket : : INET ; <nl> + <nl> + sub new { <nl> + my $ self = shift ; <nl> + my ( $ s ) = @ _ ; <nl> + $ s = $ server unless defined $ s ; <nl> + my $ sock = $ s - > sock ; <nl> + $ self = fields : : new ( $ self ) ; <nl> + $ self - > { socket } = $ sock ; <nl> + return $ self ; <nl> + } <nl> + <nl> + sub build_command { <nl> + my $ self = shift ; <nl> + die " Not enough args to send_command " unless @ _ > = 4 ; <nl> + my ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) = @ _ ; <nl> + <nl> + $ extra_header = ' ' unless defined $ extra_header ; <nl> + my $ keylen = length ( $ key ) ; <nl> + my $ vallen = length ( $ val ) ; <nl> + my $ extralen = length ( $ extra_header ) ; <nl> + my $ datatype = 0 ; # field for future use <nl> + my $ reserved = 0 ; # field for future use <nl> + my $ totallen = $ keylen + $ vallen + $ extralen ; <nl> + my $ ident_hi = 0 ; <nl> + my $ ident_lo = 0 ; <nl> + <nl> + if ( $ cas ) { <nl> + $ ident_hi = int ( $ cas / 2 * * 32 ) ; <nl> + $ ident_lo = int ( $ cas % 2 * * 32 ) ; <nl> + } <nl> + <nl> + my $ msg = pack ( : : REQ_PKT_FMT , : : REQ_MAGIC , $ cmd , $ keylen , $ extralen , <nl> + $ datatype , $ reserved , $ totallen , $ opaque , $ ident_hi , <nl> + $ ident_lo ) ; <nl> + my $ full_msg = $ msg . $ extra_header . $ key . $ val ; <nl> + return $ full_msg ; <nl> + } <nl> + <nl> + sub send_command { <nl> + my $ self = shift ; <nl> + die " Not enough args to send_command " unless @ _ > = 4 ; <nl> + my ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) = @ _ ; <nl> + <nl> + my $ full_msg = $ self - > build_command ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) ; <nl> + <nl> + my $ sent = $ self - > { socket } - > send ( $ full_msg ) ; <nl> + die ( " Send failed : $ ! " ) unless $ sent ; <nl> + if ( $ sent ! = length ( $ full_msg ) ) { <nl> + die ( " only sent $ sent of " . length ( $ full_msg ) . " bytes " ) ; <nl> + } <nl> + } <nl> + <nl> + sub flush_socket { <nl> + my $ self = shift ; <nl> + $ self - > { socket } - > flush ; <nl> + } <nl> + <nl> + # Send a silent command and ensure it doesn ' t respond . 
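<nl> + # Quiet ( * Q ) commands reply only on error , so success cannot be read <nl> + # directly off the wire . The trick below : pipeline a NOOP right behind <nl> + # the quiet command using opaque + 1 ; if the first response carries the <nl> + # NOOP ' s opaque , the quiet command succeeded silently ( an error would <nl> + # have answered first with the original opaque ) . Sketch of the exchange , <nl> + # opaque values assumed : <nl> + # > SETQ ( opaque = 7 ) > NOOP ( opaque = 8 ) <nl> + # < response ( opaque = 8 ) - - the SETQ never replied , so it worked .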
<nl> + sub send_silent { <nl> + my $ self = shift ; <nl> + die " Not enough args to send_silent " unless @ _ > = 4 ; <nl> + my ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) = @ _ ; <nl> + <nl> + $ self - > send_command ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) ; <nl> + $ self - > send_command ( : : CMD_NOOP , ' ' , ' ' , $ opaque + 1 ) ; <nl> + <nl> + my ( $ ropaque , $ data ) = $ self - > _handle_single_response ; <nl> + Test : : More : : is ( $ ropaque , $ opaque + 1 ) ; <nl> + } <nl> + <nl> + sub silent_mutation { <nl> + my $ self = shift ; <nl> + my ( $ cmd , $ key , $ value ) = @ _ ; <nl> + <nl> + $ empty - > ( $ key ) ; <nl> + my $ extra = pack " NN " , 82 , 0 ; <nl> + $ mc - > send_silent ( $ cmd , $ key , $ value , 7278552 , $ extra , 0 ) ; <nl> + $ check - > ( $ key , 82 , $ value ) ; <nl> + } <nl> + <nl> + sub _handle_single_response { <nl> + my $ self = shift ; <nl> + my $ myopaque = shift ; <nl> + <nl> + $ self - > { socket } - > recv ( my $ response , : : MIN_RECV_BYTES ) ; <nl> + Test : : More : : is ( length ( $ response ) , : : MIN_RECV_BYTES , " Expected read length " ) ; <nl> + <nl> + my ( $ magic , $ cmd , $ keylen , $ extralen , $ datatype , $ status , $ remaining , <nl> + $ opaque , $ ident_hi , $ ident_lo ) = unpack ( : : RES_PKT_FMT , $ response ) ; <nl> + Test : : More : : is ( $ magic , : : RES_MAGIC , " Got proper response magic " ) ; <nl> + <nl> + my $ cas = ( $ ident_hi * 2 * * 32 ) + $ ident_lo ; <nl> + <nl> + return ( $ opaque , ' ' , $ cas , 0 ) if ( $ remaining = = 0 ) ; <nl> + <nl> + # fetch the value <nl> + my $ rv = " " ; <nl> + while ( $ remaining - length ( $ rv ) > 0 ) { <nl> + $ self - > { socket } - > recv ( my $ buf , $ remaining - length ( $ rv ) ) ; <nl> + $ rv . = $ buf ; <nl> + } <nl> + if ( length ( $ rv ) ! 
= $ remaining ) { <nl> + my $ found = length ( $ rv ) ; <nl> + die ( " Expected $ remaining bytes , got $ found " ) ; <nl> + } <nl> + <nl> + if ( defined $ myopaque ) { <nl> + Test : : More : : is ( $ opaque , $ myopaque , " Expected opaque " ) ; <nl> + } else { <nl> + Test : : More : : pass ( " Implicit pass since myopaque is undefined " ) ; <nl> + } <nl> + <nl> + if ( $ status ) { <nl> + die MC : : Error - > new ( $ status , $ rv ) ; <nl> + } <nl> + <nl> + return ( $ opaque , $ rv , $ cas , $ keylen ) ; <nl> + } <nl> + <nl> + sub _do_command { <nl> + my $ self = shift ; <nl> + die unless @ _ > = 3 ; <nl> + my ( $ cmd , $ key , $ val , $ extra_header , $ cas ) = @ _ ; <nl> + <nl> + $ extra_header = ' ' unless defined $ extra_header ; <nl> + my $ opaque = int ( rand ( 2 * * 32 ) ) ; <nl> + $ self - > send_command ( $ cmd , $ key , $ val , $ opaque , $ extra_header , $ cas ) ; <nl> + my ( undef , $ rv , $ rcas ) = $ self - > _handle_single_response ( $ opaque ) ; <nl> + return ( $ rv , $ rcas ) ; <nl> + } <nl> + <nl> + sub _incrdecr_header { <nl> + my $ self = shift ; <nl> + my ( $ amt , $ init , $ exp ) = @ _ ; <nl> + <nl> + my $ amt_hi = int ( $ amt / 2 * * 32 ) ; <nl> + my $ amt_lo = int ( $ amt % 2 * * 32 ) ; <nl> + <nl> + my $ init_hi = int ( $ init / 2 * * 32 ) ; <nl> + my $ init_lo = int ( $ init % 2 * * 32 ) ; <nl> + <nl> + my $ extra_header = pack ( : : INCRDECR_PKT_FMT , $ amt_hi , $ amt_lo , $ init_hi , <nl> + $ init_lo , $ exp ) ; <nl> + <nl> + return $ extra_header ; <nl> + } <nl> + <nl> + sub _incrdecr { <nl> + my $ self = shift ; <nl> + my ( $ cmd , $ key , $ amt , $ init , $ exp ) = @ _ ; <nl> + <nl> + my ( $ data , undef ) = $ self - > _do_command ( $ cmd , $ key , ' ' , <nl> + $ self - > _incrdecr_header ( $ amt , $ init , $ exp ) ) ; <nl> + <nl> + my $ header = substr $ data , 0 , 8 , ' ' ; <nl> + my ( $ resp_hi , $ resp_lo ) = unpack " NN " , $ header ; <nl> + my $ resp = ( $ resp_hi * 2 * * 32 ) + $ resp_lo ; <nl> + <nl> + return $ resp ; <nl> + } <nl> + <nl> + sub silent_incrdecr { <nl> + my $ self = shift ; <nl> + my ( $ cmd , $ key , $ amt , $ init , $ exp ) = @ _ ; <nl> + my $ opaque = 8275753 ; <nl> + <nl> + $ mc - > send_silent ( $ cmd , $ key , ' ' , $ opaque , <nl> + $ mc - > _incrdecr_header ( $ amt , $ init , $ exp ) ) ; <nl> + } <nl> + <nl> + sub stats { <nl> + my $ self = shift ; <nl> + my $ key = shift ; <nl> + my $ cas = 0 ; <nl> + my $ opaque = int ( rand ( 2 * * 32 ) ) ; <nl> + $ self - > send_command ( : : CMD_STAT , $ key , ' ' , $ opaque , ' ' , $ cas ) ; <nl> + <nl> + my % rv = ( ) ; <nl> + my $ found_key = ' ' ; <nl> + my $ found_val = ' ' ; <nl> + do { <nl> + my ( $ op , $ data , $ cas , $ keylen ) = $ self - > _handle_single_response ( $ opaque ) ; <nl> + if ( $ keylen > 0 ) { <nl> + $ found_key = substr ( $ data , 0 , $ keylen ) ; <nl> + $ found_val = substr ( $ data , $ keylen ) ; <nl> + $ rv { $ found_key } = $ found_val ; <nl> + } else { <nl> + $ found_key = ' ' ; <nl> + } <nl> + } while ( $ found_key ne ' ' ) ; <nl> + return % rv ; <nl> + } <nl> + <nl> + sub get { <nl> + my $ self = shift ; <nl> + my $ key = shift ; <nl> + my ( $ rv , $ cas ) = $ self - > _do_command ( : : CMD_GET , $ key , ' ' , ' ' ) ; <nl> + <nl> + my $ header = substr $ rv , 0 , 4 , ' ' ; <nl> + my $ flags = unpack ( " N " , $ header ) ; <nl> + <nl> + return ( $ flags , $ rv , $ cas ) ; <nl> + } <nl> + <nl> + sub get_multi { <nl> + my $ self = shift ; <nl> + my @ keys = @ _ ; <nl> + <nl> + for ( my $ i = 0 ; $ i < @ keys ; $ i + + ) { <nl> + $ self - > send_command ( : : 
CMD_GETQ , $ keys [ $ i ] , ' ' , $ i , ' ' , 0 ) ; <nl> + } <nl> + <nl> + my $ terminal = @ keys + 10 ; <nl> + $ self - > send_command ( : : CMD_NOOP , ' ' , ' ' , $ terminal ) ; <nl> + <nl> + my % return ; <nl> + while ( 1 ) { <nl> + my ( $ opaque , $ data ) = $ self - > _handle_single_response ; <nl> + last if $ opaque = = $ terminal ; <nl> + <nl> + my $ header = substr $ data , 0 , 4 , ' ' ; <nl> + my $ flags = unpack ( " N " , $ header ) ; <nl> + <nl> + $ return { $ keys [ $ opaque ] } = [ $ flags , $ data ] ; <nl> + } <nl> + <nl> + return % return if wantarray ; <nl> + return \ % return ; <nl> + } <nl> + <nl> + sub version { <nl> + my $ self = shift ; <nl> + return $ self - > _do_command ( : : CMD_VERSION , ' ' , ' ' ) ; <nl> + } <nl> + <nl> + sub flush { <nl> + my $ self = shift ; <nl> + return $ self - > _do_command ( : : CMD_FLUSH , ' ' , ' ' ) ; <nl> + } <nl> + <nl> + sub add { <nl> + my $ self = shift ; <nl> + my ( $ key , $ val , $ flags , $ expire ) = @ _ ; <nl> + my $ extra_header = pack " NN " , $ flags , $ expire ; <nl> + my $ cas = 0 ; <nl> + return $ self - > _do_command ( : : CMD_ADD , $ key , $ val , $ extra_header , $ cas ) ; <nl> + } <nl> + <nl> + sub set { <nl> + my $ self = shift ; <nl> + my ( $ key , $ val , $ flags , $ expire , $ cas ) = @ _ ; <nl> + my $ extra_header = pack " NN " , $ flags , $ expire ; <nl> + return $ self - > _do_command ( : : CMD_SET , $ key , $ val , $ extra_header , $ cas ) ; <nl> + } <nl> + <nl> + sub _append_prepend { <nl> + my $ self = shift ; <nl> + my ( $ cmd , $ key , $ val , $ cas ) = @ _ ; <nl> + return $ self - > _do_command ( $ cmd , $ key , $ val , ' ' , $ cas ) ; <nl> + } <nl> + <nl> + sub replace { <nl> + my $ self = shift ; <nl> + my ( $ key , $ val , $ flags , $ expire ) = @ _ ; <nl> + my $ extra_header = pack " NN " , $ flags , $ expire ; <nl> + my $ cas = 0 ; <nl> + return $ self - > _do_command ( : : CMD_REPLACE , $ key , $ val , $ extra_header , $ cas ) ; <nl> + } <nl> + <nl> + sub delete { <nl> + my $ self = shift ; <nl> + my ( $ key ) = @ _ ; <nl> + return $ self - > _do_command ( : : CMD_DELETE , $ key , ' ' ) ; <nl> + } <nl> + <nl> + sub incr { <nl> + my $ self = shift ; <nl> + my ( $ key , $ amt , $ init , $ exp ) = @ _ ; <nl> + $ amt = 1 unless defined $ amt ; <nl> + $ init = 0 unless defined $ init ; <nl> + $ exp = 0 unless defined $ exp ; <nl> + <nl> + return $ self - > _incrdecr ( : : CMD_INCR , $ key , $ amt , $ init , $ exp ) ; <nl> + } <nl> + <nl> + sub decr { <nl> + my $ self = shift ; <nl> + my ( $ key , $ amt , $ init , $ exp ) = @ _ ; <nl> + $ amt = 1 unless defined $ amt ; <nl> + $ init = 0 unless defined $ init ; <nl> + $ exp = 0 unless defined $ exp ; <nl> + <nl> + return $ self - > _incrdecr ( : : CMD_DECR , $ key , $ amt , $ init , $ exp ) ; <nl> + } <nl> + <nl> + sub noop { <nl> + my $ self = shift ; <nl> + return $ self - > _do_command ( : : CMD_NOOP , ' ' , ' ' ) ; <nl> + } <nl> + <nl> + package MC : : Error ; <nl> + <nl> + use strict ; <nl> + use warnings ; <nl> + <nl> + use constant ERR_UNKNOWN_CMD = > 0x81 ; <nl> + use constant ERR_NOT_FOUND = > 0x1 ; <nl> + use constant ERR_EXISTS = > 0x2 ; <nl> + use constant ERR_TOO_BIG = > 0x3 ; <nl> + use constant ERR_EINVAL = > 0x4 ; <nl> + use constant ERR_NOT_STORED = > 0x5 ; <nl> + use constant ERR_DELTA_BADVAL = > 0x6 ; <nl> + <nl> + use overload ' " " ' = > sub { <nl> + my $ self = shift ; <nl> + return " Memcache Error ( $ self - > [ 0 ] ) : $ self - > [ 1 ] " ; <nl> + } ; <nl> + <nl> + sub new { <nl> + my $ class = shift ; <nl> + my $ error = [ @ _ ] 
; <nl> + my $ self = bless $ error , ( ref $ class | | $ class ) ; <nl> + <nl> + return $ self ; <nl> + } <nl> + <nl> + sub not_found { <nl> + my $ self = shift ; <nl> + return $ self - > [ 0 ] = = ERR_NOT_FOUND ; <nl> + } <nl> + <nl> + sub exists { <nl> + my $ self = shift ; <nl> + return $ self - > [ 0 ] = = ERR_EXISTS ; <nl> + } <nl> + <nl> + sub too_big { <nl> + my $ self = shift ; <nl> + return $ self - > [ 0 ] = = ERR_TOO_BIG ; <nl> + } <nl> + <nl> + sub delta_badval { <nl> + my $ self = shift ; <nl> + return $ self - > [ 0 ] = = ERR_DELTA_BADVAL ; <nl> + } <nl> + <nl> + sub einval { <nl> + my $ self = shift ; <nl> + return $ self - > [ 0 ] = = ERR_EINVAL ; <nl> + } <nl> + <nl> + # vim : filetype = perl <nl> + <nl> new file mode 100755 <nl> index 00000000000 . . e6fe7f58600 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / daemonize . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 7 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + use File : : Temp qw ( tempfile ) ; <nl> + <nl> + my ( undef , $ tmpfn ) = tempfile ( ) ; <nl> + <nl> + my $ server = new_memcached ( " - d - P $ tmpfn " ) ; <nl> + my $ sock = $ server - > sock ; <nl> + sleep 0 . 5 ; <nl> + <nl> + ok ( - e $ tmpfn , " pid file exists " ) ; <nl> + ok ( - s $ tmpfn , " pid file has length " ) ; <nl> + <nl> + open ( my $ fh , $ tmpfn ) or die ; <nl> + my $ readpid = do { local $ / ; < $ fh > ; } ; <nl> + chomp $ readpid ; <nl> + close ( $ fh ) ; <nl> + <nl> + ok ( kill ( 0 , $ readpid ) , " process is still running " ) ; <nl> + <nl> + my $ stats = mem_stats ( $ sock ) ; <nl> + is ( $ stats - > { pid } , $ readpid , " memcached reports same pid as file " ) ; <nl> + <nl> + ok ( $ server - > new_sock , " opened new socket " ) ; <nl> + ok ( kill ( 9 , $ readpid ) , " sent KILL signal " ) ; <nl> + sleep 0 . 5 ; <nl> + ok ( ! $ server - > new_sock , " failed to open new socket " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . af9e09bee9e <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / dash - M . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ' - M - m 1 ' ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + my $ value = " B " x 8192 ; <nl> + my $ vallen = length ( $ value ) ; <nl> + <nl> + my $ resp = " STORED \ r \ n " ; <nl> + my $ key = 0 ; <nl> + <nl> + while ( $ resp eq " STORED \ r \ n " ) { <nl> + print $ sock " set dash $ key 0 0 $ vallen \ r \ n $ value \ r \ n " ; <nl> + $ key + + ; <nl> + $ resp = scalar < $ sock > ; <nl> + } <nl> + <nl> + my $ max_stored = $ key - 1 ; <nl> + <nl> + plan tests = > $ max_stored + 1 ; <nl> + <nl> + print $ sock " set dash $ key 0 0 $ vallen \ r \ n $ value \ r \ n " ; <nl> + is ( scalar < $ sock > , " SERVER_ERROR out of memory storing object \ r \ n " , <nl> + " failed to add another one . " ) ; <nl> + <nl> + for ( $ key = 0 ; $ key < $ max_stored ; $ key + + ) { <nl> + mem_get_is $ sock , " dash $ key " , $ value , " Failed at dash $ key " ; <nl> + } <nl> new file mode 100755 <nl> index 00000000000 . . d0fa394076b <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / evictions . t <nl> <nl> + # ! 
/ usr / bin / perl <nl> + # Test the ' stats items ' evictions counters . <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 92 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( " - m 3 " ) ; <nl> + my $ sock = $ server - > sock ; <nl> + my $ value = " B " x66560 ; <nl> + my $ key = 0 ; <nl> + <nl> + # These aren ' t set to expire . <nl> + for ( $ key = 0 ; $ key < 40 ; $ key + + ) { <nl> + print $ sock " set key $ key 0 0 66560 \ r \ n $ value \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored key $ key " ) ; <nl> + } <nl> + <nl> + # These ones would expire in 600 seconds . <nl> + for ( $ key = 0 ; $ key < 50 ; $ key + + ) { <nl> + print $ sock " set key $ key 0 600 66560 \ r \ n $ value \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored key $ key " ) ; <nl> + } <nl> + <nl> + my $ stats = mem_stats ( $ sock , " items " ) ; <nl> + my $ evicted = $ stats - > { " items : 31 : evicted " } ; <nl> + isnt ( $ evicted , " 0 " , " check evicted " ) ; <nl> + my $ evicted_nonzero = $ stats - > { " items : 31 : evicted_nonzero " } ; <nl> + isnt ( $ evicted_nonzero , " 0 " , " check evicted_nonzero " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . e113fc96018 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / flush - all . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 14 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + my $ expire ; <nl> + <nl> + print $ sock " set foo 0 0 6 \ r \ nfooval \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foo " ) ; <nl> + <nl> + mem_get_is ( $ sock , " foo " , " fooval " ) ; <nl> + print $ sock " flush_all \ r \ n " ; <nl> + is ( scalar < $ sock > , " OK \ r \ n " , " did flush_all " ) ; <nl> + mem_get_is ( $ sock , " foo " , undef ) ; <nl> + <nl> + # Test flush_all with zero delay . <nl> + print $ sock " set foo 0 0 6 \ r \ nfooval \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foo " ) ; <nl> + <nl> + mem_get_is ( $ sock , " foo " , " fooval " ) ; <nl> + print $ sock " flush_all 0 \ r \ n " ; <nl> + is ( scalar < $ sock > , " OK \ r \ n " , " did flush_all " ) ; <nl> + mem_get_is ( $ sock , " foo " , undef ) ; <nl> + <nl> + # check that flush_all doesn ' t blow away items that immediately get set <nl> + print $ sock " set foo 0 0 3 \ r \ nnew \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foo = ' new ' " ) ; <nl> + mem_get_is ( $ sock , " foo " , ' new ' ) ; <nl> + <nl> + # and the other form , specifying a flush_all time . . . <nl> + my $ expire = time ( ) + 2 ; <nl> + print $ sock " flush_all $ expire \ r \ n " ; <nl> + is ( scalar < $ sock > , " OK \ r \ n " , " did flush_all in future " ) ; <nl> + <nl> + print $ sock " set foo 0 0 4 \ r \ n1234 \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foo = ' 1234 ' " ) ; <nl> + mem_get_is ( $ sock , " foo " , ' 1234 ' ) ; <nl> + sleep ( 2 . 2 ) ; <nl> + mem_get_is ( $ sock , " foo " , undef ) ; <nl> new file mode 100755 <nl> index 00000000000 . . 2213f69027b <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / issue_104 . t <nl> <nl> + # ! 
/ usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 6 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + # first get should miss <nl> + print $ sock " get foo \ r \ n " ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " get foo " ) ; <nl> + <nl> + # Now set and get ( should hit ) <nl> + print $ sock " set foo 0 0 6 \ r \ nfooval \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foo " ) ; <nl> + mem_get_is ( $ sock , " foo " , " fooval " ) ; <nl> + <nl> + my $ stats = mem_stats ( $ sock ) ; <nl> + is ( $ stats - > { cmd_get } , 2 , " Should have 2 get requests " ) ; <nl> + is ( $ stats - > { get_hits } , 1 , " Should have 1 hit " ) ; <nl> + is ( $ stats - > { get_misses } , 1 , " Should have 1 miss " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . fb14b0300f5 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / issue_14 . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 21 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + my $ value = " B " x66560 ; <nl> + my $ key = 0 ; <nl> + <nl> + for ( $ key = 0 ; $ key < 10 ; $ key + + ) { <nl> + print $ sock " set key $ key 0 2 66560 \ r \ n $ value \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored key $ key " ) ; <nl> + } <nl> + <nl> + # print $ sock " stats slabs " <nl> + my $ first_stats = mem_stats ( $ sock , " slabs " ) ; <nl> + my $ first_malloc = $ first_stats - > { total_malloced } ; <nl> + <nl> + sleep ( 4 ) ; <nl> + <nl> + for ( $ key = 10 ; $ key < 20 ; $ key + + ) { <nl> + print $ sock " set key $ key 0 2 66560 \ r \ n $ value \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored key $ key " ) ; <nl> + } <nl> + <nl> + my $ second_stats = mem_stats ( $ sock , " slabs " ) ; <nl> + my $ second_malloc = $ second_stats - > { total_malloced } ; <nl> + <nl> + <nl> + is ( $ second_malloc , $ first_malloc , " Memory grows . . " ) <nl> new file mode 100755 <nl> index 00000000000 . . f498c4502f6 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / issue_22 . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 84 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( " - m 3 " ) ; <nl> + my $ sock = $ server - > sock ; <nl> + my $ value = " B " x66560 ; <nl> + my $ key = 0 ; <nl> + <nl> + for ( $ key = 0 ; $ key < 40 ; $ key + + ) { <nl> + print $ sock " set key $ key 0 0 66560 \ r \ n $ value \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored key $ key " ) ; <nl> + } <nl> + <nl> + my $ first_stats = mem_stats ( $ sock , " items " ) ; <nl> + my $ first_evicted = $ first_stats - > { " items : 31 : evicted " } ; <nl> + # I get 1 eviction on a 32 bit binary , but 4 on a 64 binary . . <nl> + # Just check that I have evictions . . . 
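<nl> + # ( whichever it is depends on per - item overhead ) . Note for the final <nl> + # block below : once the 3MB cache is full , every additional same - sized <nl> + # store evicts exactly one older item , which is why 40 more sets after <nl> + # the stats reset must report exactly 40 evictions .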
<nl> + isnt ( $ first_evicted , " 0 " , " check evicted " ) ; <nl> + <nl> + print $ sock " stats reset \ r \ n " ; <nl> + is ( scalar < $ sock > , " RESET \ r \ n " , " Stats reset " ) ; <nl> + <nl> + my $ second_stats = mem_stats ( $ sock , " items " ) ; <nl> + my $ second_evicted = $ second_stats - > { " items : 31 : evicted " } ; <nl> + is ( " 0 " , $ second_evicted , " check evicted " ) ; <nl> + <nl> + for ( $ key = 40 ; $ key < 80 ; $ key + + ) { <nl> + print $ sock " set key $ key 0 0 66560 \ r \ n $ value \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored key $ key " ) ; <nl> + } <nl> + <nl> + my $ last_stats = mem_stats ( $ sock , " items " ) ; <nl> + my $ last_evicted = $ last_stats - > { " items : 31 : evicted " } ; <nl> + is ( $ last_evicted , " 40 " , " check evicted " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . a585f97ac21 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / issue_29 . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 4 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + print $ sock " set issue29 0 0 0 \ r \ n \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored issue29 " ) ; <nl> + <nl> + my $ first_stats = mem_stats ( $ sock , " slabs " ) ; <nl> + my $ first_used = $ first_stats - > { " 1 : used_chunks " } ; <nl> + <nl> + is ( 1 , $ first_used , " Used one " ) ; <nl> + <nl> + print $ sock " set issue29_b 0 0 0 \ r \ n \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored issue29_b " ) ; <nl> + <nl> + my $ second_stats = mem_stats ( $ sock , " slabs " ) ; <nl> + my $ second_used = $ second_stats - > { " 1 : used_chunks " } ; <nl> + <nl> + is ( 2 , $ second_used , " Used two " ) <nl> new file mode 100755 <nl> index 00000000000 . . fb1a292c974 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / issue_41 . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use warnings ; <nl> + use POSIX qw ( ceil ) ; <nl> + use Test : : More tests = > 691 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + my $ factor = 2 ; <nl> + my $ val = " x " x $ factor ; <nl> + my $ key = ' ' ; <nl> + <nl> + # SET items of diverse size to the daemon so it can attempt <nl> + # to return a large stats output for slabs <nl> + for ( my $ i = 0 ; $ i < 69 ; $ i + + ) { <nl> + for ( my $ j = 0 ; $ j < 10 ; $ j + + ) { <nl> + $ key = " $ i : $ j " ; <nl> + print $ sock " set key $ key 0 0 $ factor \ r \ n $ val \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored key $ key " ) ; <nl> + } <nl> + $ factor * = 1 . 2 ; <nl> + $ factor = ceil ( $ factor ) ; <nl> + $ val = " x " x $ factor ; <nl> + } <nl> + <nl> + # This request will kill the daemon if it has not allocated <nl> + # enough memory internally . <nl> + my $ stats = mem_stats ( $ sock , " slabs " ) ; <nl> + <nl> + # Verify whether the daemon is still running or not by asking <nl> + # it for statistics . <nl> + print $ sock " version \ r \ n " ; <nl> + my $ v = scalar < $ sock > ; <nl> + ok ( defined $ v & & length ( $ v ) , " memcached didn ' t respond " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . 
299c1ec684b <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / issue_42 . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 11 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + my $ value = " B " x10 ; <nl> + my $ key = 0 ; <nl> + <nl> + for ( $ key = 0 ; $ key < 10 ; $ key + + ) { <nl> + print $ sock " set key $ key 0 0 10 \ r \ n $ value \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored key $ key " ) ; <nl> + } <nl> + <nl> + my $ first_stats = mem_stats ( $ sock , " slabs " ) ; <nl> + my $ req = $ first_stats - > { " 1 : mem_requested " } ; <nl> + ok ( $ req = = " 640 " | | $ req = = " 800 " , " Check allocated size " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . 80876b273b2 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / issue_50 . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 1 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ' - B binary ' ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + $ SIG { ALRM } = sub { die " alarm \ n " } ; <nl> + alarm ( 2 ) ; <nl> + print $ sock " Here ' s a bunch of garbage that doesn ' t look like the bin prot . " ; <nl> + my $ rv = < $ sock > ; <nl> + ok ( 1 , " Either the above worked and quit , or hung forever . " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . ec38a7d913b <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / issue_61 . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 7 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( " - R 1 " ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + print $ sock " set foobar 0 0 5 \ r \ nBubba \ r \ nset foobar 0 0 5 \ r \ nBubba \ r \ nset foobar 0 0 5 \ r \ nBubba \ r \ nset foobar 0 0 5 \ r \ nBubba \ r \ nset foobar 0 0 5 \ r \ nBubba \ r \ nset foobar 0 0 5 \ r \ nBubba \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foobar " ) ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foobar " ) ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foobar " ) ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foobar " ) ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foobar " ) ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foobar " ) ; <nl> + my $ stats = mem_stats ( $ sock ) ; <nl> + is ( $ stats - > { " conn_yields " } , " 5 " , " Got a decent number of yields " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . 285306a200d <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / issue_67 . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 22 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + use Carp qw ( croak ) ; <nl> + <nl> + use Cwd ; <nl> + my $ builddir = getcwd ; <nl> + <nl> + $ ENV { ' MEMCACHED_PORT_FILENAME ' } = " / tmp / ports . 
$ $ " ; <nl> + <nl> + sub read_ports { <nl> + my % rv = ( ) ; <nl> + open ( my $ f , " / tmp / ports . $ $ " ) | | die ( " Can ' t open ports file . " ) ; <nl> + while ( < $ f > ) { <nl> + my ( $ type , $ port ) = split ( / : \ s + / ) ; <nl> + $ rv { $ type } = $ port + 0 ; <nl> + } <nl> + unlink " / tmp / ports . $ $ " ; <nl> + return % rv ; <nl> + } <nl> + <nl> + sub validate_port { <nl> + my ( $ name , $ got , $ expected ) = @ _ ; <nl> + # diag " Wanted $ expected , got $ got " ; <nl> + if ( $ expected = = - 1 ) { <nl> + ok ( ! defined ( $ got ) , " $ name expected no port , got $ got " ) ; <nl> + } elsif ( $ expected = = 0 ) { <nl> + ok ( $ got ! = 11211 , " $ name expected random port ( got $ got ) " ) ; <nl> + } else { <nl> + is ( $ got , $ expected , " $ name " ) ; <nl> + } <nl> + } <nl> + <nl> + sub run_server { <nl> + my ( $ args ) = @ _ ; <nl> + <nl> + my $ exe = " $ builddir / memcached - debug " ; <nl> + croak ( " memcached binary doesn ' t exist . Haven ' t run ' make ' ? \ n " ) unless - e $ exe ; <nl> + <nl> + my $ childpid = fork ( ) ; <nl> + <nl> + my $ cmd = " $ builddir / timedrun 10 $ exe $ args " ; <nl> + <nl> + unless ( $ childpid ) { <nl> + exec $ cmd ; <nl> + exit ; # NOTREACHED <nl> + } <nl> + <nl> + for ( 1 . . 20 ) { <nl> + if ( - f " / tmp / ports . $ $ " ) { <nl> + return Memcached : : Handle - > new ( pid = > $ childpid ) ; <nl> + } <nl> + select undef , undef , undef , 0 . 10 ; <nl> + } <nl> + croak " Failed to start server . " ; <nl> + } <nl> + <nl> + sub when { <nl> + my ( $ name , $ params , $ expected_tcp , $ expected_udp ) = @ _ ; <nl> + <nl> + my $ server = run_server ( $ params ) ; <nl> + my % ports = read_ports ( ) ; <nl> + <nl> + validate_port ( $ name , $ ports { ' TCP INET ' } , $ expected_tcp ) ; <nl> + validate_port ( $ name , $ ports { ' UDP INET ' } , $ expected_udp ) ; <nl> + } <nl> + <nl> + # Disabling the defaults since it conflicts with a running instance . <nl> + # when ( ' no arguments ' , ' ' , 11211 , 11211 ) ; <nl> + when ( ' specifying tcp port ' , ' - p 11212 ' , 11212 , 11212 ) ; <nl> + when ( ' specifying udp port ' , ' - U 11222 ' , 11222 , 11222 ) ; <nl> + when ( ' specifying tcp ephemeral port ' , ' - p - 1 ' , 0 , 0 ) ; <nl> + when ( ' specifying udp ephemeral port ' , ' - U - 1 ' , 0 , 0 ) ; <nl> + when ( ' tcp port disabled ' , ' - p 0 ' , - 1 , - 1 ) ; <nl> + when ( ' udp port disabled ' , ' - U 0 ' , - 1 , - 1 ) ; <nl> + when ( ' specifying tcp and udp ports ' , ' - p 11232 - U 11233 ' , 11232 , 11233 ) ; <nl> + when ( ' specifying tcp and disabling udp ' , ' - p 11242 - U 0 ' , 11242 , - 1 ) ; <nl> + when ( ' specifying udp and disabling tcp ' , ' - p - 1 - U 11252 ' , 0 , 11252 ) ; <nl> + when ( ' specifying tcp and ephemeral udp ' , ' - p 11262 - U - 1 ' , 11262 , 0 ) ; <nl> + when ( ' specifying udp and ephemeral tcp ' , ' - p - 1 - U 11272 ' , 0 , 11272 ) ; <nl> new file mode 100755 <nl> index 00000000000 . . 85460fadac9 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / item_size_max . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 7 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + my $ stats = mem_stats ( $ sock , ' settings ' ) ; <nl> + <nl> + # Ensure default still works . 
<nl> + is ( $ stats - > { item_size_max } , 1024 * 1024 ) ; <nl> + $ server - > stop ( ) ; <nl> + <nl> + # Should die . <nl> + eval { <nl> + $ server = new_memcached ( ' - I 1000 ' ) ; <nl> + } ; <nl> + ok ( $ @ & & $ @ = ~ m / ^ Failed / , " Shouldn ' t start with < 1k item max " ) ; <nl> + <nl> + eval { <nl> + $ server = new_memcached ( ' - I 256m ' ) ; <nl> + } ; <nl> + ok ( $ @ & & $ @ = ~ m / ^ Failed / , " Shouldn ' t start with > 128m item max " ) ; <nl> + <nl> + # Minimum . <nl> + $ server = new_memcached ( ' - I 1024 ' ) ; <nl> + my $ stats = mem_stats ( $ server - > sock , ' settings ' ) ; <nl> + is ( $ stats - > { item_size_max } , 1024 ) ; <nl> + $ server - > stop ( ) ; <nl> + <nl> + # Reasonable but unreasonable . <nl> + $ server = new_memcached ( ' - I 1049600 ' ) ; <nl> + my $ stats = mem_stats ( $ server - > sock , ' settings ' ) ; <nl> + is ( $ stats - > { item_size_max } , 1049600 ) ; <nl> + $ server - > stop ( ) ; <nl> + <nl> + # Suffix kilobytes . <nl> + $ server = new_memcached ( ' - I 512k ' ) ; <nl> + my $ stats = mem_stats ( $ server - > sock , ' settings ' ) ; <nl> + is ( $ stats - > { item_size_max } , 524288 ) ; <nl> + $ server - > stop ( ) ; <nl> + <nl> + # Suffix megabytes . <nl> + $ server = new_memcached ( ' - I 32m ' ) ; <nl> + my $ stats = mem_stats ( $ server - > sock , ' settings ' ) ; <nl> + is ( $ stats - > { item_size_max } , 33554432 ) ; <nl> + $ server - > stop ( ) ; <nl> + <nl> new file mode 120000 <nl> index 00000000000 . . dc598c56dce <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / lib <nl> @ @ - 0 , 0 + 1 @ @ <nl> + . . / lib <nl> \ No newline at end of file <nl> new file mode 100755 <nl> index 00000000000 . . 829f4d0bf99 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / line - lengths . t <nl> <nl> + # ! / usr / bin / perl <nl> + use strict ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + our @ files ; <nl> + <nl> + BEGIN { <nl> + chdir " $ Bin / . . " or die ; <nl> + @ files = ( " doc / protocol . txt " ) ; <nl> + } <nl> + <nl> + use Test : : More tests = > scalar ( @ files ) ; <nl> + <nl> + foreach my $ f ( @ files ) { <nl> + open ( my $ fh , $ f ) or die ( " Can ' t open $ f " ) ; <nl> + my @ long_lines = ( ) ; <nl> + my $ line_number = 0 ; <nl> + while ( < $ fh > ) { <nl> + $ line_number + + ; <nl> + if ( length ( $ _ ) > 80 ) { <nl> + push ( @ long_lines , $ line_number ) ; <nl> + } <nl> + } <nl> + close ( $ fh ) ; <nl> + ok ( @ long_lines = = 0 , " $ f has a long lines : @ long_lines " ) ; <nl> + } <nl> new file mode 100755 <nl> index 00000000000 . . 86a8d1e5161 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / lru . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 149 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + # assuming max slab is 1M and default mem is 64M <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + # create a big value for the largest slab <nl> + my $ max = 1024 * 1024 ; <nl> + my $ big = ' x ' x ( 1024 * 1024 - 250 ) ; <nl> + <nl> + ok ( length ( $ big ) > 512 * 1024 ) ; <nl> + ok ( length ( $ big ) < 1024 * 1024 ) ; <nl> + <nl> + # test that an even bigger value is rejected while we ' re here <nl> + my $ too_big = $ big . $ big . 
$ big ; <nl> + my $ len = length ( $ too_big ) ; <nl> + print $ sock " set too_big 0 0 $ len \ r \ n $ too_big \ r \ n " ; <nl> + is ( scalar < $ sock > , " SERVER_ERROR object too large for cache \ r \ n " , " too_big not stored " ) ; <nl> + <nl> + # set the big value <nl> + my $ len = length ( $ big ) ; <nl> + print $ sock " set big 0 0 $ len \ r \ n $ big \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored big " ) ; <nl> + mem_get_is ( $ sock , " big " , $ big ) ; <nl> + <nl> + # no evictions yet <nl> + my $ stats = mem_stats ( $ sock ) ; <nl> + is ( $ stats - > { " evictions " } , " 0 " , " no evictions to start " ) ; <nl> + <nl> + # set many big items , enough to get evictions <nl> + for ( my $ i = 0 ; $ i < 100 ; $ i + + ) { <nl> + print $ sock " set item_ $ i 0 0 $ len \ r \ n $ big \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored item_ $ i " ) ; <nl> + } <nl> + <nl> + # some evictions should have happened <nl> + my $ stats = mem_stats ( $ sock ) ; <nl> + my $ evictions = int ( $ stats - > { " evictions " } ) ; <nl> + ok ( $ evictions = = 37 , " some evictions happened " ) ; <nl> + <nl> + # the first big value should be gone <nl> + mem_get_is ( $ sock , " big " , undef ) ; <nl> + <nl> + # the earliest items should be gone too <nl> + for ( my $ i = 0 ; $ i < $ evictions - 1 ; $ i + + ) { <nl> + mem_get_is ( $ sock , " item_ $ i " , undef ) ; <nl> + } <nl> + <nl> + # check that the non - evicted are the right ones <nl> + for ( my $ i = $ evictions - 1 ; $ i < $ evictions + 4 ; $ i + + ) { <nl> + mem_get_is ( $ sock , " item_ $ i " , $ big ) ; <nl> + } <nl> new file mode 100755 <nl> index 00000000000 . . d30966ad0d5 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / maxconns . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use warnings ; <nl> + <nl> + use Test : : More tests = > 11 ; <nl> + <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + <nl> + # start up a server with 10 maximum connections <nl> + my $ server = new_memcached ( ' - c 10 ' ) ; <nl> + my $ sock = $ server - > sock ; <nl> + my @ sockets ; <nl> + <nl> + ok ( defined ( $ sock ) , ' Connection 0 ' ) ; <nl> + push ( @ sockets , $ sock ) ; <nl> + <nl> + <nl> + foreach my $ conn ( 1 . . 10 ) { <nl> + $ sock = $ server - > new_sock ; <nl> + ok ( defined ( $ sock ) , " Made connection $ conn " ) ; <nl> + push ( @ sockets , $ sock ) ; <nl> + } <nl> new file mode 100755 <nl> index 00000000000 . . b7099a2425e <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / stats - detail . t <nl> <nl> + # ! 
/ usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 24 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + my $ expire ; <nl> + <nl> + print $ sock " stats detail dump \ r \ n " ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " verified empty stats at start " ) ; <nl> + <nl> + print $ sock " stats detail on \ r \ n " ; <nl> + is ( scalar < $ sock > , " OK \ r \ n " , " detail collection turned on " ) ; <nl> + <nl> + print $ sock " set foo : 123 0 0 6 \ r \ nfooval \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foo " ) ; <nl> + <nl> + print $ sock " stats detail dump \ r \ n " ; <nl> + is ( scalar < $ sock > , " PREFIX foo get 0 hit 0 set 1 del 0 \ r \ n " , " details after set " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " end of details " ) ; <nl> + <nl> + mem_get_is ( $ sock , " foo : 123 " , " fooval " ) ; <nl> + print $ sock " stats detail dump \ r \ n " ; <nl> + is ( scalar < $ sock > , " PREFIX foo get 1 hit 1 set 1 del 0 \ r \ n " , " details after get with hit " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " end of details " ) ; <nl> + <nl> + mem_get_is ( $ sock , " foo : 124 " , undef ) ; <nl> + <nl> + print $ sock " stats detail dump \ r \ n " ; <nl> + is ( scalar < $ sock > , " PREFIX foo get 2 hit 1 set 1 del 0 \ r \ n " , " details after get without hit " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " end of details " ) ; <nl> + <nl> + print $ sock " delete foo : 125 \ r \ n " ; <nl> + is ( scalar < $ sock > , " NOT_FOUND \ r \ n " , " sent delete command " ) ; <nl> + <nl> + print $ sock " stats detail dump \ r \ n " ; <nl> + is ( scalar < $ sock > , " PREFIX foo get 2 hit 1 set 1 del 1 \ r \ n " , " details after delete " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " end of details " ) ; <nl> + <nl> + print $ sock " stats reset \ r \ n " ; <nl> + is ( scalar < $ sock > , " RESET \ r \ n " , " stats cleared " ) ; <nl> + <nl> + print $ sock " stats detail dump \ r \ n " ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " empty stats after clear " ) ; <nl> + <nl> + mem_get_is ( $ sock , " foo : 123 " , " fooval " ) ; <nl> + print $ sock " stats detail dump \ r \ n " ; <nl> + is ( scalar < $ sock > , " PREFIX foo get 1 hit 1 set 0 del 0 \ r \ n " , " details after clear and get " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " end of details " ) ; <nl> + <nl> + print $ sock " stats detail off \ r \ n " ; <nl> + is ( scalar < $ sock > , " OK \ r \ n " , " detail collection turned off " ) ; <nl> + <nl> + mem_get_is ( $ sock , " foo : 124 " , undef ) ; <nl> + <nl> + mem_get_is ( $ sock , " foo : 123 " , " fooval " ) ; <nl> + print $ sock " stats detail dump \ r \ n " ; <nl> + is ( scalar < $ sock > , " PREFIX foo get 1 hit 1 set 0 del 0 \ r \ n " , " details after stats turned off " ) ; <nl> + is ( scalar < $ sock > , " END \ r \ n " , " end of details " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . 055bb06d2d0 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / stats . t <nl> <nl> + # ! 
new file mode 100755
index 00000000000..055bb06d2d0
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/disabled/stats.t

+#!/usr/bin/perl
+
+use strict;
+use Test::More tests => 95;
+use FindBin qw($Bin);
+use lib "$Bin/lib";
+use MemcachedTest;
+
+my $server = new_memcached();
+my $sock = $server->sock;
+
+
+## Output looks like this:
+##
+## STAT pid 22969
+## STAT uptime 13
+## STAT time 1259170891
+## STAT version 1.4.3
+## STAT pointer_size 32
+## STAT rusage_user 0.001198
+## STAT rusage_system 0.003523
+## STAT curr_connections 10
+## STAT total_connections 11
+## STAT connection_structures 11
+## STAT cmd_get 0
+## STAT cmd_set 0
+## STAT cmd_flush 0
+## STAT get_hits 0
+## STAT get_misses 0
+## STAT delete_misses 0
+## STAT delete_hits 0
+## STAT incr_misses 0
+## STAT incr_hits 0
+## STAT decr_misses 0
+## STAT decr_hits 0
+## STAT cas_misses 0
+## STAT cas_hits 0
+## STAT cas_badval 0
+## STAT auth_cmds 0
+## STAT auth_unknowns 0
+## STAT bytes_read 7
+## STAT bytes_written 0
+## STAT limit_maxbytes 67108864
+## STAT accepting_conns 1
+## STAT listen_disabled_num 0
+## STAT threads 4
+## STAT conn_yields 0
+## STAT bytes 0
+## STAT curr_items 0
+## STAT total_items 0
+## STAT evictions 0
+## STAT reclaimed 0
+
+# note that auth stats are tested in auth-specific tests
+
+
+my $stats = mem_stats($sock);
+
+# Test number of keys
+is(scalar(keys(%$stats)), 38, "38 stats values");
+
+# Test initial state
+foreach my $key (qw(curr_items total_items bytes cmd_get cmd_set get_hits evictions get_misses
+                    bytes_written delete_hits delete_misses incr_hits incr_misses decr_hits
+                    decr_misses listen_disabled_num)) {
+    is($stats->{$key}, 0, "initial $key is zero");
+}
+is($stats->{accepting_conns}, 1, "initial accepting_conns is one");
+
+# Do some operations
+
+print $sock "set foo 0 0 6\r\nfooval\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored foo");
+mem_get_is($sock, "foo", "fooval");
+
+my $stats = mem_stats($sock);
+
+foreach my $key (qw(total_items curr_items cmd_get cmd_set get_hits)) {
+    is($stats->{$key}, 1, "after one set/one get $key is 1");
+}
+
+my $cache_dump = mem_stats($sock, "cachedump 1 100");
+ok(defined $cache_dump->{'foo'}, "got foo from cachedump");
+
+print $sock "delete foo\r\n";
+is(scalar <$sock>, "DELETED\r\n", "deleted foo");
+
+my $stats = mem_stats($sock);
+is($stats->{delete_hits}, 1);
+is($stats->{delete_misses}, 0);
+
+print $sock "delete foo\r\n";
+is(scalar <$sock>, "NOT_FOUND\r\n", "shouldn't delete foo again");
+
+my $stats = mem_stats($sock);
+is($stats->{delete_hits}, 1);
+is($stats->{delete_misses}, 1);
+
+# incr stats
+
+sub check_incr_stats {
+    my ($ih, $im, $dh, $dm) = @_;
+    my $stats = mem_stats($sock);
+
+    is($stats->{incr_hits}, $ih);
+    is($stats->{incr_misses}, $im);
+    is($stats->{decr_hits}, $dh);
+    is($stats->{decr_misses}, $dm);
+}
+
+print $sock "incr i 1\r\n";
+is(scalar <$sock>, "NOT_FOUND\r\n", "shouldn't incr a missing thing");
+check_incr_stats(0, 1, 0, 0);
+
+print $sock "decr d 1\r\n";
+is(scalar <$sock>, "NOT_FOUND\r\n", "shouldn't decr a missing thing");
+check_incr_stats(0, 1, 0, 1);
+
+print $sock "set n 0 0 1\r\n0\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored n");
+
+print $sock "incr n 3\r\n";
+is(scalar <$sock>, "3\r\n", "incr works");
+check_incr_stats(1, 1, 0, 1);
+
+print $sock "decr n 1\r\n";
+is(scalar <$sock>, "2\r\n", "decr works");
+check_incr_stats(1, 1, 1, 1);
+
+# cas stats
+
+sub check_cas_stats {
+    my ($ch, $cm, $cb) = @_;
+    my $stats = mem_stats($sock);
+
+    is($stats->{cas_hits}, $ch);
+    is($stats->{cas_misses}, $cm);
+    is($stats->{cas_badval}, $cb);
+}
+
+check_cas_stats(0, 0, 0);
+
+print $sock "cas c 0 0 1 99999999\r\nz\r\n";
+is(scalar <$sock>, "NOT_FOUND\r\n", "missed cas");
+check_cas_stats(0, 1, 0);
+
+print $sock "set c 0 0 1\r\nx\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored c");
+my ($id, $v) = mem_gets($sock, 'c');
+is('x', $v, 'got the expected value');
+
+print $sock "cas c 0 0 1 99999999\r\nz\r\n";
+is(scalar <$sock>, "EXISTS\r\n", "missed cas");
+check_cas_stats(0, 1, 1);
+my ($newid, $v) = mem_gets($sock, 'c');
+is('x', $v, 'got the expected value');
+
+print $sock "cas c 0 0 1 $id\r\nz\r\n";
+is(scalar <$sock>, "STORED\r\n", "good cas");
+check_cas_stats(1, 1, 1);
+my ($newid, $v) = mem_gets($sock, 'c');
+is('z', $v, 'got the expected value');
+
+my $settings = mem_stats($sock, 'settings');
+is(1024, $settings->{'maxconns'});
+is('NULL', $settings->{'domain_socket'});
+is('on', $settings->{'evictions'});
+is('yes', $settings->{'cas_enabled'});
+is('no', $settings->{'auth_enabled_sasl'});
+
+print $sock "stats reset\r\n";
+is(scalar <$sock>, "RESET\r\n", "good stats reset");
+
+my $stats = mem_stats($sock);
+is(0, $stats->{'cmd_get'});
+is(0, $stats->{'cmd_set'});
+is(0, $stats->{'get_hits'});
+is(0, $stats->{'get_misses'});
+is(0, $stats->{'delete_misses'});
+is(0, $stats->{'delete_hits'});
+is(0, $stats->{'incr_misses'});
+is(0, $stats->{'incr_hits'});
+is(0, $stats->{'decr_misses'});
+is(0, $stats->{'decr_hits'});
+is(0, $stats->{'cas_misses'});
+is(0, $stats->{'cas_hits'});
+is(0, $stats->{'cas_badval'});
+is(0, $stats->{'evictions'});
+is(0, $stats->{'reclaimed'});
+
+print $sock "flush_all\r\n";
+is(scalar <$sock>, "OK\r\n", "flushed");
+
+my $stats = mem_stats($sock);
+is($stats->{cmd_flush}, 1, "after one flush cmd_flush is 1");
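Because mem_stats (defined later in lib/MemcachedTest.pm) returns a plain hashref of STAT name/value pairs, counts like the 38-key assertion above are easy to sanity-check by hand. A throwaway sketch, reusing $sock and mem_stats from a test file like the one above:

    my $stats = mem_stats($sock);
    printf "%d stats keys\n", scalar keys %$stats;
    printf "  %-22s %s\n", $_, $stats->{$_} for sort keys %$stats;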
new file mode 100755
index 00000000000..55dcf253f6f
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/disabled/stress-memcached.pl

+#!/usr/bin/perl
+# Copyright 2010-2012 RethinkDB, all rights reserved.
+#
+
+use strict;
+use lib '../../api/perl/lib';
+use Cache::Memcached;
+use Time::HiRes qw(time);
+
+unless (@ARGV == 2) {
+    die "Usage: stress-memcached.pl ip:port threads\n";
+}
+
+my $host = shift;
+my $threads = shift;
+
+my $memc = new Cache::Memcached;
+$memc->set_servers([$host]);
+
+unless ($memc->set("foo", "bar") &&
+        $memc->get("foo") eq "bar") {
+    die "memcached not running at $host?\n";
+}
+$memc->disconnect_all();
+
+
+my $running = 0;
+while (1) {
+    if ($running < $threads) {
+        my $cpid = fork();
+        if ($cpid) {
+            $running++;
+            # print "Launched $cpid.  Running $running threads.\n";
+        } else {
+            stress();
+            exit 0;
+        }
+    } else {
+        wait();
+        $running--;
+    }
+}
+
+sub stress {
+    undef $memc;
+    $memc = new Cache::Memcached;
+    $memc->set_servers([$host]);
+
+    my ($t1, $t2);
+    my $start = sub { $t1 = time(); };
+    my $stop = sub {
+        my $op = shift;
+        $t2 = time();
+        my $td = sprintf("%0.3f", $t2 - $t1);
+        if ($td > 0.25) { print "Took $td seconds for: $op\n"; }
+    };
+
+    my $max = rand(50);
+    my $sets = 0;
+
+    for (my $i = 0; $i < $max; $i++) {
+        my $key = key($i);
+        my $set = $memc->set($key, $key);
+        $sets++ if $set;
+    }
+
+    for (1..int(rand(500))) {
+        my $rand = int(rand($max));
+        my $key = key($rand);
+        my $meth = int(rand(3));
+        my $exp = int(rand(3));
+        undef $exp unless $exp;
+        $start->();
+        if ($meth == 0) {
+            $memc->add($key, $key, $exp);
+            $stop->("add");
+        } elsif ($meth == 1) {
+            $memc->delete($key);
+            $stop->("delete");
+        } else {
+            $memc->set($key, $key, $exp);
+            $stop->("set");
+        }
+        $rand = int(rand($max));
+        $key = key($rand);
+        $start->();
+        my $v = $memc->get($key);
+        $stop->("get");
+        if ($v && $v ne $key) { die "Bogus: $v for key $rand\n"; }
+    }
+
+    $start->();
+    my $multi = $memc->get_multi(map { key(int(rand($max))) } (1..$max));
+    $stop->("get_multi");
+}
+
+sub key {
+    my $n = shift;
+    $_ = sprintf("%04d", $n);
+    if ($n % 2) { $_ .= "a"x20; }
+    $_;
+}
$ max ) ) ; <nl> + $ stop - > ( " get_multi " ) ; <nl> + } <nl> + <nl> + sub key { <nl> + my $ n = shift ; <nl> + $ _ = sprintf ( " % 04d " , $ n ) ; <nl> + if ( $ n % 2 ) { $ _ . = " a " x20 ; } <nl> + $ _ ; <nl> + } <nl> new file mode 100755 <nl> index 00000000000 . . cb63f0ea71a <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / udp . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 48 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + use constant IS_ASCII = > 0 ; <nl> + use constant IS_BINARY = > 1 ; <nl> + use constant ENTRY_EXISTS = > 0 ; <nl> + use constant ENTRY_MISSING = > 1 ; <nl> + use constant BIN_REQ_MAGIC = > 0x80 ; <nl> + use constant BIN_RES_MAGIC = > 0x81 ; <nl> + use constant CMD_GET = > 0x00 ; <nl> + use constant CMD_SET = > 0x01 ; <nl> + use constant CMD_ADD = > 0x02 ; <nl> + use constant CMD_REPLACE = > 0x03 ; <nl> + use constant CMD_DELETE = > 0x04 ; <nl> + use constant CMD_INCR = > 0x05 ; <nl> + use constant CMD_DECR = > 0x06 ; <nl> + use constant CMD_APPEND = > 0x0E ; <nl> + use constant CMD_PREPEND = > 0x0F ; <nl> + use constant REQ_PKT_FMT = > " CCnCCnNNNN " ; <nl> + use constant RES_PKT_FMT = > " CCnCCnNNNN " ; <nl> + use constant INCRDECR_PKT_FMT = > " NNNNN " ; <nl> + use constant MIN_RECV_BYTES = > length ( pack ( RES_PKT_FMT ) ) ; <nl> + <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + # set foo ( and should get it ) <nl> + print $ sock " set foo 0 0 6 \ r \ nfooval \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foo " ) ; <nl> + mem_get_is ( $ sock , " foo " , " fooval " ) ; <nl> + <nl> + my $ usock = $ server - > new_udp_sock <nl> + or die " Can ' t bind : $ @ \ n " ; <nl> + <nl> + # testing sequence of request ids <nl> + for my $ offt ( 1 , 1 , 2 ) { <nl> + my $ req = 160 + $ offt ; <nl> + my $ res = send_udp_request ( $ usock , $ req , " get foo \ r \ n " ) ; <nl> + ok ( $ res , " got result " ) ; <nl> + is ( keys % $ res , 1 , " one key ( one packet ) " ) ; <nl> + ok ( $ res - > { 0 } , " only got seq number 0 " ) ; <nl> + is ( substr ( $ res - > { 0 } , 8 ) , " VALUE foo 0 6 \ r \ nfooval \ r \ nEND \ r \ n " ) ; <nl> + is ( hexify ( substr ( $ res - > { 0 } , 0 , 2 ) ) , hexify ( pack ( " n " , $ req ) ) , " udp request number in response ( $ req ) is correct " ) ; <nl> + } <nl> + <nl> + # op tests <nl> + for my $ prot ( : : IS_ASCII , : : IS_BINARY ) { <nl> + udp_set_test ( $ prot , 45 , " aval $ prot " , " 1 " , 0 , 0 ) ; <nl> + udp_set_test ( $ prot , 45 , " bval $ prot " , " abcd " x 1024 , 0 , 0 ) ; <nl> + udp_get_test ( $ prot , 45 , " aval $ prot " , " 1 " , : : ENTRY_EXISTS ) ; <nl> + udp_get_test ( $ prot , 45 , " 404 $ prot " , " 1 " , : : ENTRY_MISSING ) ; <nl> + udp_incr_decr_test ( $ prot , 45 , " aval $ prot " , " 1 " , " incr " , 1 ) ; <nl> + udp_incr_decr_test ( $ prot , 45 , " aval $ prot " , " 1 " , " decr " , 2 ) ; <nl> + udp_delete_test ( $ prot , 45 , " aval $ prot " ) ; <nl> + } <nl> + <nl> + sub udp_set_test { <nl> + my ( $ protocol , $ req_id , $ key , $ value , $ flags , $ exp ) = @ _ ; <nl> + my $ req = " " ; <nl> + my $ val_len = length ( $ value ) ; <nl> + <nl> + if ( $ protocol = = : : IS_ASCII ) { <nl> + $ req = " set $ key $ flags $ exp $ val_len \ r \ n $ value \ r \ n " ; <nl> + } elsif ( $ protocol = = : : IS_BINARY ) { <nl> + my $ key_len = length ( $ key ) ; <nl> + my $ extra = pack " NN " , 
$ flags , $ exp ; <nl> + my $ extra_len = length ( $ extra ) ; <nl> + my $ total_len = $ val_len + $ extra_len + $ key_len ; <nl> + $ req = pack ( : : REQ_PKT_FMT , : : BIN_REQ_MAGIC , : : CMD_SET , $ key_len , $ extra_len , 0 , 0 , $ total_len , 0 , 0 , 0 ) ; <nl> + $ req . = $ extra . $ key . $ value ; <nl> + } <nl> + <nl> + my $ datagrams = send_udp_request ( $ usock , $ req_id , $ req ) ; <nl> + my $ resp = construct_udp_message ( $ datagrams ) ; <nl> + <nl> + if ( $ protocol = = : : IS_ASCII ) { <nl> + is ( $ resp , " STORED \ r \ n " , " Store key $ key using ASCII protocol " ) ; <nl> + } elsif ( $ protocol = = : : IS_BINARY ) { <nl> + my ( $ resp_magic , $ resp_op_code , $ resp_key_len , $ resp_extra_len , $ resp_data_type , $ resp_status , $ resp_total_len , <nl> + $ resp_opaque , $ resp_ident_hi , $ resp_ident_lo ) = unpack ( : : RES_PKT_FMT , $ resp ) ; <nl> + is ( $ resp_status , " 0 " , " Store key $ key using binary protocol " ) ; <nl> + } <nl> + } <nl> + <nl> + sub udp_get_test { <nl> + my ( $ protocol , $ req_id , $ key , $ value , $ exists ) = @ _ ; <nl> + my $ key_len = length ( $ key ) ; <nl> + my $ value_len = length ( $ value ) ; <nl> + my $ req = " " ; <nl> + <nl> + if ( $ protocol = = : : IS_ASCII ) { <nl> + $ req = " get $ key \ r \ n " ; <nl> + } elsif ( $ protocol = = : : IS_BINARY ) { <nl> + $ req = pack ( : : REQ_PKT_FMT , : : BIN_REQ_MAGIC , : : CMD_GET , $ key_len , 0 , 0 , 0 , $ key_len , 0 , 0 , 0 ) ; <nl> + $ req . = $ key ; <nl> + } <nl> + <nl> + my $ datagrams = send_udp_request ( $ usock , $ req_id , $ req ) ; <nl> + my $ resp = construct_udp_message ( $ datagrams ) ; <nl> + <nl> + if ( $ protocol = = : : IS_ASCII ) { <nl> + if ( $ exists = = : : ENTRY_EXISTS ) { <nl> + is ( $ resp , " VALUE $ key 0 $ value_len \ r \ n $ value \ r \ nEND \ r \ n " , " Retrieve entry with key $ key using ASCII protocol " ) ; <nl> + } else { <nl> + is ( $ resp , " END \ r \ n " , " Retrieve non existing entry with key $ key using ASCII protocol " ) ; <nl> + } <nl> + } elsif ( $ protocol = = : : IS_BINARY ) { <nl> + my ( $ resp_magic , $ resp_op_code , $ resp_key_len , $ resp_extra_len , $ resp_data_type , $ resp_status , $ resp_total_len , <nl> + $ resp_opaque , $ resp_ident_hi , $ resp_ident_lo ) = unpack ( : : RES_PKT_FMT , $ resp ) ; <nl> + if ( $ exists = = : : ENTRY_EXISTS ) { <nl> + is ( $ resp_status , " 0 " , " Retrieve entry with key $ key using binary protocol " ) ; <nl> + is ( substr ( $ resp , : : MIN_RECV_BYTES + $ resp_extra_len + $ resp_key_len , $ value_len ) , $ value , " Value for key $ key retrieved with binary protocol matches " ) ; <nl> + } else { <nl> + is ( $ resp_status , " 1 " , " Retrieve non existing entry with key $ key using binary protocol " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + sub udp_delete_test { <nl> + my ( $ protocol , $ req_id , $ key ) = @ _ ; <nl> + my $ req = " " ; <nl> + my $ key_len = length ( $ key ) ; <nl> + <nl> + if ( $ protocol = = : : IS_ASCII ) { <nl> + $ req = " delete $ key \ r \ n " ; <nl> + } elsif ( $ protocol = = : : IS_BINARY ) { <nl> + $ req = pack ( : : REQ_PKT_FMT , : : BIN_REQ_MAGIC , : : CMD_DELETE , $ key_len , 0 , 0 , 0 , $ key_len , 0 , 0 , 0 ) ; <nl> + $ req . 
= $ key ; <nl> + } <nl> + <nl> + my $ datagrams = send_udp_request ( $ usock , $ req_id , $ req ) ; <nl> + my $ resp = construct_udp_message ( $ datagrams ) ; <nl> + <nl> + if ( $ protocol = = : : IS_ASCII ) { <nl> + is ( $ resp , " DELETED \ r \ n " , " Delete key $ key using ASCII protocol " ) ; <nl> + } elsif ( $ protocol = = : : IS_BINARY ) { <nl> + my ( $ resp_magic , $ resp_op_code , $ resp_key_len , $ resp_extra_len , $ resp_data_type , $ resp_status , $ resp_total_len , <nl> + $ resp_opaque , $ resp_ident_hi , $ resp_ident_lo ) = unpack ( : : RES_PKT_FMT , $ resp ) ; <nl> + is ( $ resp_status , " 0 " , " Delete key $ key using binary protocol " ) ; <nl> + } <nl> + } <nl> + <nl> + sub udp_incr_decr_test { <nl> + my ( $ protocol , $ req_id , $ key , $ val , $ optype , $ init_val ) = @ _ ; <nl> + my $ req = " " ; <nl> + my $ key_len = length ( $ key ) ; <nl> + my $ expected_value = 0 ; <nl> + my $ acmd = " incr " ; <nl> + my $ bcmd = : : CMD_INCR ; <nl> + if ( $ optype eq " incr " ) { <nl> + $ expected_value = $ init_val + $ val ; <nl> + } else { <nl> + $ acmd = " decr " ; <nl> + $ bcmd = : : CMD_DECR ; <nl> + $ expected_value = $ init_val - $ val ; <nl> + } <nl> + <nl> + if ( $ protocol = = : : IS_ASCII ) { <nl> + $ req = " $ acmd $ key $ val \ r \ n " ; <nl> + } elsif ( $ protocol = = : : IS_BINARY ) { <nl> + my $ extra = pack ( : : INCRDECR_PKT_FMT , ( $ val / 2 * * 32 ) , ( $ val % 2 * * 32 ) , 0 , 0 , 0 ) ; <nl> + my $ extra_len = length ( $ extra ) ; <nl> + $ req = pack ( : : REQ_PKT_FMT , : : BIN_REQ_MAGIC , $ bcmd , $ key_len , $ extra_len , 0 , 0 , $ key_len + $ extra_len , 0 , 0 , 0 ) ; <nl> + $ req . = $ extra . $ key ; <nl> + } <nl> + <nl> + my $ datagrams = send_udp_request ( $ usock , $ req_id , $ req ) ; <nl> + my $ resp = construct_udp_message ( $ datagrams ) ; <nl> + <nl> + if ( $ protocol = = : : IS_ASCII ) { <nl> + is ( $ resp , " $ expected_value \ r \ n " , " perform $ acmd math operation on key $ key with ASCII protocol " ) ; <nl> + } elsif ( $ protocol = = : : IS_BINARY ) { <nl> + my ( $ resp_magic , $ resp_op_code , $ resp_key_len , $ resp_extra_len , $ resp_data_type , $ resp_status , $ resp_total_len , <nl> + $ resp_opaque , $ resp_ident_hi , $ resp_ident_lo ) = unpack ( : : RES_PKT_FMT , $ resp ) ; <nl> + is ( $ resp_status , " 0 " , " perform $ acmd math operation on key $ key with binary protocol " ) ; <nl> + my ( $ resp_hi , $ resp_lo ) = unpack ( " NN " , substr ( $ resp , : : MIN_RECV_BYTES + $ resp_extra_len + $ resp_key_len , <nl> + $ resp_total_len - $ resp_extra_len - $ resp_key_len ) ) ; <nl> + is ( ( $ resp_hi * 2 * * 32 ) + $ resp_lo , $ expected_value , " validate result of binary protocol math operation $ acmd . Expected value $ expected_value " ) <nl> + } <nl> + } <nl> + <nl> + sub construct_udp_message { <nl> + my $ datagrams = shift ; <nl> + my $ num_datagram = keys ( % $ datagrams ) ; <nl> + my $ msg = " " ; <nl> + my $ cur_dg = " " ; <nl> + my $ cur_udp_header = " " ; <nl> + for ( my $ cur_dg_index = 0 ; $ cur_dg_index < $ num_datagram ; $ cur_dg_index + + ) { <nl> + $ cur_dg = % $ datagrams - > { $ cur_dg_index } ; <nl> + isnt ( $ cur_dg , " " , " missing datagram for segment $ cur_dg_index " ) ; <nl> + $ cur_udp_header = substr ( $ cur_dg , 0 , 8 ) ; <nl> + $ msg . = substr ( $ cur_dg , 8 ) ; <nl> + } <nl> + return $ msg ; <nl> + } <nl> + <nl> + sub hexify { <nl> + my $ val = shift ; <nl> + $ val = ~ s / ( . 
) / sprintf ( " % 02x " , ord ( $ 1 ) ) / egs ; <nl> + return $ val ; <nl> + } <nl> + <nl> + # returns undef on select timeout , or hashref of " seqnum " - > payload ( including headers ) <nl> + # verifies that resp_id is equal to id sent in request <nl> + # ensures consistency in num packets that make up response <nl> + sub send_udp_request { <nl> + my ( $ sock , $ reqid , $ req ) = @ _ ; <nl> + <nl> + my $ pkt = pack ( " nnnn " , $ reqid , 0 , 1 , 0 ) ; # request id ( opaque ) , seq num , # packets , reserved ( must be 0 ) <nl> + $ pkt . = $ req ; <nl> + my $ fail = sub { <nl> + my $ msg = shift ; <nl> + warn " FAILING send_udp because : $ msg \ n " ; <nl> + return undef ; <nl> + } ; <nl> + return $ fail - > ( " send " ) unless send ( $ sock , $ pkt , 0 ) ; <nl> + <nl> + my $ ret = { } ; <nl> + <nl> + my $ got = 0 ; # packets got <nl> + my $ numpkts = undef ; <nl> + <nl> + while ( ! defined ( $ numpkts ) | | $ got < $ numpkts ) { <nl> + my $ rin = ' ' ; <nl> + vec ( $ rin , fileno ( $ sock ) , 1 ) = 1 ; <nl> + my $ rout ; <nl> + return $ fail - > ( " timeout after $ got packets " ) unless <nl> + select ( $ rout = $ rin , undef , undef , 1 . 5 ) ; <nl> + <nl> + my $ res ; <nl> + my $ sender = $ sock - > recv ( $ res , 1500 , 0 ) ; <nl> + my ( $ resid , $ seq , $ this_numpkts , $ resv ) = unpack ( " nnnn " , substr ( $ res , 0 , 8 ) ) ; <nl> + die " Response ID of $ resid doesn ' t match request if of $ reqid " unless $ resid = = $ reqid ; <nl> + die " Reserved area not zero " unless $ resv = = 0 ; <nl> + die " num packets changed midstream ! " if defined $ numpkts & & $ this_numpkts ! = $ numpkts ; <nl> + $ numpkts = $ this_numpkts ; <nl> + $ ret - > { $ seq } = $ res ; <nl> + $ got + + ; <nl> + } <nl> + return $ ret ; <nl> + } <nl> + <nl> + <nl> + __END__ <nl> + $ sender = recv ( $ usock , $ ans , 1050 , 0 ) ; <nl> + <nl> + __END__ <nl> + $ usock - > send <nl> + <nl> + <nl> + ( $ hispaddr = recv ( SOCKET , $ rtime , 4 , 0 ) ) | | die " recv : $ ! " ; <nl> + ( $ port , $ hisiaddr ) = sockaddr_in ( $ hispaddr ) ; <nl> + $ host = gethostbyaddr ( $ hisiaddr , AF_INET ) ; <nl> + $ histime = unpack ( " N " , $ rtime ) - $ SECS_of_70_YEARS ; <nl> new file mode 100755 <nl> index 00000000000 . . 492215edbb0 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / unixsocket . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 3 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ filename = " / tmp / memcachetest $ $ " ; <nl> + <nl> + my $ server = new_memcached ( " - s $ filename " ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + ok ( - S $ filename , " creating unix domain socket $ filename " ) ; <nl> + <nl> + # set foo ( and should get it ) <nl> + print $ sock " set foo 0 0 6 \ r \ nfooval \ r \ n " ; <nl> + <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foo " ) ; <nl> + mem_get_is ( $ sock , " foo " , " fooval " ) ; <nl> + <nl> + unlink ( $ filename ) ; <nl> + <nl> + # # Just some basic stuff for now . . . <nl> new file mode 100755 <nl> index 00000000000 . . 22c610cbda2 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / disabled / whitespace . t <nl> <nl> + # ! / usr / bin / perl <nl> + use strict ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + our @ files ; <nl> + <nl> + BEGIN { <nl> + chdir " $ Bin / . . " or die ; <nl> + <nl> + my @ exempted = qw ( Makefile . am ChangeLog doc / Makefile . 
new file mode 100755
index 00000000000..492215edbb0
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/disabled/unixsocket.t

+#!/usr/bin/perl
+
+use strict;
+use Test::More tests => 3;
+use FindBin qw($Bin);
+use lib "$Bin/lib";
+use MemcachedTest;
+
+my $filename = "/tmp/memcachetest$$";
+
+my $server = new_memcached("-s $filename");
+my $sock = $server->sock;
+
+ok(-S $filename, "creating unix domain socket $filename");
+
+# set foo (and should get it)
+print $sock "set foo 0 0 6\r\nfooval\r\n";
+
+is(scalar <$sock>, "STORED\r\n", "stored foo");
+mem_get_is($sock, "foo", "fooval");
+
+unlink($filename);
+
+## Just some basic stuff for now...
new file mode 100755
index 00000000000..22c610cbda2
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/disabled/whitespace.t

+#!/usr/bin/perl
+use strict;
+use FindBin qw($Bin);
+our @files;
+
+BEGIN {
+    chdir "$Bin/.." or die;
+
+    my @exempted = qw(Makefile.am ChangeLog doc/Makefile.am);
+    push(@exempted, glob("doc/*.xml"));
+    push(@exempted, glob("doc/xml2rfc/*.xsl"));
+    push(@exempted, glob("m4/*backport*m4"));
+    my %exempted_hash = map { $_ => 1 } @exempted;
+
+    my @stuff = split /\0/, `git ls-files -z -c -m -o --exclude-standard`;
+    @files = grep { !$exempted_hash{$_} } @stuff;
+
+    # We won't find any files if git isn't installed.  If git isn't
+    # installed, they're probably not doing any useful development, or
+    # at the very least git am will clean up whitespace when we receive
+    # their patch.
+    unless (@files) {
+        use Test::More;
+        plan skip_all => "Skipping tests probably because you don't have git.";
+        exit 0;
+    }
+}
+
+use Test::More tests => scalar(@files);
+
+foreach my $f (@files) {
+    open(my $fh, $f) or die;
+    my $before = do { local $/; <$fh>; };
+    close($fh);
+    my $after = $before;
+    $after =~ s/\t/    /g;
+    $after =~ s/ +$//mg;
+    $after .= "\n" unless $after =~ /\n$/;
+    ok($after eq $before, "$f (see devtools/clean-whitespace.pl)");
+}
new file mode 100755
index 00000000000..38b4276a2f6
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/expirations.t

+#!/usr/bin/perl
+
+use strict;
+use Test::More tests => 15;
+use FindBin qw($Bin);
+use lib "$Bin/lib";
+use MemcachedTest;
+
+my $server = new_memcached();
+my $sock = $server->sock;
+my $expire;
+
+sub wait_for_early_second {
+    my $have_hires = eval "use Time::HiRes (); 1";
+    if ($have_hires) {
+        my $tsh = Time::HiRes::time();
+        my $ts = int($tsh);
+        return if ($tsh - $ts) < 0.5;
+    }
+
+    my $ts = int(time());
+    while (1) {
+        my $t = int(time());
+        return if $t != $ts;
+        select undef, undef, undef, 0.10;  # 1/10th of a second sleeps until time changes.
+    }
+}
+
+wait_for_early_second();
+
+# Modified the expiration time to fix #264 (the old version of the line follows first)
+# print $sock "set foo 0 1 6\r\nfooval\r\n";
+print $sock "set foo 0 60 6\r\nfooval\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored foo");
+
+mem_get_is($sock, "foo", "fooval");
+# Modified the expiration time to fix #264 (the old version of the line follows first)
+# sleep(1.5);
+sleep(60);
+mem_get_is($sock, "foo", undef);
+
+$expire = time() - 1;
+print $sock "set foo 0 $expire 6\r\nfooval\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored foo");
+mem_get_is($sock, "foo", undef, "already expired");
+
+$expire = time() + 5;
+print $sock "set foo 0 $expire 6\r\nfoov+1\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored foo");
+mem_get_is($sock, "foo", "foov+1");
+sleep(7.2);
+mem_get_is($sock, "foo", undef, "now expired");
+
+$expire = time() - 20;
+print $sock "set boo 0 $expire 6\r\nbooval\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored boo");
+mem_get_is($sock, "boo", undef, "now expired");
+
+print $sock "add add 0 5 6\r\naddval\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored add");
+mem_get_is($sock, "add", "addval");
+# second add fails
+print $sock "add add 0 5 7\r\naddval2\r\n";
+is(scalar <$sock>, "NOT_STORED\r\n", "add failure");
+sleep(5.3);
+print $sock "add add 0 2 7\r\naddval3\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored add again");
+mem_get_is($sock, "add", "addval3");
new file mode 100755
index 00000000000..e5231965991
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/flags.t

+#!/usr/bin/perl
+
+use strict;
+use Test::More tests => 6;
+use FindBin qw($Bin);
+use lib "$Bin/lib";
+use MemcachedTest;
+
+my $server = new_memcached();
+my $sock = $server->sock;
+
+# set foo (and should get it)
+for my $flags (0, 123, 2**16 - 1) {
+    print $sock "set foo $flags 0 6\r\nfooval\r\n";
+    is(scalar <$sock>, "STORED\r\n", "stored foo");
+    mem_get_is({ sock  => $sock,
+                 flags => $flags }, "foo", "fooval", "got flags $flags back");
+}
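The tests above all speak the ASCII protocol, whose storage commands share one frame: `<cmd> <key> <flags> <exptime> <bytes>\r\n<data block>\r\n`. A hedged helper for building such frames (`build_store` is illustrative and not part of MemcachedTest.pm):

    # The byte count on the command line must match the data block
    # exactly, or the server answers CLIENT_ERROR.
    sub build_store {
        my ($cmd, $key, $flags, $exptime, $value) = @_;
        return sprintf("%s %s %d %d %d\r\n%s\r\n",
                       $cmd, $key, $flags, $exptime, length($value), $value);
    }

    print build_store("set", "foo", 123, 0, "fooval");
    # set foo 123 0 6
    # fooval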
<nl> + print $ sock " delete foo \ r \ n " ; <nl> + is ( scalar < $ sock > , " NOT_FOUND \ r \ n " , " deleted foo , but not found " ) ; <nl> + <nl> + # add moo <nl> + # <nl> + print $ sock " add moo 0 0 6 \ r \ nmooval \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored barval " ) ; <nl> + mem_get_is ( $ sock , " moo " , " mooval " ) ; <nl> + <nl> + # check - and - set ( cas ) failure case , try to set value with incorrect cas unique val <nl> + print $ sock " cas moo 0 0 6 0 \ r \ nMOOVAL \ r \ n " ; <nl> + is ( scalar < $ sock > , " EXISTS \ r \ n " , " check and set with invalid id " ) ; <nl> + <nl> + # test " gets " , grab unique ID <nl> + print $ sock " gets moo \ r \ n " ; <nl> + # VALUE moo 0 6 3084947704 <nl> + # <nl> + my @ retvals = split ( / / , scalar < $ sock > ) ; <nl> + my $ data = scalar < $ sock > ; # grab data <nl> + my $ dot = scalar < $ sock > ; # grab dot on line by itself <nl> + is ( $ retvals [ 0 ] , " VALUE " , " get value using ' gets ' " ) ; <nl> + my $ unique_id = $ retvals [ 4 ] ; <nl> + # clean off \ r \ n <nl> + $ unique_id = ~ s / \ r \ n $ / / ; <nl> + ok ( $ unique_id = ~ / ^ \ d + $ / , " unique ID ' $ unique_id ' is an integer " ) ; <nl> + # now test that we can store moo with the correct unique id <nl> + print $ sock " cas moo 0 0 6 $ unique_id \ r \ nMOOVAL \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " ) ; <nl> + mem_get_is ( $ sock , " moo " , " MOOVAL " ) ; <nl> + <nl> + # pipeling is okay <nl> + print $ sock " set foo 0 0 6 \ r \ nfooval \ r \ ndelete foo \ r \ nset foo 0 0 6 \ r \ nfooval \ r \ ndelete foo \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " pipeline set " ) ; <nl> + is ( scalar < $ sock > , " DELETED \ r \ n " , " pipeline delete " ) ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " pipeline set " ) ; <nl> + is ( scalar < $ sock > , " DELETED \ r \ n " , " pipeline delete " ) ; <nl> + <nl> + <nl> + # Test sets up to a large size around 1MB . <nl> + # Everything up to 1MB - 1k should succeed , everything 1MB + 1k should fail . <nl> + <nl> + my $ len = 1024 ; <nl> + while ( $ len < 1024 * 1028 * 10 ) { <nl> + my $ val = " B " x $ len ; <nl> + if ( $ len > ( 1024 * 1024 * 10 ) ) { <nl> + # Ensure causing a memory overflow doesn ' t leave stale data . <nl> + print $ sock " set foo_ $ len 0 0 3 \ r \ nMOO \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " ) ; <nl> + print $ sock " set foo_ $ len 0 0 $ len \ r \ n $ val \ r \ n " ; <nl> + is ( scalar < $ sock > , " SERVER_ERROR object too large for cache \ r \ n " , " failed to store size $ len " ) ; <nl> + mem_get_is ( $ sock , " foo_ $ len " ) ; <nl> + } else { <nl> + print $ sock " set foo_ $ len 0 0 $ len \ r \ n $ val \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored size $ len " ) ; <nl> + } <nl> + $ len + = 204800 ; <nl> + } <nl> + <nl> new file mode 100755 <nl> index 00000000000 . . e0ba65f47e1 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / incrdecr . t <nl> <nl> + # ! 
new file mode 100755
index 00000000000..e0ba65f47e1
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/incrdecr.t

+#!/usr/bin/perl
+
+use strict;
+use Test::More tests => 23;
+use FindBin qw($Bin);
+use lib "$Bin/lib";
+use MemcachedTest;
+
+my $server = new_memcached();
+my $sock = $server->sock;
+
+# Bug 21
+print $sock "set bug21 0 0 19\r\n9223372036854775807\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored text");
+print $sock "incr bug21 1\r\n";
+is(scalar <$sock>, "9223372036854775808\r\n", "bug21 incr 1");
+print $sock "incr bug21 1\r\n";
+is(scalar <$sock>, "9223372036854775809\r\n", "bug21 incr 2");
+print $sock "decr bug21 1\r\n";
+is(scalar <$sock>, "9223372036854775808\r\n", "bug21 decr");
+
+print $sock "set num 0 0 1\r\n1\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored num");
+mem_get_is($sock, "num", 1, "stored 1");
+
+print $sock "incr num 1\r\n";
+is(scalar <$sock>, "2\r\n", "+ 1 = 2");
+mem_get_is($sock, "num", 2);
+
+print $sock "incr num 8\r\n";
+is(scalar <$sock>, "10\r\n", "+ 8 = 10");
+mem_get_is($sock, "num", 10);
+
+print $sock "decr num 1\r\n";
+is(scalar <$sock>, "9\r\n", "- 1 = 9");
+
+print $sock "decr num 9\r\n";
+is(scalar <$sock>, "0\r\n", "- 9 = 0");
+
+print $sock "decr num 5\r\n";
+is(scalar <$sock>, "0\r\n", "- 5 = 0");
+
+printf $sock "set num 0 0 10\r\n4294967296\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored 2**32");
+
+print $sock "incr num 1\r\n";
+is(scalar <$sock>, "4294967297\r\n", "4294967296 + 1 = 4294967297");
+
+printf $sock "set num 0 0 %d\r\n18446744073709551615\r\n", length("18446744073709551615");
+is(scalar <$sock>, "STORED\r\n", "stored 2**64-1");
+
+print $sock "incr num 1\r\n";
+is(scalar <$sock>, "0\r\n", "(2**64 - 1) + 1 = 0");
+
+print $sock "decr bogus 5\r\n";
+is(scalar <$sock>, "NOT_FOUND\r\n", "can't decr bogus key");
+
+print $sock "decr incr 5\r\n";
+is(scalar <$sock>, "NOT_FOUND\r\n", "can't decr bogus key 'incr'");
+
+print $sock "set bigincr 0 0 1\r\n0\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored bigincr");
+print $sock "incr bigincr 18446744073709551610\r\n";
+is(scalar <$sock>, "18446744073709551610\r\n");
+
+print $sock "set text 0 0 2\r\nhi\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored hi");
+print $sock "incr text 1\r\n";
+is(scalar <$sock>,
+   "CLIENT_ERROR cannot increment or decrement non-numeric value\r\n",
+   "can't incr non-numeric value");
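The bug21 and 2**64-1 cases above pin down the counter semantics this test expects: incr/decr operate on an unsigned 64-bit integer, so arithmetic wraps modulo 2**64. A quick illustration with Math::BigInt (core Perl, used here only because native integers can't hold 2**64):

    use Math::BigInt;

    my $mod = Math::BigInt->new(2)->bpow(64);          # 2**64
    my $max = $mod->copy->bsub(1);                     # 18446744073709551615
    my $wrapped = $max->copy->badd(1)->bmod($mod);
    print "$wrapped\n";                                # 0, matching "(2**64 - 1) + 1 = 0"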
new file mode 100755
index 00000000000..07a78b7c7c6
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/issue_108.t

+#!/usr/bin/perl
+
+use strict;
+use Test::More tests => 4;
+use FindBin qw($Bin);
+use lib "$Bin/lib";
+use MemcachedTest;
+
+my $server = new_memcached();
+my $sock = $server->sock;
+my $key = "del_key";
+
+print $sock "add $key 0 0 1\r\nx\r\n";
+is(scalar <$sock>, "STORED\r\n", "Added a key");
+
+print $sock "delete $key 0\r\n";
+is(scalar <$sock>, "DELETED\r\n", "Properly deleted with 0");
+
+print $sock "add $key 0 0 1\r\nx\r\n";
+is(scalar <$sock>, "STORED\r\n", "Added again a key");
+
+print $sock "delete $key 0 noreply\r\n";
+# will not reply, but a subsequent add will succeed
+
+print $sock "add $key 0 0 1\r\nx\r\n";
+is(scalar <$sock>, "STORED\r\n", "Add succeeded after quiet deletion.");
+
new file mode 100755
index 00000000000..677bf9d4b23
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/issue_3.t

+#!/usr/bin/perl
+
+use strict;
+use Test::More tests => 8;
+use FindBin qw($Bin);
+use lib "$Bin/lib";
+use MemcachedTest;
+
+my $server = new_memcached();
+my $sock = $server->sock;
+my $key = "del_key";
+
+print $sock "delete $key\r\n";
+is(scalar <$sock>, "NOT_FOUND\r\n", "not found on delete");
+
+print $sock "delete $key 10\r\n";
+# note this change is made so rethinkdb tests will pass. At some point we should get our error messages in line with memcached's.
+# is(scalar <$sock>, "CLIENT_ERROR bad command line format.  "
+#    . "Usage: delete <key> [noreply]\r\n", "invalid delete");
+is(scalar <$sock>, "CLIENT_ERROR bad command line format\r\n", "invalid delete");
+
+print $sock "add $key 0 0 1\r\nx\r\n";
+is(scalar <$sock>, "STORED\r\n", "Add before a broken delete.");
+
+print $sock "delete $key 10 noreply\r\n";
+# Does not reply
+# is(scalar <$sock>, "ERROR\r\n", "Even more invalid delete");
+
+print $sock "add $key 0 0 1\r\nx\r\n";
+is(scalar <$sock>, "NOT_STORED\r\n", "Failed to add after failed silent delete.");
+
+print $sock "delete $key noreply\r\n";
+# Will not reply, so let's do a set and check that.
+
+print $sock "set $key 0 0 1\r\nx\r\n";
+is(scalar <$sock>, "STORED\r\n", "Stored a key");
+
+print $sock "delete $key\r\n";
+is(scalar <$sock>, "DELETED\r\n", "Properly deleted");
+
+print $sock "set $key 0 0 1\r\nx\r\n";
+is(scalar <$sock>, "STORED\r\n", "Stored a key");
+
+print $sock "delete $key noreply\r\n";
+# will not reply, but a subsequent add will succeed
+
+print $sock "add $key 0 0 1\r\nx\r\n";
+is(scalar <$sock>, "STORED\r\n", "Add succeeded after deletion.");
+
new file mode 100755
index 00000000000..94e3e95793f
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/issue_68.t

+#!/usr/bin/perl
+
+use strict;
+use Test::More tests => 996;
+use FindBin qw($Bin);
+use lib "$Bin/lib";
+use MemcachedTest;
+
+my $server = new_memcached();
+my $sock = $server->sock;
+
+for (my $keyi = 1; $keyi < 250; $keyi++) {
+    my $key = "x" x $keyi;
+    print $sock "set $key 0 0 1\r\n9\r\n";
+    is(scalar <$sock>, "STORED\r\n", "stored $key");
+    mem_get_is($sock, $key, "9");
+    print $sock "incr $key 1\r\n";
+    is(scalar <$sock>, "10\r\n", "incr $key to 10");
+    mem_get_is($sock, $key, "10");
+}
+
new file mode 100755
index 00000000000..95e39db11c9
--- /dev/null
+++ b/test/memcached_workloads/memcached_suite/issue_70.t

+#!/usr/bin/perl
+
+use strict;
+use Test::More tests => 4;
+use FindBin qw($Bin);
+use lib "$Bin/lib";
+use MemcachedTest;
+
+my $server = new_memcached();
+my $sock = $server->sock;
+
+print $sock "set issue70 0 0 0\r\n\r\n";
+is(scalar <$sock>, "STORED\r\n", "stored issue70");
+
+print $sock "set issue70 0 0 -1\r\n";
+is(scalar <$sock>, "CLIENT_ERROR bad command line format\r\n");
+
+print $sock "set issue70 0 0 4294967295\r\n";
+is(scalar <$sock>, "CLIENT_ERROR bad command line format\r\n");
+
+print $sock "set issue70 0 0 2147483647\r\nscoobyscoobydoo";
+is(scalar <$sock>, "CLIENT_ERROR bad command line format\r\n");
" ' $ val ' " : " < undef > " ; <nl> + $ msg | | = " $ key = = $ dval " ; <nl> + <nl> + print $ sock " get $ key \ r \ n " ; <nl> + if ( ! defined $ val ) { <nl> + my $ line = scalar < $ sock > ; <nl> + if ( $ line = ~ / ^ VALUE / ) { <nl> + $ line . = scalar ( < $ sock > ) . scalar ( < $ sock > ) ; <nl> + } <nl> + Test : : More : : is ( $ line , " END \ r \ n " , $ msg ) ; <nl> + } else { <nl> + my $ len = length ( $ val ) ; <nl> + my $ body = scalar ( < $ sock > ) ; <nl> + my $ expected = " VALUE $ key $ expect_flags $ len \ r \ n $ val \ r \ nEND \ r \ n " ; <nl> + if ( ! $ body | | $ body = ~ / ^ END / ) { <nl> + Test : : More : : is ( $ body , $ expected , $ msg ) ; <nl> + return ; <nl> + } <nl> + $ body . = scalar ( < $ sock > ) . scalar ( < $ sock > ) ; <nl> + Test : : More : : is ( $ body , $ expected , $ msg ) ; <nl> + } <nl> + } <nl> + <nl> + sub mem_gets { <nl> + # works on single - line values only . no newlines in value . <nl> + my ( $ sock_opts , $ key ) = @ _ ; <nl> + my $ opts = ref $ sock_opts eq " HASH " ? $ sock_opts : { } ; <nl> + my $ sock = ref $ sock_opts eq " HASH " ? $ opts - > { sock } : $ sock_opts ; <nl> + my $ val ; <nl> + my $ expect_flags = $ opts - > { flags } | | 0 ; <nl> + <nl> + print $ sock " gets $ key \ r \ n " ; <nl> + my $ response = < $ sock > ; <nl> + if ( $ response = ~ / ^ END / ) { <nl> + return " NOT_FOUND " ; <nl> + } <nl> + else <nl> + { <nl> + $ response = ~ / VALUE ( . * ) ( \ d + ) ( \ d + ) ( \ d + ) / ; <nl> + my $ flags = $ 2 ; <nl> + my $ len = $ 3 ; <nl> + my $ identifier = $ 4 ; <nl> + read $ sock , $ val , $ len ; <nl> + # get the END <nl> + $ _ = < $ sock > ; <nl> + $ _ = < $ sock > ; <nl> + <nl> + return ( $ identifier , $ val ) ; <nl> + } <nl> + <nl> + } <nl> + sub mem_gets_is { <nl> + # works on single - line values only . no newlines in value . <nl> + my ( $ sock_opts , $ identifier , $ key , $ val , $ msg ) = @ _ ; <nl> + my $ opts = ref $ sock_opts eq " HASH " ? $ sock_opts : { } ; <nl> + my $ sock = ref $ sock_opts eq " HASH " ? $ opts - > { sock } : $ sock_opts ; <nl> + <nl> + my $ expect_flags = $ opts - > { flags } | | 0 ; <nl> + my $ dval = defined $ val ? " ' $ val ' " : " < undef > " ; <nl> + $ msg | | = " $ key = = $ dval " ; <nl> + <nl> + print $ sock " gets $ key \ r \ n " ; <nl> + if ( ! defined $ val ) { <nl> + my $ line = scalar < $ sock > ; <nl> + if ( $ line = ~ / ^ VALUE / ) { <nl> + $ line . = scalar ( < $ sock > ) . scalar ( < $ sock > ) ; <nl> + } <nl> + Test : : More : : is ( $ line , " END \ r \ n " , $ msg ) ; <nl> + } else { <nl> + my $ len = length ( $ val ) ; <nl> + my $ body = scalar ( < $ sock > ) ; <nl> + my $ expected = " VALUE $ key $ expect_flags $ len $ identifier \ r \ n $ val \ r \ nEND \ r \ n " ; <nl> + if ( ! $ body | | $ body = ~ / ^ END / ) { <nl> + Test : : More : : is ( $ body , $ expected , $ msg ) ; <nl> + return ; <nl> + } <nl> + $ body . = scalar ( < $ sock > ) . scalar ( < $ sock > ) ; <nl> + Test : : More : : is ( $ body , $ expected , $ msg ) ; <nl> + } <nl> + } <nl> + <nl> + sub free_port { <nl> + my $ type = shift | | " tcp " ; <nl> + my $ sock ; <nl> + my $ port ; <nl> + while ( ! $ sock ) { <nl> + $ port = int ( rand ( 20000 ) ) + 30000 ; <nl> + $ sock = IO : : Socket : : INET - > new ( LocalAddr = > ' 127 . 0 . 0 . 
1 ' , <nl> + LocalPort = > $ port , <nl> + Proto = > $ type , <nl> + ReuseAddr = > 1 ) ; <nl> + } <nl> + return $ port ; <nl> + } <nl> + <nl> + sub supports_udp { <nl> + return 0 ; <nl> + } <nl> + <nl> + sub supports_sasl { <nl> + return 0 ; <nl> + } <nl> + <nl> + sub new_memcached { <nl> + # croak ( " No arguments supported " ) if @ _ ; <nl> + my $ conn = IO : : Socket : : INET - > new ( PeerAddr = > " 127 . 0 . 0 . 1 : $ ENV { ' RUN_PORT ' } " ) ; <nl> + return Memcached : : Handle - > new ( pid = > " NOPID " , conn = > $ conn , udpport = > " NOUDP " , host = > ' 127 . 0 . 0 . 1 ' , port = > $ ENV { ' RUN_PORT ' } ) ; <nl> + } <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + package Memcached : : Handle ; <nl> + sub new { <nl> + my ( $ class , % params ) = @ _ ; <nl> + return bless \ % params , $ class ; <nl> + } <nl> + <nl> + sub DESTROY { <nl> + my $ self = shift ; <nl> + # kill 2 , $ self - > { pid } ; <nl> + } <nl> + <nl> + sub stop { <nl> + my $ self = shift ; <nl> + # kill 15 , $ self - > { pid } ; <nl> + } <nl> + <nl> + sub host { $ _ [ 0 ] { host } } <nl> + sub port { $ _ [ 0 ] { port } } <nl> + sub udpport { $ _ [ 0 ] { udpport } } <nl> + <nl> + sub sock { <nl> + my $ self = shift ; <nl> + <nl> + if ( $ self - > { conn } & & ( $ self - > { domainsocket } | | getpeername ( $ self - > { conn } ) ) ) { <nl> + return $ self - > { conn } ; <nl> + } <nl> + return $ self - > new_sock ; <nl> + } <nl> + <nl> + sub new_sock { <nl> + my $ self = shift ; <nl> + if ( $ self - > { domainsocket } ) { <nl> + return IO : : Socket : : UNIX - > new ( Peer = > $ self - > { domainsocket } ) ; <nl> + } else { <nl> + return IO : : Socket : : INET - > new ( PeerAddr = > " $ self - > { host } : $ self - > { port } " ) ; <nl> + } <nl> + } <nl> + <nl> + sub new_udp_sock { <nl> + my $ self = shift ; <nl> + return IO : : Socket : : INET - > new ( PeerAddr = > ' 127 . 0 . 0 . 1 ' , <nl> + PeerPort = > $ self - > { udpport } , <nl> + Proto = > ' udp ' , <nl> + LocalAddr = > ' 127 . 0 . 0 . 1 ' , <nl> + LocalPort = > MemcachedTest : : free_port ( ' udp ' ) , <nl> + ) ; <nl> + <nl> + } <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . b4e63714fa4 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Builder . pm <nl> <nl> + package Test : : Builder ; <nl> + <nl> + use 5 . 004 ; <nl> + <nl> + # $ ^ C was only introduced in 5 . 005 - ish . We do this to prevent <nl> + # use of uninitialized value warnings in older perls . <nl> + $ ^ C | | = 0 ; <nl> + <nl> + use strict ; <nl> + use vars qw ( $ VERSION ) ; <nl> + $ VERSION = ' 0 . 72 ' ; <nl> + $ VERSION = eval $ VERSION ; # make the alpha version come out as a number <nl> + <nl> + # Make Test : : Builder thread - safe for ithreads . <nl> + BEGIN { <nl> + use Config ; <nl> + # Load threads : : shared when threads are turned on . <nl> + # 5 . 8 . 0 ' s threads are so busted we no longer support them . <nl> + if ( $ ] > = 5 . 008001 & & $ Config { useithreads } & & $ INC { ' threads . pm ' } ) { <nl> + require threads : : shared ; <nl> + <nl> + # Hack around YET ANOTHER threads : : shared bug . It would <nl> + # occassionally forget the contents of the variable when sharing it . <nl> + # So we first copy the data , then share , then put our copy back . 
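Note that new_memcached above does not launch a server process (the pid/kill plumbing is stubbed out); it simply connects to whatever is already listening on $ENV{RUN_PORT}. So a single suite file can plausibly be driven like this, assuming a memcached-compatible server is already bound to an illustrative port 11213:

    # run one .t file against an already-running server
    $ENV{RUN_PORT} = 11213;
    system("prove", "getset.t") == 0
        or die "getset.t failed: $?";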
<nl> + * share = sub ( \ [ $ @ % ] ) { <nl> + my $ type = ref $ _ [ 0 ] ; <nl> + my $ data ; <nl> + <nl> + if ( $ type eq ' HASH ' ) { <nl> + % $ data = % { $ _ [ 0 ] } ; <nl> + } <nl> + elsif ( $ type eq ' ARRAY ' ) { <nl> + @ $ data = @ { $ _ [ 0 ] } ; <nl> + } <nl> + elsif ( $ type eq ' SCALAR ' ) { <nl> + $ $ data = $ { $ _ [ 0 ] } ; <nl> + } <nl> + else { <nl> + die ( " Unknown type : " . $ type ) ; <nl> + } <nl> + <nl> + $ _ [ 0 ] = & threads : : shared : : share ( $ _ [ 0 ] ) ; <nl> + <nl> + if ( $ type eq ' HASH ' ) { <nl> + % { $ _ [ 0 ] } = % $ data ; <nl> + } <nl> + elsif ( $ type eq ' ARRAY ' ) { <nl> + @ { $ _ [ 0 ] } = @ $ data ; <nl> + } <nl> + elsif ( $ type eq ' SCALAR ' ) { <nl> + $ { $ _ [ 0 ] } = $ $ data ; <nl> + } <nl> + else { <nl> + die ( " Unknown type : " . $ type ) ; <nl> + } <nl> + <nl> + return $ _ [ 0 ] ; <nl> + } ; <nl> + } <nl> + # 5 . 8 . 0 ' s threads : : shared is busted when threads are off <nl> + # and earlier Perls just don ' t have that module at all . <nl> + else { <nl> + * share = sub { return $ _ [ 0 ] } ; <nl> + * lock = sub { 0 } ; <nl> + } <nl> + } <nl> + <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Builder - Backend for building test libraries <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + package My : : Test : : Module ; <nl> + use Test : : Builder ; <nl> + require Exporter ; <nl> + @ ISA = qw ( Exporter ) ; <nl> + @ EXPORT = qw ( ok ) ; <nl> + <nl> + my $ Test = Test : : Builder - > new ; <nl> + $ Test - > output ( ' my_logfile ' ) ; <nl> + <nl> + sub import { <nl> + my ( $ self ) = shift ; <nl> + my $ pack = caller ; <nl> + <nl> + $ Test - > exported_to ( $ pack ) ; <nl> + $ Test - > plan ( @ _ ) ; <nl> + <nl> + $ self - > export_to_level ( 1 , $ self , ' ok ' ) ; <nl> + } <nl> + <nl> + sub ok { <nl> + my ( $ test , $ name ) = @ _ ; <nl> + <nl> + $ Test - > ok ( $ test , $ name ) ; <nl> + } <nl> + <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + Test : : Simple and Test : : More have proven to be popular testing modules , <nl> + but they ' re not always flexible enough . Test : : Builder provides the a <nl> + building block upon which to write your own test libraries I < which can <nl> + work together > . <nl> + <nl> + = head2 Construction <nl> + <nl> + = over 4 <nl> + <nl> + = item B < new > <nl> + <nl> + my $ Test = Test : : Builder - > new ; <nl> + <nl> + Returns a Test : : Builder object representing the current state of the <nl> + test . <nl> + <nl> + Since you only run one test per program C < new > always returns the same <nl> + Test : : Builder object . No matter how many times you call new ( ) , you ' re <nl> + getting the same object . This is called a singleton . This is done so that <nl> + multiple modules share such global information as the test counter and <nl> + where test output is going . <nl> + <nl> + If you want a completely new Test : : Builder object different from the <nl> + singleton , use C < create > . <nl> + <nl> + = cut <nl> + <nl> + my $ Test = Test : : Builder - > new ; <nl> + sub new { <nl> + my ( $ class ) = shift ; <nl> + $ Test | | = $ class - > create ; <nl> + return $ Test ; <nl> + } <nl> + <nl> + <nl> + = item B < create > <nl> + <nl> + my $ Test = Test : : Builder - > create ; <nl> + <nl> + Ok , so there can be more than one Test : : Builder object and this is how <nl> + you get it . You might use this instead of C < new ( ) > if you ' re testing <nl> + a Test : : Builder based module , but otherwise you probably want C < new > . <nl> + <nl> + B < NOTE > : the implementation is not complete . 
C < level > , for example , is <nl> + still shared amongst B < all > Test : : Builder objects , even ones created using <nl> + this method . Also , the method name may change in the future . <nl> + <nl> + = cut <nl> + <nl> + sub create { <nl> + my $ class = shift ; <nl> + <nl> + my $ self = bless { } , $ class ; <nl> + $ self - > reset ; <nl> + <nl> + return $ self ; <nl> + } <nl> + <nl> + = item B < reset > <nl> + <nl> + $ Test - > reset ; <nl> + <nl> + Reinitializes the Test : : Builder singleton to its original state . <nl> + Mostly useful for tests run in persistent environments where the same <nl> + test might be run multiple times in the same process . <nl> + <nl> + = cut <nl> + <nl> + use vars qw ( $ Level ) ; <nl> + <nl> + sub reset { <nl> + my ( $ self ) = @ _ ; <nl> + <nl> + # We leave this a global because it has to be localized and localizing <nl> + # hash keys is just asking for pain . Also , it was documented . <nl> + $ Level = 1 ; <nl> + <nl> + $ self - > { Test_Died } = 0 ; <nl> + $ self - > { Have_Plan } = 0 ; <nl> + $ self - > { No_Plan } = 0 ; <nl> + $ self - > { Original_Pid } = $ $ ; <nl> + <nl> + share ( $ self - > { Curr_Test } ) ; <nl> + $ self - > { Curr_Test } = 0 ; <nl> + $ self - > { Test_Results } = & share ( [ ] ) ; <nl> + <nl> + $ self - > { Exported_To } = undef ; <nl> + $ self - > { Expected_Tests } = 0 ; <nl> + <nl> + $ self - > { Skip_All } = 0 ; <nl> + <nl> + $ self - > { Use_Nums } = 1 ; <nl> + <nl> + $ self - > { No_Header } = 0 ; <nl> + $ self - > { No_Ending } = 0 ; <nl> + <nl> + $ self - > _dup_stdhandles unless $ ^ C ; <nl> + <nl> + return undef ; <nl> + } <nl> + <nl> + = back <nl> + <nl> + = head2 Setting up tests <nl> + <nl> + These methods are for setting up tests and declaring how many there <nl> + are . You usually only want to call one of these methods . <nl> + <nl> + = over 4 <nl> + <nl> + = item B < exported_to > <nl> + <nl> + my $ pack = $ Test - > exported_to ; <nl> + $ Test - > exported_to ( $ pack ) ; <nl> + <nl> + Tells Test : : Builder what package you exported your functions to . <nl> + This is important for getting TODO tests right . <nl> + <nl> + = cut <nl> + <nl> + sub exported_to { <nl> + my ( $ self , $ pack ) = @ _ ; <nl> + <nl> + if ( defined $ pack ) { <nl> + $ self - > { Exported_To } = $ pack ; <nl> + } <nl> + return $ self - > { Exported_To } ; <nl> + } <nl> + <nl> + = item B < plan > <nl> + <nl> + $ Test - > plan ( ' no_plan ' ) ; <nl> + $ Test - > plan ( skip_all = > $ reason ) ; <nl> + $ Test - > plan ( tests = > $ num_tests ) ; <nl> + <nl> + A convenient way to set up your tests . Call this and Test : : Builder <nl> + will print the appropriate headers and take the appropriate actions . <nl> + <nl> + If you call plan ( ) , don ' t call any of the other methods below . <nl> + <nl> + = cut <nl> + <nl> + sub plan { <nl> + my ( $ self , $ cmd , $ arg ) = @ _ ; <nl> + <nl> + return unless $ cmd ; <nl> + <nl> + local $ Level = $ Level + 1 ; <nl> + <nl> + if ( $ self - > { Have_Plan } ) { <nl> + $ self - > croak ( " You tried to plan twice " ) ; <nl> + } <nl> + <nl> + if ( $ cmd eq ' no_plan ' ) { <nl> + $ self - > no_plan ; <nl> + } <nl> + elsif ( $ cmd eq ' skip_all ' ) { <nl> + return $ self - > skip_all ( $ arg ) ; <nl> + } <nl> + elsif ( $ cmd eq ' tests ' ) { <nl> + if ( $ arg ) { <nl> + local $ Level = $ Level + 1 ; <nl> + return $ self - > expected_tests ( $ arg ) ; <nl> + } <nl> + elsif ( ! defined $ arg ) { <nl> + $ self - > croak ( " Got an undefined number of tests " ) ; <nl> + } <nl> + elsif ( ! 
$ arg ) { <nl> + $ self - > croak ( " You said to run 0 tests " ) ; <nl> + } <nl> + } <nl> + else { <nl> + my @ args = grep { defined } ( $ cmd , $ arg ) ; <nl> + $ self - > croak ( " plan ( ) doesn ' t understand @ args " ) ; <nl> + } <nl> + <nl> + return 1 ; <nl> + } <nl> + <nl> + = item B < expected_tests > <nl> + <nl> + my $ max = $ Test - > expected_tests ; <nl> + $ Test - > expected_tests ( $ max ) ; <nl> + <nl> + Gets / sets the # of tests we expect this test to run and prints out <nl> + the appropriate headers . <nl> + <nl> + = cut <nl> + <nl> + sub expected_tests { <nl> + my $ self = shift ; <nl> + my ( $ max ) = @ _ ; <nl> + <nl> + if ( @ _ ) { <nl> + $ self - > croak ( " Number of tests must be a positive integer . You gave it ' $ max ' " ) <nl> + unless $ max = ~ / ^ \ + ? \ d + $ / and $ max > 0 ; <nl> + <nl> + $ self - > { Expected_Tests } = $ max ; <nl> + $ self - > { Have_Plan } = 1 ; <nl> + <nl> + $ self - > _print ( " 1 . . $ max \ n " ) unless $ self - > no_header ; <nl> + } <nl> + return $ self - > { Expected_Tests } ; <nl> + } <nl> + <nl> + <nl> + = item B < no_plan > <nl> + <nl> + $ Test - > no_plan ; <nl> + <nl> + Declares that this test will run an indeterminate # of tests . <nl> + <nl> + = cut <nl> + <nl> + sub no_plan { <nl> + my $ self = shift ; <nl> + <nl> + $ self - > { No_Plan } = 1 ; <nl> + $ self - > { Have_Plan } = 1 ; <nl> + } <nl> + <nl> + = item B < has_plan > <nl> + <nl> + $ plan = $ Test - > has_plan <nl> + <nl> + Find out whether a plan has been defined . $ plan is either C < undef > ( no plan has been set ) , C < no_plan > ( indeterminate # of tests ) or an integer ( the number of expected tests ) . <nl> + <nl> + = cut <nl> + <nl> + sub has_plan { <nl> + my $ self = shift ; <nl> + <nl> + return ( $ self - > { Expected_Tests } ) if $ self - > { Expected_Tests } ; <nl> + return ( ' no_plan ' ) if $ self - > { No_Plan } ; <nl> + return ( undef ) ; <nl> + } ; <nl> + <nl> + <nl> + = item B < skip_all > <nl> + <nl> + $ Test - > skip_all ; <nl> + $ Test - > skip_all ( $ reason ) ; <nl> + <nl> + Skips all the tests , using the given $ reason . Exits immediately with 0 . <nl> + <nl> + = cut <nl> + <nl> + sub skip_all { <nl> + my ( $ self , $ reason ) = @ _ ; <nl> + <nl> + my $ out = " 1 . . 0 " ; <nl> + $ out . = " # Skip $ reason " if $ reason ; <nl> + $ out . = " \ n " ; <nl> + <nl> + $ self - > { Skip_All } = 1 ; <nl> + <nl> + $ self - > _print ( $ out ) unless $ self - > no_header ; <nl> + exit ( 0 ) ; <nl> + } <nl> + <nl> + = back <nl> + <nl> + = head2 Running tests <nl> + <nl> + These actually run the tests , analogous to the functions in Test : : More . <nl> + <nl> + They all return true if the test passed , false if the test failed . <nl> + <nl> + $ name is always optional . <nl> + <nl> + = over 4 <nl> + <nl> + = item B < ok > <nl> + <nl> + $ Test - > ok ( $ test , $ name ) ; <nl> + <nl> + Your basic test . Pass if $ test is true , fail if $ test is false . Just <nl> + like Test : : Simple ' s ok ( ) . <nl> + <nl> + = cut <nl> + <nl> + sub ok { <nl> + my ( $ self , $ test , $ name ) = @ _ ; <nl> + <nl> + # $ test might contain an object which we don ' t want to accidentally <nl> + # store , so we turn it into a boolean . <nl> + $ test = $ test ? 1 : 0 ; <nl> + <nl> + $ self - > _plan_check ; <nl> + <nl> + lock $ self - > { Curr_Test } ; <nl> + $ self - > { Curr_Test } + + ; <nl> + <nl> + # In case $ name is a string overloaded object , force it to stringify . 
<nl> + $ self - > _unoverload_str ( \ $ name ) ; <nl> + <nl> + $ self - > diag ( < < ERR ) if defined $ name and $ name = ~ / ^ [ \ d \ s ] + $ / ; <nl> + You named your test ' $ name ' . You shouldn ' t use numbers for your test names . <nl> + Very confusing . <nl> + ERR <nl> + <nl> + my ( $ pack , $ file , $ line ) = $ self - > caller ; <nl> + <nl> + my $ todo = $ self - > todo ( $ pack ) ; <nl> + $ self - > _unoverload_str ( \ $ todo ) ; <nl> + <nl> + my $ out ; <nl> + my $ result = & share ( { } ) ; <nl> + <nl> + unless ( $ test ) { <nl> + $ out . = " not " ; <nl> + @ $ result { ' ok ' , ' actual_ok ' } = ( ( $ todo ? 1 : 0 ) , 0 ) ; <nl> + } <nl> + else { <nl> + @ $ result { ' ok ' , ' actual_ok ' } = ( 1 , $ test ) ; <nl> + } <nl> + <nl> + $ out . = " ok " ; <nl> + $ out . = " $ self - > { Curr_Test } " if $ self - > use_numbers ; <nl> + <nl> + if ( defined $ name ) { <nl> + $ name = ~ s | # | \ \ # | g ; # # in a name can confuse Test : : Harness . <nl> + $ out . = " - $ name " ; <nl> + $ result - > { name } = $ name ; <nl> + } <nl> + else { <nl> + $ result - > { name } = ' ' ; <nl> + } <nl> + <nl> + if ( $ todo ) { <nl> + $ out . = " # TODO $ todo " ; <nl> + $ result - > { reason } = $ todo ; <nl> + $ result - > { type } = ' todo ' ; <nl> + } <nl> + else { <nl> + $ result - > { reason } = ' ' ; <nl> + $ result - > { type } = ' ' ; <nl> + } <nl> + <nl> + $ self - > { Test_Results } [ $ self - > { Curr_Test } - 1 ] = $ result ; <nl> + $ out . = " \ n " ; <nl> + <nl> + $ self - > _print ( $ out ) ; <nl> + <nl> + unless ( $ test ) { <nl> + my $ msg = $ todo ? " Failed ( TODO ) " : " Failed " ; <nl> + $ self - > _print_diag ( " \ n " ) if $ ENV { HARNESS_ACTIVE } ; <nl> + <nl> + if ( defined $ name ) { <nl> + $ self - > diag ( qq [ $ msg test ' $ name ' \ n ] ) ; <nl> + $ self - > diag ( qq [ at $ file line $ line . \ n ] ) ; <nl> + } <nl> + else { <nl> + $ self - > diag ( qq [ $ msg test at $ file line $ line . \ n ] ) ; <nl> + } <nl> + } <nl> + <nl> + return $ test ? 1 : 0 ; <nl> + } <nl> + <nl> + <nl> + sub _unoverload { <nl> + my $ self = shift ; <nl> + my $ type = shift ; <nl> + <nl> + $ self - > _try ( sub { require overload } ) | | return ; <nl> + <nl> + foreach my $ thing ( @ _ ) { <nl> + if ( $ self - > _is_object ( $ $ thing ) ) { <nl> + if ( my $ string_meth = overload : : Method ( $ $ thing , $ type ) ) { <nl> + $ $ thing = $ $ thing - > $ string_meth ( ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + sub _is_object { <nl> + my ( $ self , $ thing ) = @ _ ; <nl> + <nl> + return $ self - > _try ( sub { ref $ thing & & $ thing - > isa ( ' UNIVERSAL ' ) } ) ? 1 : 0 ; <nl> + } <nl> + <nl> + <nl> + sub _unoverload_str { <nl> + my $ self = shift ; <nl> + <nl> + $ self - > _unoverload ( q [ " " ] , @ _ ) ; <nl> + } <nl> + <nl> + sub _unoverload_num { <nl> + my $ self = shift ; <nl> + <nl> + $ self - > _unoverload ( ' 0 + ' , @ _ ) ; <nl> + <nl> + for my $ val ( @ _ ) { <nl> + next unless $ self - > _is_dualvar ( $ $ val ) ; <nl> + $ $ val = $ $ val + 0 ; <nl> + } <nl> + } <nl> + <nl> + <nl> + # This is a hack to detect a dualvar such as $ ! <nl> + sub _is_dualvar { <nl> + my ( $ self , $ val ) = @ _ ; <nl> + <nl> + local $ ^ W = 0 ; <nl> + my $ numval = $ val + 0 ; <nl> + return 1 if $ numval ! = 0 and $ numval ne $ val ; <nl> + } <nl> + <nl> + <nl> + <nl> + = item B < is_eq > <nl> + <nl> + $ Test - > is_eq ( $ got , $ expected , $ name ) ; <nl> + <nl> + Like Test : : More ' s is ( ) . Checks if $ got eq $ expected . This is the <nl> + string version . 
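<nl> + <nl> + For example ( a minimal sketch ; the values are illustrative ) : <nl> + <nl> + $ Test - > is_eq ( $ got , " apple " , " got an apple " ) ;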
<nl> + <nl> + = item B < is_num > <nl> + <nl> + $ Test - > is_num ( $ got , $ expected , $ name ) ; <nl> + <nl> + Like Test : : More ' s is ( ) . Checks if $ got = = $ expected . This is the <nl> + numeric version . <nl> + <nl> + = cut <nl> + <nl> + sub is_eq { <nl> + my ( $ self , $ got , $ expect , $ name ) = @ _ ; <nl> + local $ Level = $ Level + 1 ; <nl> + <nl> + $ self - > _unoverload_str ( \ $ got , \ $ expect ) ; <nl> + <nl> + if ( ! defined $ got | | ! defined $ expect ) { <nl> + # undef only matches undef and nothing else <nl> + my $ test = ! defined $ got & & ! defined $ expect ; <nl> + <nl> + $ self - > ok ( $ test , $ name ) ; <nl> + $ self - > _is_diag ( $ got , ' eq ' , $ expect ) unless $ test ; <nl> + return $ test ; <nl> + } <nl> + <nl> + return $ self - > cmp_ok ( $ got , ' eq ' , $ expect , $ name ) ; <nl> + } <nl> + <nl> + sub is_num { <nl> + my ( $ self , $ got , $ expect , $ name ) = @ _ ; <nl> + local $ Level = $ Level + 1 ; <nl> + <nl> + $ self - > _unoverload_num ( \ $ got , \ $ expect ) ; <nl> + <nl> + if ( ! defined $ got | | ! defined $ expect ) { <nl> + # undef only matches undef and nothing else <nl> + my $ test = ! defined $ got & & ! defined $ expect ; <nl> + <nl> + $ self - > ok ( $ test , $ name ) ; <nl> + $ self - > _is_diag ( $ got , ' = = ' , $ expect ) unless $ test ; <nl> + return $ test ; <nl> + } <nl> + <nl> + return $ self - > cmp_ok ( $ got , ' = = ' , $ expect , $ name ) ; <nl> + } <nl> + <nl> + sub _is_diag { <nl> + my ( $ self , $ got , $ type , $ expect ) = @ _ ; <nl> + <nl> + foreach my $ val ( \ $ got , \ $ expect ) { <nl> + if ( defined $ $ val ) { <nl> + if ( $ type eq ' eq ' ) { <nl> + # quote and force string context <nl> + $ $ val = " ' $ $ val ' " <nl> + } <nl> + else { <nl> + # force numeric context <nl> + $ self - > _unoverload_num ( $ val ) ; <nl> + } <nl> + } <nl> + else { <nl> + $ $ val = ' undef ' ; <nl> + } <nl> + } <nl> + <nl> + return $ self - > diag ( sprintf < < DIAGNOSTIC , $ got , $ expect ) ; <nl> + got : % s <nl> + expected : % s <nl> + DIAGNOSTIC <nl> + <nl> + } <nl> + <nl> + = item B < isnt_eq > <nl> + <nl> + $ Test - > isnt_eq ( $ got , $ dont_expect , $ name ) ; <nl> + <nl> + Like Test : : More ' s isnt ( ) . Checks if $ got ne $ dont_expect . This is <nl> + the string version . <nl> + <nl> + = item B < isnt_num > <nl> + <nl> + $ Test - > isnt_num ( $ got , $ dont_expect , $ name ) ; <nl> + <nl> + Like Test : : More ' s isnt ( ) . Checks if $ got ne $ dont_expect . This is <nl> + the numeric version . <nl> + <nl> + = cut <nl> + <nl> + sub isnt_eq { <nl> + my ( $ self , $ got , $ dont_expect , $ name ) = @ _ ; <nl> + local $ Level = $ Level + 1 ; <nl> + <nl> + if ( ! defined $ got | | ! defined $ dont_expect ) { <nl> + # undef only matches undef and nothing else <nl> + my $ test = defined $ got | | defined $ dont_expect ; <nl> + <nl> + $ self - > ok ( $ test , $ name ) ; <nl> + $ self - > _cmp_diag ( $ got , ' ne ' , $ dont_expect ) unless $ test ; <nl> + return $ test ; <nl> + } <nl> + <nl> + return $ self - > cmp_ok ( $ got , ' ne ' , $ dont_expect , $ name ) ; <nl> + } <nl> + <nl> + sub isnt_num { <nl> + my ( $ self , $ got , $ dont_expect , $ name ) = @ _ ; <nl> + local $ Level = $ Level + 1 ; <nl> + <nl> + if ( ! defined $ got | | ! defined $ dont_expect ) { <nl> + # undef only matches undef and nothing else <nl> + my $ test = defined $ got | | defined $ dont_expect ; <nl> + <nl> + $ self - > ok ( $ test , $ name ) ; <nl> + $ self - > _cmp_diag ( $ got , ' ! 
= ' , $ dont_expect ) unless $ test ; <nl> + return $ test ; <nl> + } <nl> + <nl> + return $ self - > cmp_ok ( $ got , ' ! = ' , $ dont_expect , $ name ) ; <nl> + } <nl> + <nl> + <nl> + = item B < like > <nl> + <nl> + $ Test - > like ( $ this , qr / $ regex / , $ name ) ; <nl> + $ Test - > like ( $ this , ' / $ regex / ' , $ name ) ; <nl> + <nl> + Like Test : : More ' s like ( ) . Checks if $ this matches the given $ regex . <nl> + <nl> + You ' ll want to avoid qr / / if you want your tests to work before 5 . 005 . <nl> + <nl> + = item B < unlike > <nl> + <nl> + $ Test - > unlike ( $ this , qr / $ regex / , $ name ) ; <nl> + $ Test - > unlike ( $ this , ' / $ regex / ' , $ name ) ; <nl> + <nl> + Like Test : : More ' s unlike ( ) . Checks if $ this B < does not match > the <nl> + given $ regex . <nl> + <nl> + = cut <nl> + <nl> + sub like { <nl> + my ( $ self , $ this , $ regex , $ name ) = @ _ ; <nl> + <nl> + local $ Level = $ Level + 1 ; <nl> + $ self - > _regex_ok ( $ this , $ regex , ' = ~ ' , $ name ) ; <nl> + } <nl> + <nl> + sub unlike { <nl> + my ( $ self , $ this , $ regex , $ name ) = @ _ ; <nl> + <nl> + local $ Level = $ Level + 1 ; <nl> + $ self - > _regex_ok ( $ this , $ regex , ' ! ~ ' , $ name ) ; <nl> + } <nl> + <nl> + <nl> + = item B < cmp_ok > <nl> + <nl> + $ Test - > cmp_ok ( $ this , $ type , $ that , $ name ) ; <nl> + <nl> + Works just like Test : : More ' s cmp_ok ( ) . <nl> + <nl> + $ Test - > cmp_ok ( $ big_num , ' ! = ' , $ other_big_num ) ; <nl> + <nl> + = cut <nl> + <nl> + <nl> + my % numeric_cmps = map { ( $ _ , 1 ) } <nl> + ( " < " , " < = " , " > " , " > = " , " = = " , " ! = " , " < = > " ) ; <nl> + <nl> + sub cmp_ok { <nl> + my ( $ self , $ got , $ type , $ expect , $ name ) = @ _ ; <nl> + <nl> + # Treat overloaded objects as numbers if we ' re asked to do a <nl> + # numeric comparison . <nl> + my $ unoverload = $ numeric_cmps { $ type } ? ' _unoverload_num ' <nl> + : ' _unoverload_str ' ; <nl> + <nl> + $ self - > $ unoverload ( \ $ got , \ $ expect ) ; <nl> + <nl> + <nl> + my $ test ; <nl> + { <nl> + local ( $ @ , $ ! , $ SIG { __DIE__ } ) ; # isolate eval <nl> + <nl> + my $ code = $ self - > _caller_context ; <nl> + <nl> + # Yes , it has to look like this or 5 . 4 . 5 won ' t see the # line directive . <nl> + # Don ' t ask me , man , I just work here . <nl> + $ test = eval " <nl> + $ code " . " \ $ got $ type \ $ expect ; " ; <nl> + <nl> + } <nl> + local $ Level = $ Level + 1 ; <nl> + my $ ok = $ self - > ok ( $ test , $ name ) ; <nl> + <nl> + unless ( $ ok ) { <nl> + if ( $ type = ~ / ^ ( eq | = = ) $ / ) { <nl> + $ self - > _is_diag ( $ got , $ type , $ expect ) ; <nl> + } <nl> + else { <nl> + $ self - > _cmp_diag ( $ got , $ type , $ expect ) ; <nl> + } <nl> + } <nl> + return $ ok ; <nl> + } <nl> + <nl> + sub _cmp_diag { <nl> + my ( $ self , $ got , $ type , $ expect ) = @ _ ; <nl> + <nl> + $ got = defined $ got ? " ' $ got ' " : ' undef ' ; <nl> + $ expect = defined $ expect ? " ' $ expect ' " : ' undef ' ; <nl> + return $ self - > diag ( sprintf < < DIAGNOSTIC , $ got , $ type , $ expect ) ; <nl> + % s <nl> + % s <nl> + % s <nl> + DIAGNOSTIC <nl> + } <nl> + <nl> + <nl> + sub _caller_context { <nl> + my $ self = shift ; <nl> + <nl> + my ( $ pack , $ file , $ line ) = $ self - > caller ( 1 ) ; <nl> + <nl> + my $ code = ' ' ; <nl> + $ code . 
= " # line $ line $ file \ n " if defined $ file and defined $ line ; <nl> + <nl> + return $ code ; <nl> + } <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Other Testing Methods <nl> + <nl> + These are methods which are used in the course of writing a test but are not themselves tests . <nl> + <nl> + = over 4 <nl> + <nl> + = item B < BAIL_OUT > <nl> + <nl> + $ Test - > BAIL_OUT ( $ reason ) ; <nl> + <nl> + Indicates to the Test : : Harness that things are going so badly all <nl> + testing should terminate . This includes running any additional test <nl> + scripts . <nl> + <nl> + It will exit with 255 . <nl> + <nl> + = cut <nl> + <nl> + sub BAIL_OUT { <nl> + my ( $ self , $ reason ) = @ _ ; <nl> + <nl> + $ self - > { Bailed_Out } = 1 ; <nl> + $ self - > _print ( " Bail out ! $ reason " ) ; <nl> + exit 255 ; <nl> + } <nl> + <nl> + = for deprecated <nl> + BAIL_OUT ( ) used to be BAILOUT ( ) <nl> + <nl> + = cut <nl> + <nl> + * BAILOUT = \ & BAIL_OUT ; <nl> + <nl> + <nl> + = item B < skip > <nl> + <nl> + $ Test - > skip ; <nl> + $ Test - > skip ( $ why ) ; <nl> + <nl> + Skips the current test , reporting $ why . <nl> + <nl> + = cut <nl> + <nl> + sub skip { <nl> + my ( $ self , $ why ) = @ _ ; <nl> + $ why | | = ' ' ; <nl> + $ self - > _unoverload_str ( \ $ why ) ; <nl> + <nl> + $ self - > _plan_check ; <nl> + <nl> + lock ( $ self - > { Curr_Test } ) ; <nl> + $ self - > { Curr_Test } + + ; <nl> + <nl> + $ self - > { Test_Results } [ $ self - > { Curr_Test } - 1 ] = & share ( { <nl> + ' ok ' = > 1 , <nl> + actual_ok = > 1 , <nl> + name = > ' ' , <nl> + type = > ' skip ' , <nl> + reason = > $ why , <nl> + } ) ; <nl> + <nl> + my $ out = " ok " ; <nl> + $ out . = " $ self - > { Curr_Test } " if $ self - > use_numbers ; <nl> + $ out . = " # skip " ; <nl> + $ out . = " $ why " if length $ why ; <nl> + $ out . = " \ n " ; <nl> + <nl> + $ self - > _print ( $ out ) ; <nl> + <nl> + return 1 ; <nl> + } <nl> + <nl> + <nl> + = item B < todo_skip > <nl> + <nl> + $ Test - > todo_skip ; <nl> + $ Test - > todo_skip ( $ why ) ; <nl> + <nl> + Like skip ( ) , only it will declare the test as failing and TODO . Similar <nl> + to <nl> + <nl> + print " not ok $ tnum # TODO $ why \ n " ; <nl> + <nl> + = cut <nl> + <nl> + sub todo_skip { <nl> + my ( $ self , $ why ) = @ _ ; <nl> + $ why | | = ' ' ; <nl> + <nl> + $ self - > _plan_check ; <nl> + <nl> + lock ( $ self - > { Curr_Test } ) ; <nl> + $ self - > { Curr_Test } + + ; <nl> + <nl> + $ self - > { Test_Results } [ $ self - > { Curr_Test } - 1 ] = & share ( { <nl> + ' ok ' = > 1 , <nl> + actual_ok = > 0 , <nl> + name = > ' ' , <nl> + type = > ' todo_skip ' , <nl> + reason = > $ why , <nl> + } ) ; <nl> + <nl> + my $ out = " not ok " ; <nl> + $ out . = " $ self - > { Curr_Test } " if $ self - > use_numbers ; <nl> + $ out . = " # TODO & SKIP $ why \ n " ; <nl> + <nl> + $ self - > _print ( $ out ) ; <nl> + <nl> + return 1 ; <nl> + } <nl> + <nl> + <nl> + = begin _unimplemented <nl> + <nl> + = item B < skip_rest > <nl> + <nl> + $ Test - > skip_rest ; <nl> + $ Test - > skip_rest ( $ reason ) ; <nl> + <nl> + Like skip ( ) , only it skips all the rest of the tests you plan to run <nl> + and terminates the test . <nl> + <nl> + If you ' re running under no_plan , it skips once and terminates the <nl> + test . <nl> + <nl> + = end _unimplemented <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Test building utility methods <nl> + <nl> + These methods are useful when writing your own test methods . 
<nl> + <nl> + = over 4 <nl> + <nl> + = item B < maybe_regex > <nl> + <nl> + $ Test - > maybe_regex ( qr / $ regex / ) ; <nl> + $ Test - > maybe_regex ( ' / $ regex / ' ) ; <nl> + <nl> + Convenience method for building testing functions that take regular <nl> + expressions as arguments , but need to work before perl 5 . 005 . <nl> + <nl> + Takes a quoted regular expression produced by qr / / , or a string <nl> + representing a regular expression . <nl> + <nl> + Returns a Perl value which may be used instead of the corresponding <nl> + regular expression , or undef if its argument is not recognised . <nl> + <nl> + For example , a version of like ( ) , sans the useful diagnostic messages , <nl> + could be written as : <nl> + <nl> + sub laconic_like { <nl> + my ( $ self , $ this , $ regex , $ name ) = @ _ ; <nl> + my $ usable_regex = $ self - > maybe_regex ( $ regex ) ; <nl> + die " expecting regex , found ' $ regex ' \ n " <nl> + unless $ usable_regex ; <nl> + $ self - > ok ( $ this = ~ m / $ usable_regex / , $ name ) ; <nl> + } <nl> + <nl> + = cut <nl> + <nl> + <nl> + sub maybe_regex { <nl> + my ( $ self , $ regex ) = @ _ ; <nl> + my $ usable_regex = undef ; <nl> + <nl> + return $ usable_regex unless defined $ regex ; <nl> + <nl> + my ( $ re , $ opts ) ; <nl> + <nl> + # Check for qr / foo / <nl> + if ( ref $ regex eq ' Regexp ' ) { <nl> + $ usable_regex = $ regex ; <nl> + } <nl> + # Check for ' / foo / ' or ' m , foo , ' <nl> + elsif ( ( $ re , $ opts ) = $ regex = ~ m { ^ / ( . * ) / ( \ w * ) $ } sx or <nl> + ( undef , $ re , $ opts ) = $ regex = ~ m , ^ m ( [ ^ \ w \ s ] ) ( . + ) \ 1 ( \ w * ) $ , sx <nl> + ) <nl> + { <nl> + $ usable_regex = length $ opts ? " ( ? $ opts ) $ re " : $ re ; <nl> + } <nl> + <nl> + return $ usable_regex ; <nl> + } ; <nl> + <nl> + sub _regex_ok { <nl> + my ( $ self , $ this , $ regex , $ cmp , $ name ) = @ _ ; <nl> + <nl> + my $ ok = 0 ; <nl> + my $ usable_regex = $ self - > maybe_regex ( $ regex ) ; <nl> + unless ( defined $ usable_regex ) { <nl> + $ ok = $ self - > ok ( 0 , $ name ) ; <nl> + $ self - > diag ( " ' $ regex ' doesn ' t look much like a regex to me . " ) ; <nl> + return $ ok ; <nl> + } <nl> + <nl> + { <nl> + my $ test ; <nl> + my $ code = $ self - > _caller_context ; <nl> + <nl> + local ( $ @ , $ ! , $ SIG { __DIE__ } ) ; # isolate eval <nl> + <nl> + # Yes , it has to look like this or 5 . 4 . 5 won ' t see the # line directive . <nl> + # Don ' t ask me , man , I just work here . <nl> + $ test = eval " <nl> + $ code " . q { $ test = $ this = ~ / $ usable_regex / ? 1 : 0 } ; <nl> + <nl> + $ test = ! $ test if $ cmp eq ' ! ~ ' ; <nl> + <nl> + local $ Level = $ Level + 1 ; <nl> + $ ok = $ self - > ok ( $ test , $ name ) ; <nl> + } <nl> + <nl> + unless ( $ ok ) { <nl> + $ this = defined $ this ? " ' $ this ' " : ' undef ' ; <nl> + my $ match = $ cmp eq ' = ~ ' ? " doesn ' t match " : " matches " ; <nl> + $ self - > diag ( sprintf < < DIAGNOSTIC , $ this , $ match , $ regex ) ; <nl> + % s <nl> + % 13s ' % s ' <nl> + DIAGNOSTIC <nl> + <nl> + } <nl> + <nl> + return $ ok ; <nl> + } <nl> + <nl> + <nl> + # I ' m not ready to publish this . It doesn ' t deal with array return <nl> + # values from the code or context . <nl> + <nl> + = begin private <nl> + <nl> + = item B < _try > <nl> + <nl> + my $ return_from_code = $ Test - > try ( sub { code } ) ; <nl> + my ( $ return_from_code , $ error ) = $ Test - > try ( sub { code } ) ; <nl> + <nl> + Works like eval BLOCK except it ensures it has no effect on the rest of the test ( ie .
$ @ is not set ) nor is affected by outside interference ( ie . $ SIG { __DIE__ } ) and works around some quirks in older Perls . <nl> + <nl> + $ error is what would normally be in $ @ . <nl> + <nl> + It is suggested you use this in place of eval BLOCK . <nl> + <nl> + = cut <nl> + <nl> + sub _try { <nl> + my ( $ self , $ code ) = @ _ ; <nl> + <nl> + local $ ! ; # eval can mess up $ ! <nl> + local $ @ ; # don ' t set $ @ in the test <nl> + local $ SIG { __DIE__ } ; # don ' t trip an outside DIE handler . <nl> + my $ return = eval { $ code - > ( ) } ; <nl> + <nl> + return wantarray ? ( $ return , $ @ ) : $ return ; <nl> + } <nl> + <nl> + = end private <nl> + <nl> + <nl> + = item B < is_fh > <nl> + <nl> + my $ is_fh = $ Test - > is_fh ( $ thing ) ; <nl> + <nl> + Determines if the given $ thing can be used as a filehandle . <nl> + <nl> + = cut <nl> + <nl> + sub is_fh { <nl> + my $ self = shift ; <nl> + my $ maybe_fh = shift ; <nl> + return 0 unless defined $ maybe_fh ; <nl> + <nl> + return 1 if ref $ maybe_fh eq ' GLOB ' ; # it ' s a glob ref <nl> + return 1 if ref \ $ maybe_fh eq ' GLOB ' ; # it ' s a glob <nl> + <nl> + return eval { $ maybe_fh - > isa ( " IO : : Handle " ) } | | <nl> + # 5 . 5 . 4 ' s tied ( ) and can ( ) don ' t like getting undef <nl> + eval { ( tied ( $ maybe_fh ) | | ' ' ) - > can ( ' TIEHANDLE ' ) } ; <nl> + } <nl> + <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Test style <nl> + <nl> + <nl> + = over 4 <nl> + <nl> + = item B < level > <nl> + <nl> + $ Test - > level ( $ how_high ) ; <nl> + <nl> + How far up the call stack should $ Test look when reporting where the <nl> + test failed . <nl> + <nl> + Defaults to 1 . <nl> + <nl> + Setting L < $ Test : : Builder : : Level > overrides . This is typically useful <nl> + localized : <nl> + <nl> + sub my_ok { <nl> + my $ test = shift ; <nl> + <nl> + local $ Test : : Builder : : Level = $ Test : : Builder : : Level + 1 ; <nl> + $ TB - > ok ( $ test ) ; <nl> + } <nl> + <nl> + To be polite to other functions wrapping your own you usually want to increment C < $ Level > rather than set it to a constant . <nl> + <nl> + = cut <nl> + <nl> + sub level { <nl> + my ( $ self , $ level ) = @ _ ; <nl> + <nl> + if ( defined $ level ) { <nl> + $ Level = $ level ; <nl> + } <nl> + return $ Level ; <nl> + } <nl> + <nl> + <nl> + = item B < use_numbers > <nl> + <nl> + $ Test - > use_numbers ( $ on_or_off ) ; <nl> + <nl> + Whether or not the test should output numbers . That is , this if true : <nl> + <nl> + ok 1 <nl> + ok 2 <nl> + ok 3 <nl> + <nl> + or this if false <nl> + <nl> + ok <nl> + ok <nl> + ok <nl> + <nl> + Most useful when you can ' t depend on the test output order , such as <nl> + when threads or forking is involved . <nl> + <nl> + Defaults to on . <nl> + <nl> + = cut <nl> + <nl> + sub use_numbers { <nl> + my ( $ self , $ use_nums ) = @ _ ; <nl> + <nl> + if ( defined $ use_nums ) { <nl> + $ self - > { Use_Nums } = $ use_nums ; <nl> + } <nl> + return $ self - > { Use_Nums } ; <nl> + } <nl> + <nl> + <nl> + = item B < no_diag > <nl> + <nl> + $ Test - > no_diag ( $ no_diag ) ; <nl> + <nl> + If set true , no diagnostics will be printed . This includes calls to <nl> + diag ( ) . <nl> + <nl> + = item B < no_ending > <nl> + <nl> + $ Test - > no_ending ( $ no_ending ) ; <nl> + <nl> + Normally , Test : : Builder does some extra diagnostics when the test <nl> + ends . It also changes the exit code as described below . <nl> + <nl> + If this is true , none of that will be done .
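<nl> + <nl> + For example ( illustrative ) , a meta - test that drives Test : : Builder itself might do : <nl> + <nl> + $ Test - > no_ending ( 1 ) ; # suppress end - of - run diagnostics and exit - code changes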
<nl> + <nl> + = item B < no_header > <nl> + <nl> + $ Test - > no_header ( $ no_header ) ; <nl> + <nl> + If set to true , no " 1 . . N " header will be printed . <nl> + <nl> + = cut <nl> + <nl> + foreach my $ attribute ( qw ( No_Header No_Ending No_Diag ) ) { <nl> + my $ method = lc $ attribute ; <nl> + <nl> + my $ code = sub { <nl> + my ( $ self , $ no ) = @ _ ; <nl> + <nl> + if ( defined $ no ) { <nl> + $ self - > { $ attribute } = $ no ; <nl> + } <nl> + return $ self - > { $ attribute } ; <nl> + } ; <nl> + <nl> + no strict ' refs ' ; <nl> + * { __PACKAGE__ . ' : : ' . $ method } = $ code ; <nl> + } <nl> + <nl> + <nl> + = back <nl> + <nl> + = head2 Output <nl> + <nl> + Controlling where the test output goes . <nl> + <nl> + It ' s ok for your test to change where STDOUT and STDERR point to , <nl> + Test : : Builder ' s default output settings will not be affected . <nl> + <nl> + = over 4 <nl> + <nl> + = item B < diag > <nl> + <nl> + $ Test - > diag ( @ msgs ) ; <nl> + <nl> + Prints out the given @ msgs . Like C < print > , arguments are simply <nl> + appended together . <nl> + <nl> + Normally , it uses the failure_output ( ) handle , but if this is for a <nl> + TODO test , the todo_output ( ) handle is used . <nl> + <nl> + Output will be indented and marked with a # so as not to interfere <nl> + with test output . A newline will be put on the end if there isn ' t one <nl> + already . <nl> + <nl> + We encourage using this rather than calling print directly . <nl> + <nl> + Returns false . Why ? Because diag ( ) is often used in conjunction with <nl> + a failing test ( C < ok ( ) | | diag ( ) > ) it " passes through " the failure . <nl> + <nl> + return ok ( . . . ) | | diag ( . . . ) ; <nl> + <nl> + = for blame transfer <nl> + Mark Fowler < mark @ twoshortplanks . com > <nl> + <nl> + = cut <nl> + <nl> + sub diag { <nl> + my ( $ self , @ msgs ) = @ _ ; <nl> + <nl> + return if $ self - > no_diag ; <nl> + return unless @ msgs ; <nl> + <nl> + # Prevent printing headers when compiling ( i . e . - c ) <nl> + return if $ ^ C ; <nl> + <nl> + # Smash args together like print does . <nl> + # Convert undef to ' undef ' so its readable . <nl> + my $ msg = join ' ' , map { defined ( $ _ ) ? $ _ : ' undef ' } @ msgs ; <nl> + <nl> + # Escape each line with a # . <nl> + $ msg = ~ s / ^ / # / gm ; <nl> + <nl> + # Stick a newline on the end if it needs it . <nl> + $ msg . = " \ n " unless $ msg = ~ / \ n \ Z / ; <nl> + <nl> + local $ Level = $ Level + 1 ; <nl> + $ self - > _print_diag ( $ msg ) ; <nl> + <nl> + return 0 ; <nl> + } <nl> + <nl> + = begin _private <nl> + <nl> + = item B < _print > <nl> + <nl> + $ Test - > _print ( @ msgs ) ; <nl> + <nl> + Prints to the output ( ) filehandle . <nl> + <nl> + = end _private <nl> + <nl> + = cut <nl> + <nl> + sub _print { <nl> + my ( $ self , @ msgs ) = @ _ ; <nl> + <nl> + # Prevent printing headers when only compiling . Mostly for when <nl> + # tests are deparsed with B : : Deparse <nl> + return if $ ^ C ; <nl> + <nl> + my $ msg = join ' ' , @ msgs ; <nl> + <nl> + local ( $ \ , $ " , $ , ) = ( undef , ' ' , ' ' ) ; <nl> + my $ fh = $ self - > output ; <nl> + <nl> + # Escape each line after the first with a # so we don ' t <nl> + # confuse Test : : Harness . <nl> + $ msg = ~ s / \ n ( . ) / \ n # $ 1 / sg ; <nl> + <nl> + # Stick a newline on the end if it needs it . <nl> + $ msg . 
= " \ n " unless $ msg = ~ / \ n \ Z / ; <nl> + <nl> + print $ fh $ msg ; <nl> + } <nl> + <nl> + = begin private <nl> + <nl> + = item B < _print_diag > <nl> + <nl> + $ Test - > _print_diag ( @ msg ) ; <nl> + <nl> + Like _print , but prints to the current diagnostic filehandle . <nl> + <nl> + = end private <nl> + <nl> + = cut <nl> + <nl> + sub _print_diag { <nl> + my $ self = shift ; <nl> + <nl> + local ( $ \ , $ " , $ , ) = ( undef , ' ' , ' ' ) ; <nl> + my $ fh = $ self - > todo ? $ self - > todo_output : $ self - > failure_output ; <nl> + print $ fh @ _ ; <nl> + } <nl> + <nl> + = item B < output > <nl> + <nl> + $ Test - > output ( $ fh ) ; <nl> + $ Test - > output ( $ file ) ; <nl> + <nl> + Where normal " ok / not ok " test output should go . <nl> + <nl> + Defaults to STDOUT . <nl> + <nl> + = item B < failure_output > <nl> + <nl> + $ Test - > failure_output ( $ fh ) ; <nl> + $ Test - > failure_output ( $ file ) ; <nl> + <nl> + Where diagnostic output on test failures and diag ( ) should go . <nl> + <nl> + Defaults to STDERR . <nl> + <nl> + = item B < todo_output > <nl> + <nl> + $ Test - > todo_output ( $ fh ) ; <nl> + $ Test - > todo_output ( $ file ) ; <nl> + <nl> + Where diagnostics about todo test failures and diag ( ) should go . <nl> + <nl> + Defaults to STDOUT . <nl> + <nl> + = cut <nl> + <nl> + sub output { <nl> + my ( $ self , $ fh ) = @ _ ; <nl> + <nl> + if ( defined $ fh ) { <nl> + $ self - > { Out_FH } = $ self - > _new_fh ( $ fh ) ; <nl> + } <nl> + return $ self - > { Out_FH } ; <nl> + } <nl> + <nl> + sub failure_output { <nl> + my ( $ self , $ fh ) = @ _ ; <nl> + <nl> + if ( defined $ fh ) { <nl> + $ self - > { Fail_FH } = $ self - > _new_fh ( $ fh ) ; <nl> + } <nl> + return $ self - > { Fail_FH } ; <nl> + } <nl> + <nl> + sub todo_output { <nl> + my ( $ self , $ fh ) = @ _ ; <nl> + <nl> + if ( defined $ fh ) { <nl> + $ self - > { Todo_FH } = $ self - > _new_fh ( $ fh ) ; <nl> + } <nl> + return $ self - > { Todo_FH } ; <nl> + } <nl> + <nl> + <nl> + sub _new_fh { <nl> + my $ self = shift ; <nl> + my ( $ file_or_fh ) = shift ; <nl> + <nl> + my $ fh ; <nl> + if ( $ self - > is_fh ( $ file_or_fh ) ) { <nl> + $ fh = $ file_or_fh ; <nl> + } <nl> + else { <nl> + $ fh = do { local * FH } ; <nl> + open $ fh , " > $ file_or_fh " or <nl> + $ self - > croak ( " Can ' t open test output log $ file_or_fh : $ ! " ) ; <nl> + _autoflush ( $ fh ) ; <nl> + } <nl> + <nl> + return $ fh ; <nl> + } <nl> + <nl> + <nl> + sub _autoflush { <nl> + my ( $ fh ) = shift ; <nl> + my $ old_fh = select $ fh ; <nl> + $ | = 1 ; <nl> + select $ old_fh ; <nl> + } <nl> + <nl> + <nl> + sub _dup_stdhandles { <nl> + my $ self = shift ; <nl> + <nl> + $ self - > _open_testhandles ; <nl> + <nl> + # Set everything to unbuffered else plain prints to STDOUT will <nl> + # come out in the wrong order from our own prints . <nl> + _autoflush ( \ * TESTOUT ) ; <nl> + _autoflush ( \ * STDOUT ) ; <nl> + _autoflush ( \ * TESTERR ) ; <nl> + _autoflush ( \ * STDERR ) ; <nl> + <nl> + $ self - > output ( \ * TESTOUT ) ; <nl> + $ self - > failure_output ( \ * TESTERR ) ; <nl> + $ self - > todo_output ( \ * TESTOUT ) ; <nl> + } <nl> + <nl> + <nl> + my $ Opened_Testhandles = 0 ; <nl> + sub _open_testhandles { <nl> + return if $ Opened_Testhandles ; <nl> + # We dup STDOUT and STDERR so people can change them in their <nl> + # test suites while still getting normal test output . <nl> + open ( TESTOUT , " > & STDOUT " ) or die " Can ' t dup STDOUT : $ ! " ; <nl> + open ( TESTERR , " > & STDERR " ) or die " Can ' t dup STDERR : $ ! 
" ; <nl> + $ Opened_Testhandles = 1 ; <nl> + } <nl> + <nl> + <nl> + = item carp <nl> + <nl> + $ tb - > carp ( @ message ) ; <nl> + <nl> + Warns with C < @ message > but the message will appear to come from the <nl> + point where the original test function was called ( C < $ tb - > caller > ) . <nl> + <nl> + = item croak <nl> + <nl> + $ tb - > croak ( @ message ) ; <nl> + <nl> + Dies with C < @ message > but the message will appear to come from the <nl> + point where the original test function was called ( C < $ tb - > caller > ) . <nl> + <nl> + = cut <nl> + <nl> + sub _message_at_caller { <nl> + my $ self = shift ; <nl> + <nl> + local $ Level = $ Level + 1 ; <nl> + my ( $ pack , $ file , $ line ) = $ self - > caller ; <nl> + return join ( " " , @ _ ) . " at $ file line $ line . \ n " ; <nl> + } <nl> + <nl> + sub carp { <nl> + my $ self = shift ; <nl> + warn $ self - > _message_at_caller ( @ _ ) ; <nl> + } <nl> + <nl> + sub croak { <nl> + my $ self = shift ; <nl> + die $ self - > _message_at_caller ( @ _ ) ; <nl> + } <nl> + <nl> + sub _plan_check { <nl> + my $ self = shift ; <nl> + <nl> + unless ( $ self - > { Have_Plan } ) { <nl> + local $ Level = $ Level + 2 ; <nl> + $ self - > croak ( " You tried to run a test without a plan " ) ; <nl> + } <nl> + } <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Test Status and Info <nl> + <nl> + = over 4 <nl> + <nl> + = item B < current_test > <nl> + <nl> + my $ curr_test = $ Test - > current_test ; <nl> + $ Test - > current_test ( $ num ) ; <nl> + <nl> + Gets / sets the current test number we ' re on . You usually shouldn ' t <nl> + have to set this . <nl> + <nl> + If set forward , the details of the missing tests are filled in as ' unknown ' . <nl> + if set backward , the details of the intervening tests are deleted . You <nl> + can erase history if you really want to . <nl> + <nl> + = cut <nl> + <nl> + sub current_test { <nl> + my ( $ self , $ num ) = @ _ ; <nl> + <nl> + lock ( $ self - > { Curr_Test } ) ; <nl> + if ( defined $ num ) { <nl> + unless ( $ self - > { Have_Plan } ) { <nl> + $ self - > croak ( " Can ' t change the current test number without a plan ! " ) ; <nl> + } <nl> + <nl> + $ self - > { Curr_Test } = $ num ; <nl> + <nl> + # If the test counter is being pushed forward fill in the details . <nl> + my $ test_results = $ self - > { Test_Results } ; <nl> + if ( $ num > @ $ test_results ) { <nl> + my $ start = @ $ test_results ? @ $ test_results : 0 ; <nl> + for ( $ start . . $ num - 1 ) { <nl> + $ test_results - > [ $ _ ] = & share ( { <nl> + ' ok ' = > 1 , <nl> + actual_ok = > undef , <nl> + reason = > ' incrementing test number ' , <nl> + type = > ' unknown ' , <nl> + name = > undef <nl> + } ) ; <nl> + } <nl> + } <nl> + # If backward , wipe history . Its their funeral . <nl> + elsif ( $ num < @ $ test_results ) { <nl> + $ # { $ test_results } = $ num - 1 ; <nl> + } <nl> + } <nl> + return $ self - > { Curr_Test } ; <nl> + } <nl> + <nl> + <nl> + = item B < summary > <nl> + <nl> + my @ tests = $ Test - > summary ; <nl> + <nl> + A simple summary of the tests so far . True for pass , false for fail . <nl> + This is a logical pass / fail , so todos are passes . <nl> + <nl> + Of course , test # 1 is $ tests [ 0 ] , etc . . . <nl> + <nl> + = cut <nl> + <nl> + sub summary { <nl> + my ( $ self ) = shift ; <nl> + <nl> + return map { $ _ - > { ' ok ' } } @ { $ self - > { Test_Results } } ; <nl> + } <nl> + <nl> + = item B < details > <nl> + <nl> + my @ tests = $ Test - > details ; <nl> + <nl> + Like summary ( ) , but with a lot more detail . 
<nl> + <nl> + $ tests [ $ test_num - 1 ] = <nl> + { ' ok ' = > is the test considered a pass ? <nl> + actual_ok = > did it literally say ' ok ' ? <nl> + name = > name of the test ( if any ) <nl> + type = > type of test ( if any , see below ) . <nl> + reason = > reason for the above ( if any ) <nl> + } ; <nl> + <nl> + ' ok ' is true if Test : : Harness will consider the test to be a pass . <nl> + <nl> + ' actual_ok ' is a reflection of whether or not the test literally <nl> + printed ' ok ' or ' not ok ' . This is for examining the result of ' todo ' <nl> + tests . <nl> + <nl> + ' name ' is the name of the test . <nl> + <nl> + ' type ' indicates if it was a special test . Normal tests have a type <nl> + of ' ' . Type can be one of the following : <nl> + <nl> + skip see skip ( ) <nl> + todo see todo ( ) <nl> + todo_skip see todo_skip ( ) <nl> + unknown see below <nl> + <nl> + Sometimes the Test : : Builder test counter is incremented without it <nl> + printing any test output , for example , when current_test ( ) is changed . <nl> + In these cases , Test : : Builder doesn ' t know the result of the test , so <nl> + it ' s type is ' unkown ' . These details for these tests are filled in . <nl> + They are considered ok , but the name and actual_ok is left undef . <nl> + <nl> + For example " not ok 23 - hole count # TODO insufficient donuts " would <nl> + result in this structure : <nl> + <nl> + $ tests [ 22 ] = # 23 - 1 , since arrays start from 0 . <nl> + { ok = > 1 , # logically , the test passed since it ' s todo <nl> + actual_ok = > 0 , # in absolute terms , it failed <nl> + name = > ' hole count ' , <nl> + type = > ' todo ' , <nl> + reason = > ' insufficient donuts ' <nl> + } ; <nl> + <nl> + = cut <nl> + <nl> + sub details { <nl> + my $ self = shift ; <nl> + return @ { $ self - > { Test_Results } } ; <nl> + } <nl> + <nl> + = item B < todo > <nl> + <nl> + my $ todo_reason = $ Test - > todo ; <nl> + my $ todo_reason = $ Test - > todo ( $ pack ) ; <nl> + <nl> + todo ( ) looks for a $ TODO variable in your tests . If set , all tests <nl> + will be considered ' todo ' ( see Test : : More and Test : : Harness for <nl> + details ) . Returns the reason ( ie . the value of $ TODO ) if running as <nl> + todo tests , false otherwise . <nl> + <nl> + todo ( ) is about finding the right package to look for $ TODO in . It <nl> + uses the exported_to ( ) package to find it . If that ' s not set , it ' s <nl> + pretty good at guessing the right package to look at based on $ Level . <nl> + <nl> + Sometimes there is some confusion about where todo ( ) should be looking <nl> + for the $ TODO variable . If you want to be sure , tell it explicitly <nl> + what $ pack to use . <nl> + <nl> + = cut <nl> + <nl> + sub todo { <nl> + my ( $ self , $ pack ) = @ _ ; <nl> + <nl> + $ pack = $ pack | | $ self - > exported_to | | $ self - > caller ( $ Level ) ; <nl> + return 0 unless $ pack ; <nl> + <nl> + no strict ' refs ' ; <nl> + return defined $ { $ pack . ' : : TODO ' } ? $ { $ pack . ' : : TODO ' } <nl> + : 0 ; <nl> + } <nl> + <nl> + = item B < caller > <nl> + <nl> + my $ package = $ Test - > caller ; <nl> + my ( $ pack , $ file , $ line ) = $ Test - > caller ; <nl> + my ( $ pack , $ file , $ line ) = $ Test - > caller ( $ height ) ; <nl> + <nl> + Like the normal caller ( ) , except it reports according to your level ( ) . 
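<nl> + <nl> + For example ( a minimal sketch ) , a diagnostic helper can report its caller ' s location : <nl> + <nl> + my ( $ pack , $ file , $ line ) = $ Test - > caller ; <nl> + $ Test - > diag ( " called from $ file line $ line " ) ;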
<nl> + <nl> + = cut <nl> + <nl> + sub caller { <nl> + my ( $ self , $ height ) = @ _ ; <nl> + $ height | | = 0 ; <nl> + <nl> + my @ caller = CORE : : caller ( $ self - > level + $ height + 1 ) ; <nl> + return wantarray ? @ caller : $ caller [ 0 ] ; <nl> + } <nl> + <nl> + = back <nl> + <nl> + = cut <nl> + <nl> + = begin _private <nl> + <nl> + = over 4 <nl> + <nl> + = item B < _sanity_check > <nl> + <nl> + $ self - > _sanity_check ( ) ; <nl> + <nl> + Runs a bunch of end of test sanity checks to make sure reality came <nl> + through ok . If anything is wrong it will die with a fairly friendly <nl> + error message . <nl> + <nl> + = cut <nl> + <nl> + # ' # <nl> + sub _sanity_check { <nl> + my $ self = shift ; <nl> + <nl> + $ self - > _whoa ( $ self - > { Curr_Test } < 0 , ' Says here you ran a negative number of tests ! ' ) ; <nl> + $ self - > _whoa ( ! $ self - > { Have_Plan } and $ self - > { Curr_Test } , <nl> + ' Somehow your tests ran without a plan ! ' ) ; <nl> + $ self - > _whoa ( $ self - > { Curr_Test } ! = @ { $ self - > { Test_Results } } , <nl> + ' Somehow you got a different number of results than tests ran ! ' ) ; <nl> + } <nl> + <nl> + = item B < _whoa > <nl> + <nl> + $ self - > _whoa ( $ check , $ description ) ; <nl> + <nl> + A sanity check , similar to assert ( ) . If the $ check is true , something <nl> + has gone horribly wrong . It will die with the given $ description and <nl> + a note to contact the author . <nl> + <nl> + = cut <nl> + <nl> + sub _whoa { <nl> + my ( $ self , $ check , $ desc ) = @ _ ; <nl> + if ( $ check ) { <nl> + local $ Level = $ Level + 1 ; <nl> + $ self - > croak ( < < " WHOA " ) ; <nl> + WHOA ! $ desc <nl> + This should never happen ! Please contact the author immediately ! <nl> + WHOA <nl> + } <nl> + } <nl> + <nl> + = item B < _my_exit > <nl> + <nl> + _my_exit ( $ exit_num ) ; <nl> + <nl> + Perl seems to have some trouble with exiting inside an END block . 5 . 005_03 <nl> + and 5 . 6 . 1 both seem to do odd things . Instead , this function edits $ ? <nl> + directly . It should ONLY be called from inside an END block . It <nl> + doesn ' t actually exit , that ' s your job . <nl> + <nl> + = cut <nl> + <nl> + sub _my_exit { <nl> + $ ? = $ _ [ 0 ] ; <nl> + <nl> + return 1 ; <nl> + } <nl> + <nl> + <nl> + = back <nl> + <nl> + = end _private <nl> + <nl> + = cut <nl> + <nl> + $ SIG { __DIE__ } = sub { <nl> + # We don ' t want to muck with death in an eval , but $ ^ S isn ' t <nl> + # totally reliable . 5 . 005_03 and 5 . 6 . 1 both do the wrong thing <nl> + # with it . Instead , we use caller . This also means it runs under <nl> + # 5 . 004 ! <nl> + my $ in_eval = 0 ; <nl> + for ( my $ stack = 1 ; my $ sub = ( CORE : : caller ( $ stack ) ) [ 3 ] ; $ stack + + ) { <nl> + $ in_eval = 1 if $ sub = ~ / ^ \ ( eval \ ) / ; <nl> + } <nl> + $ Test - > { Test_Died } = 1 unless $ in_eval ; <nl> + } ; <nl> + <nl> + sub _ending { <nl> + my $ self = shift ; <nl> + <nl> + $ self - > _sanity_check ( ) ; <nl> + <nl> + # Don ' t bother with an ending if this is a forked copy . Only the parent <nl> + # should do the ending . <nl> + # Exit if plan ( ) was never called . This is so " require Test : : Simple " <nl> + # doesn ' t puke . <nl> + # Don ' t do an ending if we bailed out . <nl> + if ( ( $ self - > { Original_Pid } ! = $ $ ) or <nl> + ( ! $ self - > { Have_Plan } & & ! $ self - > { Test_Died } ) or <nl> + $ self - > { Bailed_Out } <nl> + ) <nl> + { <nl> + _my_exit ( $ ? 
) ; <nl> + return ; <nl> + } <nl> + <nl> + # Figure out if we passed or failed and print helpful messages . <nl> + my $ test_results = $ self - > { Test_Results } ; <nl> + if ( @ $ test_results ) { <nl> + # The plan ? We have no plan . <nl> + if ( $ self - > { No_Plan } ) { <nl> + $ self - > _print ( " 1 . . $ self - > { Curr_Test } \ n " ) unless $ self - > no_header ; <nl> + $ self - > { Expected_Tests } = $ self - > { Curr_Test } ; <nl> + } <nl> + <nl> + # Auto - extended arrays and elements which aren ' t explicitly <nl> + # filled in with a shared reference will puke under 5 . 8 . 0 <nl> + # ithreads . So we have to fill them in by hand . : ( <nl> + my $ empty_result = & share ( { } ) ; <nl> + for my $ idx ( 0 . . $ self - > { Expected_Tests } - 1 ) { <nl> + $ test_results - > [ $ idx ] = $ empty_result <nl> + unless defined $ test_results - > [ $ idx ] ; <nl> + } <nl> + <nl> + my $ num_failed = grep ! $ _ - > { ' ok ' } , <nl> + @ { $ test_results } [ 0 . . $ self - > { Curr_Test } - 1 ] ; <nl> + <nl> + my $ num_extra = $ self - > { Curr_Test } - $ self - > { Expected_Tests } ; <nl> + <nl> + if ( $ num_extra < 0 ) { <nl> + my $ s = $ self - > { Expected_Tests } = = 1 ? ' ' : ' s ' ; <nl> + $ self - > diag ( < < " FAIL " ) ; <nl> + Looks like you planned $ self - > { Expected_Tests } test $ s but only ran $ self - > { Curr_Test } . <nl> + FAIL <nl> + } <nl> + elsif ( $ num_extra > 0 ) { <nl> + my $ s = $ self - > { Expected_Tests } = = 1 ? ' ' : ' s ' ; <nl> + $ self - > diag ( < < " FAIL " ) ; <nl> + Looks like you planned $ self - > { Expected_Tests } test $ s but ran $ num_extra extra . <nl> + FAIL <nl> + } <nl> + <nl> + if ( $ num_failed ) { <nl> + my $ num_tests = $ self - > { Curr_Test } ; <nl> + my $ s = $ num_failed = = 1 ? ' ' : ' s ' ; <nl> + <nl> + my $ qualifier = $ num_extra = = 0 ? ' ' : ' run ' ; <nl> + <nl> + $ self - > diag ( < < " FAIL " ) ; <nl> + Looks like you failed $ num_failed test $ s of $ num_tests $ qualifier . <nl> + FAIL <nl> + } <nl> + <nl> + if ( $ self - > { Test_Died } ) { <nl> + $ self - > diag ( < < " FAIL " ) ; <nl> + Looks like your test died just after $ self - > { Curr_Test } . <nl> + FAIL <nl> + <nl> + _my_exit ( 255 ) & & return ; <nl> + } <nl> + <nl> + my $ exit_code ; <nl> + if ( $ num_failed ) { <nl> + $ exit_code = $ num_failed < = 254 ? $ num_failed : 254 ; <nl> + } <nl> + elsif ( $ num_extra ! = 0 ) { <nl> + $ exit_code = 255 ; <nl> + } <nl> + else { <nl> + $ exit_code = 0 ; <nl> + } <nl> + <nl> + _my_exit ( $ exit_code ) & & return ; <nl> + } <nl> + elsif ( $ self - > { Skip_All } ) { <nl> + _my_exit ( 0 ) & & return ; <nl> + } <nl> + elsif ( $ self - > { Test_Died } ) { <nl> + $ self - > diag ( < < ' FAIL ' ) ; <nl> + Looks like your test died before it could output anything . <nl> + FAIL <nl> + _my_exit ( 255 ) & & return ; <nl> + } <nl> + else { <nl> + $ self - > diag ( " No tests run ! \ n " ) ; <nl> + _my_exit ( 255 ) & & return ; <nl> + } <nl> + } <nl> + <nl> + END { <nl> + $ Test - > _ending if defined $ Test and ! $ Test - > no_ending ; <nl> + } <nl> + <nl> + = head1 EXIT CODES <nl> + <nl> + If all your tests passed , Test : : Builder will exit with zero ( which is <nl> + normal ) . If anything failed it will exit with how many failed . If <nl> + you run less ( or more ) tests than you planned , the missing ( or extras ) <nl> + will be considered failures . If no tests were ever run Test : : Builder <nl> + will throw a warning and exit with 255 . 
If the test died , even after <nl> + having successfully completed all its tests , it will still be <nl> + considered a failure and will exit with 255 . <nl> + <nl> + So the exit codes are . . . <nl> + <nl> + 0 all tests successful <nl> + 255 test died or all passed but wrong # of tests run <nl> + any other number how many failed ( including missing or extras ) <nl> + <nl> + If you fail more than 254 tests , it will be reported as 254 . <nl> + <nl> + <nl> + = head1 THREADS <nl> + <nl> + In perl 5 . 8 . 1 and later , Test : : Builder is thread - safe . The test <nl> + number is shared amongst all threads . This means if one thread sets <nl> + the test number using current_test ( ) they will all be affected . <nl> + <nl> + While versions earlier than 5 . 8 . 1 had threads , they contain too many <nl> + bugs to support . <nl> + <nl> + Test : : Builder is only thread - aware if threads . pm is loaded I < before > <nl> + Test : : Builder . <nl> + <nl> + = head1 EXAMPLES <nl> + <nl> + CPAN can provide the best examples . Test : : Simple , Test : : More , <nl> + Test : : Exception and Test : : Differences all use Test : : Builder . <nl> + <nl> + = head1 SEE ALSO <nl> + <nl> + Test : : Simple , Test : : More , Test : : Harness <nl> + <nl> + = head1 AUTHORS <nl> + <nl> + Original code by chromatic , maintained by Michael G Schwern <nl> + E < lt > schwern @ pobox . comE < gt > <nl> + <nl> + = head1 COPYRIGHT <nl> + <nl> + Copyright 2002 , 2004 by chromatic E < lt > chromatic @ wgz . orgE < gt > and <nl> + Michael G Schwern E < lt > schwern @ pobox . comE < gt > . <nl> + <nl> + This program is free software ; you can redistribute it and / or <nl> + modify it under the same terms as Perl itself . <nl> + <nl> + See F < http : / / www . perl . com / perl / misc / Artistic . html > <nl> + <nl> + = cut <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . a7d56dd0fdf <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Builder / Module . pm <nl> <nl> + package Test : : Builder : : Module ; <nl> + <nl> + use Test : : Builder ; <nl> + <nl> + require Exporter ; <nl> + @ ISA = qw ( Exporter ) ; <nl> + <nl> + $ VERSION = ' 0 . 72 ' ; <nl> + <nl> + use strict ; <nl> + <nl> + # 5 . 004 ' s Exporter doesn ' t have export_to_level . <nl> + my $ _export_to_level = sub { <nl> + my $ pkg = shift ; <nl> + my $ level = shift ; <nl> + ( undef ) = shift ; # redundant arg <nl> + my $ callpkg = caller ( $ level ) ; <nl> + $ pkg - > export ( $ callpkg , @ _ ) ; <nl> + } ; <nl> + <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Builder : : Module - Base class for test modules <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + # Emulates Test : : Simple <nl> + package Your : : Module ; <nl> + <nl> + my $ CLASS = __PACKAGE__ ; <nl> + <nl> + use base ' Test : : Builder : : Module ' ; <nl> + @ EXPORT = qw ( ok ) ; <nl> + <nl> + sub ok ( $ ; $ ) { <nl> + my $ tb = $ CLASS - > builder ; <nl> + return $ tb - > ok ( @ _ ) ; <nl> + } <nl> + <nl> + 1 ; <nl> + <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + This is a superclass for Test : : Builder - based modules . It provides a <nl> + handful of common functionality and a method of getting at the underlying <nl> + Test : : Builder object . <nl> + <nl> + <nl> + = head2 Importing <nl> + <nl> + Test : : Builder : : Module is a subclass of Exporter which means your <nl> + module is also a subclass of Exporter . @ EXPORT , @ EXPORT_OK , etc . . . <nl> + all act normally .
<nl> + <nl> + A few methods are provided to do the C < use Your : : Module tests = > 23 > part <nl> + for you . <nl> + <nl> + = head3 import <nl> + <nl> + Test : : Builder : : Module provides an import ( ) method which acts in the <nl> + same basic way as Test : : More ' s , setting the plan and controlling <nl> + exporting of functions and variables . This allows your module to set <nl> + the plan independent of Test : : More . <nl> + <nl> + All arguments passed to import ( ) are passed onto <nl> + C < < Your : : Module - > builder - > plan ( ) > > with the exception of <nl> + C < import = > [ qw ( things to import ) ] > . <nl> + <nl> + use Your : : Module import = > [ qw ( this that ) ] , tests = > 23 ; <nl> + <nl> + says to import the functions this ( ) and that ( ) as well as set the plan <nl> + to be 23 tests . <nl> + <nl> + import ( ) also sets the exported_to ( ) attribute of your builder to be <nl> + the caller of the import ( ) function . <nl> + <nl> + Additional behaviors can be added to your import ( ) method by overriding <nl> + import_extra ( ) . <nl> + <nl> + = cut <nl> + <nl> + sub import { <nl> + my ( $ class ) = shift ; <nl> + <nl> + my $ test = $ class - > builder ; <nl> + <nl> + my $ caller = caller ; <nl> + <nl> + $ test - > exported_to ( $ caller ) ; <nl> + <nl> + $ class - > import_extra ( \ @ _ ) ; <nl> + my ( @ imports ) = $ class - > _strip_imports ( \ @ _ ) ; <nl> + <nl> + $ test - > plan ( @ _ ) ; <nl> + <nl> + $ class - > $ _export_to_level ( 1 , $ class , @ imports ) ; <nl> + } <nl> + <nl> + <nl> + sub _strip_imports { <nl> + my $ class = shift ; <nl> + my $ list = shift ; <nl> + <nl> + my @ imports = ( ) ; <nl> + my @ other = ( ) ; <nl> + my $ idx = 0 ; <nl> + while ( $ idx < = $ # { $ list } ) { <nl> + my $ item = $ list - > [ $ idx ] ; <nl> + <nl> + if ( defined $ item and $ item eq ' import ' ) { <nl> + push @ imports , @ { $ list - > [ $ idx + 1 ] } ; <nl> + $ idx + + ; <nl> + } <nl> + else { <nl> + push @ other , $ item ; <nl> + } <nl> + <nl> + $ idx + + ; <nl> + } <nl> + <nl> + @ $ list = @ other ; <nl> + <nl> + return @ imports ; <nl> + } <nl> + <nl> + <nl> + = head3 import_extra <nl> + <nl> + Your : : Module - > import_extra ( \ @ import_args ) ; <nl> + <nl> + import_extra ( ) is called by import ( ) . It provides an opportunity for you <nl> + to add behaviors to your module based on its import list . <nl> + <nl> + Any extra arguments which shouldn ' t be passed on to plan ( ) should be <nl> + stripped off by this method . <nl> + <nl> + See Test : : More for an example of its use . <nl> + <nl> + B < NOTE > This mechanism is I < VERY ALPHA AND LIKELY TO CHANGE > as it <nl> + feels like a bit of an ugly hack in its current form . <nl> + <nl> + = cut <nl> + <nl> + sub import_extra { } <nl> + <nl> + <nl> + = head2 Builder <nl> + <nl> + Test : : Builder : : Module provides some methods of getting at the underlying <nl> + Test : : Builder object . <nl> + <nl> + = head3 builder <nl> + <nl> + my $ builder = Your : : Class - > builder ; <nl> + <nl> + This method returns the Test : : Builder object associated with Your : : Class . <nl> + It is not a constructor so you can call it as often as you like . <nl> + <nl> + This is the preferred way to get the Test : : Builder object . You should <nl> + I < not > get it via C < < Test : : Builder - > new > > as was previously <nl> + recommended . <nl> + <nl> + The object returned by builder ( ) may change at runtime so you should <nl> + call builder ( ) inside each function rather than store it in a global .
<nl> + <nl> + sub ok { <nl> + my $ builder = Your : : Class - > builder ; <nl> + <nl> + return $ builder - > ok ( @ _ ) ; <nl> + } <nl> + <nl> + <nl> + = cut <nl> + <nl> + sub builder { <nl> + return Test : : Builder - > new ; <nl> + } <nl> + <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . 598eb19e690 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Builder / Tester . pm <nl> <nl> + package Test : : Builder : : Tester ; <nl> + <nl> + use strict ; <nl> + use vars qw ( @ EXPORT $ VERSION @ ISA ) ; <nl> + $ VERSION = " 1 . 09 " ; <nl> + <nl> + use Test : : Builder ; <nl> + use Symbol ; <nl> + use Carp ; <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Builder : : Tester - test testsuites that have been built with <nl> + Test : : Builder <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + use Test : : Builder : : Tester tests = > 1 ; <nl> + use Test : : More ; <nl> + <nl> + test_out ( " not ok 1 - foo " ) ; <nl> + test_fail ( + 1 ) ; <nl> + fail ( " foo " ) ; <nl> + test_test ( " fail works " ) ; <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + A module that helps you test testing modules that are built with <nl> + B < Test : : Builder > . <nl> + <nl> + The testing system is designed to be used by performing a three step <nl> + process for each test you wish to test . This process starts with using <nl> + C < test_out > and C < test_err > in advance to declare what the testsuite you <nl> + are testing will output with B < Test : : Builder > to stdout and stderr . <nl> + <nl> + You then can run the test ( s ) from your test suite that call <nl> + B < Test : : Builder > . At this point the output of B < Test : : Builder > is <nl> + safely captured by B < Test : : Builder : : Tester > rather than being <nl> + interpreted as real test output . <nl> + <nl> + The final stage is to call C < test_test > that will simply compare what you <nl> + predeclared to what B < Test : : Builder > actually outputted , and report the <nl> + results back with a " ok " or " not ok " ( with debugging ) to the normal <nl> + output . <nl> + <nl> + = cut <nl> + <nl> + # # # # <nl> + # set up testing <nl> + # # # # <nl> + <nl> + my $ t = Test : : Builder - > new ; <nl> + <nl> + # # # <nl> + # make us an exporter <nl> + # # # <nl> + <nl> + use Exporter ; <nl> + @ ISA = qw ( Exporter ) ; <nl> + <nl> + @ EXPORT = qw ( test_out test_err test_fail test_diag test_test line_num ) ; <nl> + <nl> + # _export_to_level and import stolen directly from Test : : More . I am <nl> + # the king of cargo cult programming ; - ) <nl> + <nl> + # 5 . 004 ' s Exporter doesn ' t have export_to_level . <nl> + sub _export_to_level <nl> + { <nl> + my $ pkg = shift ; <nl> + my $ level = shift ; <nl> + ( undef ) = shift ; # XXX redundant arg <nl> + my $ callpkg = caller ( $ level ) ; <nl> + $ pkg - > export ( $ callpkg , @ _ ) ; <nl> + } <nl> + <nl> + sub import { <nl> + my $ class = shift ; <nl> + my ( @ plan ) = @ _ ; <nl> + <nl> + my $ caller = caller ; <nl> + <nl> + $ t - > exported_to ( $ caller ) ; <nl> + $ t - > plan ( @ plan ) ; <nl> + <nl> + my @ imports = ( ) ; <nl> + foreach my $ idx ( 0 . . 
$ # plan ) { <nl> + if ( $ plan [ $ idx ] eq ' import ' ) { <nl> + @ imports = @ { $ plan [ $ idx + 1 ] } ; <nl> + last ; <nl> + } <nl> + } <nl> + <nl> + __PACKAGE__ - > _export_to_level ( 1 , __PACKAGE__ , @ imports ) ; <nl> + } <nl> + <nl> + # # # <nl> + # set up file handles <nl> + # # # <nl> + <nl> + # create some private file handles <nl> + my $ output_handle = gensym ; <nl> + my $ error_handle = gensym ; <nl> + <nl> + # and tie them to this package <nl> + my $ out = tie * $ output_handle , " Test : : Builder : : Tester : : Tie " , " STDOUT " ; <nl> + my $ err = tie * $ error_handle , " Test : : Builder : : Tester : : Tie " , " STDERR " ; <nl> + <nl> + # # # # <nl> + # exported functions <nl> + # # # # <nl> + <nl> + # for remembering that we ' re testing and where we ' re testing at <nl> + my $ testing = 0 ; <nl> + my $ testing_num ; <nl> + <nl> + # remembering where the file handles were originally connected <nl> + my $ original_output_handle ; <nl> + my $ original_failure_handle ; <nl> + my $ original_todo_handle ; <nl> + <nl> + my $ original_test_number ; <nl> + my $ original_harness_state ; <nl> + <nl> + my $ original_harness_env ; <nl> + <nl> + # function that starts testing and redirects the filehandles for now <nl> + sub _start_testing <nl> + { <nl> + # even if we ' re running under Test : : Harness pretend we ' re not <nl> + # for now . This is needed so Test : : Builder doesn ' t add extra spaces <nl> + $ original_harness_env = $ ENV { HARNESS_ACTIVE } | | 0 ; <nl> + $ ENV { HARNESS_ACTIVE } = 0 ; <nl> + <nl> + # remember what the handles were set to <nl> + $ original_output_handle = $ t - > output ( ) ; <nl> + $ original_failure_handle = $ t - > failure_output ( ) ; <nl> + $ original_todo_handle = $ t - > todo_output ( ) ; <nl> + <nl> + # switch out to our own handles <nl> + $ t - > output ( $ output_handle ) ; <nl> + $ t - > failure_output ( $ error_handle ) ; <nl> + $ t - > todo_output ( $ error_handle ) ; <nl> + <nl> + # clear the expected list <nl> + $ out - > reset ( ) ; <nl> + $ err - > reset ( ) ; <nl> + <nl> + # remember that we ' re testing <nl> + $ testing = 1 ; <nl> + $ testing_num = $ t - > current_test ; <nl> + $ t - > current_test ( 0 ) ; <nl> + <nl> + # look , we shouldn ' t do the ending stuff <nl> + $ t - > no_ending ( 1 ) ; <nl> + } <nl> + <nl> + = head2 Functions <nl> + <nl> + These are the six methods that are exported as default . <nl> + <nl> + = over 4 <nl> + <nl> + = item test_out <nl> + <nl> + = item test_err <nl> + <nl> + Procedures for predeclaring the output that your test suite is <nl> + expected to produce until C < test_test > is called . These procedures <nl> + automatically assume that each line terminates with " \ n " . So <nl> + <nl> + test_out ( " ok 1 " , " ok 2 " ) ; <nl> + <nl> + is the same as <nl> + <nl> + test_out ( " ok 1 \ nok 2 " ) ; <nl> + <nl> + which is even the same as <nl> + <nl> + test_out ( " ok 1 " ) ; <nl> + test_out ( " ok 2 " ) ; <nl> + <nl> + Once C < test_out > or C < test_err > ( or C < test_fail > or C < test_diag > ) have <nl> + been called once , all further output from B < Test : : Builder > will be <nl> + captured by B < Test : : Builder : : Tester > . This means that you will not <nl> + be able to perform further tests to the normal output in the normal way <nl> + until you call C < test_test > ( well , unless you manually meddle with the <nl> + output filehandles ) <nl> + <nl> + = cut <nl> + <nl> + sub test_out ( @ ) <nl> + { <nl> + # do we need to do any setup ?
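<nl> + # ( capture of Test : : Builder output only begins once the first <nl> + # expectation is declared )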
<nl> + _start_testing ( ) unless $ testing ; <nl> + <nl> + $ out - > expect ( @ _ ) <nl> + } <nl> + <nl> + sub test_err ( @ ) <nl> + { <nl> + # do we need to do any setup ? <nl> + _start_testing ( ) unless $ testing ; <nl> + <nl> + $ err - > expect ( @ _ ) <nl> + } <nl> + <nl> + = item test_fail <nl> + <nl> + Because the standard failure message that B < Test : : Builder > produces <nl> + whenever a test fails will be a common occurrence in your test error <nl> + output , and because it has changed between Test : : Builder versions , rather <nl> + than forcing you to call C < test_err > with the string all the time like <nl> + so <nl> + <nl> + test_err ( " # Failed test ( $ 0 at line " . line_num ( + 1 ) . " ) " ) ; <nl> + <nl> + C < test_fail > exists as a convenience function that can be called <nl> + instead . It takes one argument , the offset from the current line that <nl> + the line that causes the fail is on . <nl> + <nl> + test_fail ( + 1 ) ; <nl> + <nl> + This means that the example in the synopsis could be rewritten <nl> + more simply as : <nl> + <nl> + test_out ( " not ok 1 - foo " ) ; <nl> + test_fail ( + 1 ) ; <nl> + fail ( " foo " ) ; <nl> + test_test ( " fail works " ) ; <nl> + <nl> + = cut <nl> + <nl> + sub test_fail <nl> + { <nl> + # do we need to do any setup ? <nl> + _start_testing ( ) unless $ testing ; <nl> + <nl> + # work out what line we should be on <nl> + my ( $ package , $ filename , $ line ) = caller ; <nl> + $ line = $ line + ( shift ( ) | | 0 ) ; # prevent warnings <nl> + <nl> + # expect that on stderr <nl> + $ err - > expect ( " # Failed test ( $ 0 at line $ line ) " ) ; <nl> + } <nl> + <nl> + = item test_diag <nl> + <nl> + As most of the remaining expected output to the error stream will be <nl> + created by Test : : Builder ' s C < diag > function , B < Test : : Builder : : Tester > <nl> + provides a convenience function C < test_diag > that you can use instead of <nl> + C < test_err > . <nl> + <nl> + The C < test_diag > function prepends comment hashes and spacing to the <nl> + start and newlines to the end of the expected output passed to it and <nl> + adds it to the list of expected error output . So , instead of writing <nl> + <nl> + test_err ( " # Couldn ' t open file " ) ; <nl> + <nl> + you can write <nl> + <nl> + test_diag ( " Couldn ' t open file " ) ; <nl> + <nl> + Remember that B < Test : : Builder > ' s diag function will not add newlines to <nl> + the end of output and test_diag will . So to check <nl> + <nl> + Test : : Builder - > new - > diag ( " foo \ n " , " bar \ n " ) ; <nl> + <nl> + You would do <nl> + <nl> + test_diag ( " foo " , " bar " ) <nl> + <nl> + without the newlines . <nl> + <nl> + = cut <nl> + <nl> + sub test_diag <nl> + { <nl> + # do we need to do any setup ? <nl> + _start_testing ( ) unless $ testing ; <nl> + <nl> + # expect the same thing , but prepended with " # " <nl> + local $ _ ; <nl> + $ err - > expect ( map { " # $ _ " } @ _ ) <nl> + } <nl> + <nl> + = item test_test <nl> + <nl> + Actually performs the output check testing the tests , comparing the <nl> + data ( with C < eq > ) that we have captured from B < Test : : Builder > against <nl> + what was declared with C < test_out > and C < test_err > . <nl> + <nl> + This takes name / value pairs that affect how the test is run . <nl> + <nl> + = over <nl> + <nl> + = item title ( synonym ' name ' , ' label ' ) <nl> + <nl> + The name of the test that will be displayed after the C < ok > or C < not <nl> ok > .
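<nl> + <nl> + For example ( illustrative ) : <nl> + <nl> + test_test ( title = > " fail works " ) ;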
<nl> + <nl> + = item skip_out <nl> + <nl> + Setting this to a true value will cause the test to ignore whether the <nl> + output sent by the test to the output stream matches that <nl> + declared with C < test_out > . <nl> + <nl> + = item skip_err <nl> + <nl> + Setting this to a true value will cause the test to ignore whether the <nl> + output sent by the test to the error stream matches that <nl> + declared with C < test_err > . <nl> + <nl> + = back <nl> + <nl> + As a convenience , if only one argument is passed then this argument <nl> + is assumed to be the name of the test ( as in the above examples ) . <nl> + <nl> + Once C < test_test > has been run , test output will be redirected back to <nl> + the original filehandles that B < Test : : Builder > was connected to <nl> + ( probably STDOUT and STDERR ) , meaning any further tests you run <nl> + will function normally and cause success / errors for B < Test : : Harness > . <nl> + A short worked example is given after this list of functions . <nl> + <nl> + = cut <nl> + <nl> + sub test_test <nl> + { <nl> + # decode the arguments as described in the pod <nl> + my $ mess ; <nl> + my % args ; <nl> + if ( @ _ = = 1 ) <nl> + { $ mess = shift } <nl> + else <nl> + { <nl> + % args = @ _ ; <nl> + $ mess = $ args { name } if exists ( $ args { name } ) ; <nl> + $ mess = $ args { title } if exists ( $ args { title } ) ; <nl> + $ mess = $ args { label } if exists ( $ args { label } ) ; <nl> + } <nl> + <nl> + # er , are we testing ? <nl> + croak " Not testing . You must declare output with a test function first . " <nl> + unless $ testing ; <nl> + <nl> + # okay , reconnect the test suite back to the saved handles <nl> + $ t - > output ( $ original_output_handle ) ; <nl> + $ t - > failure_output ( $ original_failure_handle ) ; <nl> + $ t - > todo_output ( $ original_todo_handle ) ; <nl> + <nl> + # restore the test number , etc , back to the original point <nl> + $ t - > current_test ( $ testing_num ) ; <nl> + $ testing = 0 ; <nl> + <nl> + # re - enable the original setting of the harness <nl> + $ ENV { HARNESS_ACTIVE } = $ original_harness_env ; <nl> + <nl> + # check the output we ' ve stashed <nl> + unless ( $ t - > ok ( ( $ args { skip_out } | | $ out - > check ) <nl> + & & ( $ args { skip_err } | | $ err - > check ) , <nl> + $ mess ) ) <nl> + { <nl> + # print out the diagnostic information about why this <nl> + # test failed <nl> + <nl> + local $ _ ; <nl> + <nl> + $ t - > diag ( map { " $ _ \ n " } $ out - > complaint ) <nl> + unless $ args { skip_out } | | $ out - > check ; <nl> + <nl> + $ t - > diag ( map { " $ _ \ n " } $ err - > complaint ) <nl> + unless $ args { skip_err } | | $ err - > check ; <nl> + } <nl> + } <nl> + <nl> + = item line_num <nl> + <nl> + A utility function that returns the line number that the function was <nl> + called on . You can pass it an offset which will be added to the <nl> + result . This is very useful for working out the correct text of <nl> + diagnostic functions that contain line numbers . <nl> + <nl> + Essentially this is the same as the C < __LINE__ > macro , but the <nl> + C < line_num ( + 3 ) > idiom is arguably nicer . <nl> + <nl> + = back
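<nl> + <nl> + To tie the above together , here is a short worked example . It is a <nl> + sketch rather than part of this distribution ' s own test suite : the <nl> + plan , the test title and the " flobble " label are illustrative only . <nl> + <nl> + use Test::Builder::Tester tests => 1; <nl> + use Test::More; <nl> + <nl> + # declare what the enclosed test should print on STDOUT <nl> + test_out("not ok 1 - flobble"); <nl> + <nl> + # run the test whose output is under scrutiny <nl> + fail("flobble"); <nl> + <nl> + # compare the captured output against the declaration ; the error <nl> + # stream ( the failure diagnostics ) is deliberately left unchecked <nl> + test_test(title => "fail() prints the right result line", skip_err => 1); <nl> + <nl> + Note how C < test_test > is called with name / value pairs here , using <nl> + the C < title > and C < skip_err > options described above .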
<nl> + <nl> + In addition to the six exported functions there exists one function <nl> + that can only be accessed with a fully qualified function call . <nl> + <nl> + = over 4 <nl> + <nl> + = item color <nl> + <nl> + When C < test_test > is called and the output that your tests generate <nl> + does not match that which you declared , C < test_test > will print out <nl> + debug information showing the two conflicting versions . As this <nl> + output itself is debug information it can be confusing which part of <nl> + the output is from C < test_test > and which was the original output from <nl> + your original tests . Also , it may be hard to spot things like <nl> + extraneous whitespace at the end of lines that may cause your test to <nl> + fail even though the output looks similar . <nl> + <nl> + To assist you , if you have the B < Term : : ANSIColor > module installed <nl> + ( which you should have by default from perl 5 . 005 onwards ) , C < test_test > <nl> + can colour the background of the debug information to disambiguate the <nl> + different types of output . The debug output will have its background <nl> + coloured green and red . The green part represents the text which is <nl> + the same between the expected and actual output , the red shows which <nl> + part differs . <nl> + <nl> + The C < color > function determines whether colouring should occur . <nl> + Passing it a true or false value will enable or disable colouring <nl> + respectively , and the function called with no argument will return the <nl> + current setting . <nl> + <nl> + To enable colouring from the command line , you can use the <nl> + B < Test : : Builder : : Tester : : Color > module like so : <nl> + <nl> + perl - MTest : : Builder : : Tester : : Color test . t <nl> + <nl> + Or by including the B < Test : : Builder : : Tester : : Color > module directly in <nl> + the PERL5LIB . <nl> + <nl> + = cut <nl> + <nl> + my $ color ; <nl> + sub color <nl> + { <nl> + $ color = shift if @ _ ; <nl> + $ color ; <nl> + } <nl> + <nl> + = back <nl> + <nl> + = head1 BUGS <nl> + <nl> + Calls C < < Test : : Builder - > no_ending > > , turning off the ending tests . <nl> + This is needed as otherwise it will trip out because we ' ve run more <nl> + tests than we strictly should have and it ' ll register any failures we <nl> + had that we were testing for as real failures . <nl> + <nl> + The color function doesn ' t work unless B < Term : : ANSIColor > is installed <nl> + and is compatible with your terminal . <nl> + <nl> + Bugs ( and requests for new features ) can be reported to the author <nl> + through the CPAN RT system : <nl> + L < http : / / rt . cpan . org / NoAuth / ReportBug . html ? Queue = Test - Builder - Tester > <nl> + <nl> + = head1 AUTHOR <nl> + <nl> + Copyright Mark Fowler E < lt > mark @ twoshortplanks . comE < gt > 2002 , 2004 . <nl> + <nl> + Some code taken from B < Test : : More > and B < Test : : Catch > , written by <nl> + Michael G Schwern E < lt > schwern @ pobox . comE < gt > . Hence , those parts <nl> + Copyright Michael G Schwern 2001 . Used and distributed with <nl> + permission . <nl> + <nl> + This program is free software ; you can redistribute it <nl> + and / or modify it under the same terms as Perl itself . <nl> + <nl> + = head1 NOTES <nl> + <nl> + This code has been tested explicitly on the following versions <nl> + of perl : 5 . 7 . 3 , 5 . 6 . 1 , 5 . 6 . 0 , 5 . 005_03 , 5 . 004_05 and 5 . 004 . <nl> + <nl> + Thanks to Richard Clamp E < lt > richardc @ unixbeard . netE < gt > for letting <nl> + me try this module out on his testing system .
<nl> + <nl> + = head1 SEE ALSO <nl> + <nl> + L < Test : : Builder > , L < Test : : Builder : : Tester : : Color > , L < Test : : More > . <nl> + <nl> + = cut <nl> + <nl> + 1 ; <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # Helper class that is used to remember expected and received data <nl> + <nl> + package Test : : Builder : : Tester : : Tie ; <nl> + <nl> + # # <nl> + # add line ( s ) to be expected <nl> + <nl> + sub expect <nl> + { <nl> + my $ self = shift ; <nl> + <nl> + my @ checks = @ _ ; <nl> + foreach my $ check ( @ checks ) { <nl> + $ check = $ self - > _translate_Failed_check ( $ check ) ; <nl> + push @ { $ self - > { wanted } } , ref $ check ? $ check : " $ check \ n " ; <nl> + } <nl> + } <nl> + <nl> + <nl> + sub _translate_Failed_check <nl> + { <nl> + my ( $ self , $ check ) = @ _ ; <nl> + <nl> + if ( $ check = ~ / \ A ( . * ) # ( Failed . * test ) \ ( ( . * ? ) at line ( \ d + ) \ ) \ Z ( ? ! \ n ) / ) { <nl> + $ check = " / \ Q $ 1 \ E # \ \ s + \ Q $ 2 \ E . * ? \ \ n ? . * ? \ Qat $ 3 \ E line \ Q $ 4 \ E . * \ \ n ? / " ; <nl> + } <nl> + <nl> + return $ check ; <nl> + } <nl> + <nl> + <nl> + # # <nl> + # return true iff the expected data matches the got data <nl> + <nl> + sub check <nl> + { <nl> + my $ self = shift ; <nl> + <nl> + # turn off warnings as these might be undef <nl> + local $ ^ W = 0 ; <nl> + <nl> + my @ checks = @ { $ self - > { wanted } } ; <nl> + my $ got = $ self - > { got } ; <nl> + foreach my $ check ( @ checks ) { <nl> + $ check = " \ Q $ check \ E " unless ( $ check = ~ s , ^ / ( . * ) / $ , $ 1 , or ref $ check ) ; <nl> + return 0 unless $ got = ~ s / ^ $ check / / ; <nl> + } <nl> + <nl> + return length $ got = = 0 ; <nl> + } <nl> + <nl> + # # <nl> + # a complaint message about the inputs not matching ( to be <nl> + # used for debugging messages ) <nl> + <nl> + sub complaint <nl> + { <nl> + my $ self = shift ; <nl> + my $ type = $ self - > type ; <nl> + my $ got = $ self - > got ; <nl> + my $ wanted = join " \ n " , @ { $ self - > wanted } ; <nl> + <nl> + # are we running in colour mode ? <nl> + if ( Test : : Builder : : Tester : : color ) <nl> + { <nl> + # get color <nl> + eval " require Term : : ANSIColor " ; <nl> + unless ( $ @ ) <nl> + { <nl> + # colours <nl> + <nl> + my $ green = Term : : ANSIColor : : color ( " black " ) . <nl> + Term : : ANSIColor : : color ( " on_green " ) ; <nl> + my $ red = Term : : ANSIColor : : color ( " black " ) . <nl> + Term : : ANSIColor : : color ( " on_red " ) ; <nl> + my $ reset = Term : : ANSIColor : : color ( " reset " ) ; <nl> + <nl> + # work out where the two strings start to differ <nl> + my $ char = 0 ; <nl> + $ char + + while substr ( $ got , $ char , 1 ) eq substr ( $ wanted , $ char , 1 ) ; <nl> + <nl> + # get the start string and the two end strings <nl> + my $ start = $ green . substr ( $ wanted , 0 , $ char ) ; <nl> + my $ gotend = $ red . substr ( $ got , $ char ) . $ reset ; <nl> + my $ wantedend = $ red . substr ( $ wanted , $ char ) . $ reset ; <nl> + <nl> + # make the start turn green on and off <nl> + $ start = ~ s / \ n / $ reset \ n $ green / g ; <nl> + <nl> + # make the ends turn red on and off <nl> + $ gotend = ~ s / \ n / $ reset \ n $ red / g ; <nl> + $ wantedend = ~ s / \ n / $ reset \ n $ red / g ; <nl> + <nl> + # rebuild the strings <nl> + $ got = $ start . $ gotend ; <nl> + $ wanted = $ start . $ wantedend ; <nl> + } <nl> + } <nl> + <nl> + return " $ type is : \ n " . 
<nl> + " $ got \ nnot : \ n $ wanted \ nas expected " <nl> + } <nl> + <nl> + # # <nl> + # forget all expected and got data <nl> + <nl> + sub reset <nl> + { <nl> + my $ self = shift ; <nl> + % $ self = ( <nl> + type = > $ self - > { type } , <nl> + got = > ' ' , <nl> + wanted = > [ ] , <nl> + ) ; <nl> + } <nl> + <nl> + <nl> + sub got <nl> + { <nl> + my $ self = shift ; <nl> + return $ self - > { got } ; <nl> + } <nl> + <nl> + sub wanted <nl> + { <nl> + my $ self = shift ; <nl> + return $ self - > { wanted } ; <nl> + } <nl> + <nl> + sub type <nl> + { <nl> + my $ self = shift ; <nl> + return $ self - > { type } ; <nl> + } <nl> + <nl> + # # # <nl> + # tie interface <nl> + # # # <nl> + <nl> + sub PRINT { <nl> + my $ self = shift ; <nl> + $ self - > { got } . = join ' ' , @ _ ; <nl> + } <nl> + <nl> + sub TIEHANDLE { <nl> + my ( $ class , $ type ) = @ _ ; <nl> + <nl> + my $ self = bless { <nl> + type = > $ type <nl> + } , $ class ; <nl> + <nl> + $ self - > reset ; <nl> + <nl> + return $ self ; <nl> + } <nl> + <nl> + sub READ { } <nl> + sub READLINE { } <nl> + sub GETC { } <nl> + sub FILENO { } <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . b479e71a946 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Builder / Tester / Color . pm <nl> <nl> + package Test : : Builder : : Tester : : Color ; <nl> + <nl> + use strict ; <nl> + <nl> + require Test : : Builder : : Tester ; <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Builder : : Tester : : Color - turn on colour in Test : : Builder : : Tester <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + When running a test script <nl> + <nl> + perl - MTest : : Builder : : Tester : : Color test . t <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + Importing this module causes the subroutine color in Test : : Builder : : Tester <nl> + to be called with a true value causing colour highlighting to be turned <nl> + on in debug output . <nl> + <nl> + The sole purpose of this module is to enable colour highlighting <nl> + from the command line . <nl> + <nl> + = cut <nl> + <nl> + sub import <nl> + { <nl> + Test : : Builder : : Tester : : color ( 1 ) ; <nl> + } <nl> + <nl> + = head1 AUTHOR <nl> + <nl> + Copyright Mark Fowler E < lt > mark @ twoshortplanks . comE < gt > 2002 . <nl> + <nl> + This program is free software ; you can redistribute it <nl> + and / or modify it under the same terms as Perl itself . <nl> + <nl> + = head1 BUGS <nl> + <nl> + This module will have no effect unless Term : : ANSIColor is installed . <nl> + <nl> + = head1 SEE ALSO <nl> + <nl> + L < Test : : Builder : : Tester > , L < Term : : ANSIColor > <nl> + <nl> + = cut <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . 1991a60f673 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Harness . pm <nl> <nl> + # - * - Mode : cperl ; cperl - indent - level : 4 - * - <nl> + <nl> + package Test : : Harness ; <nl> + <nl> + require 5 . 00405 ; <nl> + use Test : : Harness : : Straps ; <nl> + use Test : : Harness : : Assert ; <nl> + use Exporter ; <nl> + use Benchmark ; <nl> + use Config ; <nl> + use strict ; <nl> + <nl> + <nl> + use vars qw ( <nl> + $ VERSION <nl> + @ ISA @ EXPORT @ EXPORT_OK <nl> + $ Verbose $ Switches $ Debug <nl> + $ verbose $ switches $ debug <nl> + $ Columns <nl> + $ Timer <nl> + $ ML $ Last_ML_Print <nl> + $ Strap <nl> + $ has_time_hires <nl> + ) ; <nl> + <nl> + BEGIN { <nl> + eval q { use Time : : HiRes ' time ' } ; <nl> + $ has_time_hires = ! 
$ @ ; <nl> + } <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Harness - Run Perl standard test scripts with statistics <nl> + <nl> + = head1 VERSION <nl> + <nl> + Version 2 . 64 <nl> + <nl> + = cut <nl> + <nl> + $ VERSION = ' 2 . 64 ' ; <nl> + <nl> + # Backwards compatibility for exportable variable names . <nl> + * verbose = * Verbose ; <nl> + * switches = * Switches ; <nl> + * debug = * Debug ; <nl> + <nl> + $ ENV { HARNESS_ACTIVE } = 1 ; <nl> + $ ENV { HARNESS_VERSION } = $ VERSION ; <nl> + <nl> + END { <nl> + # For VMS . <nl> + delete $ ENV { HARNESS_ACTIVE } ; <nl> + delete $ ENV { HARNESS_VERSION } ; <nl> + } <nl> + <nl> + my $ Files_In_Dir = $ ENV { HARNESS_FILELEAK_IN_DIR } ; <nl> + <nl> + # Stolen from Params : : Util <nl> + sub _CLASS { <nl> + ( defined $ _ [ 0 ] and ! ref $ _ [ 0 ] and $ _ [ 0 ] = ~ m / ^ [ ^ \ W \ d ] \ w * ( ? : : : \ w + ) * $ / s ) ? $ _ [ 0 ] : undef ; <nl> + } <nl> + <nl> + # Strap Overloading <nl> + if ( $ ENV { HARNESS_STRAPS_CLASS } ) { <nl> + die ' Set HARNESS_STRAP_CLASS , singular , not HARNESS_STRAPS_CLASS ' ; <nl> + } <nl> + my $ HARNESS_STRAP_CLASS = $ ENV { HARNESS_STRAP_CLASS } | | ' Test : : Harness : : Straps ' ; <nl> + if ( $ HARNESS_STRAP_CLASS = ~ / \ . pm $ / ) { <nl> + # " Class " is actually a filename , that should return the <nl> + # class name as its true return value . <nl> + $ HARNESS_STRAP_CLASS = require $ HARNESS_STRAP_CLASS ; <nl> + if ( ! _CLASS ( $ HARNESS_STRAP_CLASS ) ) { <nl> + die " HARNESS_STRAP_CLASS ' $ HARNESS_STRAP_CLASS ' is not a valid class name " ; <nl> + } <nl> + } <nl> + else { <nl> + # It is a class name within the current @ INC <nl> + if ( ! _CLASS ( $ HARNESS_STRAP_CLASS ) ) { <nl> + die " HARNESS_STRAP_CLASS ' $ HARNESS_STRAP_CLASS ' is not a valid class name " ; <nl> + } <nl> + eval " require $ HARNESS_STRAP_CLASS " ; <nl> + die $ @ if $ @ ; <nl> + } <nl> + if ( ! $ HARNESS_STRAP_CLASS - > isa ( ' Test : : Harness : : Straps ' ) ) { <nl> + die " HARNESS_STRAP_CLASS ' $ HARNESS_STRAP_CLASS ' must be a Test : : Harness : : Straps subclass " ; <nl> + } <nl> + <nl> + $ Strap = $ HARNESS_STRAP_CLASS - > new ; <nl> + <nl> + sub strap { return $ Strap } ; <nl> + <nl> + @ ISA = ( ' Exporter ' ) ; <nl> + @ EXPORT = qw ( & runtests ) ; <nl> + @ EXPORT_OK = qw ( & execute_tests $ verbose $ switches ) ; <nl> + <nl> + $ Verbose = $ ENV { HARNESS_VERBOSE } | | 0 ; <nl> + $ Debug = $ ENV { HARNESS_DEBUG } | | 0 ; <nl> + $ Switches = ' - w ' ; <nl> + $ Columns = $ ENV { HARNESS_COLUMNS } | | $ ENV { COLUMNS } | | 80 ; <nl> + $ Columns - - ; # Some shells have trouble with a full line of text . <nl> + $ Timer = $ ENV { HARNESS_TIMER } | | 0 ; <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + use Test : : Harness ; <nl> + <nl> + runtests ( @ test_files ) ; <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + B < STOP ! > If all you want to do is write a test script , consider <nl> + using Test : : Simple . Test : : Harness is the module that reads the <nl> + output from Test : : Simple , Test : : More and other modules based on <nl> + Test : : Builder . You don ' t need to know about Test : : Harness to use <nl> + those modules . <nl> + <nl> + Test : : Harness runs tests and expects output from the test in a <nl> + certain format . That format is called TAP , the Test Anything <nl> + Protocol . It is defined in L < Test : : Harness : : TAP > . <nl> + <nl> + C < Test : : Harness : : runtests ( @ tests ) > runs all the testscripts named <nl> + as arguments and checks standard output for the expected strings <nl> + in TAP format . 
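<nl> + <nl> + As a sketch of what that format looks like , here is a tiny hand - rolled <nl> + test script ( the file name is hypothetical , and real suites would <nl> + normally emit TAP via Test : : More rather than printing it directly ) : <nl> + <nl> + #!/usr/bin/perl <nl> + # t/sketch.t - emits a two-test TAP stream by hand <nl> + print "1..2\n"; # the plan: two tests follow <nl> + print "ok 1 - addition works\n"; <nl> + print "not ok 2 - subtraction works\n"; <nl> + <nl> + Fed to C < runtests ( ' t / sketch . t ' ) > , the harness would count one <nl> + pass and one failure , and report the file as failed .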
<nl> + <nl> + The F < prove > utility is a thin wrapper around Test : : Harness . <nl> + <nl> + = head2 Taint mode <nl> + <nl> + Test : : Harness will honor the C < - T > or C < - t > in the # ! line on your <nl> + test files . So if you begin a test with : <nl> + <nl> + # ! perl - T <nl> + <nl> + the test will be run with taint mode on . <nl> + <nl> + = head2 Configuration variables . <nl> + <nl> + These variables can be used to configure the behavior of <nl> + Test : : Harness . They are exported on request . <nl> + <nl> + = over 4 <nl> + <nl> + = item C < $ Test : : Harness : : Verbose > <nl> + <nl> + The package variable C < $ Test : : Harness : : Verbose > is exportable and can be <nl> + used to let C < runtests ( ) > display the standard output of the script <nl> + without altering the behavior otherwise . The F < prove > utility ' s C < - v > <nl> + flag will set this . <nl> + <nl> + = item C < $ Test : : Harness : : switches > <nl> + <nl> + The package variable C < $ Test : : Harness : : switches > is exportable and can be <nl> + used to set perl command line options used for running the test <nl> + script ( s ) . The default value is C < - w > . It overrides C < HARNESS_PERL_SWITCHES > . <nl> + <nl> + = item C < $ Test : : Harness : : Timer > <nl> + <nl> + If set to true , and C < Time : : HiRes > is available , print elapsed seconds <nl> + after each test file . <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Failure <nl> + <nl> + When tests fail , analyze the summary report : <nl> + <nl> + t / base . . . . . . . . . . . . . . ok <nl> + t / nonumbers . . . . . . . . . ok <nl> + t / ok . . . . . . . . . . . . . . . . ok <nl> + t / test - harness . . . . . . ok <nl> + t / waterloo . . . . . . . . . . dubious <nl> + Test returned status 3 ( wstat 768 , 0x300 ) <nl> + DIED . FAILED tests 1 , 3 , 5 , 7 , 9 , 11 , 13 , 15 , 17 , 19 <nl> + Failed 10 / 20 tests , 50 . 00 % okay <nl> + Failed Test Stat Wstat Total Fail List of Failed <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> + t / waterloo . t 3 768 20 10 1 3 5 7 9 11 13 15 17 19 <nl> + Failed 1 / 5 test scripts , 80 . 00 % okay . 10 / 44 subtests failed , 77 . 27 % okay . <nl> + <nl> + Everything passed but F < t / waterloo . t > . It failed 10 of 20 tests and <nl> + exited with non - zero status indicating something dubious happened . <nl> + <nl> + The columns in the summary report mean : <nl> + <nl> + = over 4 <nl> + <nl> + = item B < Failed Test > <nl> + <nl> + The test file which failed . <nl> + <nl> + = item B < Stat > <nl> + <nl> + If the test exited with non - zero , this is its exit status . <nl> + <nl> + = item B < Wstat > <nl> + <nl> + The wait status of the test . <nl> + <nl> + = item B < Total > <nl> + <nl> + Total number of tests expected to run . <nl> + <nl> + = item B < Fail > <nl> + <nl> + Number which failed , either from " not ok " or because they never ran . <nl> + <nl> + = item B < List of Failed > <nl> + <nl> + A list of the tests which failed . Successive failures may be <nl> + abbreviated ( ie . 15 - 20 to indicate that tests 15 , 16 , 17 , 18 , 19 and <nl> + 20 failed ) . <nl> + <nl> + = back <nl> + <nl> + <nl> + = head1 FUNCTIONS <nl> + <nl> + The following functions are available . <nl> + <nl> + = head2 runtests ( @ test_files ) <nl> + <nl> + This runs all the given I < @ test_files > and divines whether they passed <nl> + or failed based on their output to STDOUT ( details above ) . 
It prints <nl> + out each individual test which failed along with a summary report and <nl> + how long it all took . <nl> + <nl> + It returns true if everything was ok . Otherwise it will C < die ( ) > with <nl> + one of the messages in the DIAGNOSTICS section . <nl> + <nl> + = cut <nl> + <nl> + sub runtests { <nl> + my ( @ tests ) = @ _ ; <nl> + <nl> + local ( $ \ , $ , ) ; <nl> + <nl> + my ( $ tot , $ failedtests , $ todo_passed ) = execute_tests ( tests = > \ @ tests ) ; <nl> + print get_results ( $ tot , $ failedtests , $ todo_passed ) ; <nl> + <nl> + my $ ok = _all_ok ( $ tot ) ; <nl> + <nl> + assert ( ( $ ok xor keys % $ failedtests ) , <nl> + q { ok status jives with $ failedtests } ) ; <nl> + <nl> + if ( ! $ ok ) { <nl> + die ( " Failed $ tot - > { bad } / $ tot - > { tests } test programs . " . <nl> + " @ { [ $ tot - > { max } - $ tot - > { ok } ] } / $ tot - > { max } subtests failed . \ n " ) ; <nl> + } <nl> + <nl> + return $ ok ; <nl> + } <nl> + <nl> + # my $ ok = _all_ok ( \ % tot ) ; <nl> + # Tells you if this test run is overall successful or not . <nl> + <nl> + sub _all_ok { <nl> + my ( $ tot ) = shift ; <nl> + <nl> + return $ tot - > { bad } = = 0 & & ( $ tot - > { max } | | $ tot - > { skipped } ) ? 1 : 0 ; <nl> + } <nl> + <nl> + # Returns all the files in a directory . This is shorthand for backwards <nl> + # compatibility on systems where C < glob ( ) > doesn ' t work right . <nl> + <nl> + sub _globdir { <nl> + local * DIRH ; <nl> + <nl> + opendir DIRH , shift ; <nl> + my @ f = readdir DIRH ; <nl> + closedir DIRH ; <nl> + <nl> + return @ f ; <nl> + } <nl> + <nl> + = head2 execute_tests ( tests = > \ @ test_files , out = > \ * FH ) <nl> + <nl> + Runs all the given C < @ test_files > ( just like C < runtests ( ) > ) but <nl> + doesn ' t generate the final report . During testing , progress <nl> + information will be written to the currently selected output <nl> + filehandle ( usually C < STDOUT > ) , or to the filehandle given by the <nl> + C < out > parameter . The I < out > parameter is optional . <nl> + <nl> + Returns a list of two values , C < $ total > and C < $ failed > , describing the <nl> + results . C < $ total > is a hash ref summary of all the tests run . Its <nl> + keys and values are these : <nl> + <nl> + bonus Number of individual todo tests unexpectedly passed <nl> + max Number of individual tests run <nl> + ok Number of individual tests passed <nl> + sub_skipped Number of individual tests skipped <nl> + todo Number of individual todo tests <nl> + <nl> + files Number of test files run <nl> + good Number of test files passed <nl> + bad Number of test files failed <nl> + tests Number of test files originally given <nl> + skipped Number of test files skipped <nl> + <nl> + If C < < $ total - > { bad } = = 0 > > and C < < $ total - > { max } > 0 > > , you ' ve <nl> + got a successful test . <nl> + <nl> + C < $ failed > is a hash ref of all the test scripts that failed . Each key <nl> + is the name of a test script , each value is another hash representing <nl> + how that script failed . Its keys are these : <nl> + <nl> + name Name of the test which failed <nl> + estat Script ' s exit value <nl> + wstat Script ' s wait status <nl> + max Number of individual tests <nl> + failed Number which failed <nl> + canon List of tests which failed ( as string ) . <nl> + <nl> + C < $ failed > should be empty if everything passed .
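<nl> + <nl> + As an illustrative sketch ( the file names are hypothetical ) , the <nl> + returned structures might be consumed like this : <nl> + <nl> + use Test::Harness qw(&execute_tests); <nl> + <nl> + my ($total, $failed) = execute_tests(tests => ['t/basic.t', 't/extra.t']); <nl> + <nl> + printf "passed %d/%d subtests; %d of %d scripts bad\n", <nl> + $total->{ok}, $total->{max}, $total->{bad}, $total->{tests}; <nl> + <nl> + # each entry of %$failed describes one failing script <nl> + for my $script (sort keys %$failed) { <nl> + print "$script: failed tests $failed->{$script}{canon}\n"; <nl> + }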
<nl> + <nl> + = cut <nl> + <nl> + sub execute_tests { <nl> + my % args = @ _ ; <nl> + my @ tests = @ { $ args { tests } } ; <nl> + my $ out = $ args { out } | | select ( ) ; <nl> + <nl> + # We allow filehandles that are symbolic refs <nl> + no strict ' refs ' ; <nl> + _autoflush ( $ out ) ; <nl> + _autoflush ( \ * STDERR ) ; <nl> + <nl> + my % failedtests ; <nl> + my % todo_passed ; <nl> + <nl> + # Test - wide totals . <nl> + my ( % tot ) = ( <nl> + bonus = > 0 , <nl> + max = > 0 , <nl> + ok = > 0 , <nl> + files = > 0 , <nl> + bad = > 0 , <nl> + good = > 0 , <nl> + tests = > scalar @ tests , <nl> + sub_skipped = > 0 , <nl> + todo = > 0 , <nl> + skipped = > 0 , <nl> + bench = > 0 , <nl> + ) ; <nl> + <nl> + my @ dir_files ; <nl> + @ dir_files = _globdir $ Files_In_Dir if defined $ Files_In_Dir ; <nl> + my $ run_start_time = new Benchmark ; <nl> + <nl> + my $ width = _leader_width ( @ tests ) ; <nl> + foreach my $ tfile ( @ tests ) { <nl> + $ Last_ML_Print = 0 ; # so each test prints at least once <nl> + my ( $ leader , $ ml ) = _mk_leader ( $ tfile , $ width ) ; <nl> + local $ ML = $ ml ; <nl> + <nl> + print $ out $ leader ; <nl> + <nl> + $ tot { files } + + ; <nl> + <nl> + $ Strap - > { _seen_header } = 0 ; <nl> + if ( $ Test : : Harness : : Debug ) { <nl> + print $ out " # Running : " , $ Strap - > _command_line ( $ tfile ) , " \ n " ; <nl> + } <nl> + my $ test_start_time = $ Timer ? time : 0 ; <nl> + my $ results = $ Strap - > analyze_file ( $ tfile ) or <nl> + do { warn $ Strap - > { error } , " \ n " ; next } ; <nl> + my $ elapsed ; <nl> + if ( $ Timer ) { <nl> + $ elapsed = time - $ test_start_time ; <nl> + if ( $ has_time_hires ) { <nl> + $ elapsed = sprintf ( " % 8d ms " , $ elapsed * 1000 ) ; <nl> + } <nl> + else { <nl> + $ elapsed = sprintf ( " % 8s s " , $ elapsed ? $ elapsed : " < 1 " ) ; <nl> + } <nl> + } <nl> + else { <nl> + $ elapsed = " " ; <nl> + } <nl> + <nl> + # state of the current test . <nl> + my @ failed = grep { ! $ results - > details - > [ $ _ - 1 ] { ok } } <nl> + 1 . . @ { $ results - > details } ; <nl> + my @ todo_pass = grep { $ results - > details - > [ $ _ - 1 ] { actual_ok } & & <nl> + $ results - > details - > [ $ _ - 1 ] { type } eq ' todo ' } <nl> + 1 . . 
@ { $ results - > details } ; <nl> + <nl> + my % test = ( <nl> + ok = > $ results - > ok , <nl> + ' next ' = > $ Strap - > { ' next ' } , <nl> + max = > $ results - > max , <nl> + failed = > \ @ failed , <nl> + todo_pass = > \ @ todo_pass , <nl> + todo = > $ results - > todo , <nl> + bonus = > $ results - > bonus , <nl> + skipped = > $ results - > skip , <nl> + skip_reason = > $ results - > skip_reason , <nl> + skip_all = > $ Strap - > { skip_all } , <nl> + ml = > $ ml , <nl> + ) ; <nl> + <nl> + $ tot { bonus } + = $ results - > bonus ; <nl> + $ tot { max } + = $ results - > max ; <nl> + $ tot { ok } + = $ results - > ok ; <nl> + $ tot { todo } + = $ results - > todo ; <nl> + $ tot { sub_skipped } + = $ results - > skip ; <nl> + <nl> + my $ estatus = $ results - > exit ; <nl> + my $ wstatus = $ results - > wait ; <nl> + <nl> + if ( $ results - > passing ) { <nl> + # XXX Combine these first two <nl> + if ( $ test { max } and $ test { skipped } + $ test { bonus } ) { <nl> + my @ msg ; <nl> + push ( @ msg , " $ test { skipped } / $ test { max } skipped : $ test { skip_reason } " ) <nl> + if $ test { skipped } ; <nl> + if ( $ test { bonus } ) { <nl> + my ( $ txt , $ canon ) = _canondetail ( $ test { todo } , 0 , ' TODO passed ' , <nl> + @ { $ test { todo_pass } } ) ; <nl> + $ todo_passed { $ tfile } = { <nl> + canon = > $ canon , <nl> + max = > $ test { todo } , <nl> + failed = > $ test { bonus } , <nl> + name = > $ tfile , <nl> + estat = > ' ' , <nl> + wstat = > ' ' , <nl> + } ; <nl> + <nl> + push ( @ msg , " $ test { bonus } / $ test { max } unexpectedly succeeded \ n $ txt " ) ; <nl> + } <nl> + print $ out " $ test { ml } ok $ elapsed \ n " . join ( ' , ' , @ msg ) . " \ n " ; <nl> + } <nl> + elsif ( $ test { max } ) { <nl> + print $ out " $ test { ml } ok $ elapsed \ n " ; <nl> + } <nl> + elsif ( defined $ test { skip_all } and length $ test { skip_all } ) { <nl> + print $ out " skipped \ n all skipped : $ test { skip_all } \ n " ; <nl> + $ tot { skipped } + + ; <nl> + } <nl> + else { <nl> + print $ out " skipped \ n all skipped : no reason given \ n " ; <nl> + $ tot { skipped } + + ; <nl> + } <nl> + $ tot { good } + + ; <nl> + } <nl> + else { <nl> + # List unrun tests as failures . <nl> + if ( $ test { ' next ' } < = $ test { max } ) { <nl> + push @ { $ test { failed } } , $ test { ' next ' } . . $ test { max } ; <nl> + } <nl> + # List overruns as failures . <nl> + else { <nl> + my $ details = $ results - > details ; <nl> + foreach my $ overrun ( $ test { max } + 1 . . @ $ details ) { <nl> + next unless ref $ details - > [ $ overrun - 1 ] ; <nl> + push @ { $ test { failed } } , $ overrun <nl> + } <nl> + } <nl> + <nl> + if ( $ wstatus ) { <nl> + $ failedtests { $ tfile } = _dubious_return ( \ % test , \ % tot , <nl> + $ estatus , $ wstatus ) ; <nl> + $ failedtests { $ tfile } { name } = $ tfile ; <nl> + } <nl> + elsif ( $ results - > seen ) { <nl> + if ( @ { $ test { failed } } and $ test { max } ) { <nl> + my ( $ txt , $ canon ) = _canondetail ( $ test { max } , $ test { skipped } , ' Failed ' , <nl> + @ { $ test { failed } } ) ; <nl> + print $ out " $ test { ml } $ txt " ; <nl> + $ failedtests { $ tfile } = { canon = > $ canon , <nl> + max = > $ test { max } , <nl> + failed = > scalar @ { $ test { failed } } , <nl> + name = > $ tfile , <nl> + estat = > ' ' , <nl> + wstat = > ' ' , <nl> + } ; <nl> + } <nl> + else { <nl> + print $ out " Don ' t know which tests failed : got $ test { ok } ok , " . <nl> + " expected $ test { max } \ n " ; <nl> + $ failedtests { $ tfile } = { canon = > ' ? ? 
' , <nl> + max = > $ test { max } , <nl> + failed = > ' ? ? ' , <nl> + name = > $ tfile , <nl> + estat = > ' ' , <nl> + wstat = > ' ' , <nl> + } ; <nl> + } <nl> + $ tot { bad } + + ; <nl> + } <nl> + else { <nl> + print $ out " FAILED before any test output arrived \ n " ; <nl> + $ tot { bad } + + ; <nl> + $ failedtests { $ tfile } = { canon = > ' ? ? ' , <nl> + max = > ' ? ? ' , <nl> + failed = > ' ? ? ' , <nl> + name = > $ tfile , <nl> + estat = > ' ' , <nl> + wstat = > ' ' , <nl> + } ; <nl> + } <nl> + } <nl> + <nl> + if ( defined $ Files_In_Dir ) { <nl> + my @ new_dir_files = _globdir $ Files_In_Dir ; <nl> + if ( @ new_dir_files ! = @ dir_files ) { <nl> + my % f ; <nl> + @ f { @ new_dir_files } = ( 1 ) x @ new_dir_files ; <nl> + delete @ f { @ dir_files } ; <nl> + my @ f = sort keys % f ; <nl> + print $ out " LEAKED FILES : @ f \ n " ; <nl> + @ dir_files = @ new_dir_files ; <nl> + } <nl> + } <nl> + } # foreach test <nl> + $ tot { bench } = timediff ( new Benchmark , $ run_start_time ) ; <nl> + <nl> + $ Strap - > _restore_PERL5LIB ; <nl> + <nl> + return ( \ % tot , \ % failedtests , \ % todo_passed ) ; <nl> + } <nl> + <nl> + # Turns on autoflush for the handle passed <nl> + sub _autoflush { <nl> + my $ flushy_fh = shift ; <nl> + my $ old_fh = select $ flushy_fh ; <nl> + $ | = 1 ; <nl> + select $ old_fh ; <nl> + } <nl> + <nl> + = for private _mk_leader <nl> + <nl> + my ( $ leader , $ ml ) = _mk_leader ( $ test_file , $ width ) ; <nl> + <nl> + Generates the ' t / foo . . . . . . . . ' leader for the given C < $ test_file > as well <nl> + as a similar version which will overwrite the current line ( by use of <nl> + \ r and such ) . C < $ ml > may be empty if Test : : Harness doesn ' t think you ' re <nl> + on TTY . <nl> + <nl> + The C < $ width > is the width of the " yada / blah . . " string . <nl> + <nl> + = cut <nl> + <nl> + sub _mk_leader { <nl> + my ( $ te , $ width ) = @ _ ; <nl> + chomp ( $ te ) ; <nl> + $ te = ~ s / \ . \ w + $ / . / ; <nl> + <nl> + if ( $ ^ O eq ' VMS ' ) { <nl> + $ te = ~ s / ^ . * \ . t \ . / \ [ . t . / s ; <nl> + } <nl> + my $ leader = " $ te " . ' . ' x ( $ width - length ( $ te ) ) ; <nl> + my $ ml = " " ; <nl> + <nl> + if ( - t STDOUT and not $ ENV { HARNESS_NOTTY } and not $ Verbose ) { <nl> + $ ml = " \ r " . ( ' ' x 77 ) . " \ r $ leader " <nl> + } <nl> + <nl> + return ( $ leader , $ ml ) ; <nl> + } <nl> + <nl> + = for private _leader_width <nl> + <nl> + my ( $ width ) = _leader_width ( @ test_files ) ; <nl> + <nl> + Calculates how wide the leader should be based on the length of the <nl> + longest test name . <nl> + <nl> + = cut <nl> + <nl> + sub _leader_width { <nl> + my $ maxlen = 0 ; <nl> + my $ maxsuflen = 0 ; <nl> + foreach ( @ _ ) { <nl> + my $ suf = / \ . ( \ w + ) $ / ? $ 1 : ' ' ; <nl> + my $ len = length ; <nl> + my $ suflen = length $ suf ; <nl> + $ maxlen = $ len if $ len > $ maxlen ; <nl> + $ maxsuflen = $ suflen if $ suflen > $ maxsuflen ; <nl> + } <nl> + # + 3 : we want three dots between the test name and the " ok " <nl> + return $ maxlen + 3 - $ maxsuflen ; <nl> + } <nl> + <nl> + sub get_results { <nl> + my $ tot = shift ; <nl> + my $ failedtests = shift ; <nl> + my $ todo_passed = shift ; <nl> + <nl> + my $ out = ' ' ; <nl> + <nl> + my $ bonusmsg = _bonusmsg ( $ tot ) ; <nl> + <nl> + if ( _all_ok ( $ tot ) ) { <nl> + $ out . = " All tests successful $ bonusmsg . \ n " ; <nl> + if ( $ tot - > { bonus } ) { <nl> + my ( $ fmt_top , $ fmt ) = _create_fmts ( " Passed TODO " , $ todo_passed ) ; <nl> + # Now write to formats <nl> + $ out . 
= swrite ( $ fmt_top ) ; <nl> + for my $ script ( sort keys % { $ todo_passed | | { } } ) { <nl> + my $ Curtest = $ todo_passed - > { $ script } ; <nl> + $ out . = swrite ( $ fmt , @ { $ Curtest } { qw ( name estat wstat max failed canon ) } ) ; <nl> + } <nl> + } <nl> + } <nl> + elsif ( ! $ tot - > { tests } ) { <nl> + die " FAILED - - no tests were run for some reason . \ n " ; <nl> + } <nl> + elsif ( ! $ tot - > { max } ) { <nl> + my $ blurb = $ tot - > { tests } = = 1 ? " script " : " scripts " ; <nl> + die " FAILED - - $ tot - > { tests } test $ blurb could be run , " . <nl> + " alas - - no output ever seen \ n " ; <nl> + } <nl> + else { <nl> + my $ subresults = sprintf ( " % d / % d subtests failed . " , <nl> + $ tot - > { max } - $ tot - > { ok } , $ tot - > { max } ) ; <nl> + <nl> + my ( $ fmt_top , $ fmt1 , $ fmt2 ) = _create_fmts ( " Failed Test " , $ failedtests ) ; <nl> + <nl> + # Now write to formats <nl> + $ out . = swrite ( $ fmt_top ) ; <nl> + for my $ script ( sort keys % $ failedtests ) { <nl> + my $ Curtest = $ failedtests - > { $ script } ; <nl> + $ out . = swrite ( $ fmt1 , @ { $ Curtest } { qw ( name estat wstat max failed canon ) } ) ; <nl> + $ out . = swrite ( $ fmt2 , $ Curtest - > { canon } ) ; <nl> + } <nl> + if ( $ tot - > { bad } ) { <nl> + $ bonusmsg = ~ s / ^ , \ s * / / ; <nl> + $ out . = " $ bonusmsg . \ n " if $ bonusmsg ; <nl> + $ out . = " Failed $ tot - > { bad } / $ tot - > { tests } test scripts . $ subresults \ n " ; <nl> + } <nl> + } <nl> + <nl> + $ out . = sprintf ( " Files = % d , Tests = % d , % s \ n " , <nl> + $ tot - > { files } , $ tot - > { max } , timestr ( $ tot - > { bench } , ' nop ' ) ) ; <nl> + return $ out ; <nl> + } <nl> + <nl> + sub swrite { <nl> + my $ format = shift ; <nl> + $ ^ A = ' ' ; <nl> + formline ( $ format , @ _ ) ; <nl> + my $ out = $ ^ A ; <nl> + $ ^ A = ' ' ; <nl> + return $ out ; <nl> + } <nl> + <nl> + <nl> + my % Handlers = ( <nl> + header = > \ & header_handler , <nl> + test = > \ & test_handler , <nl> + bailout = > \ & bailout_handler , <nl> + ) ; <nl> + <nl> + $ Strap - > set_callback ( \ & strap_callback ) ; <nl> + sub strap_callback { <nl> + my ( $ self , $ line , $ type , $ totals ) = @ _ ; <nl> + print $ line if $ Verbose ; <nl> + <nl> + my $ meth = $ Handlers { $ type } ; <nl> + $ meth - > ( $ self , $ line , $ type , $ totals ) if $ meth ; <nl> + } ; <nl> + <nl> + <nl> + sub header_handler { <nl> + my ( $ self , $ line , $ type , $ totals ) = @ _ ; <nl> + <nl> + warn " Test header seen more than once ! \ n " if $ self - > { _seen_header } ; <nl> + <nl> + $ self - > { _seen_header } + + ; <nl> + <nl> + warn " 1 . . 
M can only appear at the beginning or end of tests \ n " <nl> + if $ totals - > seen & & ( $ totals - > max < $ totals - > seen ) ; <nl> + } ; <nl> + <nl> + sub test_handler { <nl> + my ( $ self , $ line , $ type , $ totals ) = @ _ ; <nl> + <nl> + my $ curr = $ totals - > seen ; <nl> + my $ next = $ self - > { ' next ' } ; <nl> + my $ max = $ totals - > max ; <nl> + my $ detail = $ totals - > details - > [ - 1 ] ; <nl> + <nl> + if ( $ detail - > { ok } ) { <nl> + _print_ml_less ( " ok $ curr / $ max " ) ; <nl> + <nl> + if ( $ detail - > { type } eq ' skip ' ) { <nl> + $ totals - > set_skip_reason ( $ detail - > { reason } ) <nl> + unless defined $ totals - > skip_reason ; <nl> + $ totals - > set_skip_reason ( ' various reasons ' ) <nl> + if $ totals - > skip_reason ne $ detail - > { reason } ; <nl> + } <nl> + } <nl> + else { <nl> + _print_ml ( " NOK $ curr / $ max " ) ; <nl> + } <nl> + <nl> + if ( $ curr > $ next ) { <nl> + print " Test output counter mismatch [ test $ curr ] \ n " ; <nl> + } <nl> + elsif ( $ curr < $ next ) { <nl> + print " Confused test output : test $ curr answered after " . <nl> + " test " , $ next - 1 , " \ n " ; <nl> + } <nl> + <nl> + } ; <nl> + <nl> + sub bailout_handler { <nl> + my ( $ self , $ line , $ type , $ totals ) = @ _ ; <nl> + <nl> + die " FAILED - - Further testing stopped " . <nl> + ( $ self - > { bailout_reason } ? " : $ self - > { bailout_reason } \ n " : " . \ n " ) ; <nl> + } ; <nl> + <nl> + <nl> + sub _print_ml { <nl> + print join ' ' , $ ML , @ _ if $ ML ; <nl> + } <nl> + <nl> + <nl> + # Print updates only once per second . <nl> + sub _print_ml_less { <nl> + my $ now = CORE : : time ; <nl> + if ( $ Last_ML_Print ! = $ now ) { <nl> + _print_ml ( @ _ ) ; <nl> + $ Last_ML_Print = $ now ; <nl> + } <nl> + } <nl> + <nl> + sub _bonusmsg { <nl> + my ( $ tot ) = @ _ ; <nl> + <nl> + my $ bonusmsg = ' ' ; <nl> + $ bonusmsg = ( " ( $ tot - > { bonus } subtest " . ( $ tot - > { bonus } > 1 ? ' s ' : ' ' ) . <nl> + " UNEXPECTEDLY SUCCEEDED ) " ) <nl> + if $ tot - > { bonus } ; <nl> + <nl> + if ( $ tot - > { skipped } ) { <nl> + $ bonusmsg . = " , $ tot - > { skipped } test " <nl> + . ( $ tot - > { skipped } ! = 1 ? ' s ' : ' ' ) ; <nl> + if ( $ tot - > { sub_skipped } ) { <nl> + $ bonusmsg . = " and $ tot - > { sub_skipped } subtest " <nl> + . ( $ tot - > { sub_skipped } ! = 1 ? ' s ' : ' ' ) ; <nl> + } <nl> + $ bonusmsg . = ' skipped ' ; <nl> + } <nl> + elsif ( $ tot - > { sub_skipped } ) { <nl> + $ bonusmsg . = " , $ tot - > { sub_skipped } subtest " <nl> + . ( $ tot - > { sub_skipped } ! = 1 ? ' s ' : ' ' ) <nl> + . " skipped " ; <nl> + } <nl> + return $ bonusmsg ; <nl> + } <nl> + <nl> + # Test program go boom . <nl> + sub _dubious_return { <nl> + my ( $ test , $ tot , $ estatus , $ wstatus ) = @ _ ; <nl> + <nl> + my $ failed = ' ? ? ' ; <nl> + my $ canon = ' ? ? ' ; <nl> + <nl> + printf " $ test - > { ml } dubious \ n \ tTest returned status $ estatus " . <nl> + " ( wstat % d , 0x % x ) \ n " , <nl> + $ wstatus , $ wstatus ; <nl> + print " \ t \ t ( VMS status is $ estatus ) \ n " if $ ^ O eq ' VMS ' ; <nl> + <nl> + $ tot - > { bad } + + ; <nl> + <nl> + if ( $ test - > { max } ) { <nl> + if ( $ test - > { ' next ' } = = $ test - > { max } + 1 and not @ { $ test - > { failed } } ) { <nl> + print " \ tafter all the subtests completed successfully \ n " ; <nl> + $ failed = 0 ; # But we do not set $ canon ! <nl> + } <nl> + else { <nl> + push @ { $ test - > { failed } } , $ test - > { ' next ' } . . 
$ test - > { max } ; <nl> + $ failed = @ { $ test - > { failed } } ; <nl> + ( my $ txt , $ canon ) = _canondetail ( $ test - > { max } , $ test - > { skipped } , ' Failed ' , @ { $ test - > { failed } } ) ; <nl> + print " DIED . " , $ txt ; <nl> + } <nl> + } <nl> + <nl> + return { canon = > $ canon , max = > $ test - > { max } | | ' ? ? ' , <nl> + failed = > $ failed , <nl> + estat = > $ estatus , wstat = > $ wstatus , <nl> + } ; <nl> + } <nl> + <nl> + <nl> + sub _create_fmts { <nl> + my $ failed_str = shift ; <nl> + my $ failedtests = shift ; <nl> + <nl> + my ( $ type ) = split / \ s / , $ failed_str ; <nl> + my $ short = substr ( $ type , 0 , 4 ) ; <nl> + my $ total = $ short eq ' Pass ' ? ' TODOs ' : ' Total ' ; <nl> + my $ middle_str = " Stat Wstat $ total $ short " ; <nl> + my $ list_str = " List of $ type " ; <nl> + <nl> + # Figure out our longest name string for formatting purposes . <nl> + my $ max_namelen = length ( $ failed_str ) ; <nl> + foreach my $ script ( keys % $ failedtests ) { <nl> + my $ namelen = length $ failedtests - > { $ script } - > { name } ; <nl> + $ max_namelen = $ namelen if $ namelen > $ max_namelen ; <nl> + } <nl> + <nl> + my $ list_len = $ Columns - length ( $ middle_str ) - $ max_namelen ; <nl> + if ( $ list_len < length ( $ list_str ) ) { <nl> + $ list_len = length ( $ list_str ) ; <nl> + $ max_namelen = $ Columns - length ( $ middle_str ) - $ list_len ; <nl> + if ( $ max_namelen < length ( $ failed_str ) ) { <nl> + $ max_namelen = length ( $ failed_str ) ; <nl> + $ Columns = $ max_namelen + length ( $ middle_str ) + $ list_len ; <nl> + } <nl> + } <nl> + <nl> + my $ fmt_top = sprintf ( " % - $ { max_namelen } s " , $ failed_str ) <nl> + . $ middle_str <nl> + . $ list_str . " \ n " <nl> + . " - " x $ Columns <nl> + . " \ n " ; <nl> + <nl> + my $ fmt1 = " @ " . " < " x ( $ max_namelen - 1 ) <nl> + . " @ > > @ > > > > @ > > > > @ > > > " <nl> + . " ^ " . " < " x ( $ list_len - 1 ) . " \ n " ; <nl> + my $ fmt2 = " ~ ~ " . " " x ( $ Columns - $ list_len - 2 ) . " ^ " <nl> + . " < " x ( $ list_len - 1 ) . " \ n " ; <nl> + <nl> + return ( $ fmt_top , $ fmt1 , $ fmt2 ) ; <nl> + } <nl> + <nl> + sub _canondetail { <nl> + my $ max = shift ; <nl> + my $ skipped = shift ; <nl> + my $ type = shift ; <nl> + my @ detail = @ _ ; <nl> + my % seen ; <nl> + @ detail = sort { $ a < = > $ b } grep ! $ seen { $ _ } + + , @ detail ; <nl> + my $ detail = @ detail ; <nl> + my @ result = ( ) ; <nl> + my @ canon = ( ) ; <nl> + my $ min ; <nl> + my $ last = $ min = shift @ detail ; <nl> + my $ canon ; <nl> + my $ uc_type = uc ( $ type ) ; <nl> + if ( @ detail ) { <nl> + for ( @ detail , $ detail [ - 1 ] ) { # don ' t forget the last one <nl> + if ( $ _ > $ last + 1 | | $ _ = = $ last ) { <nl> + push @ canon , ( $ min = = $ last ) ? $ last : " $ min - $ last " ; <nl> + $ min = $ _ ; <nl> + } <nl> + $ last = $ _ ; <nl> + } <nl> + local $ " = " , " ; <nl> + push @ result , " $ uc_type tests @ canon \ n " ; <nl> + $ canon = join ' ' , @ canon ; <nl> + } <nl> + else { <nl> + push @ result , " $ uc_type test $ last \ n " ; <nl> + $ canon = $ last ; <nl> + } <nl> + <nl> + return ( join ( " " , @ result ) , $ canon ) <nl> + if $ type = ~ / todo / i ; <nl> + push @ result , " \ t $ type $ detail / $ max tests , " ; <nl> + if ( $ max ) { <nl> + push @ result , sprintf ( " % . 2f " , 100 * ( 1 - $ detail / $ max ) ) , " % okay " ; <nl> + } <nl> + else { <nl> + push @ result , " ? 
% okay " ; <nl> + } <nl> + my $ ender = ' s ' x ( $ skipped > 1 ) ; <nl> + if ( $ skipped ) { <nl> + my $ good = $ max - $ detail - $ skipped ; <nl> + my $ skipmsg = " ( less $ skipped skipped test $ ender : $ good okay , " ; <nl> + if ( $ max ) { <nl> + my $ goodper = sprintf ( " % . 2f " , 100 * ( $ good / $ max ) ) ; <nl> + $ skipmsg . = " $ goodper % ) " ; <nl> + } <nl> + else { <nl> + $ skipmsg . = " ? % ) " ; <nl> + } <nl> + push @ result , $ skipmsg ; <nl> + } <nl> + push @ result , " \ n " ; <nl> + my $ txt = join " " , @ result ; <nl> + return ( $ txt , $ canon ) ; <nl> + } <nl> + <nl> + 1 ; <nl> + __END__ <nl> + <nl> + <nl> + = head1 EXPORT <nl> + <nl> + C < & runtests > is exported by Test : : Harness by default . <nl> + <nl> + C < & execute_tests > , C < $ verbose > , C < $ switches > and C < $ debug > are <nl> + exported upon request . <nl> + <nl> + = head1 DIAGNOSTICS <nl> + <nl> + = over 4 <nl> + <nl> + = item C < All tests successful . \ nFiles = % d , Tests = % d , % s > <nl> + <nl> + If all tests are successful some statistics about the performance are <nl> + printed . <nl> + <nl> + = item C < FAILED tests % s \ n \ tFailed % d / % d tests , % . 2f % % okay . > <nl> + <nl> + For any single script that has failing subtests statistics like the <nl> + above are printed . <nl> + <nl> + = item C < Test returned status % d ( wstat % d ) > <nl> + <nl> + Scripts that return a non - zero exit status , both C < $ ? E < gt > E < gt > 8 > <nl> + and C < $ ? > are printed in a message similar to the above . <nl> + <nl> + = item C < Failed 1 test , % . 2f % % okay . % s > <nl> + <nl> + = item C < Failed % d / % d tests , % . 2f % % okay . % s > <nl> + <nl> + If not all tests were successful , the script dies with one of the <nl> + above messages . <nl> + <nl> + = item C < FAILED - - Further testing stopped : % s > <nl> + <nl> + If a single subtest decides that further testing will not make sense , <nl> + the script dies with this message . <nl> + <nl> + = back <nl> + <nl> + = head1 ENVIRONMENT VARIABLES THAT TEST : : HARNESS SETS <nl> + <nl> + Test : : Harness sets these before executing the individual tests . <nl> + <nl> + = over 4 <nl> + <nl> + = item C < HARNESS_ACTIVE > <nl> + <nl> + This is set to a true value . It allows the tests to determine if they <nl> + are being executed through the harness or by any other means . <nl> + <nl> + = item C < HARNESS_VERSION > <nl> + <nl> + This is the version of Test : : Harness . <nl> + <nl> + = back <nl> + <nl> + = head1 ENVIRONMENT VARIABLES THAT AFFECT TEST : : HARNESS <nl> + <nl> + = over 4 <nl> + <nl> + = item C < HARNESS_COLUMNS > <nl> + <nl> + This value will be used for the width of the terminal . If it is not <nl> + set then it will default to C < COLUMNS > . If this is not set , it will <nl> + default to 80 . Note that users of Bourne - sh based shells will need to <nl> + C < export COLUMNS > for this module to use that variable . <nl> + <nl> + = item C < HARNESS_COMPILE_TEST > <nl> + <nl> + When true it will make harness attempt to compile the test using <nl> + C < perlcc > before running it . <nl> + <nl> + B < NOTE > This currently only works when sitting in the perl source <nl> + directory ! <nl> + <nl> + = item C < HARNESS_DEBUG > <nl> + <nl> + If true , Test : : Harness will print debugging information about itself as <nl> + it runs the tests . This is different from C < HARNESS_VERBOSE > , which prints <nl> + the output from the test being run . 
Setting C < $ Test : : Harness : : Debug > will <nl> + override this , or you can use the C < - d > switch in the F < prove > utility . <nl> + <nl> + = item C < HARNESS_FILELEAK_IN_DIR > <nl> + <nl> + When set to the name of a directory , harness will check after each <nl> + test whether new files appeared in that directory , and report them as <nl> + <nl> + LEAKED FILES : scr . tmp 0 my . db <nl> + <nl> + If relative , the directory name is taken with respect to the current <nl> + directory at the moment runtests ( ) was called . Putting an absolute path <nl> + into C < HARNESS_FILELEAK_IN_DIR > may give more predictable results . <nl> + <nl> + = item C < HARNESS_NOTTY > <nl> + <nl> + When set to a true value , forces the harness to behave as though STDOUT were <nl> + not a console . You may need to set this if you don ' t want harness to <nl> + output more frequent progress messages using carriage returns . Some <nl> + consoles may not handle carriage returns properly ( which results in a <nl> + somewhat messy output ) . <nl> + <nl> + = item C < HARNESS_PERL > <nl> + <nl> + Usually your tests will be run by C < $ ^ X > , the currently - executing Perl . <nl> + However , you may want to have it run by a different executable , such as <nl> + a threading perl , or a different version . <nl> + <nl> + If you ' re using the F < prove > utility , you can use the C < - - perl > switch . <nl> + <nl> + = item C < HARNESS_PERL_SWITCHES > <nl> + <nl> + Its value will be prepended to the switches used to invoke perl on <nl> + each test . For example , setting C < HARNESS_PERL_SWITCHES > to C < - W > will <nl> + run all tests with all warnings enabled . <nl> + <nl> + = item C < HARNESS_TIMER > <nl> + <nl> + Setting this to true will make the harness display the number of <nl> + milliseconds each test took . You can also use F < prove > ' s C < - - timer > <nl> + switch . <nl> + <nl> + = item C < HARNESS_VERBOSE > <nl> + <nl> + If true , Test : : Harness will output the verbose results of running <nl> + its tests . Setting C < $ Test : : Harness : : verbose > will override this , <nl> + or you can use the C < - v > switch in the F < prove > utility . <nl> + <nl> + = item C < HARNESS_STRAP_CLASS > <nl> + <nl> + Defines the Test : : Harness : : Straps subclass to use . The value may either <nl> + be a filename or a class name . <nl> + <nl> + If HARNESS_STRAP_CLASS is a class name , the class must be in C < @ INC > <nl> + like any other class . <nl> + <nl> + If HARNESS_STRAP_CLASS is a filename , the . pm file must return the name <nl> + of the class , instead of the canonical " 1 " . <nl> + <nl> + = back <nl> + <nl> + = head1 EXAMPLE <nl> + <nl> + Here ' s how Test : : Harness tests itself : <nl> + <nl> + $ cd ~ / src / devel / Test - Harness <nl> + $ perl - Mblib - e ' use Test : : Harness qw ( & runtests $ verbose ) ; <nl> + $ verbose = 0 ; runtests @ ARGV ; ' t / * . t <nl> + Using / home / schwern / src / devel / Test - Harness / blib <nl> + t / base . . . . . . . . . . . . . . ok <nl> + t / nonumbers . . . . . . . . . ok <nl> + t / ok . . . . . . . . . . . . . . . . ok <nl> + t / test - harness . . . . . . ok <nl> + All tests successful . <nl> + Files = 4 , Tests = 24 , 2 wallclock secs ( 0 . 61 cusr + 0 . 41 csys = 1 .
02 CPU ) <nl> + <nl> + = head1 SEE ALSO <nl> + <nl> + The included F < prove > utility for running test scripts from the command line , <nl> + L < Test > and L < Test : : Simple > for writing test scripts , L < Benchmark > for <nl> + the underlying timing routines , and L < Devel : : Cover > for test coverage <nl> + analysis . <nl> + <nl> + = head1 TODO <nl> + <nl> + Provide a way of running tests quietly ( ie . no printing ) for automated <nl> + validation of tests . This will probably take the form of a version <nl> + of runtests ( ) which rather than printing its output returns raw data <nl> + on the state of the tests . ( Partially done in Test : : Harness : : Straps ) <nl> + <nl> + Document the format . <nl> + <nl> + Fix HARNESS_COMPILE_TEST without breaking its core usage . <nl> + <nl> + Figure a way to report test names in the failure summary . <nl> + <nl> + Rework the test summary so long test names are not truncated as badly . <nl> + ( Partially done with new skip test styles ) <nl> + <nl> + Add option for coverage analysis . <nl> + <nl> + Trap STDERR . <nl> + <nl> + Implement Straps total_results ( ) <nl> + <nl> + Remember exit code <nl> + <nl> + Completely redo the print summary code . <nl> + <nl> + Straps - > analyze_file ( ) not taint clean , don ' t know if it can be <nl> + <nl> + Fix that damned VMS nit . <nl> + <nl> + Add a test for verbose . <nl> + <nl> + Change internal list of test results to a hash . <nl> + <nl> + Fix stats display when there ' s an overrun . <nl> + <nl> + Fix so perls with spaces in the filename work . <nl> + <nl> + Keeping whittling away at _run_all_tests ( ) <nl> + <nl> + Clean up how the summary is printed . Get rid of those damned formats . <nl> + <nl> + = head1 BUGS <nl> + <nl> + Please report any bugs or feature requests to <nl> + C < bug - test - harness at rt . cpan . org > , or through the web interface at <nl> + L < http : / / rt . cpan . org / NoAuth / ReportBug . html ? Queue = Test - Harness > . <nl> + I will be notified , and then you ' ll automatically be notified of progress on <nl> + your bug as I make changes . <nl> + <nl> + = head1 SUPPORT <nl> + <nl> + You can find documentation for this module with the F < perldoc > command . <nl> + <nl> + perldoc Test : : Harness <nl> + <nl> + You can get docs for F < prove > with <nl> + <nl> + prove - - man <nl> + <nl> + You can also look for information at : <nl> + <nl> + = over 4 <nl> + <nl> + = item * AnnoCPAN : Annotated CPAN documentation <nl> + <nl> + L < http : / / annocpan . org / dist / Test - Harness > <nl> + <nl> + = item * CPAN Ratings <nl> + <nl> + L < http : / / cpanratings . perl . org / d / Test - Harness > <nl> + <nl> + = item * RT : CPAN ' s request tracker <nl> + <nl> + L < http : / / rt . cpan . org / NoAuth / Bugs . html ? Dist = Test - Harness > <nl> + <nl> + = item * Search CPAN <nl> + <nl> + L < http : / / search . cpan . org / dist / Test - Harness > <nl> + <nl> + = back <nl> + <nl> + = head1 SOURCE CODE <nl> + <nl> + The source code repository for Test : : Harness is at <nl> + L < http : / / svn . perl . org / modules / Test - Harness > . <nl> + <nl> + = head1 AUTHORS <nl> + <nl> + Either Tim Bunce or Andreas Koenig , we don ' t know . What we know for <nl> + sure is , that it was inspired by Larry Wall ' s F < TEST > script that came <nl> + with perl distributions for ages . Numerous anonymous contributors <nl> + exist . Andreas Koenig held the torch for many years , and then <nl> + Michael G Schwern . 
<nl> + <nl> + Current maintainer is Andy Lester C < < < andy at petdance . com > > > . <nl> + <nl> + = head1 COPYRIGHT <nl> + <nl> + Copyright 2002 - 2006 <nl> + by Michael G Schwern C < < < schwern at pobox . com > > > , <nl> + Andy Lester C < < < andy at petdance . com > > > . <nl> + <nl> + This program is free software ; you can redistribute it and / or <nl> + modify it under the same terms as Perl itself . <nl> + <nl> + See L < http : / / www . perl . com / perl / misc / Artistic . html > . <nl> + <nl> + = cut <nl> new file mode 100644 <nl> index 00000000000 . . 29f6c7ada95 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Harness / Assert . pm <nl> <nl> + package Test : : Harness : : Assert ; <nl> + <nl> + use strict ; <nl> + require Exporter ; <nl> + use vars qw ( $ VERSION @ EXPORT @ ISA ) ; <nl> + <nl> + $ VERSION = ' 0 . 02 ' ; <nl> + <nl> + @ ISA = qw ( Exporter ) ; <nl> + @ EXPORT = qw ( assert ) ; <nl> + <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Harness : : Assert - simple assert <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + # # # FOR INTERNAL USE ONLY # # # <nl> + <nl> + use Test : : Harness : : Assert ; <nl> + <nl> + assert ( EXPR , $ name ) ; <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + A simple assert routine since we don ' t have Carp : : Assert handy . <nl> + <nl> + B < For internal use by Test : : Harness ONLY ! > <nl> + <nl> + = head1 FUNCTIONS <nl> + <nl> + = head2 C < assert ( ) > <nl> + <nl> + assert ( EXPR , $ name ) ; <nl> + <nl> + If the expression is false the program aborts . <nl> + <nl> + = cut <nl> + <nl> + sub assert ( $ ; $ ) { <nl> + my ( $ assert , $ name ) = @ _ ; <nl> + <nl> + unless ( $ assert ) { <nl> + require Carp ; <nl> + my $ msg = ' Assert failed ' ; <nl> + $ msg . = " - ' $ name ' " if defined $ name ; <nl> + $ msg . = ' ! ' ; <nl> + Carp : : croak ( $ msg ) ; <nl> + } <nl> + <nl> + } <nl> + <nl> + = head1 AUTHOR <nl> + <nl> + Michael G Schwern C < < < schwern at pobox . com > > > <nl> + <nl> + = head1 SEE ALSO <nl> + <nl> + L < Carp : : Assert > <nl> + <nl> + = cut <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . 2648cea7079 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Harness / Iterator . pm <nl> <nl> + package Test : : Harness : : Iterator ; <nl> + <nl> + use strict ; <nl> + use vars qw ( $ VERSION ) ; <nl> + $ VERSION = 0 . 02 ; <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Harness : : Iterator - Internal Test : : Harness Iterator <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + use Test : : Harness : : Iterator ; <nl> + my $ it = Test : : Harness : : Iterator - > new ( \ * TEST ) ; <nl> + my $ it = Test : : Harness : : Iterator - > new ( \ @ array ) ; <nl> + <nl> + my $ line = $ it - > next ; <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + B < FOR INTERNAL USE ONLY ! > <nl> + <nl> + This is a simple iterator wrapper for arrays and filehandles . <nl> + <nl> + = head2 new ( ) <nl> + <nl> + Create an iterator . <nl> + <nl> + = head2 next ( ) <nl> + <nl> + Iterate through it , of course . 
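<nl> + <nl> + For example , iterating over canned TAP lines held in an array ( the <nl> + lines are made up ; C < next ( ) > returns C < undef > once the array or <nl> + filehandle is exhausted ) : <nl> + <nl> + use Test::Harness::Iterator; <nl> + <nl> + my $it = Test::Harness::Iterator->new(["1..1\n", "ok 1\n"]); <nl> + while (defined(my $line = $it->next)) { <nl> + print "line: $line"; <nl> + }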
<nl> + <nl> + = cut <nl> + <nl> + sub new { <nl> + my ( $ proto , $ thing ) = @ _ ; <nl> + <nl> + my $ self = { } ; <nl> + if ( ref $ thing eq ' GLOB ' ) { <nl> + bless $ self , ' Test : : Harness : : Iterator : : FH ' ; <nl> + $ self - > { fh } = $ thing ; <nl> + } <nl> + elsif ( ref $ thing eq ' ARRAY ' ) { <nl> + bless $ self , ' Test : : Harness : : Iterator : : ARRAY ' ; <nl> + $ self - > { idx } = 0 ; <nl> + $ self - > { array } = $ thing ; <nl> + } <nl> + else { <nl> + warn " Can ' t iterate with a " , ref $ thing ; <nl> + } <nl> + <nl> + return $ self ; <nl> + } <nl> + <nl> + package Test : : Harness : : Iterator : : FH ; <nl> + sub next { <nl> + my $ fh = $ _ [ 0 ] - > { fh } ; <nl> + <nl> + # readline ( ) doesn ' t work so good on 5 . 5 . 4 . <nl> + return scalar < $ fh > ; <nl> + } <nl> + <nl> + <nl> + package Test : : Harness : : Iterator : : ARRAY ; <nl> + sub next { <nl> + my $ self = shift ; <nl> + return $ self - > { array } - > [ $ self - > { idx } + + ] ; <nl> + } <nl> + <nl> + " Steve Peters , Master Of True Value Finding , was here . " ; <nl> new file mode 100644 <nl> index 00000000000 . . df0706ac614 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Harness / Point . pm <nl> <nl> + # - * - Mode : cperl ; cperl - indent - level : 4 - * - <nl> + package Test : : Harness : : Point ; <nl> + <nl> + use strict ; <nl> + use vars qw ( $ VERSION ) ; <nl> + $ VERSION = ' 0 . 01 ' ; <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Harness : : Point - object for tracking a single test point <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + One Test : : Harness : : Point object represents a single test point . <nl> + <nl> + = head1 CONSTRUCTION <nl> + <nl> + = head2 new ( ) <nl> + <nl> + my $ point = new Test : : Harness : : Point ; <nl> + <nl> + Create a test point object . <nl> + <nl> + = cut <nl> + <nl> + sub new { <nl> + my $ class = shift ; <nl> + my $ self = bless { } , $ class ; <nl> + <nl> + return $ self ; <nl> + } <nl> + <nl> + = head1 from_test_line ( $ line ) <nl> + <nl> + Constructor from a TAP test line , or empty return if the test line <nl> + is not a test line . <nl> + <nl> + = cut <nl> + <nl> + sub from_test_line { <nl> + my $ class = shift ; <nl> + my $ line = shift or return ; <nl> + <nl> + # We pulverize the line down into pieces in three parts . <nl> + my ( $ not , $ number , $ extra ) = ( $ line = ~ / ^ ( not ) ? ok \ b ( ? : \ s + ( \ d + ) ) ? \ s * ( . * ) / ) or return ; <nl> + <nl> + my $ point = $ class - > new ; <nl> + $ point - > set_number ( $ number ) ; <nl> + $ point - > set_ok ( ! $ not ) ; <nl> + <nl> + if ( $ extra ) { <nl> + my ( $ description , $ directive ) = split ( / ( ? : [ ^ \ \ ] | ^ ) # / , $ extra , 2 ) ; <nl> + $ description = ~ s / ^ - / / ; # Test : : More puts it in there <nl> + $ point - > set_description ( $ description ) ; <nl> + if ( $ directive ) { <nl> + $ point - > set_directive ( $ directive ) ; <nl> + } <nl> + } # if $ extra <nl> + <nl> + return $ point ; <nl> + } # from_test_line ( ) <nl> + <nl> + = head1 ACCESSORS <nl> + <nl> + Each of the following fields has a getter and setter method . <nl> + <nl> + = over 4 <nl> + <nl> + = item * ok <nl> + <nl> + = item * number <nl> + <nl> + = cut <nl> + <nl> + sub ok { my $ self = shift ; $ self - > { ok } } <nl> + sub set_ok { <nl> + my $ self = shift ; <nl> + my $ ok = shift ; <nl> + $ self - > { ok } = $ ok ? 
1 : 0 ; <nl> + } <nl> + sub pass { <nl> + my $ self = shift ; <nl> + <nl> + return ( $ self - > ok | | $ self - > is_todo | | $ self - > is_skip ) ? 1 : 0 ; <nl> + } <nl> + <nl> + sub number { my $ self = shift ; $ self - > { number } } <nl> + sub set_number { my $ self = shift ; $ self - > { number } = shift } <nl> + <nl> + sub description { my $ self = shift ; $ self - > { description } } <nl> + sub set_description { <nl> + my $ self = shift ; <nl> + $ self - > { description } = shift ; <nl> + $ self - > { name } = $ self - > { description } ; # history <nl> + } <nl> + <nl> + sub directive { my $ self = shift ; $ self - > { directive } } <nl> + sub set_directive { <nl> + my $ self = shift ; <nl> + my $ directive = shift ; <nl> + <nl> + $ directive = ~ s / ^ \ s + / / ; <nl> + $ directive = ~ s / \ s + $ / / ; <nl> + $ self - > { directive } = $ directive ; <nl> + <nl> + my ( $ type , $ reason ) = ( $ directive = ~ / ^ \ s * ( \ S + ) ( ? : \ s + ( . * ) ) ? $ / ) ; <nl> + $ self - > set_directive_type ( $ type ) ; <nl> + $ reason = " " unless defined $ reason ; <nl> + $ self - > { directive_reason } = $ reason ; <nl> + } <nl> + sub set_directive_type { <nl> + my $ self = shift ; <nl> + $ self - > { directive_type } = lc shift ; <nl> + $ self - > { type } = $ self - > { directive_type } ; # History <nl> + } <nl> + sub set_directive_reason { <nl> + my $ self = shift ; <nl> + $ self - > { directive_reason } = shift ; <nl> + } <nl> + sub directive_type { my $ self = shift ; $ self - > { directive_type } } <nl> + sub type { my $ self = shift ; $ self - > { directive_type } } <nl> + sub directive_reason { my $ self = shift ; $ self - > { directive_reason } } <nl> + sub reason { my $ self = shift ; $ self - > { directive_reason } } <nl> + sub is_todo { <nl> + my $ self = shift ; <nl> + my $ type = $ self - > directive_type ; <nl> + return $ type & & ( $ type eq ' todo ' ) ; <nl> + } <nl> + sub is_skip { <nl> + my $ self = shift ; <nl> + my $ type = $ self - > directive_type ; <nl> + return $ type & & ( $ type eq ' skip ' ) ; <nl> + } <nl> + <nl> + sub diagnostics { <nl> + my $ self = shift ; <nl> + return @ { $ self - > { diagnostics } } if wantarray ; <nl> + return join ( " \ n " , @ { $ self - > { diagnostics } } ) ; <nl> + } <nl> + sub add_diagnostic { my $ self = shift ; push @ { $ self - > { diagnostics } } , @ _ } <nl> + <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . f4f4c4eca0d <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Harness / Results . pm <nl> <nl> + # - * - Mode : cperl ; cperl - indent - level : 4 - * - <nl> + package Test : : Harness : : Results ; <nl> + <nl> + use strict ; <nl> + use vars qw ( $ VERSION ) ; <nl> + $ VERSION = ' 0 . 01 ' ; <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Harness : : Results - object for tracking results from a single test file <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + One Test : : Harness : : Results object represents the results from one <nl> + test file getting analyzed . <nl> + <nl> + = head1 CONSTRUCTION <nl> + <nl> + = head2 new ( ) <nl> + <nl> + my $ results = new Test : : Harness : : Results ; <nl> + <nl> + Create a test point object . Typically , however , you ' ll not create <nl> + one yourself , but access a Results object returned to you by <nl> + Test : : Harness : : Results . 
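 <nl> + <nl> + In practice such an object usually comes back from a Test : : Harness : : Straps analysis method rather than from new ( ) . A short sketch ( the test file name is hypothetical ) : <nl> + <nl> + use Test : : Harness : : Straps ; <nl> + <nl> + my $ strap = Test : : Harness : : Straps - > new ; <nl> + my $ results = $ strap - > analyze_file ( ' t / basic . t ' ) ; <nl> + printf " % d / % d ok \ n " , $ results - > ok , $ results - > max ;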
<nl> + <nl> + = cut <nl> + <nl> + sub new { <nl> + my $ class = shift ; <nl> + my $ self = bless { } , $ class ; <nl> + <nl> + return $ self ; <nl> + } <nl> + <nl> + = head1 ACCESSORS <nl> + <nl> + The following data points are defined : <nl> + <nl> + passing true if the whole test is considered a pass <nl> + ( or skipped ) , false if its a failure <nl> + <nl> + exit the exit code of the test run , if from a file <nl> + wait the wait code of the test run , if from a file <nl> + <nl> + max total tests which should have been run <nl> + seen total tests actually seen <nl> + skip_all if the whole test was skipped , this will <nl> + contain the reason . <nl> + <nl> + ok number of tests which passed <nl> + ( including todo and skips ) <nl> + <nl> + todo number of todo tests seen <nl> + bonus number of todo tests which <nl> + unexpectedly passed <nl> + <nl> + skip number of tests skipped <nl> + <nl> + So a successful test should have max = = seen = = ok . <nl> + <nl> + <nl> + There is one final item , the details . <nl> + <nl> + details an array ref reporting the result of <nl> + each test looks like this : <nl> + <nl> + $ results { details } [ $ test_num - 1 ] = <nl> + { ok = > is the test considered ok ? <nl> + actual_ok = > did it literally say ' ok ' ? <nl> + name = > name of the test ( if any ) <nl> + diagnostics = > test diagnostics ( if any ) <nl> + type = > ' skip ' or ' todo ' ( if any ) <nl> + reason = > reason for the above ( if any ) <nl> + } ; <nl> + <nl> + Element 0 of the details is test # 1 . I tried it with element 1 being <nl> + # 1 and 0 being empty , this is less awkward . <nl> + <nl> + <nl> + Each of the following fields has a getter and setter method . <nl> + <nl> + = over 4 <nl> + <nl> + = item * wait <nl> + <nl> + = item * exit <nl> + <nl> + = cut <nl> + <nl> + sub set_wait { my $ self = shift ; $ self - > { wait } = shift } <nl> + sub wait { <nl> + my $ self = shift ; <nl> + return $ self - > { wait } | | 0 ; <nl> + } <nl> + <nl> + sub set_skip_all { my $ self = shift ; $ self - > { skip_all } = shift } <nl> + sub skip_all { <nl> + my $ self = shift ; <nl> + return $ self - > { skip_all } ; <nl> + } <nl> + <nl> + sub inc_max { my $ self = shift ; $ self - > { max } + = ( @ _ ? shift : 1 ) } <nl> + sub max { <nl> + my $ self = shift ; <nl> + return $ self - > { max } | | 0 ; <nl> + } <nl> + <nl> + sub set_passing { my $ self = shift ; $ self - > { passing } = shift } <nl> + sub passing { <nl> + my $ self = shift ; <nl> + return $ self - > { passing } | | 0 ; <nl> + } <nl> + <nl> + sub inc_ok { my $ self = shift ; $ self - > { ok } + = ( @ _ ? 
shift : 1 ) } <nl> + sub ok { <nl> + my $ self = shift ; <nl> + return $ self - > { ok } | | 0 ; <nl> + } <nl> + <nl> + sub set_exit { <nl> + my $ self = shift ; <nl> + if ( $ ^ O eq ' VMS ' ) { <nl> + eval { <nl> + use vmsish q ( status ) ; <nl> + $ self - > { exit } = shift ; # must be in same scope as pragma <nl> + } <nl> + } <nl> + else { <nl> + $ self - > { exit } = shift ; <nl> + } <nl> + } <nl> + sub exit { <nl> + my $ self = shift ; <nl> + return $ self - > { exit } | | 0 ; <nl> + } <nl> + <nl> + sub inc_bonus { my $ self = shift ; $ self - > { bonus } + + } <nl> + sub bonus { <nl> + my $ self = shift ; <nl> + return $ self - > { bonus } | | 0 ; <nl> + } <nl> + <nl> + sub set_skip_reason { my $ self = shift ; $ self - > { skip_reason } = shift } <nl> + sub skip_reason { <nl> + my $ self = shift ; <nl> + return $ self - > { skip_reason } | | 0 ; <nl> + } <nl> + <nl> + sub inc_skip { my $ self = shift ; $ self - > { skip } + + } <nl> + sub skip { <nl> + my $ self = shift ; <nl> + return $ self - > { skip } | | 0 ; <nl> + } <nl> + <nl> + sub inc_todo { my $ self = shift ; $ self - > { todo } + + } <nl> + sub todo { <nl> + my $ self = shift ; <nl> + return $ self - > { todo } | | 0 ; <nl> + } <nl> + <nl> + sub inc_seen { my $ self = shift ; $ self - > { seen } + + } <nl> + sub seen { <nl> + my $ self = shift ; <nl> + return $ self - > { seen } | | 0 ; <nl> + } <nl> + <nl> + sub set_details { <nl> + my $ self = shift ; <nl> + my $ index = shift ; <nl> + my $ details = shift ; <nl> + <nl> + my $ array = ( $ self - > { details } | | = [ ] ) ; <nl> + $ array - > [ $ index - 1 ] = $ details ; <nl> + } <nl> + <nl> + sub details { <nl> + my $ self = shift ; <nl> + return $ self - > { details } | | [ ] ; <nl> + } <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . 3ee529c2a0d <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Harness / Straps . pm <nl> <nl> + # - * - Mode : cperl ; cperl - indent - level : 4 - * - <nl> + package Test : : Harness : : Straps ; <nl> + <nl> + use strict ; <nl> + use vars qw ( $ VERSION ) ; <nl> + $ VERSION = ' 0 . 26_01 ' ; <nl> + <nl> + use Config ; <nl> + use Test : : Harness : : Assert ; <nl> + use Test : : Harness : : Iterator ; <nl> + use Test : : Harness : : Point ; <nl> + use Test : : Harness : : Results ; <nl> + <nl> + # Flags used as return values from our methods . Just for internal <nl> + # clarification . <nl> + my $ YES = ( 1 = = 1 ) ; <nl> + my $ NO = ! $ YES ; <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Harness : : Straps - detailed analysis of test results <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + use Test : : Harness : : Straps ; <nl> + <nl> + my $ strap = Test : : Harness : : Straps - > new ; <nl> + <nl> + # Various ways to interpret a test <nl> + my $ results = $ strap - > analyze ( $ name , \ @ test_output ) ; <nl> + my $ results = $ strap - > analyze_fh ( $ name , $ test_filehandle ) ; <nl> + my $ results = $ strap - > analyze_file ( $ test_file ) ; <nl> + <nl> + # UNIMPLEMENTED <nl> + my % total = $ strap - > total_results ; <nl> + <nl> + # Altering the behavior of the strap UNIMPLEMENTED <nl> + my $ verbose_output = $ strap - > dump_verbose ( ) ; <nl> + $ strap - > dump_verbose_fh ( $ output_filehandle ) ; <nl> + <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + B < THIS IS ALPHA SOFTWARE > in that the interface is subject to change <nl> + in incompatible ways . It is otherwise stable . <nl> + <nl> + Test : : Harness is limited to printing out its results . 
This makes <nl> + analysis of the test results difficult for anything but a human . To <nl> + make it easier for programs to work with test results , we provide <nl> + Test : : Harness : : Straps . Instead of printing the results , straps <nl> + provide them as raw data . You can also configure how the tests are to <nl> + be run . <nl> + <nl> + The interface is currently incomplete . I < Please > contact the author <nl> + if you ' d like a feature added or something change or just have <nl> + comments . <nl> + <nl> + = head1 CONSTRUCTION <nl> + <nl> + = head2 new ( ) <nl> + <nl> + my $ strap = Test : : Harness : : Straps - > new ; <nl> + <nl> + Initialize a new strap . <nl> + <nl> + = cut <nl> + <nl> + sub new { <nl> + my $ class = shift ; <nl> + my $ self = bless { } , $ class ; <nl> + <nl> + $ self - > _init ; <nl> + <nl> + return $ self ; <nl> + } <nl> + <nl> + = for private $ strap - > _init <nl> + <nl> + $ strap - > _init ; <nl> + <nl> + Initialize the internal state of a strap to make it ready for parsing . <nl> + <nl> + = cut <nl> + <nl> + sub _init { <nl> + my ( $ self ) = shift ; <nl> + <nl> + $ self - > { _is_vms } = ( $ ^ O eq ' VMS ' ) ; <nl> + $ self - > { _is_win32 } = ( $ ^ O = ~ / ^ ( MS ) ? Win32 $ / ) ; <nl> + $ self - > { _is_macos } = ( $ ^ O eq ' MacOS ' ) ; <nl> + } <nl> + <nl> + = head1 ANALYSIS <nl> + <nl> + = head2 $ strap - > analyze ( $ name , \ @ output_lines ) <nl> + <nl> + my $ results = $ strap - > analyze ( $ name , \ @ test_output ) ; <nl> + <nl> + Analyzes the output of a single test , assigning it the given C < $ name > <nl> + for use in the total report . Returns the C < $ results > of the test . <nl> + See L < Results > . <nl> + <nl> + C < @ test_output > should be the raw output from the test , including <nl> + newlines . <nl> + <nl> + = cut <nl> + <nl> + sub analyze { <nl> + my ( $ self , $ name , $ test_output ) = @ _ ; <nl> + <nl> + my $ it = Test : : Harness : : Iterator - > new ( $ test_output ) ; <nl> + return $ self - > _analyze_iterator ( $ name , $ it ) ; <nl> + } <nl> + <nl> + <nl> + sub _analyze_iterator { <nl> + my ( $ self , $ name , $ it ) = @ _ ; <nl> + <nl> + $ self - > _reset_file_state ; <nl> + $ self - > { file } = $ name ; <nl> + <nl> + my $ results = Test : : Harness : : Results - > new ; <nl> + <nl> + # Set them up here so callbacks can have them . <nl> + $ self - > { totals } { $ name } = $ results ; <nl> + while ( defined ( my $ line = $ it - > next ) ) { <nl> + $ self - > _analyze_line ( $ line , $ results ) ; <nl> + last if $ self - > { saw_bailout } ; <nl> + } <nl> + <nl> + $ results - > set_skip_all ( $ self - > { skip_all } ) if defined $ self - > { skip_all } ; <nl> + <nl> + my $ passed = <nl> + ( ( $ results - > max = = 0 ) & & defined $ results - > skip_all ) | | <nl> + ( $ results - > max & & <nl> + $ results - > seen & & <nl> + $ results - > max = = $ results - > seen & & <nl> + $ results - > max = = $ results - > ok ) ; <nl> + <nl> + $ results - > set_passing ( $ passed ? 
1 : 0 ) ; <nl> + <nl> + return $ results ; <nl> + } <nl> + <nl> + <nl> + sub _analyze_line { <nl> + my $ self = shift ; <nl> + my $ line = shift ; <nl> + my $ results = shift ; <nl> + <nl> + $ self - > { line } + + ; <nl> + <nl> + my $ linetype ; <nl> + my $ point = Test : : Harness : : Point - > from_test_line ( $ line ) ; <nl> + if ( $ point ) { <nl> + $ linetype = ' test ' ; <nl> + <nl> + $ results - > inc_seen ; <nl> + $ point - > set_number ( $ self - > { ' next ' } ) unless $ point - > number ; <nl> + <nl> + # sometimes the ' not ' and the ' ok ' are on different lines , <nl> + # happens often on VMS if you do : <nl> + # print " not " unless $ test ; <nl> + # print " ok $ num \ n " ; <nl> + if ( $ self - > { lone_not_line } & & ( $ self - > { lone_not_line } = = $ self - > { line } - 1 ) ) { <nl> + $ point - > set_ok ( 0 ) ; <nl> + } <nl> + <nl> + if ( $ self - > { todo } { $ point - > number } ) { <nl> + $ point - > set_directive_type ( ' todo ' ) ; <nl> + } <nl> + <nl> + if ( $ point - > is_todo ) { <nl> + $ results - > inc_todo ; <nl> + $ results - > inc_bonus if $ point - > ok ; <nl> + } <nl> + elsif ( $ point - > is_skip ) { <nl> + $ results - > inc_skip ; <nl> + } <nl> + <nl> + $ results - > inc_ok if $ point - > pass ; <nl> + <nl> + if ( ( $ point - > number > 100_000 ) & & ( $ point - > number > ( $ self - > { max } | | 100_000 ) ) ) { <nl> + if ( ! $ self - > { too_many_tests } + + ) { <nl> + warn " Enormous test number seen [ test " , $ point - > number , " ] \ n " ; <nl> + warn " Can ' t detailize , too big . \ n " ; <nl> + } <nl> + } <nl> + else { <nl> + my $ details = { <nl> + ok = > $ point - > pass , <nl> + actual_ok = > $ point - > ok , <nl> + name = > _def_or_blank ( $ point - > description ) , <nl> + type = > _def_or_blank ( $ point - > directive_type ) , <nl> + reason = > _def_or_blank ( $ point - > directive_reason ) , <nl> + } ; <nl> + <nl> + assert ( defined ( $ details - > { ok } ) & & defined ( $ details - > { actual_ok } ) ) ; <nl> + $ results - > set_details ( $ point - > number , $ details ) ; <nl> + } <nl> + } # test point <nl> + elsif ( $ line = ~ / ^ not \ s + $ / ) { <nl> + $ linetype = ' other ' ; <nl> + # Sometimes the " not " and " ok " will be on separate lines on VMS . <nl> + # We catch this and remember we saw it . <nl> + $ self - > { lone_not_line } = $ self - > { line } ; <nl> + } <nl> + elsif ( $ self - > _is_header ( $ line ) ) { <nl> + $ linetype = ' header ' ; <nl> + <nl> + $ self - > { saw_header } + + ; <nl> + <nl> + $ results - > inc_max ( $ self - > { max } ) ; <nl> + } <nl> + elsif ( $ self - > _is_bail_out ( $ line , \ $ self - > { bailout_reason } ) ) { <nl> + $ linetype = ' bailout ' ; <nl> + $ self - > { saw_bailout } = 1 ; <nl> + } <nl> + elsif ( my $ diagnostics = $ self - > _is_diagnostic_line ( $ line ) ) { <nl> + $ linetype = ' other ' ; <nl> + # XXX We can throw this away , really . <nl> + my $ test = $ results - > details - > [ - 1 ] ; <nl> + $ test - > { diagnostics } | | = ' ' ; <nl> + $ test - > { diagnostics } . 
= $ diagnostics ; <nl> + } <nl> + else { <nl> + $ linetype = ' other ' ; <nl> + } <nl> + <nl> + $ self - > callback - > ( $ self , $ line , $ linetype , $ results ) if $ self - > callback ; <nl> + <nl> + $ self - > { ' next ' } = $ point - > number + 1 if $ point ; <nl> + } # _analyze_line <nl> + <nl> + <nl> + sub _is_diagnostic_line { <nl> + my ( $ self , $ line ) = @ _ ; <nl> + return if index ( $ line , ' # Looks like you failed ' ) = = 0 ; <nl> + $ line = ~ s / ^ # \ s / / ; <nl> + return $ line ; <nl> + } <nl> + <nl> + = for private $ strap - > analyze_fh ( $ name , $ test_filehandle ) <nl> + <nl> + my $ results = $ strap - > analyze_fh ( $ name , $ test_filehandle ) ; <nl> + <nl> + Like C < analyze > , but it reads from the given filehandle . <nl> + <nl> + = cut <nl> + <nl> + sub analyze_fh { <nl> + my ( $ self , $ name , $ fh ) = @ _ ; <nl> + <nl> + my $ it = Test : : Harness : : Iterator - > new ( $ fh ) ; <nl> + return $ self - > _analyze_iterator ( $ name , $ it ) ; <nl> + } <nl> + <nl> + = head2 $ strap - > analyze_file ( $ test_file ) <nl> + <nl> + my $ results = $ strap - > analyze_file ( $ test_file ) ; <nl> + <nl> + Like C < analyze > , but it runs the given C < $ test_file > and parses its <nl> + results . It will also use that name for the total report . <nl> + <nl> + = cut <nl> + <nl> + sub analyze_file { <nl> + my ( $ self , $ file ) = @ _ ; <nl> + <nl> + unless ( - e $ file ) { <nl> + $ self - > { error } = " $ file does not exist " ; <nl> + return ; <nl> + } <nl> + <nl> + unless ( - r $ file ) { <nl> + $ self - > { error } = " $ file is not readable " ; <nl> + return ; <nl> + } <nl> + <nl> + local $ ENV { PERL5LIB } = $ self - > _INC2PERL5LIB ; <nl> + if ( $ Test : : Harness : : Debug ) { <nl> + local $ ^ W = 0 ; # ignore undef warnings <nl> + print " # PERL5LIB = $ ENV { PERL5LIB } \ n " ; <nl> + } <nl> + <nl> + # * sigh * this breaks under taint , but open - | is unportable . <nl> + my $ line = $ self - > _command_line ( $ file ) ; <nl> + <nl> + unless ( open ( FILE , " $ line | " ) ) { <nl> + print " can ' t run $ file . $ ! \ n " ; <nl> + return ; <nl> + } <nl> + <nl> + my $ results = $ self - > analyze_fh ( $ file , \ * FILE ) ; <nl> + my $ exit = close FILE ; <nl> + <nl> + $ results - > set_wait ( $ ? ) ; <nl> + if ( $ ? & & $ self - > { _is_vms } ) { <nl> + $ results - > set_exit ( $ ? ) ; <nl> + } <nl> + else { <nl> + $ results - > set_exit ( _wait2exit ( $ ? ) ) ; <nl> + } <nl> + $ results - > set_passing ( 0 ) unless $ ? = = 0 ; <nl> + <nl> + $ self - > _restore_PERL5LIB ( ) ; <nl> + <nl> + return $ results ; <nl> + } <nl> + <nl> + <nl> + eval { require POSIX ; & POSIX : : WEXITSTATUS ( 0 ) } ; <nl> + if ( $ @ ) { <nl> + * _wait2exit = sub { $ _ [ 0 ] > > 8 } ; <nl> + } <nl> + else { <nl> + * _wait2exit = sub { POSIX : : WEXITSTATUS ( $ _ [ 0 ] ) } <nl> + } <nl> + <nl> + = for private $ strap - > _command_line ( $ file ) <nl> + <nl> + Returns the full command line that will be run to test I < $ file > . <nl> + <nl> + = cut <nl> + <nl> + sub _command_line { <nl> + my $ self = shift ; <nl> + my $ file = shift ; <nl> + <nl> + my $ command = $ self - > _command ( ) ; <nl> + my $ switches = $ self - > _switches ( $ file ) ; <nl> + <nl> + $ file = qq [ " $ file " ] if ( $ file = ~ / \ s / ) & & ( $ file ! ~ / ^ " . * " $ / ) ; <nl> + my $ line = " $ command $ switches $ file " ; <nl> + <nl> + return $ line ; <nl> + } <nl> + <nl> + <nl> + = for private $ strap - > _command ( ) <nl> + <nl> + Returns the command that runs the test . 
Combine this with C < _switches ( ) > <nl> + to build a command line . <nl> + <nl> + Typically this is C < $ ^ X > , but you can set C < $ ENV { HARNESS_PERL } > <nl> + to use a different Perl than what you ' re running the harness under . <nl> + This might be to run a threaded Perl , for example . <nl> + <nl> + You can also overload this method if you ' ve built your own strap subclass , <nl> + such as a PHP interpreter for a PHP - based strap . <nl> + <nl> + = cut <nl> + <nl> + sub _command { <nl> + my $ self = shift ; <nl> + <nl> + return $ ENV { HARNESS_PERL } if defined $ ENV { HARNESS_PERL } ; <nl> + # return qq [ " $ ^ X " ] if $ self - > { _is_win32 } & & ( $ ^ X = ~ / [ ^ \ w \ . \ / \ \ ] / ) ; <nl> + return qq [ " $ ^ X " ] if $ ^ X = ~ / \ s / and $ ^ X ! ~ / ^ [ " ' ] / ; <nl> + return $ ^ X ; <nl> + } <nl> + <nl> + <nl> + = for private $ strap - > _switches ( $ file ) <nl> + <nl> + Formats and returns the switches necessary to run the test . <nl> + <nl> + = cut <nl> + <nl> + sub _switches { <nl> + my ( $ self , $ file ) = @ _ ; <nl> + <nl> + my @ existing_switches = $ self - > _cleaned_switches ( $ Test : : Harness : : Switches , $ ENV { HARNESS_PERL_SWITCHES } ) ; <nl> + my @ derived_switches ; <nl> + <nl> + local * TEST ; <nl> + open ( TEST , $ file ) or print " can ' t open $ file . $ ! \ n " ; <nl> + my $ shebang = < TEST > ; <nl> + close ( TEST ) or print " can ' t close $ file . $ ! \ n " ; <nl> + <nl> + my $ taint = ( $ shebang = ~ / ^ # ! . * \ bperl . * \ s - \ w * ( [ Tt ] + ) / ) ; <nl> + push ( @ derived_switches , " - $ 1 " ) if $ taint ; <nl> + <nl> + # When taint mode is on , PERL5LIB is ignored . So we need to put <nl> + # all that on the command line as - Is . <nl> + # MacPerl ' s putenv is broken , so it will not see PERL5LIB , tainted or not . <nl> + if ( $ taint | | $ self - > { _is_macos } ) { <nl> + my @ inc = $ self - > _filtered_INC ; <nl> + push @ derived_switches , map { " - I $ _ " } @ inc ; <nl> + } <nl> + <nl> + # Quote the argument if there ' s any whitespace in it , or if <nl> + # we ' re VMS , since VMS requires all parms quoted . Also , don ' t quote <nl> + # it if it ' s already quoted . <nl> + for ( @ derived_switches ) { <nl> + $ _ = qq [ " $ _ " ] if ( ( / \ s / | | $ self - > { _is_vms } ) & & ! / ^ " . * " $ / ) ; <nl> + } <nl> + return join ( " " , @ existing_switches , @ derived_switches ) ; <nl> + } <nl> + <nl> + = for private $ strap - > _cleaned_switches ( @ switches_from_user ) <nl> + <nl> + Returns only defined , non - blank , trimmed switches from the parms passed . <nl> + <nl> + = cut <nl> + <nl> + sub _cleaned_switches { <nl> + my $ self = shift ; <nl> + <nl> + local $ _ ; <nl> + <nl> + my @ switches ; <nl> + for ( @ _ ) { <nl> + my $ switch = $ _ ; <nl> + next unless defined $ switch ; <nl> + $ switch = ~ s / ^ \ s + / / ; <nl> + $ switch = ~ s / \ s + $ / / ; <nl> + push ( @ switches , $ switch ) if $ switch ne " " ; <nl> + } <nl> + <nl> + return @ switches ; <nl> + } <nl> + <nl> + = for private $ strap - > _INC2PERL5LIB <nl> + <nl> + local $ ENV { PERL5LIB } = $ self - > _INC2PERL5LIB ; <nl> + <nl> + Takes the current value of C < @ INC > and turns it into something suitable <nl> + for putting onto C < PERL5LIB > . 
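 <nl> + <nl> + The transformation is essentially the following ( a simplified sketch that ignores the C < @ INC > filtering described below ) : <nl> + <nl> + use Config ; <nl> + my $ perl5lib = join $ Config { path_sep } , @ INC ;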
<nl> + <nl> + = cut <nl> + <nl> + sub _INC2PERL5LIB { <nl> + my ( $ self ) = shift ; <nl> + <nl> + $ self - > { _old5lib } = $ ENV { PERL5LIB } ; <nl> + <nl> + return join $ Config { path_sep } , $ self - > _filtered_INC ; <nl> + } <nl> + <nl> + = for private $ strap - > _filtered_INC ( ) <nl> + <nl> + my @ filtered_inc = $ self - > _filtered_INC ; <nl> + <nl> + Shortens C < @ INC > by removing redundant and unnecessary entries . <nl> + Necessary for OSes with limited command line lengths , like VMS . <nl> + <nl> + = cut <nl> + <nl> + sub _filtered_INC { <nl> + my ( $ self , @ inc ) = @ _ ; <nl> + @ inc = @ INC unless @ inc ; <nl> + <nl> + if ( $ self - > { _is_vms } ) { <nl> + # VMS has a 255 - byte limit on the length of % ENV entries , so <nl> + # toss the ones that involve perl_root , the install location <nl> + @ inc = grep ! / perl_root / i , @ inc ; <nl> + <nl> + } <nl> + elsif ( $ self - > { _is_win32 } ) { <nl> + # Lose any trailing backslashes in the Win32 paths <nl> + s / [ \ \ \ / + ] $ / / foreach @ inc ; <nl> + } <nl> + <nl> + my % seen ; <nl> + $ seen { $ _ } + + foreach $ self - > _default_inc ( ) ; <nl> + @ inc = grep ! $ seen { $ _ } + + , @ inc ; <nl> + <nl> + return @ inc ; <nl> + } <nl> + <nl> + <nl> + { # Without caching , _default_inc ( ) takes a huge amount of time <nl> + my % cache ; <nl> + sub _default_inc { <nl> + my $ self = shift ; <nl> + my $ perl = $ self - > _command ; <nl> + $ cache { $ perl } | | = [ do { <nl> + local $ ENV { PERL5LIB } ; <nl> + my @ inc = ` $ perl - le " print join qq [ \ \ n ] , \ @ INC " ` ; <nl> + chomp @ inc ; <nl> + } ] ; <nl> + return @ { $ cache { $ perl } } ; <nl> + } <nl> + } <nl> + <nl> + <nl> + = for private $ strap - > _restore_PERL5LIB ( ) <nl> + <nl> + $ self - > _restore_PERL5LIB ; <nl> + <nl> + This restores the original value of the C < PERL5LIB > environment variable . <nl> + Necessary on VMS , otherwise a no - op . <nl> + <nl> + = cut <nl> + <nl> + sub _restore_PERL5LIB { <nl> + my ( $ self ) = shift ; <nl> + <nl> + return unless $ self - > { _is_vms } ; <nl> + <nl> + if ( defined $ self - > { _old5lib } ) { <nl> + $ ENV { PERL5LIB } = $ self - > { _old5lib } ; <nl> + } <nl> + } <nl> + <nl> + = head1 Parsing <nl> + <nl> + Methods for identifying what sort of line you ' re looking at . <nl> + <nl> + = for private _is_diagnostic <nl> + <nl> + my $ is_diagnostic = $ strap - > _is_diagnostic ( $ line , \ $ comment ) ; <nl> + <nl> + Checks if the given line is a comment . If so , it will place it into <nl> + C < $ comment > ( sans # ) . <nl> + <nl> + = cut <nl> + <nl> + sub _is_diagnostic { <nl> + my ( $ self , $ line , $ comment ) = @ _ ; <nl> + <nl> + if ( $ line = ~ / ^ \ s * \ # ( . * ) / ) { <nl> + $ $ comment = $ 1 ; <nl> + return $ YES ; <nl> + } <nl> + else { <nl> + return $ NO ; <nl> + } <nl> + } <nl> + <nl> + = for private _is_header <nl> + <nl> + my $ is_header = $ strap - > _is_header ( $ line ) ; <nl> + <nl> + Checks if the given line is a header ( 1 . . M ) line . If so , it places how <nl> + many tests there will be in C < < $ strap - > { max } > > , a list of which tests <nl> + are todo in C < < $ strap - > { todo } > > and if the whole test was skipped <nl> + C < < $ strap - > { skip_all } > > contains the reason . <nl> + <nl> + = cut <nl> + <nl> + # Regex for parsing a header . Will be run with / x <nl> + my $ Extra_Header_Re = < < ' REGEX ' ; <nl> + ^ <nl> + ( ? : \ s + todo \ s + ( [ \ d \ t ] + ) ) ? # optional todo set <nl> + ( ? : \ s * \ # \ s * ( [ \ w : ] + \ s ? ) ( . * ) ) ? 
# optional skip with optional reason <nl> + REGEX <nl> + <nl> + sub _is_header { <nl> + my ( $ self , $ line ) = @ _ ; <nl> + <nl> + if ( my ( $ max , $ extra ) = $ line = ~ / ^ 1 \ . \ . ( \ d + ) ( . * ) / ) { <nl> + $ self - > { max } = $ max ; <nl> + assert ( $ self - > { max } > = 0 , ' Max # of tests looks right ' ) ; <nl> + <nl> + if ( defined $ extra ) { <nl> + my ( $ todo , $ skip , $ reason ) = $ extra = ~ / $ Extra_Header_Re / xo ; <nl> + <nl> + $ self - > { todo } = { map { $ _ = > 1 } split / \ s + / , $ todo } if $ todo ; <nl> + <nl> + if ( $ self - > { max } = = 0 ) { <nl> + $ reason = ' ' unless defined $ skip and $ skip = ~ / ^ Skip / i ; <nl> + } <nl> + <nl> + $ self - > { skip_all } = $ reason ; <nl> + } <nl> + <nl> + return $ YES ; <nl> + } <nl> + else { <nl> + return $ NO ; <nl> + } <nl> + } <nl> + <nl> + = for private _is_bail_out <nl> + <nl> + my $ is_bail_out = $ strap - > _is_bail_out ( $ line , \ $ reason ) ; <nl> + <nl> + Checks if the line is a " Bail out ! " . Places the reason for bailing <nl> + ( if any ) in $ reason . <nl> + <nl> + = cut <nl> + <nl> + sub _is_bail_out { <nl> + my ( $ self , $ line , $ reason ) = @ _ ; <nl> + <nl> + if ( $ line = ~ / ^ Bail out ! \ s * ( . * ) / i ) { <nl> + $ $ reason = $ 1 if $ 1 ; <nl> + return $ YES ; <nl> + } <nl> + else { <nl> + return $ NO ; <nl> + } <nl> + } <nl> + <nl> + = for private _reset_file_state <nl> + <nl> + $ strap - > _reset_file_state ; <nl> + <nl> + Resets things like C < < $ strap - > { max } > > , C < < $ strap - > { skip_all } > > , <nl> + etc . so it ' s ready to parse the next file . <nl> + <nl> + = cut <nl> + <nl> + sub _reset_file_state { <nl> + my ( $ self ) = shift ; <nl> + <nl> + delete @ { $ self } { qw ( max skip_all todo too_many_tests ) } ; <nl> + $ self - > { line } = 0 ; <nl> + $ self - > { saw_header } = 0 ; <nl> + $ self - > { saw_bailout } = 0 ; <nl> + $ self - > { lone_not_line } = 0 ; <nl> + $ self - > { bailout_reason } = ' ' ; <nl> + $ self - > { ' next ' } = 1 ; <nl> + } <nl> + <nl> + = head1 EXAMPLES <nl> + <nl> + See F < examples / mini_harness . plx > for an example of use . <nl> + <nl> + = head1 AUTHOR <nl> + <nl> + Michael G Schwern C < < < schwern at pobox . com > > > , currently maintained by <nl> + Andy Lester C < < < andy at petdance . com > > > . <nl> + <nl> + = head1 SEE ALSO <nl> + <nl> + L < Test : : Harness > <nl> + <nl> + = cut <nl> + <nl> + sub _def_or_blank { <nl> + return $ _ [ 0 ] if defined $ _ [ 0 ] ; <nl> + return " " ; <nl> + } <nl> + <nl> + sub set_callback { <nl> + my $ self = shift ; <nl> + $ self - > { callback } = shift ; <nl> + } <nl> + <nl> + sub callback { <nl> + my $ self = shift ; <nl> + return $ self - > { callback } ; <nl> + } <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . deb506dbebe <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Harness / TAP . pod <nl> <nl> + = head1 NAME <nl> + <nl> + Test : : Harness : : TAP - Documentation for the TAP format <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + TAP , the Test Anything Protocol , is Perl ' s simple text - based interface <nl> + between testing modules such as Test : : More and the test harness <nl> + Test : : Harness . <nl> + <nl> + = head1 TODO <nl> + <nl> + Exit code of the process . <nl> + <nl> + = head1 THE TAP FORMAT <nl> + <nl> + TAP ' s general format is : <nl> + <nl> + 1 . . N <nl> + ok 1 Description # Directive <nl> + # Diagnostic <nl> + . . . . <nl> + ok 47 Description <nl> + ok 48 Description <nl> + more tests . . . . 
<nl> + <nl> + For example , a test file ' s output might look like : <nl> + <nl> + 1 . . 4 <nl> + ok 1 - Input file opened <nl> + not ok 2 - First line of the input valid <nl> + ok 3 - Read the rest of the file <nl> + not ok 4 - Summarized correctly # TODO Not written yet <nl> + <nl> + = head1 HARNESS BEHAVIOR <nl> + <nl> + In this document , the " harness " is any program analyzing TAP output . <nl> + Typically this will be Perl ' s I < prove > program , or the underlying <nl> + C < Test : : Harness : : runtests > subroutine . <nl> + <nl> + A harness must only read TAP output from standard output and not <nl> + from standard error . Lines written to standard output matching <nl> + C < / ^ ( not ) ? ok \ b / > must be interpreted as test lines . All other <nl> + lines must not be considered test output . <nl> + <nl> + = head1 TESTS LINES AND THE PLAN <nl> + <nl> + = head2 The plan <nl> + <nl> + The plan tells how many tests will be run , or how many tests have <nl> + run . It ' s a check that the test file hasn ' t stopped prematurely . <nl> + It must appear once , whether at the beginning or end of the output . <nl> + <nl> + The plan is usually the first line of TAP output and it specifies how <nl> + many test points are to follow . For example , <nl> + <nl> + 1 . . 10 <nl> + <nl> + means you plan on running 10 tests . This is a safeguard in case your test <nl> + file dies silently in the middle of its run . The plan is optional but if <nl> + there is a plan before the test points it must be the first non - diagnostic <nl> + line output by the test file . <nl> + <nl> + In certain instances a test file may not know how many test points <nl> + it will ultimately be running . In this case the plan can be the last <nl> + non - diagnostic line in the output . <nl> + <nl> + The plan cannot appear in the middle of the output , nor can it appear more <nl> + than once . <nl> + <nl> + = head2 The test line <nl> + <nl> + The core of TAP is the test line . A test file prints one test line test <nl> + point executed . There must be at least one test line in TAP output . Each <nl> + test line comprises the following elements : <nl> + <nl> + = over 4 <nl> + <nl> + = item * C < ok > or C < not ok > <nl> + <nl> + This tells whether the test point passed or failed . It must be <nl> + at the beginning of the line . C < / ^ not ok / > indicates a failed test <nl> + point . C < / ^ ok / > is a successful test point . This is the only mandatory <nl> + part of the line . <nl> + <nl> + Note that unlike the Directives below , C < ok > and C < not ok > are <nl> + case - sensitive . <nl> + <nl> + = item * Test number <nl> + <nl> + TAP expects the C < ok > or C < not ok > to be followed by a test point <nl> + number . If there is no number the harness must maintain <nl> + its own counter until the script supplies test numbers again . So <nl> + the following test output <nl> + <nl> + 1 . . 6 <nl> + not ok <nl> + ok <nl> + not ok <nl> + ok <nl> + ok <nl> + <nl> + has five tests . The sixth is missing . Test : : Harness will generate <nl> + <nl> + FAILED tests 1 , 3 , 6 <nl> + Failed 3 / 6 tests , 50 . 00 % okay <nl> + <nl> + = item * Description <nl> + <nl> + Any text after the test number but before a C < # > is the description of <nl> + the test point . <nl> + <nl> + ok 42 this is the description of the test <nl> + <nl> + Descriptions should not begin with a digit so that they are not confused <nl> + with the test point number . <nl> + <nl> + The harness may do whatever it wants with the description . 
<nl> + <nl> + = item * Directive <nl> + <nl> + The test point may include a directive , following a hash on the <nl> + test line . There are currently two directives allowed : C < TODO > and <nl> + C < SKIP > . These are discussed below . <nl> + <nl> + = back <nl> + <nl> + To summarize : <nl> + <nl> + = over 4 <nl> + <nl> + = item * ok / not ok ( required ) <nl> + <nl> + = item * Test number ( recommended ) <nl> + <nl> + = item * Description ( recommended ) <nl> + <nl> + = item * Directive ( only when necessary ) <nl> + <nl> + = back <nl> + <nl> + = head1 DIRECTIVES <nl> + <nl> + Directives are special notes that follow a C < # > on the test line . <nl> + Only two are currently defined : C < TODO > and C < SKIP > . Note that <nl> + these two keywords are not case - sensitive . <nl> + <nl> + = head2 TODO tests <nl> + <nl> + If the directive starts with C < # TODO > , the test is counted as a <nl> + todo test , and the text after C < TODO > is the explanation . <nl> + <nl> + not ok 13 # TODO bend space and time <nl> + <nl> + Note that if the TODO has an explanation it must be separated from <nl> + C < TODO > by a space . <nl> + <nl> + These tests represent a feature to be implemented or a bug to be fixed <nl> + and act as something of an executable " things to do " list . They are <nl> + B < not > expected to succeed . Should a todo test point begin succeeding , <nl> + the harness should report it as a bonus . This indicates that whatever <nl> + you were supposed to do has been done and you should promote this to a <nl> + normal test point . <nl> + <nl> + = head2 Skipping tests <nl> + <nl> + If the directive starts with C < # SKIP > , the test is counted as having <nl> + been skipped . If the whole test file succeeds , the count of skipped <nl> + tests is included in the generated output . The harness should report <nl> + the text after C < # SKIP \ S * \ s + > as a reason for skipping . <nl> + <nl> + ok 23 # skip Insufficient flogiston pressure . <nl> + <nl> + Similarly , one can include an explanation in a plan line , <nl> + emitted if the test file is skipped completely : <nl> + <nl> + 1 . . 0 # Skipped : WWW : : Mechanize not installed <nl> + <nl> + = head1 OTHER LINES <nl> + <nl> + = head2 Bail out ! <nl> + <nl> + As an emergency measure a test script can decide that further tests <nl> + are useless ( e . g . missing dependencies ) and testing should stop <nl> + immediately . In that case the test script prints the magic words <nl> + <nl> + Bail out ! <nl> + <nl> + to standard output . Any message after these words must be displayed <nl> + by the interpreter as the reason why testing must be stopped , as <nl> + in <nl> + <nl> + Bail out ! MySQL is not running . <nl> + <nl> + = head2 Diagnostics <nl> + <nl> + Additional information may be put into the testing output on separate <nl> + lines . Diagnostic lines should begin with a C < # > , which the harness must <nl> + ignore , at least as far as analyzing the test results . The harness is <nl> + free , however , to display the diagnostics . Typically diagnostics are <nl> + used to provide information about the environment in which test file is <nl> + running , or to delineate a group of tests . <nl> + <nl> + . . . <nl> + ok 18 - Closed database connection <nl> + # End of database section . <nl> + # This starts the network part of the test . <nl> + # Daemon started on port 2112 <nl> + ok 19 - Opened socket <nl> + . . . 
<nl> + ok 47 - Closed socket <nl> + # End of network tests <nl> + <nl> + = head2 Anything else <nl> + <nl> + Any output line that is not a plan , a test line or a diagnostic is <nl> + incorrect . How a harness handles the incorrect line is undefined . <nl> + Test : : Harness silently ignores incorrect lines , but will become more <nl> + stringent in the future . <nl> + <nl> + = head1 EXAMPLES <nl> + <nl> + All names , places , and events depicted in any example are wholly <nl> + fictitious and bear no resemblance to , connection with , or relation to any <nl> + real entity . Any such similarity is purely coincidental , unintentional , <nl> + and unintended . <nl> + <nl> + = head2 Common with explanation <nl> + <nl> + The following TAP listing declares that six tests follow as well as <nl> + provides handy feedback as to what the test is about to do . All six <nl> + tests pass . <nl> + <nl> + 1 . . 6 <nl> + # <nl> + # Create a new Board and Tile , then place <nl> + # the Tile onto the board . <nl> + # <nl> + ok 1 - The object isa Board <nl> + ok 2 - Board size is zero <nl> + ok 3 - The object isa Tile <nl> + ok 4 - Get possible places to put the Tile <nl> + ok 5 - Placing the tile produces no error <nl> + ok 6 - Board size is 1 <nl> + <nl> + = head2 Unknown amount and failures <nl> + <nl> + This hypothetical test program ensures that a handful of servers are <nl> + online and network - accessible . Because it retrieves the hypothetical <nl> + servers from a database , it doesn ' t know exactly how many servers it <nl> + will need to ping . Thus , the test count is declared at the bottom after <nl> + all the test points have run . Also , two of the tests fail . <nl> + <nl> + ok 1 - retrieving servers from the database <nl> + # need to ping 6 servers <nl> + ok 2 - pinged diamond <nl> + ok 3 - pinged ruby <nl> + not ok 4 - pinged saphire <nl> + ok 5 - pinged onyx <nl> + not ok 6 - pinged quartz <nl> + ok 7 - pinged gold <nl> + 1 . . 7 <nl> + <nl> + = head2 Giving up <nl> + <nl> + This listing reports that a pile of tests are going to be run . However , <nl> + the first test fails , reportedly because a connection to the database <nl> + could not be established . The program decided that continuing was <nl> + pointless and exited . <nl> + <nl> + 1 . . 573 <nl> + not ok 1 - database handle <nl> + Bail out ! Couldn ' t connect to database . <nl> + <nl> + = head2 Skipping a few <nl> + <nl> + The following listing plans on running 5 tests . However , our program <nl> + decided to not run tests 2 thru 5 at all . To properly report this , <nl> + the tests are marked as being skipped . <nl> + <nl> + 1 . . 5 <nl> + ok 1 - approved operating system <nl> + # $ ^ 0 is solaris <nl> + ok 2 - # SKIP no / sys directory <nl> + ok 3 - # SKIP no / sys directory <nl> + ok 4 - # SKIP no / sys directory <nl> + ok 5 - # SKIP no / sys directory <nl> + <nl> + = head2 Skipping everything <nl> + <nl> + This listing shows that the entire listing is a skip . No tests were run . <nl> + <nl> + 1 . . 0 # skip because English - to - French translator isn ' t installed <nl> + <nl> + = head2 Got spare tuits ? <nl> + <nl> + The following example reports that four tests are run and the last two <nl> + tests failed . However , because the failing tests are marked as things <nl> + to do later , they are considered successes . Thus , a harness should report <nl> + this entire listing as a success . <nl> + <nl> + 1 . . 
4 <nl> + ok 1 - Creating test program <nl> + ok 2 - Test program runs , no error <nl> + not ok 3 - infinite loop # TODO halting problem unsolved <nl> + not ok 4 - infinite loop 2 # TODO halting problem unsolved <nl> + <nl> + = head2 Creative liberties <nl> + <nl> + This listing shows an alternate output where the test numbers aren ' t <nl> + provided . The test also reports the state of a ficticious board game in <nl> + diagnostic form . Finally , the test count is reported at the end . <nl> + <nl> + ok - created Board <nl> + ok <nl> + ok <nl> + ok <nl> + ok <nl> + ok <nl> + ok <nl> + ok <nl> + # + mmmmmm + mmmmmm + mmmmmm + mmmmmm + <nl> + # | | 16G | | 05C | <nl> + # | | G N C | | C C G | <nl> + # | | G | | C + | <nl> + # + mmmmmm + mmmmmm + mmmmmm + mmmmmm + <nl> + # | 10C | 01G | | 03C | <nl> + # | R N G | G A G | | C C C | <nl> + # | R | G | | C + | <nl> + # + mmmmmm + mmmmmm + mmmmmm + mmmmmm + <nl> + # | | 01G | 17C | 00C | <nl> + # | | G A G | G N R | R N R | <nl> + # | | G | R | G | <nl> + # + mmmmmm + mmmmmm + mmmmmm + mmmmmm + <nl> + ok - board has 7 tiles + starter tile <nl> + 1 . . 9 <nl> + <nl> + = head1 Non - Perl TAP <nl> + <nl> + In Perl , we use Test : : Simple and Test : : More to generate TAP output . <nl> + Other languages have solutions that generate TAP , so that they can take <nl> + advantage of Test : : Harness . <nl> + <nl> + The following sections are provided by their maintainers , and may not <nl> + be up - to - date . <nl> + <nl> + = head2 C / C + + <nl> + <nl> + libtap makes it easy to write test programs in C that produce <nl> + TAP - compatible output . Modeled on the Test : : More API , libtap contains <nl> + all the functions you need to : <nl> + <nl> + = over 4 <nl> + <nl> + = item * Specify a test plan <nl> + <nl> + = item * Run tests <nl> + <nl> + = item * Skip tests in certain situations <nl> + <nl> + = item * Have TODO tests <nl> + <nl> + = item * Produce TAP compatible diagnostics <nl> + <nl> + = back <nl> + <nl> + More information about libtap , including download links , checksums , <nl> + anonymous access to the Subersion repository , and a bug tracking <nl> + system , can be found at : <nl> + <nl> + http : / / jc . ngo . org . uk / trac - bin / trac . cgi / wiki / LibTap <nl> + <nl> + ( Nik Clayton , April 17 , 2006 ) <nl> + <nl> + = head2 Python <nl> + <nl> + PyTap will , when it ' s done , provide a simple , assertive ( Test : : More - like ) <nl> + interface for writing tests in Python . It will output TAP and will <nl> + include the functionality found in Test : : Builder and Test : : More . It will <nl> + try to make it easy to add more test code ( so you can write your own <nl> + C < TAP . StringDiff > , for example . <nl> + <nl> + Right now , it ' s got a fair bit of the basics needed to emulate Test : : More , <nl> + and I think it ' s easy to add more stuff - - just like Test : : Builder , <nl> + there ' s a singleton that you can get at easily . <nl> + <nl> + I need to better identify and finish implementing the most basic tests . <nl> + I am not a Python guru , I just use it from time to time , so my aim may <nl> + not be true . I need to write tests for it , which means either relying <nl> + on Perl for the tester tester , or writing one in Python . <nl> + <nl> + Here ' s a sample test , as found in my Subversion : <nl> + <nl> + from TAP . Simple import * <nl> + <nl> + plan ( 15 ) <nl> + <nl> + ok ( 1 ) <nl> + ok ( 1 , " everything is OK ! " ) <nl> + ok ( 0 , " always fails " ) <nl> + <nl> + is_ok ( 10 , 10 , " is ten ten ? 
" ) <nl> + is_ok ( ok , ok , " even ok is ok ! " ) <nl> + ok ( id ( ok ) , " ok is not the null pointer " ) <nl> + ok ( True , " the Truth will set you ok " ) <nl> + ok ( not False , " and nothing but the truth " ) <nl> + ok ( False , " and we ' ll know if you lie to us " ) <nl> + <nl> + isa_ok ( 10 , int , " 10 " ) <nl> + isa_ok ( ' ok ' , str , " some string " ) <nl> + <nl> + ok ( 0 , " zero is true " , todo = " be more like Ruby ! " ) <nl> + ok ( None , " none is true " , skip = " not possible in this universe " ) <nl> + <nl> + eq_ok ( " not " , " equal " , " two strings are not equal " ) ; <nl> + <nl> + ( Ricardo Signes , April 17 , 2006 ) <nl> + <nl> + = head2 JavaScript <nl> + <nl> + Test . Simple looks and acts just like TAP , although in reality it ' s <nl> + tracking test results in an object rather than scraping them from a <nl> + print buffer . <nl> + <nl> + http : / / openjsan . org / doc / t / th / theory / Test / Simple / <nl> + <nl> + ( David Wheeler , April 17 , 2006 ) <nl> + <nl> + = head2 PHP <nl> + <nl> + All the big PHP players now produce TAP <nl> + <nl> + = over <nl> + <nl> + = item * phpt <nl> + <nl> + Outputs TAP by default as of the yet - to - be - released PEAR 1 . 5 . 0 <nl> + <nl> + http : / / pear . php . net / PEAR <nl> + <nl> + = item * PHPUnit <nl> + <nl> + Has a TAP logger ( since 2 . 3 . 4 ) <nl> + <nl> + http : / / www . phpunit . de / wiki / Main_Page <nl> + <nl> + = item * SimpleTest <nl> + <nl> + There ' s a third - party TAP reporting extension for SimpleTest <nl> + <nl> + http : / / www . digitalsandwich . com / archives / 51 - Updated - Simpletest + Apache - Test . html <nl> + <nl> + = item * Apache - Test <nl> + <nl> + Apache - Test ' s PHP writes TAP by default and includes the standalone <nl> + test - more . php <nl> + <nl> + http : / / search . cpan . org / dist / Apache - Test / <nl> + <nl> + = back <nl> + <nl> + ( Geoffrey Young , April 17 , 2006 ) <nl> + <nl> + = head1 AUTHORS <nl> + <nl> + Andy Lester , based on the original Test : : Harness documentation by Michael Schwern . <nl> + <nl> + = head1 ACKNOWLEDGEMENTS <nl> + <nl> + Thanks to <nl> + Pete Krawczyk , <nl> + Paul Johnson , <nl> + Ian Langworth <nl> + and Nik Clayton <nl> + for help and contributions on this document . <nl> + <nl> + The basis for the TAP format was created by Larry Wall in the <nl> + original test script for Perl 1 . Tim Bunce and Andreas Koenig <nl> + developed it further with their modifications to Test : : Harness . <nl> + <nl> + = head1 COPYRIGHT <nl> + <nl> + Copyright 2003 - 2005 by <nl> + Michael G Schwern C < < < schwern @ pobox . com > > > , <nl> + Andy Lester C < < < andy @ petdance . com > > > . <nl> + <nl> + This program is free software ; you can redistribute it and / or <nl> + modify it under the same terms as Perl itself . <nl> + <nl> + See L < http : / / www . perl . com / perl / misc / Artistic . html > . <nl> + <nl> + = cut <nl> new file mode 100644 <nl> index 00000000000 . . 0cda2fee6f6 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Harness / Util . pm <nl> <nl> + package Test : : Harness : : Util ; <nl> + <nl> + use strict ; <nl> + use vars qw ( $ VERSION ) ; <nl> + $ VERSION = ' 0 . 
01 ' ; <nl> + <nl> + use File : : Spec ; <nl> + use Exporter ; <nl> + use vars qw ( @ ISA @ EXPORT @ EXPORT_OK ) ; <nl> + <nl> + @ ISA = qw ( Exporter ) ; <nl> + @ EXPORT = ( ) ; <nl> + @ EXPORT_OK = qw ( all_in shuffle blibdirs ) ; <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Harness : : Util - Utility functions for Test : : Harness : : * <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + Utility functions for Test : : Harness : : * <nl> + <nl> + = head1 PUBLIC FUNCTIONS <nl> + <nl> + The following are all available to be imported to your module . No symbols <nl> + are exported by default . <nl> + <nl> + = head2 all_in ( { parm = > value , parm = > value } ) <nl> + <nl> + Finds all the F < * . t > in a directory . Knows to skip F < . svn > and F < CVS > <nl> + directories . <nl> + <nl> + Valid parms are : <nl> + <nl> + = over <nl> + <nl> + = item start <nl> + <nl> + Starting point for the search . Defaults to " . " . <nl> + <nl> + = item recurse <nl> + <nl> + Flag to say whether it should recurse . Default to true . <nl> + <nl> + = back <nl> + <nl> + = cut <nl> + <nl> + sub all_in { <nl> + my $ parms = shift ; <nl> + my % parms = ( <nl> + start = > " . " , <nl> + recurse = > 1 , <nl> + % $ parms , <nl> + ) ; <nl> + <nl> + my @ hits = ( ) ; <nl> + my $ start = $ parms { start } ; <nl> + <nl> + local * DH ; <nl> + if ( opendir ( DH , $ start ) ) { <nl> + my @ files = sort readdir DH ; <nl> + closedir DH ; <nl> + for my $ file ( @ files ) { <nl> + next if $ file eq File : : Spec - > updir | | $ file eq File : : Spec - > curdir ; <nl> + next if $ file eq " . svn " ; <nl> + next if $ file eq " CVS " ; <nl> + <nl> + my $ currfile = File : : Spec - > catfile ( $ start , $ file ) ; <nl> + if ( - d $ currfile ) { <nl> + push ( @ hits , all_in ( { % parms , start = > $ currfile } ) ) if $ parms { recurse } ; <nl> + } <nl> + else { <nl> + push ( @ hits , $ currfile ) if $ currfile = ~ / \ . t $ / ; <nl> + } <nl> + } <nl> + } <nl> + else { <nl> + warn " $ start : $ ! \ n " ; <nl> + } <nl> + <nl> + return @ hits ; <nl> + } <nl> + <nl> + = head1 shuffle ( @ list ) <nl> + <nl> + Returns a shuffled copy of I < @ list > . <nl> + <nl> + = cut <nl> + <nl> + sub shuffle { <nl> + # Fisher - Yates shuffle <nl> + my $ i = @ _ ; <nl> + while ( $ i ) { <nl> + my $ j = rand $ i - - ; <nl> + @ _ [ $ i , $ j ] = @ _ [ $ j , $ i ] ; <nl> + } <nl> + } <nl> + <nl> + <nl> + = head2 blibdir ( ) <nl> + <nl> + Finds all the blib directories . Stolen directly from blib . pm <nl> + <nl> + = cut <nl> + <nl> + sub blibdirs { <nl> + my $ dir = File : : Spec - > curdir ; <nl> + if ( $ ^ O eq ' VMS ' ) { <nl> + ( $ dir = VMS : : Filespec : : unixify ( $ dir ) ) = ~ s - / \ z - - ; <nl> + } <nl> + my $ archdir = " arch " ; <nl> + if ( $ ^ O eq " MacOS " ) { <nl> + # Double up the MP : : A so that it ' s not used only once . <nl> + $ archdir = $ MacPerl : : Architecture = $ MacPerl : : Architecture ; <nl> + } <nl> + <nl> + my $ i = 5 ; <nl> + while ( $ i - - ) { <nl> + my $ blib = File : : Spec - > catdir ( $ dir , " blib " ) ; <nl> + my $ blib_lib = File : : Spec - > catdir ( $ blib , " lib " ) ; <nl> + my $ blib_arch = File : : Spec - > catdir ( $ blib , $ archdir ) ; <nl> + <nl> + if ( - d $ blib & & - d $ blib_arch & & - d $ blib_lib ) { <nl> + return ( $ blib_arch , $ blib_lib ) ; <nl> + } <nl> + $ dir = File : : Spec - > catdir ( $ dir , File : : Spec - > updir ) ; <nl> + } <nl> + warn " $ 0 : Cannot find blib \ n " ; <nl> + return ; <nl> + } <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . 
a7bd13a1442 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / More . pm <nl> <nl> + package Test : : More ; <nl> + <nl> + use 5 . 004 ; <nl> + <nl> + use strict ; <nl> + <nl> + <nl> + # Can ' t use Carp because it might cause use_ok ( ) to accidentally succeed <nl> + # even though the module being used forgot to use Carp . Yes , this <nl> + # actually happened . <nl> + sub _carp { <nl> + my ( $ file , $ line ) = ( caller ( 1 ) ) [ 1 , 2 ] ; <nl> + warn @ _ , " at $ file line $ line \ n " ; <nl> + } <nl> + <nl> + <nl> + <nl> + use vars qw ( $ VERSION @ ISA @ EXPORT % EXPORT_TAGS $ TODO ) ; <nl> + $ VERSION = ' 0 . 72 ' ; <nl> + $ VERSION = eval $ VERSION ; # make the alpha version come out as a number <nl> + <nl> + use Test : : Builder : : Module ; <nl> + @ ISA = qw ( Test : : Builder : : Module ) ; <nl> + @ EXPORT = qw ( ok use_ok require_ok <nl> + is isnt like unlike is_deeply <nl> + cmp_ok <nl> + skip todo todo_skip <nl> + pass fail <nl> + eq_array eq_hash eq_set <nl> + $ TODO <nl> + plan <nl> + can_ok isa_ok <nl> + diag <nl> + BAIL_OUT <nl> + ) ; <nl> + <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : More - yet another framework for writing test scripts <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + use Test : : More tests = > 23 ; <nl> + # or <nl> + use Test : : More qw ( no_plan ) ; <nl> + # or <nl> + use Test : : More skip_all = > $ reason ; <nl> + <nl> + BEGIN { use_ok ( ' Some : : Module ' ) ; } <nl> + require_ok ( ' Some : : Module ' ) ; <nl> + <nl> + # Various ways to say " ok " <nl> + ok ( $ got eq $ expected , $ test_name ) ; <nl> + <nl> + is ( $ got , $ expected , $ test_name ) ; <nl> + isnt ( $ got , $ expected , $ test_name ) ; <nl> + <nl> + # Rather than print STDERR " # here ' s what went wrong \ n " <nl> + diag ( " here ' s what went wrong " ) ; <nl> + <nl> + like ( $ got , qr / expected / , $ test_name ) ; <nl> + unlike ( $ got , qr / expected / , $ test_name ) ; <nl> + <nl> + cmp_ok ( $ got , ' = = ' , $ expected , $ test_name ) ; <nl> + <nl> + is_deeply ( $ got_complex_structure , $ expected_complex_structure , $ test_name ) ; <nl> + <nl> + SKIP : { <nl> + skip $ why , $ how_many unless $ have_some_feature ; <nl> + <nl> + ok ( foo ( ) , $ test_name ) ; <nl> + is ( foo ( 42 ) , 23 , $ test_name ) ; <nl> + } ; <nl> + <nl> + TODO : { <nl> + local $ TODO = $ why ; <nl> + <nl> + ok ( foo ( ) , $ test_name ) ; <nl> + is ( foo ( 42 ) , 23 , $ test_name ) ; <nl> + } ; <nl> + <nl> + can_ok ( $ module , @ methods ) ; <nl> + isa_ok ( $ object , $ class ) ; <nl> + <nl> + pass ( $ test_name ) ; <nl> + fail ( $ test_name ) ; <nl> + <nl> + BAIL_OUT ( $ why ) ; <nl> + <nl> + # UNIMPLEMENTED ! ! ! <nl> + my @ status = Test : : More : : status ; <nl> + <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + B < STOP ! > If you ' re just getting started writing tests , have a look at <nl> + Test : : Simple first . This is a drop in replacement for Test : : Simple <nl> + which you can switch to once you get the hang of basic testing . <nl> + <nl> + The purpose of this module is to provide a wide range of testing <nl> + utilities . Various ways to say " ok " with better diagnostics , <nl> + facilities to skip tests , test future features and compare complicated <nl> + data structures . While you can do almost anything with a simple <nl> + C < ok ( ) > function , it doesn ' t provide good diagnostic output . <nl> + <nl> + <nl> + = head2 I love it when a plan comes together <nl> + <nl> + Before anything else , you need a testing plan . 
This basically declares <nl> + how many tests your script is going to run to protect against premature <nl> + failure . <nl> + <nl> + The preferred way to do this is to declare a plan when you C < use Test : : More > . <nl> + <nl> + use Test : : More tests = > 23 ; <nl> + <nl> + There are rare cases when you will not know beforehand how many tests <nl> + your script is going to run . In this case , you can declare that you <nl> + have no plan . ( Try to avoid using this as it weakens your test . ) <nl> + <nl> + use Test : : More qw ( no_plan ) ; <nl> + <nl> + B < NOTE > : using no_plan requires a Test : : Harness upgrade else it will <nl> + think everything has failed . See L < CAVEATS and NOTES > ) . <nl> + <nl> + In some cases , you ' ll want to completely skip an entire testing script . <nl> + <nl> + use Test : : More skip_all = > $ skip_reason ; <nl> + <nl> + Your script will declare a skip with the reason why you skipped and <nl> + exit immediately with a zero ( success ) . See L < Test : : Harness > for <nl> + details . <nl> + <nl> + If you want to control what functions Test : : More will export , you <nl> + have to use the ' import ' option . For example , to import everything <nl> + but ' fail ' , you ' d do : <nl> + <nl> + use Test : : More tests = > 23 , import = > [ ' ! fail ' ] ; <nl> + <nl> + Alternatively , you can use the plan ( ) function . Useful for when you <nl> + have to calculate the number of tests . <nl> + <nl> + use Test : : More ; <nl> + plan tests = > keys % Stuff * 3 ; <nl> + <nl> + or for deciding between running the tests at all : <nl> + <nl> + use Test : : More ; <nl> + if ( $ ^ O eq ' MacOS ' ) { <nl> + plan skip_all = > ' Test irrelevant on MacOS ' ; <nl> + } <nl> + else { <nl> + plan tests = > 42 ; <nl> + } <nl> + <nl> + = cut <nl> + <nl> + sub plan { <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + $ tb - > plan ( @ _ ) ; <nl> + } <nl> + <nl> + <nl> + # This implements " use Test : : More ' no_diag ' " but the behavior is <nl> + # deprecated . <nl> + sub import_extra { <nl> + my $ class = shift ; <nl> + my $ list = shift ; <nl> + <nl> + my @ other = ( ) ; <nl> + my $ idx = 0 ; <nl> + while ( $ idx < = $ # { $ list } ) { <nl> + my $ item = $ list - > [ $ idx ] ; <nl> + <nl> + if ( defined $ item and $ item eq ' no_diag ' ) { <nl> + $ class - > builder - > no_diag ( 1 ) ; <nl> + } <nl> + else { <nl> + push @ other , $ item ; <nl> + } <nl> + <nl> + $ idx + + ; <nl> + } <nl> + <nl> + @ $ list = @ other ; <nl> + } <nl> + <nl> + <nl> + = head2 Test names <nl> + <nl> + By convention , each test is assigned a number in order . This is <nl> + largely done automatically for you . However , it ' s often very useful to <nl> + assign a name to each test . Which would you rather see : <nl> + <nl> + ok 4 <nl> + not ok 5 <nl> + ok 6 <nl> + <nl> + or <nl> + <nl> + ok 4 - basic multi - variable <nl> + not ok 5 - simple exponential <nl> + ok 6 - force = = mass * acceleration <nl> + <nl> + The later gives you some idea of what failed . It also makes it easier <nl> + to find the test in your script , simply search for " simple <nl> + exponential " . <nl> + <nl> + All test functions take a name argument . It ' s optional , but highly <nl> + suggested that you use it . <nl> + <nl> + <nl> + = head2 I ' m ok , you ' re not ok . <nl> + <nl> + The basic purpose of this module is to print out either " ok # " or " not <nl> + ok # " depending on if a given test succeeded or failed . Everything <nl> + else is just gravy . 
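<nl> + <nl> + For example , this tiny script ( the test names are invented for illustration ) prints one " ok " or " not ok " line per test : <nl> + <nl> + use Test : : More tests = > 2 ; <nl> + <nl> + ok ( 1 + 1 = = 2 , ' addition works ' ) ; <nl> + ok ( 0 , ' this one fails ' ) ; <nl> + <nl> + which produces something like : <nl> + <nl> + 1 . . 2 <nl> + ok 1 - addition works <nl> + not ok 2 - this one fails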
<nl> + <nl> + All of the following print " ok " or " not ok " depending on if the test <nl> + succeeded or failed . They all also return true or false , <nl> + respectively . <nl> + <nl> + = over 4 <nl> + <nl> + = item B < ok > <nl> + <nl> + ok ( $ got eq $ expected , $ test_name ) ; <nl> + <nl> + This simply evaluates any expression ( C < $ got eq $ expected > is just a <nl> + simple example ) and uses that to determine if the test succeeded or <nl> + failed . A true expression passes , a false one fails . Very simple . <nl> + <nl> + For example : <nl> + <nl> + ok ( $ exp { 9 } = = 81 , ' simple exponential ' ) ; <nl> + ok ( Film - > can ( ' db_Main ' ) , ' set_db ( ) ' ) ; <nl> + ok ( $ p - > tests = = 4 , ' saw tests ' ) ; <nl> + ok ( ! grep ! defined $ _ , @ items , ' items populated ' ) ; <nl> + <nl> + ( Mnemonic : " This is ok . " ) <nl> + <nl> + $ test_name is a very short description of the test that will be printed <nl> + out . It makes it very easy to find a test in your script when it fails <nl> + and gives others an idea of your intentions . $ test_name is optional , <nl> + but we B < very > strongly encourage its use . <nl> + <nl> + Should an ok ( ) fail , it will produce some diagnostics : <nl> + <nl> + not ok 18 - sufficient mucus <nl> + # Failed test ' sufficient mucus ' <nl> + # in foo . t at line 42 . <nl> + <nl> + This is the same as Test : : Simple ' s ok ( ) routine . <nl> + <nl> + = cut <nl> + <nl> + sub ok ( $ ; $ ) { <nl> + my ( $ test , $ name ) = @ _ ; <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + $ tb - > ok ( $ test , $ name ) ; <nl> + } <nl> + <nl> + = item B < is > <nl> + <nl> + = item B < isnt > <nl> + <nl> + is ( $ got , $ expected , $ test_name ) ; <nl> + isnt ( $ got , $ expected , $ test_name ) ; <nl> + <nl> + Similar to ok ( ) , is ( ) and isnt ( ) compare their two arguments <nl> + with C < eq > and C < ne > respectively and use the result of that to <nl> + determine if the test succeeded or failed . So these : <nl> + <nl> + # Is the ultimate answer 42 ? <nl> + is ( ultimate_answer ( ) , 42 , " Meaning of Life " ) ; <nl> + <nl> + # $ foo isn ' t empty <nl> + isnt ( $ foo , ' ' , " Got some foo " ) ; <nl> + <nl> + are similar to these : <nl> + <nl> + ok ( ultimate_answer ( ) eq 42 , " Meaning of Life " ) ; <nl> + ok ( $ foo ne ' ' , " Got some foo " ) ; <nl> + <nl> + ( Mnemonic : " This is that . " " This isn ' t that . " ) <nl> + <nl> + So why use these ? They produce better diagnostics on failure . ok ( ) <nl> + cannot know what you are testing for ( beyond the name ) , but is ( ) and <nl> + isnt ( ) know what the test was and why it failed . For example this <nl> + test : <nl> + <nl> + my $ foo = ' waffle ' ; my $ bar = ' yarblokos ' ; <nl> + is ( $ foo , $ bar , ' Is foo the same as bar ? ' ) ; <nl> + <nl> + Will produce something like this : <nl> + <nl> + not ok 17 - Is foo the same as bar ? <nl> + # Failed test ' Is foo the same as bar ? ' <nl> + # in foo . t at line 139 . <nl> + # got : ' waffle ' <nl> + # expected : ' yarblokos ' <nl> + <nl> + So you can figure out what went wrong without rerunning the test . <nl> + <nl> + You are encouraged to use is ( ) and isnt ( ) over ok ( ) where possible , <nl> + however do not be tempted to use them to find out if something is <nl> + true or false ! <nl> + <nl> + # XXX BAD ! <nl> + is ( exists $ brooklyn { tree } , 1 , ' A tree grows in Brooklyn ' ) ; <nl> + <nl> + This does not check if C < exists $ brooklyn { tree } > is true , it checks if <nl> + it returns 1 . Very different . 
Similar caveats exist for false and 0 . <nl> + In these cases , use ok ( ) . <nl> + <nl> + ok ( exists $ brooklyn { tree } , ' A tree grows in Brooklyn ' ) ; <nl> + <nl> + For those grammatical pedants out there , there ' s an C < isn ' t ( ) > <nl> + function which is an alias of isnt ( ) . <nl> + <nl> + = cut <nl> + <nl> + sub is ( $ $ ; $ ) { <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + $ tb - > is_eq ( @ _ ) ; <nl> + } <nl> + <nl> + sub isnt ( $ $ ; $ ) { <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + $ tb - > isnt_eq ( @ _ ) ; <nl> + } <nl> + <nl> + * isn ' t = \ & isnt ; <nl> + <nl> + <nl> + = item B < like > <nl> + <nl> + like ( $ got , qr / expected / , $ test_name ) ; <nl> + <nl> + Similar to ok ( ) , like ( ) matches $ got against the regex C < qr / expected / > . <nl> + <nl> + So this : <nl> + <nl> + like ( $ got , qr / expected / , ' this is like that ' ) ; <nl> + <nl> + is similar to : <nl> + <nl> + ok ( $ got = ~ / expected / , ' this is like that ' ) ; <nl> + <nl> + ( Mnemonic " This is like that " . ) <nl> + <nl> + The second argument is a regular expression . It may be given as a <nl> + regex reference ( i . e . C < qr / / > ) or ( for better compatibility with older <nl> + perls ) as a string that looks like a regex ( alternative delimiters are <nl> + currently not supported ) : <nl> + <nl> + like ( $ got , ' / expected / ' , ' this is like that ' ) ; <nl> + <nl> + Regex options may be placed on the end ( C < ' / expected / i ' > ) . <nl> + <nl> + Its advantages over ok ( ) are similar to that of is ( ) and isnt ( ) . Better <nl> + diagnostics on failure . <nl> + <nl> + = cut <nl> + <nl> + sub like ( $ $ ; $ ) { <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + $ tb - > like ( @ _ ) ; <nl> + } <nl> + <nl> + <nl> + = item B < unlike > <nl> + <nl> + unlike ( $ got , qr / expected / , $ test_name ) ; <nl> + <nl> + Works exactly as like ( ) , only it checks if $ got B < does not > match the <nl> + given pattern . <nl> + <nl> + = cut <nl> + <nl> + sub unlike ( $ $ ; $ ) { <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + $ tb - > unlike ( @ _ ) ; <nl> + } <nl> + <nl> + <nl> + = item B < cmp_ok > <nl> + <nl> + cmp_ok ( $ got , $ op , $ expected , $ test_name ) ; <nl> + <nl> + Halfway between ok ( ) and is ( ) lies cmp_ok ( ) . This allows you to <nl> + compare two arguments using any binary perl operator . <nl> + <nl> + # ok ( $ got eq $ expected ) ; <nl> + cmp_ok ( $ got , ' eq ' , $ expected , ' this eq that ' ) ; <nl> + <nl> + # ok ( $ got = = $ expected ) ; <nl> + cmp_ok ( $ got , ' = = ' , $ expected , ' this = = that ' ) ; <nl> + <nl> + # ok ( $ got & & $ expected ) ; <nl> + cmp_ok ( $ got , ' & & ' , $ expected , ' this & & that ' ) ; <nl> + . . . etc . . . <nl> + <nl> + Its advantage over ok ( ) is when the test fails you ' ll know what $ got <nl> + and $ expected were : <nl> + <nl> + not ok 1 <nl> + # Failed test in foo . t at line 12 . 
<nl> + # ' 23 ' <nl> + # & & <nl> + # undef <nl> + <nl> + It ' s also useful in those cases where you are comparing numbers and <nl> + is ( ) ' s use of C < eq > will interfere : <nl> + <nl> + cmp_ok ( $ big_hairy_number , ' = = ' , $ another_big_hairy_number ) ; <nl> + <nl> + = cut <nl> + <nl> + sub cmp_ok ( $ $ $ ; $ ) { <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + $ tb - > cmp_ok ( @ _ ) ; <nl> + } <nl> + <nl> + <nl> + = item B < can_ok > <nl> + <nl> + can_ok ( $ module , @ methods ) ; <nl> + can_ok ( $ object , @ methods ) ; <nl> + <nl> + Checks to make sure the $ module or $ object can do these @ methods <nl> + ( works with functions , too ) . <nl> + <nl> + can_ok ( ' Foo ' , qw ( this that whatever ) ) ; <nl> + <nl> + is almost exactly like saying : <nl> + <nl> + ok ( Foo - > can ( ' this ' ) & & <nl> + Foo - > can ( ' that ' ) & & <nl> + Foo - > can ( ' whatever ' ) <nl> + ) ; <nl> + <nl> + only without all the typing and with a better interface . Handy for <nl> + quickly testing an interface . <nl> + <nl> + No matter how many @ methods you check , a single can_ok ( ) call counts <nl> + as one test . If you desire otherwise , use : <nl> + <nl> + foreach my $ meth ( @ methods ) { <nl> + can_ok ( ' Foo ' , $ meth ) ; <nl> + } <nl> + <nl> + = cut <nl> + <nl> + sub can_ok ( $ @ ) { <nl> + my ( $ proto , @ methods ) = @ _ ; <nl> + my $ class = ref $ proto | | $ proto ; <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + unless ( $ class ) { <nl> + my $ ok = $ tb - > ok ( 0 , " - > can ( . . . ) " ) ; <nl> + $ tb - > diag ( ' can_ok ( ) called with empty class or reference ' ) ; <nl> + return $ ok ; <nl> + } <nl> + <nl> + unless ( @ methods ) { <nl> + my $ ok = $ tb - > ok ( 0 , " $ class - > can ( . . . ) " ) ; <nl> + $ tb - > diag ( ' can_ok ( ) called with no methods ' ) ; <nl> + return $ ok ; <nl> + } <nl> + <nl> + my @ nok = ( ) ; <nl> + foreach my $ method ( @ methods ) { <nl> + $ tb - > _try ( sub { $ proto - > can ( $ method ) } ) or push @ nok , $ method ; <nl> + } <nl> + <nl> + my $ name ; <nl> + $ name = @ methods = = 1 ? " $ class - > can ( ' $ methods [ 0 ] ' ) " <nl> + : " $ class - > can ( . . . ) " ; <nl> + <nl> + my $ ok = $ tb - > ok ( ! @ nok , $ name ) ; <nl> + <nl> + $ tb - > diag ( map " $ class - > can ( ' $ _ ' ) failed \ n " , @ nok ) ; <nl> + <nl> + return $ ok ; <nl> + } <nl> + <nl> + = item B < isa_ok > <nl> + <nl> + isa_ok ( $ object , $ class , $ object_name ) ; <nl> + isa_ok ( $ ref , $ type , $ ref_name ) ; <nl> + <nl> + Checks to see if the given C < < $ object - > isa ( $ class ) > > . Also checks to make <nl> + sure the object was defined in the first place . Handy for this sort <nl> + of thing : <nl> + <nl> + my $ obj = Some : : Module - > new ; <nl> + isa_ok ( $ obj , ' Some : : Module ' ) ; <nl> + <nl> + where you ' d otherwise have to write <nl> + <nl> + my $ obj = Some : : Module - > new ; <nl> + ok ( defined $ obj & & $ obj - > isa ( ' Some : : Module ' ) ) ; <nl> + <nl> + to safeguard against your test script blowing up . <nl> + <nl> + It works on references , too : <nl> + <nl> + isa_ok ( $ array_ref , ' ARRAY ' ) ; <nl> + <nl> + The diagnostics of this test normally just refer to ' the object ' . If <nl> + you ' d like them to be more specific , you can supply an $ object_name <nl> + ( for example ' Test customer ' ) . 
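<nl> + <nl> + For instance , a minimal sketch ( Some : : Module stands in for whatever class you are testing ) : <nl> + <nl> + my $ customer = Some : : Module - > new ; <nl> + isa_ok ( $ customer , ' Some : : Module ' , ' Test customer ' ) ; <nl> + <nl> + On failure the diagnostic then reads " Test customer isn ' t a . . . " instead of " The object isn ' t a . . . " .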
<nl> + <nl> + = cut <nl> + <nl> + sub isa_ok ( $ $ ; $ ) { <nl> + my ( $ object , $ class , $ obj_name ) = @ _ ; <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + my $ diag ; <nl> + $ obj_name = ' The object ' unless defined $ obj_name ; <nl> + my $ name = " $ obj_name isa $ class " ; <nl> + if ( ! defined $ object ) { <nl> + $ diag = " $ obj_name isn ' t defined " ; <nl> + } <nl> + elsif ( ! ref $ object ) { <nl> + $ diag = " $ obj_name isn ' t a reference " ; <nl> + } <nl> + else { <nl> + # We can ' t use UNIVERSAL : : isa because we want to honor isa ( ) overrides <nl> + my ( $ rslt , $ error ) = $ tb - > _try ( sub { $ object - > isa ( $ class ) } ) ; <nl> + if ( $ error ) { <nl> + if ( $ error = ~ / ^ Can ' t call method " isa " on unblessed reference / ) { <nl> + # Its an unblessed reference <nl> + if ( ! UNIVERSAL : : isa ( $ object , $ class ) ) { <nl> + my $ ref = ref $ object ; <nl> + $ diag = " $ obj_name isn ' t a ' $ class ' it ' s a ' $ ref ' " ; <nl> + } <nl> + } else { <nl> + die < < WHOA ; <nl> + WHOA ! I tried to call - > isa on your object and got some weird error . <nl> + Here ' s the error . <nl> + $ error <nl> + WHOA <nl> + } <nl> + } <nl> + elsif ( ! $ rslt ) { <nl> + my $ ref = ref $ object ; <nl> + $ diag = " $ obj_name isn ' t a ' $ class ' it ' s a ' $ ref ' " ; <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> + my $ ok ; <nl> + if ( $ diag ) { <nl> + $ ok = $ tb - > ok ( 0 , $ name ) ; <nl> + $ tb - > diag ( " $ diag \ n " ) ; <nl> + } <nl> + else { <nl> + $ ok = $ tb - > ok ( 1 , $ name ) ; <nl> + } <nl> + <nl> + return $ ok ; <nl> + } <nl> + <nl> + <nl> + = item B < pass > <nl> + <nl> + = item B < fail > <nl> + <nl> + pass ( $ test_name ) ; <nl> + fail ( $ test_name ) ; <nl> + <nl> + Sometimes you just want to say that the tests have passed . Usually <nl> + the case is you ' ve got some complicated condition that is difficult to <nl> + wedge into an ok ( ) . In this case , you can simply use pass ( ) ( to <nl> + declare the test ok ) or fail ( for not ok ) . They are synonyms for <nl> + ok ( 1 ) and ok ( 0 ) . <nl> + <nl> + Use these very , very , very sparingly . <nl> + <nl> + = cut <nl> + <nl> + sub pass ( ; $ ) { <nl> + my $ tb = Test : : More - > builder ; <nl> + $ tb - > ok ( 1 , @ _ ) ; <nl> + } <nl> + <nl> + sub fail ( ; $ ) { <nl> + my $ tb = Test : : More - > builder ; <nl> + $ tb - > ok ( 0 , @ _ ) ; <nl> + } <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Module tests <nl> + <nl> + You usually want to test if the module you ' re testing loads ok , rather <nl> + than just vomiting if its load fails . For such purposes we have <nl> + C < use_ok > and C < require_ok > . <nl> + <nl> + = over 4 <nl> + <nl> + = item B < use_ok > <nl> + <nl> + BEGIN { use_ok ( $ module ) ; } <nl> + BEGIN { use_ok ( $ module , @ imports ) ; } <nl> + <nl> + These simply use the given $ module and test to make sure the load <nl> + happened ok . It ' s recommended that you run use_ok ( ) inside a BEGIN <nl> + block so its functions are exported at compile - time and prototypes are <nl> + properly honored . <nl> + <nl> + If @ imports are given , they are passed through to the use . So this : <nl> + <nl> + BEGIN { use_ok ( ' Some : : Module ' , qw ( foo bar ) ) } <nl> + <nl> + is like doing this : <nl> + <nl> + use Some : : Module qw ( foo bar ) ; <nl> + <nl> + Version numbers can be checked like so : <nl> + <nl> + # Just like " use Some : : Module 1 . 02 " <nl> + BEGIN { use_ok ( ' Some : : Module ' , 1 . 
02 ) } <nl> + <nl> + Don ' t try to do this : <nl> + <nl> + BEGIN { <nl> + use_ok ( ' Some : : Module ' ) ; <nl> + <nl> + . . . some code that depends on the use . . . <nl> + . . . happening at compile time . . . <nl> + } <nl> + <nl> + because the notion of " compile - time " is relative . Instead , you want : <nl> + <nl> + BEGIN { use_ok ( ' Some : : Module ' ) } <nl> + BEGIN { . . . some code that depends on the use . . . } <nl> + <nl> + <nl> + = cut <nl> + <nl> + sub use_ok ( $ ; @ ) { <nl> + my ( $ module , @ imports ) = @ _ ; <nl> + @ imports = ( ) unless @ imports ; <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + my ( $ pack , $ filename , $ line ) = caller ; <nl> + <nl> + local ( $ @ , $ ! , $ SIG { __DIE__ } ) ; # isolate eval <nl> + <nl> + if ( @ imports = = 1 and $ imports [ 0 ] = ~ / ^ \ d + ( ? : \ . \ d + ) ? $ / ) { <nl> + # probably a version check . Perl needs to see the bare number <nl> + # for it to work with non - Exporter based modules . <nl> + eval < < USE ; <nl> + package $ pack ; <nl> + use $ module $ imports [ 0 ] ; <nl> + USE <nl> + } <nl> + else { <nl> + eval < < USE ; <nl> + package $ pack ; <nl> + use $ module \ @ imports ; <nl> + USE <nl> + } <nl> + <nl> + my $ ok = $ tb - > ok ( ! $ @ , " use $ module ; " ) ; <nl> + <nl> + unless ( $ ok ) { <nl> + chomp $ @ ; <nl> + $ @ = ~ s { ^ BEGIN failed - - compilation aborted at . * $ } <nl> + { BEGIN failed - - compilation aborted at $ filename line $ line . } m ; <nl> + $ tb - > diag ( < < DIAGNOSTIC ) ; <nl> + Tried to use ' $ module ' . <nl> + Error : $ @ <nl> + DIAGNOSTIC <nl> + <nl> + } <nl> + <nl> + return $ ok ; <nl> + } <nl> + <nl> + = item B < require_ok > <nl> + <nl> + require_ok ( $ module ) ; <nl> + require_ok ( $ file ) ; <nl> + <nl> + Like use_ok ( ) , except it requires the $ module or $ file . <nl> + <nl> + = cut <nl> + <nl> + sub require_ok ( $ ) { <nl> + my ( $ module ) = shift ; <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + my $ pack = caller ; <nl> + <nl> + # Try to deterine if we ' ve been given a module name or file . <nl> + # Module names must be barewords , files not . <nl> + $ module = qq [ ' $ module ' ] unless _is_module_name ( $ module ) ; <nl> + <nl> + local ( $ ! , $ @ , $ SIG { __DIE__ } ) ; # isolate eval <nl> + local $ SIG { __DIE__ } ; <nl> + eval < < REQUIRE ; <nl> + package $ pack ; <nl> + require $ module ; <nl> + REQUIRE <nl> + <nl> + my $ ok = $ tb - > ok ( ! $ @ , " require $ module ; " ) ; <nl> + <nl> + unless ( $ ok ) { <nl> + chomp $ @ ; <nl> + $ tb - > diag ( < < DIAGNOSTIC ) ; <nl> + Tried to require ' $ module ' . <nl> + Error : $ @ <nl> + DIAGNOSTIC <nl> + <nl> + } <nl> + <nl> + return $ ok ; <nl> + } <nl> + <nl> + <nl> + sub _is_module_name { <nl> + my $ module = shift ; <nl> + <nl> + # Module names start with a letter . <nl> + # End with an alphanumeric . <nl> + # The rest is an alphanumeric or : : <nl> + $ module = ~ s / \ b : : \ b / / g ; <nl> + $ module = ~ / ^ [ a - zA - Z ] \ w * $ / ; <nl> + } <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Complex data structures <nl> + <nl> + Not everything is a simple eq check or regex . There are times you <nl> + need to see if two data structures are equivalent . For these <nl> + instances Test : : More provides a handful of useful functions . <nl> + <nl> + B < NOTE > I ' m not quite sure what will happen with filehandles . 
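<nl> + <nl> + As a quick taste of C < is_deeply ( ) > , which is documented below ( the data here is invented ) : <nl> + <nl> + my $ got = { name = > ' Ford ' , numbers = > [ 1 , 2 , 3 ] } ; <nl> + my $ expected = { name = > ' Ford ' , numbers = > [ 1 , 2 , 42 ] } ; <nl> + is_deeply ( $ got , $ expected , ' structures match ' ) ; <nl> + <nl> + On failure it reports the first place the two structures begin differing .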
<nl> + <nl> + = over 4 <nl> + <nl> + = item B < is_deeply > <nl> + <nl> + is_deeply ( $ got , $ expected , $ test_name ) ; <nl> + <nl> + Similar to is ( ) , except that if $ got and $ expected are references , it <nl> + does a deep comparison walking each data structure to see if they are <nl> + equivalent . If the two structures are different , it will display the <nl> + place where they start differing . <nl> + <nl> + is_deeply ( ) compares the dereferenced values of references , the <nl> + references themselves ( except for their type ) are ignored . This means <nl> + aspects such as blessing and ties are not considered " different " . <nl> + <nl> + is_deeply ( ) current has very limited handling of function reference <nl> + and globs . It merely checks if they have the same referent . This may <nl> + improve in the future . <nl> + <nl> + Test : : Differences and Test : : Deep provide more in - depth functionality <nl> + along these lines . <nl> + <nl> + = cut <nl> + <nl> + use vars qw ( @ Data_Stack % Refs_Seen ) ; <nl> + my $ DNE = bless [ ] , ' Does : : Not : : Exist ' ; <nl> + <nl> + sub _dne { <nl> + ref $ _ [ 0 ] eq ref $ DNE ; <nl> + } <nl> + <nl> + <nl> + sub is_deeply { <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + unless ( @ _ = = 2 or @ _ = = 3 ) { <nl> + my $ msg = < < WARNING ; <nl> + is_deeply ( ) takes two or three args , you gave % d . <nl> + This usually means you passed an array or hash instead <nl> + of a reference to it <nl> + WARNING <nl> + chop $ msg ; # clip off newline so carp ( ) will put in line / file <nl> + <nl> + _carp sprintf $ msg , scalar @ _ ; <nl> + <nl> + return $ tb - > ok ( 0 ) ; <nl> + } <nl> + <nl> + my ( $ got , $ expected , $ name ) = @ _ ; <nl> + <nl> + $ tb - > _unoverload_str ( \ $ expected , \ $ got ) ; <nl> + <nl> + my $ ok ; <nl> + if ( ! ref $ got and ! ref $ expected ) { # neither is a reference <nl> + $ ok = $ tb - > is_eq ( $ got , $ expected , $ name ) ; <nl> + } <nl> + elsif ( ! ref $ got xor ! ref $ expected ) { # one ' s a reference , one isn ' t <nl> + $ ok = $ tb - > ok ( 0 , $ name ) ; <nl> + $ tb - > diag ( _format_stack ( { vals = > [ $ got , $ expected ] } ) ) ; <nl> + } <nl> + else { # both references <nl> + local @ Data_Stack = ( ) ; <nl> + if ( _deep_check ( $ got , $ expected ) ) { <nl> + $ ok = $ tb - > ok ( 1 , $ name ) ; <nl> + } <nl> + else { <nl> + $ ok = $ tb - > ok ( 0 , $ name ) ; <nl> + $ tb - > diag ( _format_stack ( @ Data_Stack ) ) ; <nl> + } <nl> + } <nl> + <nl> + return $ ok ; <nl> + } <nl> + <nl> + sub _format_stack { <nl> + my ( @ Stack ) = @ _ ; <nl> + <nl> + my $ var = ' $ FOO ' ; <nl> + my $ did_arrow = 0 ; <nl> + foreach my $ entry ( @ Stack ) { <nl> + my $ type = $ entry - > { type } | | ' ' ; <nl> + my $ idx = $ entry - > { ' idx ' } ; <nl> + if ( $ type eq ' HASH ' ) { <nl> + $ var . = " - > " unless $ did_arrow + + ; <nl> + $ var . = " { $ idx } " ; <nl> + } <nl> + elsif ( $ type eq ' ARRAY ' ) { <nl> + $ var . = " - > " unless $ did_arrow + + ; <nl> + $ var . = " [ $ idx ] " ; <nl> + } <nl> + elsif ( $ type eq ' REF ' ) { <nl> + $ var = " \ $ { $ var } " ; <nl> + } <nl> + } <nl> + <nl> + my @ vals = @ { $ Stack [ - 1 ] { vals } } [ 0 , 1 ] ; <nl> + my @ vars = ( ) ; <nl> + ( $ vars [ 0 ] = $ var ) = ~ s / \ $ FOO / \ $ got / ; <nl> + ( $ vars [ 1 ] = $ var ) = ~ s / \ $ FOO / \ $ expected / ; <nl> + <nl> + my $ out = " Structures begin differing at : \ n " ; <nl> + foreach my $ idx ( 0 . . $ # vals ) { <nl> + my $ val = $ vals [ $ idx ] ; <nl> + $ vals [ $ idx ] = ! defined $ val ? 
' undef ' : <nl> + _dne ( $ val ) ? " Does not exist " : <nl> + ref $ val ? " $ val " : <nl> + " ' $ val ' " ; <nl> + } <nl> + <nl> + $ out . = " $ vars [ 0 ] = $ vals [ 0 ] \ n " ; <nl> + $ out . = " $ vars [ 1 ] = $ vals [ 1 ] \ n " ; <nl> + <nl> + $ out = ~ s / ^ / / msg ; <nl> + return $ out ; <nl> + } <nl> + <nl> + <nl> + sub _type { <nl> + my $ thing = shift ; <nl> + <nl> + return ' ' if ! ref $ thing ; <nl> + <nl> + for my $ type ( qw ( ARRAY HASH REF SCALAR GLOB CODE Regexp ) ) { <nl> + return $ type if UNIVERSAL : : isa ( $ thing , $ type ) ; <nl> + } <nl> + <nl> + return ' ' ; <nl> + } <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Diagnostics <nl> + <nl> + If you pick the right test function , you ' ll usually get a good idea of <nl> + what went wrong when it failed . But sometimes it doesn ' t work out <nl> + that way . So here we have ways for you to write your own diagnostic <nl> + messages which are safer than just C < print STDERR > . <nl> + <nl> + = over 4 <nl> + <nl> + = item B < diag > <nl> + <nl> + diag ( @ diagnostic_message ) ; <nl> + <nl> + Prints a diagnostic message which is guaranteed not to interfere with <nl> + test output . Like C < print > @ diagnostic_message is simply concatenated <nl> + together . <nl> + <nl> + Handy for this sort of thing : <nl> + <nl> + ok ( grep ( / foo / , @ users ) , " There ' s a foo user " ) or <nl> + diag ( " Since there ' s no foo , check that / etc / bar is set up right " ) ; <nl> + <nl> + which would produce : <nl> + <nl> + not ok 42 - There ' s a foo user <nl> + # Failed test ' There ' s a foo user ' <nl> + # in foo . t at line 52 . <nl> + # Since there ' s no foo , check that / etc / bar is set up right . <nl> + <nl> + You might remember C < ok ( ) or diag ( ) > with the mnemonic C < open ( ) or <nl> + die ( ) > . <nl> + <nl> + B < NOTE > The exact formatting of the diagnostic output is still <nl> + changing , but it is guaranteed that whatever you throw at it it won ' t <nl> + interfere with the test . <nl> + <nl> + = cut <nl> + <nl> + sub diag { <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + $ tb - > diag ( @ _ ) ; <nl> + } <nl> + <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Conditional tests <nl> + <nl> + Sometimes running a test under certain conditions will cause the <nl> + test script to die . A certain function or method isn ' t implemented <nl> + ( such as fork ( ) on MacOS ) , some resource isn ' t available ( like a <nl> + net connection ) or a module isn ' t available . In these cases it ' s <nl> + necessary to skip tests , or declare that they are supposed to fail <nl> + but will work in the future ( a todo test ) . <nl> + <nl> + For more details on the mechanics of skip and todo tests see <nl> + L < Test : : Harness > . <nl> + <nl> + The way Test : : More handles this is with a named block . Basically , a <nl> + block of tests which can be skipped over or made todo . It ' s best if I <nl> + just show you . . . <nl> + <nl> + = over 4 <nl> + <nl> + = item B < SKIP : BLOCK > <nl> + <nl> + SKIP : { <nl> + skip $ why , $ how_many if $ condition ; <nl> + <nl> + . . . normal testing code goes here . . . <nl> + } <nl> + <nl> + This declares a block of tests that might be skipped , $ how_many tests <nl> + there are , $ why and under what $ condition to skip them . 
An example is <nl> + the easiest way to illustrate : <nl> + <nl> + SKIP : { <nl> + eval { require HTML : : Lint } ; <nl> + <nl> + skip " HTML : : Lint not installed " , 2 if $ @ ; <nl> + <nl> + my $ lint = new HTML : : Lint ; <nl> + isa_ok ( $ lint , " HTML : : Lint " ) ; <nl> + <nl> + $ lint - > parse ( $ html ) ; <nl> + is ( $ lint - > errors , 0 , " No errors found in HTML " ) ; <nl> + } <nl> + <nl> + If the user does not have HTML : : Lint installed , the whole block of <nl> + code I < won ' t be run at all > . Test : : More will output special ok ' s <nl> + which Test : : Harness interprets as skipped , but passing , tests . <nl> + <nl> + It ' s important that $ how_many accurately reflects the number of tests <nl> + in the SKIP block so the # of tests run will match up with your plan . <nl> + If your plan is C < no_plan > $ how_many is optional and will default to 1 . <nl> + <nl> + It ' s perfectly safe to nest SKIP blocks . Each SKIP block must have <nl> + the label C < SKIP > , or Test : : More can ' t work its magic . <nl> + <nl> + You don ' t skip tests which are failing because there ' s a bug in your <nl> + program , or for which you don ' t yet have code written . For that you <nl> + use TODO . Read on . <nl> + <nl> + = cut <nl> + <nl> + # ' # <nl> + sub skip { <nl> + my ( $ why , $ how_many ) = @ _ ; <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + unless ( defined $ how_many ) { <nl> + # $ how_many can only be avoided when no_plan is in use . <nl> + _carp " skip ( ) needs to know \ $ how_many tests are in the block " <nl> + unless $ tb - > has_plan eq ' no_plan ' ; <nl> + $ how_many = 1 ; <nl> + } <nl> + <nl> + if ( defined $ how_many and $ how_many = ~ / \ D / ) { <nl> + _carp " skip ( ) was passed a non - numeric number of tests . Did you get the arguments backwards ? " ; <nl> + $ how_many = 1 ; <nl> + } <nl> + <nl> + for ( 1 . . $ how_many ) { <nl> + $ tb - > skip ( $ why ) ; <nl> + } <nl> + <nl> + local $ ^ W = 0 ; <nl> + last SKIP ; <nl> + } <nl> + <nl> + <nl> + = item B < TODO : BLOCK > <nl> + <nl> + TODO : { <nl> + local $ TODO = $ why if $ condition ; <nl> + <nl> + . . . normal testing code goes here . . . <nl> + } <nl> + <nl> + Declares a block of tests you expect to fail and $ why . Perhaps it ' s <nl> + because you haven ' t fixed a bug or haven ' t finished a new feature : <nl> + <nl> + TODO : { <nl> + local $ TODO = " URI : : Geller not finished " ; <nl> + <nl> + my $ card = " Eight of clubs " ; <nl> + is ( URI : : Geller - > your_card , $ card , ' Is THIS your card ? ' ) ; <nl> + <nl> + my $ spoon ; <nl> + URI : : Geller - > bend_spoon ; <nl> + is ( $ spoon , ' bent ' , " Spoon bending , that ' s original " ) ; <nl> + } <nl> + <nl> + With a todo block , the tests inside are expected to fail . Test : : More <nl> + will run the tests normally , but print out special flags indicating <nl> + they are " todo " . Test : : Harness will interpret failures as being ok . <nl> + Should anything succeed , it will report it as an unexpected success . <nl> + You then know the thing you had todo is done and can remove the <nl> + TODO flag . <nl> + <nl> + The nice part about todo tests , as opposed to simply commenting out a <nl> + block of tests , is it ' s like having a programmatic todo list . You know <nl> + how much work is left to be done , you ' re aware of what bugs there are , <nl> + and you ' ll know immediately when they ' re fixed . <nl> + <nl> + Once a todo test starts succeeding , simply move it outside the block . 
<nl> + When the block is empty , delete it . <nl> + <nl> + B < NOTE > : TODO tests require a Test : : Harness upgrade else it will <nl> + treat it as a normal failure . See L < CAVEATS and NOTES > ) . <nl> + <nl> + <nl> + = item B < todo_skip > <nl> + <nl> + TODO : { <nl> + todo_skip $ why , $ how_many if $ condition ; <nl> + <nl> + . . . normal testing code . . . <nl> + } <nl> + <nl> + With todo tests , it ' s best to have the tests actually run . That way <nl> + you ' ll know when they start passing . Sometimes this isn ' t possible . <nl> + Often a failing test will cause the whole program to die or hang , even <nl> + inside an C < eval BLOCK > with and using C < alarm > . In these extreme <nl> + cases you have no choice but to skip over the broken tests entirely . <nl> + <nl> + The syntax and behavior is similar to a C < SKIP : BLOCK > except the <nl> + tests will be marked as failing but todo . Test : : Harness will <nl> + interpret them as passing . <nl> + <nl> + = cut <nl> + <nl> + sub todo_skip { <nl> + my ( $ why , $ how_many ) = @ _ ; <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + unless ( defined $ how_many ) { <nl> + # $ how_many can only be avoided when no_plan is in use . <nl> + _carp " todo_skip ( ) needs to know \ $ how_many tests are in the block " <nl> + unless $ tb - > has_plan eq ' no_plan ' ; <nl> + $ how_many = 1 ; <nl> + } <nl> + <nl> + for ( 1 . . $ how_many ) { <nl> + $ tb - > todo_skip ( $ why ) ; <nl> + } <nl> + <nl> + local $ ^ W = 0 ; <nl> + last TODO ; <nl> + } <nl> + <nl> + = item When do I use SKIP vs . TODO ? <nl> + <nl> + B < If it ' s something the user might not be able to do > , use SKIP . <nl> + This includes optional modules that aren ' t installed , running under <nl> + an OS that doesn ' t have some feature ( like fork ( ) or symlinks ) , or maybe <nl> + you need an Internet connection and one isn ' t available . <nl> + <nl> + B < If it ' s something the programmer hasn ' t done yet > , use TODO . This <nl> + is for any code you haven ' t written yet , or bugs you have yet to fix , <nl> + but want to put tests in your testing script ( always a good idea ) . <nl> + <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Test control <nl> + <nl> + = over 4 <nl> + <nl> + = item B < BAIL_OUT > <nl> + <nl> + BAIL_OUT ( $ reason ) ; <nl> + <nl> + Indicates to the harness that things are going so badly all testing <nl> + should terminate . This includes the running any additional test scripts . <nl> + <nl> + This is typically used when testing cannot continue such as a critical <nl> + module failing to compile or a necessary external utility not being <nl> + available such as a database connection failing . <nl> + <nl> + The test will exit with 255 . <nl> + <nl> + = cut <nl> + <nl> + sub BAIL_OUT { <nl> + my $ reason = shift ; <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + $ tb - > BAIL_OUT ( $ reason ) ; <nl> + } <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Discouraged comparison functions <nl> + <nl> + The use of the following functions is discouraged as they are not <nl> + actually testing functions and produce no diagnostics to help figure <nl> + out what went wrong . They were written before is_deeply ( ) existed <nl> + because I couldn ' t figure out how to display a useful diff of two <nl> + arbitrary data structures . <nl> + <nl> + These functions are usually used inside an ok ( ) . <nl> + <nl> + ok ( eq_array ( \ @ got , \ @ expected ) ) ; <nl> + <nl> + C < is_deeply ( ) > can do that better and with diagnostics . 
<nl> + <nl> + is_deeply ( \ @ got , \ @ expected ) ; <nl> + <nl> + They may be deprecated in future versions . <nl> + <nl> + = over 4 <nl> + <nl> + = item B < eq_array > <nl> + <nl> + my $ is_eq = eq_array ( \ @ got , \ @ expected ) ; <nl> + <nl> + Checks if two arrays are equivalent . This is a deep check , so <nl> + multi - level structures are handled correctly . <nl> + <nl> + = cut <nl> + <nl> + # ' # <nl> + sub eq_array { <nl> + local @ Data_Stack ; <nl> + _deep_check ( @ _ ) ; <nl> + } <nl> + <nl> + sub _eq_array { <nl> + my ( $ a1 , $ a2 ) = @ _ ; <nl> + <nl> + if ( grep ! _type ( $ _ ) eq ' ARRAY ' , $ a1 , $ a2 ) { <nl> + warn " eq_array passed a non - array ref " ; <nl> + return 0 ; <nl> + } <nl> + <nl> + return 1 if $ a1 eq $ a2 ; <nl> + <nl> + my $ ok = 1 ; <nl> + my $ max = $ # $ a1 > $ # $ a2 ? $ # $ a1 : $ # $ a2 ; <nl> + for ( 0 . . $ max ) { <nl> + my $ e1 = $ _ > $ # $ a1 ? $ DNE : $ a1 - > [ $ _ ] ; <nl> + my $ e2 = $ _ > $ # $ a2 ? $ DNE : $ a2 - > [ $ _ ] ; <nl> + <nl> + push @ Data_Stack , { type = > ' ARRAY ' , idx = > $ _ , vals = > [ $ e1 , $ e2 ] } ; <nl> + $ ok = _deep_check ( $ e1 , $ e2 ) ; <nl> + pop @ Data_Stack if $ ok ; <nl> + <nl> + last unless $ ok ; <nl> + } <nl> + <nl> + return $ ok ; <nl> + } <nl> + <nl> + sub _deep_check { <nl> + my ( $ e1 , $ e2 ) = @ _ ; <nl> + my $ tb = Test : : More - > builder ; <nl> + <nl> + my $ ok = 0 ; <nl> + <nl> + # Effectively turn % Refs_Seen into a stack . This avoids picking up <nl> + # the same referenced used twice ( such as [ \ $ a , \ $ a ] ) to be considered <nl> + # circular . <nl> + local % Refs_Seen = % Refs_Seen ; <nl> + <nl> + { <nl> + # Quiet uninitialized value warnings when comparing undefs . <nl> + local $ ^ W = 0 ; <nl> + <nl> + $ tb - > _unoverload_str ( \ $ e1 , \ $ e2 ) ; <nl> + <nl> + # Either they ' re both references or both not . <nl> + my $ same_ref = ! ( ! ref $ e1 xor ! ref $ e2 ) ; <nl> + my $ not_ref = ( ! ref $ e1 and ! 
ref $ e2 ) ; <nl> + <nl> + if ( defined $ e1 xor defined $ e2 ) { <nl> + $ ok = 0 ; <nl> + } <nl> + elsif ( _dne ( $ e1 ) xor _dne ( $ e2 ) ) { <nl> + $ ok = 0 ; <nl> + } <nl> + elsif ( $ same_ref and ( $ e1 eq $ e2 ) ) { <nl> + $ ok = 1 ; <nl> + } <nl> + elsif ( $ not_ref ) { <nl> + push @ Data_Stack , { type = > ' ' , vals = > [ $ e1 , $ e2 ] } ; <nl> + $ ok = 0 ; <nl> + } <nl> + else { <nl> + if ( $ Refs_Seen { $ e1 } ) { <nl> + return $ Refs_Seen { $ e1 } eq $ e2 ; <nl> + } <nl> + else { <nl> + $ Refs_Seen { $ e1 } = " $ e2 " ; <nl> + } <nl> + <nl> + my $ type = _type ( $ e1 ) ; <nl> + $ type = ' DIFFERENT ' unless _type ( $ e2 ) eq $ type ; <nl> + <nl> + if ( $ type eq ' DIFFERENT ' ) { <nl> + push @ Data_Stack , { type = > $ type , vals = > [ $ e1 , $ e2 ] } ; <nl> + $ ok = 0 ; <nl> + } <nl> + elsif ( $ type eq ' ARRAY ' ) { <nl> + $ ok = _eq_array ( $ e1 , $ e2 ) ; <nl> + } <nl> + elsif ( $ type eq ' HASH ' ) { <nl> + $ ok = _eq_hash ( $ e1 , $ e2 ) ; <nl> + } <nl> + elsif ( $ type eq ' REF ' ) { <nl> + push @ Data_Stack , { type = > $ type , vals = > [ $ e1 , $ e2 ] } ; <nl> + $ ok = _deep_check ( $ $ e1 , $ $ e2 ) ; <nl> + pop @ Data_Stack if $ ok ; <nl> + } <nl> + elsif ( $ type eq ' SCALAR ' ) { <nl> + push @ Data_Stack , { type = > ' REF ' , vals = > [ $ e1 , $ e2 ] } ; <nl> + $ ok = _deep_check ( $ $ e1 , $ $ e2 ) ; <nl> + pop @ Data_Stack if $ ok ; <nl> + } <nl> + elsif ( $ type ) { <nl> + push @ Data_Stack , { type = > $ type , vals = > [ $ e1 , $ e2 ] } ; <nl> + $ ok = 0 ; <nl> + } <nl> + else { <nl> + _whoa ( 1 , " No type in _deep_check " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + return $ ok ; <nl> + } <nl> + <nl> + <nl> + sub _whoa { <nl> + my ( $ check , $ desc ) = @ _ ; <nl> + if ( $ check ) { <nl> + die < < WHOA ; <nl> + WHOA ! $ desc <nl> + This should never happen ! Please contact the author immediately ! <nl> + WHOA <nl> + } <nl> + } <nl> + <nl> + <nl> + = item B < eq_hash > <nl> + <nl> + my $ is_eq = eq_hash ( \ % got , \ % expected ) ; <nl> + <nl> + Determines if the two hashes contain the same keys and values . This <nl> + is a deep check . <nl> + <nl> + = cut <nl> + <nl> + sub eq_hash { <nl> + local @ Data_Stack ; <nl> + return _deep_check ( @ _ ) ; <nl> + } <nl> + <nl> + sub _eq_hash { <nl> + my ( $ a1 , $ a2 ) = @ _ ; <nl> + <nl> + if ( grep ! _type ( $ _ ) eq ' HASH ' , $ a1 , $ a2 ) { <nl> + warn " eq_hash passed a non - hash ref " ; <nl> + return 0 ; <nl> + } <nl> + <nl> + return 1 if $ a1 eq $ a2 ; <nl> + <nl> + my $ ok = 1 ; <nl> + my $ bigger = keys % $ a1 > keys % $ a2 ? $ a1 : $ a2 ; <nl> + foreach my $ k ( keys % $ bigger ) { <nl> + my $ e1 = exists $ a1 - > { $ k } ? $ a1 - > { $ k } : $ DNE ; <nl> + my $ e2 = exists $ a2 - > { $ k } ? $ a2 - > { $ k } : $ DNE ; <nl> + <nl> + push @ Data_Stack , { type = > ' HASH ' , idx = > $ k , vals = > [ $ e1 , $ e2 ] } ; <nl> + $ ok = _deep_check ( $ e1 , $ e2 ) ; <nl> + pop @ Data_Stack if $ ok ; <nl> + <nl> + last unless $ ok ; <nl> + } <nl> + <nl> + return $ ok ; <nl> + } <nl> + <nl> + = item B < eq_set > <nl> + <nl> + my $ is_eq = eq_set ( \ @ got , \ @ expected ) ; <nl> + <nl> + Similar to eq_array ( ) , except the order of the elements is B < not > <nl> + important . This is a deep check , but the irrelevancy of order only <nl> + applies to the top level . <nl> + <nl> + ok ( eq_set ( \ @ got , \ @ expected ) ) ; <nl> + <nl> + Is better written : <nl> + <nl> + is_deeply ( [ sort @ got ] , [ sort @ expected ] ) ; <nl> + <nl> + B < NOTE > By historical accident , this is not a true set comparison . 
<nl> + While the order of elements does not matter , duplicate elements do . <nl> + <nl> + B < NOTE > eq_set ( ) does not know how to deal with references at the top <nl> + level . The following is an example of a comparison which might not work : <nl> + <nl> + eq_set ( [ \ 1 , \ 2 ] , [ \ 2 , \ 1 ] ) ; <nl> + <nl> + Test : : Deep contains much better set comparison functions . <nl> + <nl> + = cut <nl> + <nl> + sub eq_set { <nl> + my ( $ a1 , $ a2 ) = @ _ ; <nl> + return 0 unless @ $ a1 = = @ $ a2 ; <nl> + <nl> + # There ' s faster ways to do this , but this is easiest . <nl> + local $ ^ W = 0 ; <nl> + <nl> + # It really doesn ' t matter how we sort them , as long as both arrays are <nl> + # sorted with the same algorithm . <nl> + # <nl> + # Ensure that references are not accidentally treated the same as a <nl> + # string containing the reference . <nl> + # <nl> + # Have to inline the sort routine due to a threading / sort bug . <nl> + # See [ rt . cpan . org 6782 ] <nl> + # <nl> + # I don ' t know how references would be sorted so we just don ' t sort <nl> + # them . This means eq_set doesn ' t really work with refs . <nl> + return eq_array ( <nl> + [ grep ( ref , @ $ a1 ) , sort ( grep ( ! ref , @ $ a1 ) ) ] , <nl> + [ grep ( ref , @ $ a2 ) , sort ( grep ( ! ref , @ $ a2 ) ) ] , <nl> + ) ; <nl> + } <nl> + <nl> + = back <nl> + <nl> + <nl> + = head2 Extending and Embedding Test : : More <nl> + <nl> + Sometimes the Test : : More interface isn ' t quite enough . Fortunately , <nl> + Test : : More is built on top of Test : : Builder which provides a single , <nl> + unified backend for any test library to use . This means two test <nl> + libraries which both use Test : : Builder B < can be used together in the <nl> + same program > . <nl> + <nl> + If you simply want to do a little tweaking of how the tests behave , <nl> + you can access the underlying Test : : Builder object like so : <nl> + <nl> + = over 4 <nl> + <nl> + = item B < builder > <nl> + <nl> + my $ test_builder = Test : : More - > builder ; <nl> + <nl> + Returns the Test : : Builder object underlying Test : : More for you to play <nl> + with . <nl> + <nl> + <nl> + = back <nl> + <nl> + <nl> + = head1 EXIT CODES <nl> + <nl> + If all your tests passed , Test : : Builder will exit with zero ( which is <nl> + normal ) . If anything failed it will exit with how many failed . If <nl> + you run less ( or more ) tests than you planned , the missing ( or extras ) <nl> + will be considered failures . If no tests were ever run Test : : Builder <nl> + will throw a warning and exit with 255 . If the test died , even after <nl> + having successfully completed all its tests , it will still be <nl> + considered a failure and will exit with 255 . <nl> + <nl> + So the exit codes are . . . <nl> + <nl> + 0 all tests successful <nl> + 255 test died or all passed but wrong # of tests run <nl> + any other number how many failed ( including missing or extras ) <nl> + <nl> + If you fail more than 254 tests , it will be reported as 254 . <nl> + <nl> + B < NOTE > This behavior may go away in future versions . <nl> + <nl> + <nl> + = head1 CAVEATS and NOTES <nl> + <nl> + = over 4 <nl> + <nl> + = item Backwards compatibility <nl> + <nl> + Test : : More works with Perls as old as 5 . 004_05 . <nl> + <nl> + <nl> + = item Overloaded objects <nl> + <nl> + String overloaded objects are compared B < as strings > ( or in cmp_ok ( ) ' s <nl> + case , strings or numbers as appropriate to the comparison op ) . 
This <nl> + prevents Test : : More from piercing an object ' s interface allowing <nl> + better blackbox testing . So if a function starts returning overloaded <nl> + objects instead of bare strings your tests won ' t notice the <nl> + difference . This is good . <nl> + <nl> + However , it does mean that functions like is_deeply ( ) cannot be used to <nl> + test the internals of string overloaded objects . In this case I would <nl> + suggest Test : : Deep which contains more flexible testing functions for <nl> + complex data structures . <nl> + <nl> + <nl> + = item Threads <nl> + <nl> + Test : : More will only be aware of threads if " use threads " has been done <nl> + I < before > Test : : More is loaded . This is ok : <nl> + <nl> + use threads ; <nl> + use Test : : More ; <nl> + <nl> + This may cause problems : <nl> + <nl> + use Test : : More <nl> + use threads ; <nl> + <nl> + 5 . 8 . 1 and above are supported . Anything below that has too many bugs . <nl> + <nl> + <nl> + = item Test : : Harness upgrade <nl> + <nl> + no_plan and todo depend on new Test : : Harness features and fixes . If <nl> + you ' re going to distribute tests that use no_plan or todo your <nl> + end - users will have to upgrade Test : : Harness to the latest one on <nl> + CPAN . If you avoid no_plan and TODO tests , the stock Test : : Harness <nl> + will work fine . <nl> + <nl> + Installing Test : : More should also upgrade Test : : Harness . <nl> + <nl> + = back <nl> + <nl> + <nl> + = head1 HISTORY <nl> + <nl> + This is a case of convergent evolution with Joshua Pritikin ' s Test <nl> + module . I was largely unaware of its existence when I ' d first <nl> + written my own ok ( ) routines . This module exists because I can ' t <nl> + figure out how to easily wedge test names into Test ' s interface ( along <nl> + with a few other problems ) . <nl> + <nl> + The goal here is to have a testing utility that ' s simple to learn , <nl> + quick to use and difficult to trip yourself up with while still <nl> + providing more flexibility than the existing Test . pm . As such , the <nl> + names of the most common routines are kept tiny , special cases and <nl> + magic side - effects are kept to a minimum . WYSIWYG . <nl> + <nl> + <nl> + = head1 SEE ALSO <nl> + <nl> + L < Test : : Simple > if all this confuses you and you just want to write <nl> + some tests . You can upgrade to Test : : More later ( it ' s forward <nl> + compatible ) . <nl> + <nl> + L < Test > is the old testing module . Its main benefit is that it has <nl> + been distributed with Perl since 5 . 004_05 . <nl> + <nl> + L < Test : : Harness > for details on how your test results are interpreted <nl> + by Perl . <nl> + <nl> + L < Test : : Differences > for more ways to test complex data structures . <nl> + And it plays well with Test : : More . <nl> + <nl> + L < Test : : Class > is like XUnit but more perlish . <nl> + <nl> + L < Test : : Deep > gives you more powerful complex data structure testing . <nl> + <nl> + L < Test : : Unit > is XUnit style testing . <nl> + <nl> + L < Test : : Inline > shows the idea of embedded testing . <nl> + <nl> + L < Bundle : : Test > installs a whole bunch of useful test modules . <nl> + <nl> + <nl> + = head1 AUTHORS <nl> + <nl> + Michael G Schwern E < lt > schwern @ pobox . comE < gt > with much inspiration <nl> + from Joshua Pritikin ' s Test module and lots of help from Barrie <nl> + Slaymaker , Tony Bowden , blackstar . co . uk , chromatic , Fergal Daly and <nl> + the perl - qa gang . 
<nl> + <nl> + <nl> + = head1 BUGS <nl> + <nl> + See F < http : / / rt . cpan . org > to report and view bugs . <nl> + <nl> + <nl> + = head1 COPYRIGHT <nl> + <nl> + Copyright 2001 - 2002 , 2004 - 2006 by Michael G Schwern E < lt > schwern @ pobox . comE < gt > . <nl> + <nl> + This program is free software ; you can redistribute it and / or <nl> + modify it under the same terms as Perl itself . <nl> + <nl> + See F < http : / / www . perl . com / perl / misc / Artistic . html > <nl> + <nl> + = cut <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . 34fba63fed2 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Simple . pm <nl> <nl> + package Test : : Simple ; <nl> + <nl> + use 5 . 004 ; <nl> + <nl> + use strict ' vars ' ; <nl> + use vars qw ( $ VERSION @ ISA @ EXPORT ) ; <nl> + $ VERSION = ' 0 . 72 ' ; <nl> + $ VERSION = eval $ VERSION ; # make the alpha version come out as a number <nl> + <nl> + use Test : : Builder : : Module ; <nl> + @ ISA = qw ( Test : : Builder : : Module ) ; <nl> + @ EXPORT = qw ( ok ) ; <nl> + <nl> + my $ CLASS = __PACKAGE__ ; <nl> + <nl> + <nl> + = head1 NAME <nl> + <nl> + Test : : Simple - Basic utilities for writing tests . <nl> + <nl> + = head1 SYNOPSIS <nl> + <nl> + use Test : : Simple tests = > 1 ; <nl> + <nl> + ok ( $ foo eq $ bar , ' foo is bar ' ) ; <nl> + <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + * * If you are unfamiliar with testing B < read Test : : Tutorial > first ! * * <nl> + <nl> + This is an extremely simple , extremely basic module for writing tests <nl> + suitable for CPAN modules and other pursuits . If you wish to do more <nl> + complicated testing , use the Test : : More module ( a drop - in replacement <nl> + for this one ) . <nl> + <nl> + The basic unit of Perl testing is the ok . For each thing you want to <nl> + test your program will print out an " ok " or " not ok " to indicate pass <nl> + or fail . You do this with the ok ( ) function ( see below ) . <nl> + <nl> + The only other constraint is you must pre - declare how many tests you <nl> + plan to run . This is in case something goes horribly wrong during the <nl> + test and your test program aborts , or skips a test or whatever . You <nl> + do this like so : <nl> + <nl> + use Test : : Simple tests = > 23 ; <nl> + <nl> + You must have a plan . <nl> + <nl> + <nl> + = over 4 <nl> + <nl> + = item B < ok > <nl> + <nl> + ok ( $ foo eq $ bar , $ name ) ; <nl> + ok ( $ foo eq $ bar ) ; <nl> + <nl> + ok ( ) is given an expression ( in this case C < $ foo eq $ bar > ) . If it ' s <nl> + true , the test passed . If it ' s false , it didn ' t . That ' s about it . <nl> + <nl> + ok ( ) prints out either " ok " or " not ok " along with a test number ( it <nl> + keeps track of that for you ) . <nl> + <nl> + # This produces " ok 1 - Hell not yet frozen over " ( or not ok ) <nl> + ok ( get_temperature ( $ hell ) > 0 , ' Hell not yet frozen over ' ) ; <nl> + <nl> + If you provide a $ name , that will be printed along with the " ok / not <nl> + ok " to make it easier to find your test when it fails ( just search for <nl> + the name ) . It also makes it easier for the next guy to understand <nl> + what your test is for . It ' s highly recommended you use test names . <nl> + <nl> + All tests are run in scalar context . So this : <nl> + <nl> + ok ( @ stuff , ' I have some stuff ' ) ; <nl> + <nl> + will do what you mean ( fail if stuff is empty ) . <nl> + <nl> + = cut <nl> + <nl> + sub ok ( $ ; $ ) { <nl> + $ CLASS - > builder - > ok ( @ _ ) ; <nl> + } <nl> + <nl> + <nl> + = back <nl> + <nl> + Test : : Simple will start by printing the number of tests run in the form <nl> + " 1 . . M " ( so " 1 . . 5 " means you ' re going to run 5 tests ) . This strange <nl> + format lets Test : : Harness know how many tests you plan on running in <nl> + case something goes horribly wrong . <nl> + <nl> + If all your tests passed , Test : : Simple will exit with zero ( which is <nl> + normal ) . If anything failed it will exit with how many failed . If <nl> + you run fewer ( or more ) tests than you planned , the missing ( or extras ) <nl> + will be considered failures . If no tests were ever run Test : : Simple <nl> + will throw a warning and exit with 255 . If the test died , even after <nl> + having successfully completed all its tests , it will still be <nl> + considered a failure and will exit with 255 . <nl> + <nl> + So the exit codes are . . . <nl> + <nl> + 0 all tests successful <nl> + 255 test died or all passed but wrong # of tests run <nl> + any other number how many failed ( including missing or extras ) <nl> + <nl> + If you fail more than 254 tests , it will be reported as 254 . <nl> + <nl> + This module is by no means trying to be a complete testing system . <nl> + It ' s just to get you started . Once you ' re off the ground it ' s <nl> + recommended you look at L < Test : : More > . <nl> + <nl> + <nl> + = head1 EXAMPLE <nl> + <nl> + Here ' s an example of a simple . t file for the fictional Film module . <nl> + <nl> + use Test : : Simple tests = > 5 ; <nl> + <nl> + use Film ; # What you ' re testing . <nl> + <nl> + my $ btaste = Film - > new ( { Title = > ' Bad Taste ' , <nl> + Director = > ' Peter Jackson ' , <nl> + Rating = > ' R ' , <nl> + NumExplodingSheep = > 1 <nl> + } ) ; <nl> + ok ( defined ( $ btaste ) & & ref $ btaste eq ' Film ' , ' new ( ) works ' ) ; <nl> + <nl> + ok ( $ btaste - > Title eq ' Bad Taste ' , ' Title ( ) get ' ) ; <nl> + ok ( $ btaste - > Director eq ' Peter Jackson ' , ' Director ( ) get ' ) ; <nl> + ok ( $ btaste - > Rating eq ' R ' , ' Rating ( ) get ' ) ; <nl> + ok ( $ btaste - > NumExplodingSheep = = 1 , ' NumExplodingSheep ( ) get ' ) ; <nl> + <nl> + It will produce output like this : <nl> + <nl> + 1 . . 5 <nl> + ok 1 - new ( ) works <nl> + ok 2 - Title ( ) get <nl> + ok 3 - Director ( ) get <nl> + not ok 4 - Rating ( ) get <nl> + # Failed test ' Rating ( ) get ' <nl> + # in t / film . t at line 14 . <nl> + ok 5 - NumExplodingSheep ( ) get <nl> + # Looks like you failed 1 tests of 5 <nl> + <nl> + Indicating the Film : : Rating ( ) method is broken . <nl> + <nl> + <nl> + = head1 CAVEATS <nl> + <nl> + Test : : Simple will only report a maximum of 254 failures in its exit <nl> + code . If this is a problem , you probably have a huge test script . <nl> + Split it into multiple files . ( Otherwise blame the Unix folks for <nl> + using an unsigned short integer as the exit status ) . <nl> + <nl> + Because VMS ' s exit codes are much , much different than the rest of the <nl> + universe , and perl does horrible mangling to them that gets in my way , <nl> + it works like this on VMS . <nl> + <nl> + 0 SS $ _NORMAL all tests successful <nl> + 4 SS $ _ABORT something went wrong <nl> + <nl> + Unfortunately , I can ' t differentiate any further .
<nl> + <nl> + <nl> + = head1 NOTES <nl> + <nl> + Test : : Simple is B < explicitly > tested all the way back to perl 5 . 004 . <nl> + <nl> + Test : : Simple is thread - safe in perl 5 . 8 . 0 and up . <nl> + <nl> + = head1 HISTORY <nl> + <nl> + This module was conceived while talking with Tony Bowden in his <nl> + kitchen one night about the problems I was having writing some really <nl> + complicated feature into the new Testing module . He observed that the <nl> + main problem is not dealing with these edge cases but that people hate <nl> + to write tests B < at all > . What was needed was a dead simple module <nl> + that took all the hard work out of testing and was really , really easy <nl> + to learn . Paul Johnson simultaneously had this idea ( unfortunately , <nl> + he wasn ' t in Tony ' s kitchen ) . This is it . <nl> + <nl> + <nl> + = head1 SEE ALSO <nl> + <nl> + = over 4 <nl> + <nl> + = item L < Test : : More > <nl> + <nl> + More testing functions ! Once you outgrow Test : : Simple , look at <nl> + Test : : More . Test : : Simple is 100 % forward compatible with Test : : More <nl> + ( i . e . you can just use Test : : More instead of Test : : Simple in your <nl> + programs and things will still work ) . <nl> + <nl> + = item L < Test > <nl> + <nl> + The original Perl testing module . <nl> + <nl> + = item L < Test : : Unit > <nl> + <nl> + Elaborate unit testing . <nl> + <nl> + = item L < Test : : Inline > , L < SelfTest > <nl> + <nl> + Embed tests in your code ! <nl> + <nl> + = item L < Test : : Harness > <nl> + <nl> + Interprets the output of your test program . <nl> + <nl> + = back <nl> + <nl> + <nl> + = head1 AUTHORS <nl> + <nl> + Idea by Tony Bowden and Paul Johnson , code by Michael G Schwern <nl> + E < lt > schwern @ pobox . comE < gt > , wardrobe by Calvin Klein . <nl> + <nl> + <nl> + = head1 COPYRIGHT <nl> + <nl> + Copyright 2001 , 2002 , 2004 by Michael G Schwern E < lt > schwern @ pobox . comE < gt > . <nl> + <nl> + This program is free software ; you can redistribute it and / or <nl> + modify it under the same terms as Perl itself . <nl> + <nl> + See F < http : / / www . perl . com / perl / misc / Artistic . html > <nl> + <nl> + = cut <nl> + <nl> + 1 ; <nl> new file mode 100644 <nl> index 00000000000 . . b730918c755 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / lib / Test / Tutorial . pod <nl> <nl> + = head1 NAME <nl> + <nl> + Test : : Tutorial - A tutorial about writing really basic tests <nl> + <nl> + = head1 DESCRIPTION <nl> + <nl> + <nl> + I < AHHHHHHH ! ! ! ! NOT TESTING ! Anything but testing ! <nl> + Beat me , whip me , send me to Detroit , but don ' t make <nl> + me write tests ! > <nl> + <nl> + I < * sob * > <nl> + <nl> + I < Besides , I don ' t know how to write the damned things . > <nl> + <nl> + <nl> + Is this you ? Is writing tests right up there with writing <nl> + documentation and having your fingernails pulled out ? Did you open up <nl> + a test and read <nl> + <nl> + # # # # # # # # We start with some black magic <nl> + <nl> + and decide that ' s quite enough for you ? <nl> + <nl> + It ' s ok . That ' s all gone now . We ' ve done all the black magic for <nl> + you . And here are the tricks . . . <nl> + <nl> + <nl> + = head2 Nuts and bolts of testing . <nl> + <nl> + Here ' s the most basic test program . <nl> + <nl> + # ! / usr / bin / perl - w <nl> + <nl> + print " 1 . . 1 \ n " ; <nl> + <nl> + print 1 + 1 = = 2 ? " ok 1 \ n " : " not ok 1 \ n " ; <nl> + <nl> + since 1 + 1 is 2 , it prints : <nl> + <nl> + 1 . . 
1 <nl> + ok 1 <nl> + <nl> + What this says is : C < 1 . . 1 > " I ' m going to run one test . " [ 1 ] C < ok 1 > <nl> + " The first test passed " . And that ' s about all magic there is to <nl> + testing . Your basic unit of testing is the I < ok > . For each thing you <nl> + test , an C < ok > is printed . Simple . B < Test : : Harness > interprets your test <nl> + results to determine if you succeeded or failed ( more on that later ) . <nl> + <nl> + Writing all these print statements rapidly gets tedious . Fortunately , <nl> + there ' s B < Test : : Simple > . It has one function , C < ok ( ) > . <nl> + <nl> + # ! / usr / bin / perl - w <nl> + <nl> + use Test : : Simple tests = > 1 ; <nl> + <nl> + ok ( 1 + 1 = = 2 ) ; <nl> + <nl> + and that does the same thing as the code above . C < ok ( ) > is the backbone <nl> + of Perl testing , and we ' ll be using it instead of roll - your - own from <nl> + here on . If C < ok ( ) > gets a true value , the test passes . False , it <nl> + fails . <nl> + <nl> + # ! / usr / bin / perl - w <nl> + <nl> + use Test : : Simple tests = > 2 ; <nl> + ok ( 1 + 1 = = 2 ) ; <nl> + ok ( 2 + 2 = = 5 ) ; <nl> + <nl> + from that comes <nl> + <nl> + 1 . . 2 <nl> + ok 1 <nl> + not ok 2 <nl> + # Failed test ( test . pl at line 5 ) <nl> + # Looks like you failed 1 tests of 2 . <nl> + <nl> + C < 1 . . 2 > " I ' m going to run two tests . " This number is used to ensure <nl> + your test program ran all the way through and didn ' t die or skip some <nl> + tests . C < ok 1 > " The first test passed . " C < not ok 2 > " The second test <nl> + failed " . Test : : Simple helpfully prints out some extra commentary about <nl> + your tests . <nl> + <nl> + It ' s not scary . Come , hold my hand . We ' re going to give an example <nl> + of testing a module . For our example , we ' ll be testing a date <nl> + library , B < Date : : ICal > . It ' s on CPAN , so download a copy and follow <nl> + along . [ 2 ] <nl> + <nl> + <nl> + = head2 Where to start ? <nl> + <nl> + This is the hardest part of testing , where do you start ? People often <nl> + get overwhelmed at the apparent enormity of the task of testing a <nl> + whole module . Best place to start is at the beginning . Date : : ICal is <nl> + an object - oriented module , and that means you start by making an <nl> + object . So we test C < new ( ) > . <nl> + <nl> + # ! / usr / bin / perl - w <nl> + <nl> + use Test : : Simple tests = > 2 ; <nl> + <nl> + use Date : : ICal ; <nl> + <nl> + my $ ical = Date : : ICal - > new ; # create an object <nl> + ok ( defined $ ical ) ; # check that we got something <nl> + ok ( $ ical - > isa ( ' Date : : ICal ' ) ) ; # and it ' s the right class <nl> + <nl> + run that and you should get : <nl> + <nl> + 1 . . 2 <nl> + ok 1 <nl> + ok 2 <nl> + <nl> + congratulations , you ' ve written your first useful test . <nl> + <nl> + <nl> + = head2 Names <nl> + <nl> + That output isn ' t terribly descriptive , is it ? When you have two <nl> + tests you can figure out which one is # 2 , but what if you have 102 ? <nl> + <nl> + Each test can be given a little descriptive name as the second <nl> + argument to C < ok ( ) > . <nl> + <nl> + use Test : : Simple tests = > 2 ; <nl> + <nl> + ok ( defined $ ical , ' new ( ) returned something ' ) ; <nl> + ok ( $ ical - > isa ( ' Date : : ICal ' ) , " and it ' s the right class " ) ; <nl> + <nl> + So now you ' d see . . . <nl> + <nl> + 1 . . 
2 <nl> + ok 1 - new ( ) returned something <nl> + ok 2 - and it ' s the right class <nl> + <nl> + <nl> + = head2 Test the manual <nl> + <nl> + Simplest way to build up a decent testing suite is to just test what <nl> + the manual says it does . [ 3 ] Let ' s pull something out of the <nl> + L < Date : : ICal / SYNOPSIS > and test that all its bits work . <nl> + <nl> + # ! / usr / bin / perl - w <nl> + <nl> + use Test : : Simple tests = > 8 ; <nl> + <nl> + use Date : : ICal ; <nl> + <nl> + $ ical = Date : : ICal - > new ( year = > 1964 , month = > 10 , day = > 16 , <nl> + hour = > 16 , min = > 12 , sec = > 47 , <nl> + tz = > ' 0530 ' ) ; <nl> + <nl> + ok ( defined $ ical , ' new ( ) returned something ' ) ; <nl> + ok ( $ ical - > isa ( ' Date : : ICal ' ) , " and it ' s the right class " ) ; <nl> + ok ( $ ical - > sec = = 47 , ' sec ( ) ' ) ; <nl> + ok ( $ ical - > min = = 12 , ' min ( ) ' ) ; <nl> + ok ( $ ical - > hour = = 16 , ' hour ( ) ' ) ; <nl> + ok ( $ ical - > day = = 17 , ' day ( ) ' ) ; <nl> + ok ( $ ical - > month = = 10 , ' month ( ) ' ) ; <nl> + ok ( $ ical - > year = = 1964 , ' year ( ) ' ) ; <nl> + <nl> + run that and you get : <nl> + <nl> + 1 . . 8 <nl> + ok 1 - new ( ) returned something <nl> + ok 2 - and it ' s the right class <nl> + ok 3 - sec ( ) <nl> + ok 4 - min ( ) <nl> + ok 5 - hour ( ) <nl> + not ok 6 - day ( ) <nl> + # Failed test ( - at line 16 ) <nl> + ok 7 - month ( ) <nl> + ok 8 - year ( ) <nl> + # Looks like you failed 1 tests of 8 . <nl> + <nl> + Whoops , a failure ! [ 4 ] Test : : Simple helpfully lets us know on what line <nl> + the failure occurred , but not much else . We were supposed to get 17 , <nl> + but we didn ' t . What did we get ? ? Dunno . We ' ll have to re - run the <nl> + test in the debugger or throw in some print statements to find out . <nl> + <nl> + Instead , we ' ll switch from B < Test : : Simple > to B < Test : : More > . B < Test : : More > <nl> + does everything B < Test : : Simple > does , and more ! In fact , Test : : More does <nl> + things I < exactly > the way Test : : Simple does . You can literally swap <nl> + Test : : Simple out and put Test : : More in its place . That ' s just what <nl> + we ' re going to do . <nl> + <nl> + Test : : More does more than Test : : Simple . The most important difference <nl> + at this point is it provides more informative ways to say " ok " . <nl> + Although you can write almost any test with a generic C < ok ( ) > , it <nl> + can ' t tell you what went wrong . Instead , we ' ll use the C < is ( ) > <nl> + function , which lets us declare that something is supposed to be the <nl> + same as something else : <nl> + <nl> + # ! / usr / bin / perl - w <nl> + <nl> + use Test : : More tests = > 8 ; <nl> + <nl> + use Date : : ICal ; <nl> + <nl> + $ ical = Date : : ICal - > new ( year = > 1964 , month = > 10 , day = > 16 , <nl> + hour = > 16 , min = > 12 , sec = > 47 , <nl> + tz = > ' 0530 ' ) ; <nl> + <nl> + ok ( defined $ ical , ' new ( ) returned something ' ) ; <nl> + ok ( $ ical - > isa ( ' Date : : ICal ' ) , " and it ' s the right class " ) ; <nl> + is ( $ ical - > sec , 47 , ' sec ( ) ' ) ; <nl> + is ( $ ical - > min , 12 , ' min ( ) ' ) ; <nl> + is ( $ ical - > hour , 16 , ' hour ( ) ' ) ; <nl> + is ( $ ical - > day , 17 , ' day ( ) ' ) ; <nl> + is ( $ ical - > month , 10 , ' month ( ) ' ) ; <nl> + is ( $ ical - > year , 1964 , ' year ( ) ' ) ; <nl> + <nl> + " Is C < $ ical - E < gt > sec > 47 ? " " Is C < $ ical - E < gt > min > 12 ? 
" With C < is ( ) > in place , <nl> + you get some more information <nl> + <nl> + 1 . . 8 <nl> + ok 1 - new ( ) returned something <nl> + ok 2 - and it ' s the right class <nl> + ok 3 - sec ( ) <nl> + ok 4 - min ( ) <nl> + ok 5 - hour ( ) <nl> + not ok 6 - day ( ) <nl> + # Failed test ( - at line 16 ) <nl> + # got : ' 16 ' <nl> + # expected : ' 17 ' <nl> + ok 7 - month ( ) <nl> + ok 8 - year ( ) <nl> + # Looks like you failed 1 tests of 8 . <nl> + <nl> + letting us know that C < $ ical - E < gt > day > returned 16 , but we expected 17 . A <nl> + quick check shows that the code is working fine , we made a mistake <nl> + when writing up the tests . Just change it to : <nl> + <nl> + is ( $ ical - > day , 16 , ' day ( ) ' ) ; <nl> + <nl> + and everything works . <nl> + <nl> + So any time you ' re doing a " this equals that " sort of test , use C < is ( ) > . <nl> + It even works on arrays . The test is always in scalar context , so you <nl> + can test how many elements are in a list this way . [ 5 ] <nl> + <nl> + is ( @ foo , 5 , ' foo has 5 elements ' ) ; <nl> + <nl> + <nl> + = head2 Sometimes the tests are wrong <nl> + <nl> + Which brings us to a very important lesson . Code has bugs . Tests are <nl> + code . Ergo , tests have bugs . A failing test could mean a bug in the <nl> + code , but don ' t discount the possibility that the test is wrong . <nl> + <nl> + On the flip side , don ' t be tempted to prematurely declare a test <nl> + incorrect just because you ' re having trouble finding the bug . <nl> + Invalidating a test isn ' t something to be taken lightly , and don ' t use <nl> + it as a cop out to avoid work . <nl> + <nl> + <nl> + = head2 Testing lots of values <nl> + <nl> + We ' re going to be wanting to test a lot of dates here , trying to trick <nl> + the code with lots of different edge cases . Does it work before 1970 ? <nl> + After 2038 ? Before 1904 ? Do years after 10 , 000 give it trouble ? <nl> + Does it get leap years right ? We could keep repeating the code above , <nl> + or we could set up a little try / expect loop . <nl> + <nl> + use Test : : More tests = > 32 ; <nl> + use Date : : ICal ; <nl> + <nl> + my % ICal_Dates = ( <nl> + # An ICal string And the year , month , date <nl> + # hour , minute and second we expect . <nl> + ' 19971024T120000 ' = > # from the docs . <nl> + [ 1997 , 10 , 24 , 12 , 0 , 0 ] , <nl> + ' 20390123T232832 ' = > # after the Unix epoch <nl> + [ 2039 , 1 , 23 , 23 , 28 , 32 ] , <nl> + ' 19671225T000000 ' = > # before the Unix epoch <nl> + [ 1967 , 12 , 25 , 0 , 0 , 0 ] , <nl> + ' 18990505T232323 ' = > # before the MacOS epoch <nl> + [ 1899 , 5 , 5 , 23 , 23 , 23 ] , <nl> + ) ; <nl> + <nl> + <nl> + while ( my ( $ ical_str , $ expect ) = each % ICal_Dates ) { <nl> + my $ ical = Date : : ICal - > new ( ical = > $ ical_str ) ; <nl> + <nl> + ok ( defined $ ical , " new ( ical = > ' $ ical_str ' ) " ) ; <nl> + ok ( $ ical - > isa ( ' Date : : ICal ' ) , " and it ' s the right class " ) ; <nl> + <nl> + is ( $ ical - > year , $ expect - > [ 0 ] , ' year ( ) ' ) ; <nl> + is ( $ ical - > month , $ expect - > [ 1 ] , ' month ( ) ' ) ; <nl> + is ( $ ical - > day , $ expect - > [ 2 ] , ' day ( ) ' ) ; <nl> + is ( $ ical - > hour , $ expect - > [ 3 ] , ' hour ( ) ' ) ; <nl> + is ( $ ical - > min , $ expect - > [ 4 ] , ' min ( ) ' ) ; <nl> + is ( $ ical - > sec , $ expect - > [ 5 ] , ' sec ( ) ' ) ; <nl> + } <nl> + <nl> + So now we can test bunches of dates by just adding them to <nl> + C < % ICal_Dates > . 
Now that it ' s less work to test with more dates , you ' ll <nl> + be inclined to just throw more in as you think of them . <nl> + Only problem is , every time we add to that we have to keep adjusting <nl> + the C < use Test : : More tests = E < gt > # # > line . That can rapidly get <nl> + annoying . There ' s two ways to make this work better . <nl> + <nl> + First , we can calculate the plan dynamically using the C < plan ( ) > <nl> + function . <nl> + <nl> + use Test : : More ; <nl> + use Date : : ICal ; <nl> + <nl> + my % ICal_Dates = ( <nl> + . . . same as before . . . <nl> + ) ; <nl> + <nl> + # For each key in the hash we ' re running 8 tests . <nl> + plan tests = > keys % ICal_Dates * 8 ; <nl> + <nl> + Or to be even more flexible , we use C < no_plan > . This means we ' re just <nl> + running some tests , don ' t know how many . [ 6 ] <nl> + <nl> + use Test : : More ' no_plan ' ; # instead of tests = > 32 <nl> + <nl> + now we can just add tests and not have to do all sorts of math to <nl> + figure out how many we ' re running . <nl> + <nl> + <nl> + = head2 Informative names <nl> + <nl> + Take a look at this line here <nl> + <nl> + ok ( defined $ ical , " new ( ical = > ' $ ical_str ' ) " ) ; <nl> + <nl> + we ' ve added more detail about what we ' re testing and the ICal string <nl> + itself we ' re trying out to the name . So you get results like : <nl> + <nl> + ok 25 - new ( ical = > ' 19971024T120000 ' ) <nl> + ok 26 - and it ' s the right class <nl> + ok 27 - year ( ) <nl> + ok 28 - month ( ) <nl> + ok 29 - day ( ) <nl> + ok 30 - hour ( ) <nl> + ok 31 - min ( ) <nl> + ok 32 - sec ( ) <nl> + <nl> + if something in there fails , you ' ll know which one it was and that <nl> + will make tracking down the problem easier . So try to put a bit of <nl> + debugging information into the test names . <nl> + <nl> + Describe what the tests test , to make debugging a failed test easier <nl> + for you or for the next person who runs your test . <nl> + <nl> + <nl> + = head2 Skipping tests <nl> + <nl> + Poking around in the existing Date : : ICal tests , I found this in <nl> + F < t / 01sanity . t > [ 7 ] <nl> + <nl> + # ! / usr / bin / perl - w <nl> + <nl> + use Test : : More tests = > 7 ; <nl> + use Date : : ICal ; <nl> + <nl> + # Make sure epoch time is being handled sanely . <nl> + my $ t1 = Date : : ICal - > new ( epoch = > 0 ) ; <nl> + is ( $ t1 - > epoch , 0 , " Epoch time of 0 " ) ; <nl> + <nl> + # XXX This will only work on unix systems . <nl> + is ( $ t1 - > ical , ' 19700101Z ' , " epoch to ical " ) ; <nl> + <nl> + is ( $ t1 - > year , 1970 , " year ( ) " ) ; <nl> + is ( $ t1 - > month , 1 , " month ( ) " ) ; <nl> + is ( $ t1 - > day , 1 , " day ( ) " ) ; <nl> + <nl> + # like the tests above , but starting with ical instead of epoch <nl> + my $ t2 = Date : : ICal - > new ( ical = > ' 19700101Z ' ) ; <nl> + is ( $ t2 - > ical , ' 19700101Z ' , " Start of epoch in ICal notation " ) ; <nl> + <nl> + is ( $ t2 - > epoch , 0 , " and back to ICal " ) ; <nl> + <nl> + The beginning of the epoch is different on most non - Unix operating <nl> + systems [ 8 ] . Even though Perl smooths out the differences for the most <nl> + part , certain ports do it differently . MacPerl is one off the top of <nl> + my head . [ 9 ] We I < know > this will never work on MacOS . So rather than <nl> + just putting a comment in the test , we can explicitly say it ' s never <nl> + going to work and skip the test . 
<nl> + <nl> + use Test : : More tests = > 7 ; <nl> + use Date : : ICal ; <nl> + <nl> + # Make sure epoch time is being handled sanely . <nl> + my $ t1 = Date : : ICal - > new ( epoch = > 0 ) ; <nl> + is ( $ t1 - > epoch , 0 , " Epoch time of 0 " ) ; <nl> + <nl> + SKIP : { <nl> + skip ( ' epoch to ICal not working on MacOS ' , 6 ) <nl> + if $ ^ O eq ' MacOS ' ; <nl> + <nl> + is ( $ t1 - > ical , ' 19700101Z ' , " epoch to ical " ) ; <nl> + <nl> + is ( $ t1 - > year , 1970 , " year ( ) " ) ; <nl> + is ( $ t1 - > month , 1 , " month ( ) " ) ; <nl> + is ( $ t1 - > day , 1 , " day ( ) " ) ; <nl> + <nl> + # like the tests above , but starting with ical instead of epoch <nl> + my $ t2 = Date : : ICal - > new ( ical = > ' 19700101Z ' ) ; <nl> + is ( $ t2 - > ical , ' 19700101Z ' , " Start of epoch in ICal notation " ) ; <nl> + <nl> + is ( $ t2 - > epoch , 0 , " and back to ICal " ) ; <nl> + } <nl> + <nl> + A little bit of magic happens here . When running on anything but <nl> + MacOS , all the tests run normally . But when on MacOS , C < skip ( ) > causes <nl> + the entire contents of the SKIP block to be jumped over . It ' s never <nl> + run . Instead , it prints special output that tells Test : : Harness that <nl> + the tests have been skipped . <nl> + <nl> + 1 . . 7 <nl> + ok 1 - Epoch time of 0 <nl> + ok 2 # skip epoch to ICal not working on MacOS <nl> + ok 3 # skip epoch to ICal not working on MacOS <nl> + ok 4 # skip epoch to ICal not working on MacOS <nl> + ok 5 # skip epoch to ICal not working on MacOS <nl> + ok 6 # skip epoch to ICal not working on MacOS <nl> + ok 7 # skip epoch to ICal not working on MacOS <nl> + <nl> + This means your tests won ' t fail on MacOS . This means less emails <nl> + from MacPerl users telling you about failing tests that you know will <nl> + never work . You ' ve got to be careful with skip tests . These are for <nl> + tests which don ' t work and I < never will > . It is not for skipping <nl> + genuine bugs ( we ' ll get to that in a moment ) . <nl> + <nl> + The tests are wholly and completely skipped . [ 10 ] This will work . <nl> + <nl> + SKIP : { <nl> + skip ( " I don ' t wanna die ! " ) ; <nl> + <nl> + die , die , die , die , die ; <nl> + } <nl> + <nl> + <nl> + = head2 Todo tests <nl> + <nl> + Thumbing through the Date : : ICal man page , I came across this : <nl> + <nl> + ical <nl> + <nl> + $ ical_string = $ ical - > ical ; <nl> + <nl> + Retrieves , or sets , the date on the object , using any <nl> + valid ICal date / time string . <nl> + <nl> + " Retrieves or sets " . Hmmm , didn ' t see a test for using C < ical ( ) > to set <nl> + the date in the Date : : ICal test suite . So I ' ll write one . <nl> + <nl> + use Test : : More tests = > 1 ; <nl> + use Date : : ICal ; <nl> + <nl> + my $ ical = Date : : ICal - > new ; <nl> + $ ical - > ical ( ' 20201231Z ' ) ; <nl> + is ( $ ical - > ical , ' 20201231Z ' , ' Setting via ical ( ) ' ) ; <nl> + <nl> + run that and I get <nl> + <nl> + 1 . . 1 <nl> + not ok 1 - Setting via ical ( ) <nl> + # Failed test ( - at line 6 ) <nl> + # got : ' 20010814T233649Z ' <nl> + # expected : ' 20201231Z ' <nl> + # Looks like you failed 1 tests of 1 . <nl> + <nl> + Whoops ! Looks like it ' s unimplemented . Let ' s assume we don ' t have <nl> + the time to fix this . [ 11 ] Normally , you ' d just comment out the test <nl> + and put a note in a todo list somewhere . Instead , we ' re going to <nl> + explicitly state " this test will fail " by wrapping it in a C < TODO > block . 
<nl> + <nl> + use Test : : More tests = > 1 ; <nl> + <nl> + TODO : { <nl> + local $ TODO = ' ical ( $ ical ) not yet implemented ' ; <nl> + <nl> + my $ ical = Date : : ICal - > new ; <nl> + $ ical - > ical ( ' 20201231Z ' ) ; <nl> + <nl> + is ( $ ical - > ical , ' 20201231Z ' , ' Setting via ical ( ) ' ) ; <nl> + } <nl> + <nl> + Now when you run , it ' s a little different : <nl> + <nl> + 1 . . 1 <nl> + not ok 1 - Setting via ical ( ) # TODO ical ( $ ical ) not yet implemented <nl> + # got : ' 20010822T201551Z ' <nl> + # expected : ' 20201231Z ' <nl> + <nl> + Test : : More doesn ' t say " Looks like you failed 1 tests of 1 " . That ' # <nl> + TODO ' tells Test : : Harness " this is supposed to fail " and it treats a <nl> + failure as a successful test . So you can write tests even before <nl> + you ' ve fixed the underlying code . <nl> + <nl> + If a TODO test passes , Test : : Harness will report it " UNEXPECTEDLY <nl> + SUCCEEDED " . When that happens , you simply remove the TODO block with <nl> + C < local $ TODO > and turn it into a real test . <nl> + <nl> + <nl> + = head2 Testing with taint mode . <nl> + <nl> + Taint mode is a funny thing . It ' s the globalest of all global <nl> + features . Once you turn it on , it affects I < all > code in your program <nl> + and I < all > modules used ( and all the modules they use ) . If a single <nl> + piece of code isn ' t taint clean , the whole thing explodes . With that <nl> + in mind , it ' s very important to ensure your module works under taint <nl> + mode . <nl> + <nl> + It ' s very simple to have your tests run under taint mode . Just throw <nl> + a C < - T > into the C < # ! > line . Test : : Harness will read the switches <nl> + in C < # ! > and use them to run your tests . <nl> + <nl> + # ! / usr / bin / perl - Tw <nl> + <nl> + . . . test normally here . . . <nl> + <nl> + So when you say C < make test > it will be run with taint mode and <nl> + warnings on . <nl> + <nl> + <nl> + = head1 FOOTNOTES <nl> + <nl> + = over 4 <nl> + <nl> + = item 1 <nl> + <nl> + The first number doesn ' t really mean anything , but it has to be 1 . <nl> + It ' s the second number that ' s important . <nl> + <nl> + = item 2 <nl> + <nl> + For those following along at home , I ' m using version 1 . 31 . It has <nl> + some bugs , which is good - - we ' ll uncover them with our tests . <nl> + <nl> + = item 3 <nl> + <nl> + You can actually take this one step further and test the manual <nl> + itself . Have a look at B < Test : : Inline > ( formerly B < Pod : : Tests > ) . <nl> + <nl> + = item 4 <nl> + <nl> + Yes , there ' s a mistake in the test suite . What ! Me , contrived ? <nl> + <nl> + = item 5 <nl> + <nl> + We ' ll get to testing the contents of lists later . <nl> + <nl> + = item 6 <nl> + <nl> + But what happens if your test program dies halfway through ? ! Since we <nl> + didn ' t say how many tests we ' re going to run , how can we know it <nl> + failed ? No problem , Test : : More employs some magic to catch that death <nl> + and turn the test into a failure , even if every test passed up to that <nl> + point . <nl> + <nl> + = item 7 <nl> + <nl> + I cleaned it up a little . <nl> + <nl> + = item 8 <nl> + <nl> + Most Operating Systems record time as the number of seconds since a <nl> + certain date . This date is the beginning of the epoch . Unix ' s starts <nl> + at midnight January 1st , 1970 GMT . <nl> + <nl> + = item 9 <nl> + <nl> + MacOS ' s epoch is midnight January 1st , 1904 . 
VMS ' s is midnight , <nl> + November 17th , 1858 , but vmsperl emulates the Unix epoch so it ' s not a <nl> + problem . <nl> + <nl> + = item 10 <nl> + <nl> + As long as the code inside the SKIP block at least compiles . Please <nl> + don ' t ask how . No , it ' s not a filter . <nl> + <nl> + = item 11 <nl> + <nl> + Do NOT be tempted to use TODO tests as a way to avoid fixing simple <nl> + bugs ! <nl> + <nl> + = back <nl> + <nl> + = head1 AUTHORS <nl> + <nl> + Michael G Schwern E < lt > schwern @ pobox . comE < gt > and the perl - qa dancers ! <nl> + <nl> + = head1 COPYRIGHT <nl> + <nl> + Copyright 2001 by Michael G Schwern E < lt > schwern @ pobox . comE < gt > . <nl> + <nl> + This documentation is free ; you can redistribute it and / or modify it <nl> + under the same terms as Perl itself . <nl> + <nl> + Irrespective of its distribution , all code examples in these files <nl> + are hereby placed into the public domain . You are permitted and <nl> + encouraged to use this code in your own programs for fun <nl> + or for profit as you see fit . A simple comment in the code giving <nl> + credit would be courteous but is not required . <nl> + <nl> + = cut <nl> new file mode 100755 <nl> index 00000000000 . . df9eab48fd6 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / multiversioning . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 13 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + my $ sock2 = $ server - > new_sock ; <nl> + <nl> + ok ( $ sock ! = $ sock2 , " have two different connections open " ) ; <nl> + <nl> + # set large value <nl> + my $ size = 256 * 1024 ; # 256 kB <nl> + my $ bigval = " 0123456789abcdef " x ( $ size / 16 ) ; <nl> + $ bigval = ~ s / ^ 0 / \ [ / ; $ bigval = ~ s / f $ / \ ] / ; <nl> + my $ bigval2 = uc ( $ bigval ) ; <nl> + <nl> + print $ sock " set big 0 0 $ size \ r \ n $ bigval \ r \ n " ; <nl> + is ( scalar < $ sock > , " STORED \ r \ n " , " stored foo " ) ; <nl> + mem_get_is ( $ sock , " big " , $ bigval , " big value got correctly " ) ; <nl> + <nl> + print $ sock " get big \ r \ n " ; <nl> + my $ buf ; <nl> + is ( read ( $ sock , $ buf , $ size / 2 ) , $ size / 2 , " read half the answer back " ) ; <nl> + like ( $ buf , qr / VALUE big / , " buf has big value header in it " ) ; <nl> + like ( $ buf , qr / abcdef / , " buf has some data in it " ) ; <nl> + unlike ( $ buf , qr / abcde \ ] / , " buf doesn ' t yet close " ) ; <nl> + <nl> + # sock2 interrupts ( maybe sock1 is slow ) and deletes stuff : <nl> + print $ sock2 " delete big \ r \ n " ; <nl> + is ( scalar < $ sock2 > , " DELETED \ r \ n " , " deleted big from sock2 while sock1 ' s still reading it " ) ; <nl> + mem_get_is ( $ sock2 , " big " , undef , " nothing from sock2 now . gone from namespace . " ) ; <nl> + print $ sock2 " set big 0 0 $ size \ r \ n $ bigval2 \ r \ n " ; <nl> + is ( scalar < $ sock2 > , " STORED \ r \ n " , " stored big w / val2 " ) ; <nl> + mem_get_is ( $ sock2 , " big " , $ bigval2 , " big value2 got correctly " ) ; <nl> + <nl> + # sock1 resumes reading . . . <nl> + $ buf . = < $ sock > ; <nl> + $ buf . 
= < $ sock > ; <nl> + like ( $ buf , qr / abcde \ ] / , " buf now closes " ) ; <nl> + <nl> + # and if sock1 reads again , it ' s the uppercase version : <nl> + mem_get_is ( $ sock , " big " , $ bigval2 , " big value2 got correctly from sock1 " ) ; <nl> new file mode 100755 <nl> index 00000000000 . . c9b804b40db <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / memcached_suite / noreply . t <nl> <nl> + # ! / usr / bin / perl <nl> + <nl> + use strict ; <nl> + use Test : : More tests = > 9 ; <nl> + use FindBin qw ( $ Bin ) ; <nl> + use lib " $ Bin / lib " ; <nl> + use MemcachedTest ; <nl> + <nl> + <nl> + my $ server = new_memcached ( ) ; <nl> + my $ sock = $ server - > sock ; <nl> + <nl> + <nl> + # Test that commands can take ' noreply ' parameter . <nl> + <nl> + # These tests aren ' t relevant to RethinkDB : <nl> + # print $ sock " flush_all noreply \ r \ n " ; <nl> + # print $ sock " flush_all 0 noreply \ r \ n " ; <nl> + <nl> + # print $ sock " verbosity 0 noreply \ r \ n " ; <nl> + <nl> + print $ sock " add noreply : foo 0 0 1 noreply \ r \ n1 \ r \ n " ; <nl> + mem_get_is ( $ sock , " noreply : foo " , " 1 " ) ; <nl> + <nl> + print $ sock " set noreply : foo 0 0 1 noreply \ r \ n2 \ r \ n " ; <nl> + mem_get_is ( $ sock , " noreply : foo " , " 2 " ) ; <nl> + <nl> + print $ sock " replace noreply : foo 0 0 1 noreply \ r \ n3 \ r \ n " ; <nl> + mem_get_is ( $ sock , " noreply : foo " , " 3 " ) ; <nl> + <nl> + print $ sock " append noreply : foo 0 0 1 noreply \ r \ n4 \ r \ n " ; <nl> + mem_get_is ( $ sock , " noreply : foo " , " 34 " ) ; <nl> + <nl> + print $ sock " prepend noreply : foo 0 0 1 noreply \ r \ n5 \ r \ n " ; <nl> + my @ result = mem_gets ( $ sock , " noreply : foo " ) ; <nl> + ok ( $ result [ 1 ] eq " 534 " ) ; <nl> + <nl> + print $ sock " cas noreply : foo 0 0 1 $ result [ 0 ] noreply \ r \ n6 \ r \ n " ; <nl> + mem_get_is ( $ sock , " noreply : foo " , " 6 " ) ; <nl> + <nl> + print $ sock " incr noreply : foo 3 noreply \ r \ n " ; <nl> + mem_get_is ( $ sock , " noreply : foo " , " 9 " ) ; <nl> + <nl> + print $ sock " decr noreply : foo 2 noreply \ r \ n " ; <nl> + mem_get_is ( $ sock , " noreply : foo " , " 7 " ) ; <nl> + <nl> + print $ sock " delete noreply : foo noreply \ r \ n " ; <nl> + mem_get_is ( $ sock , " noreply : foo " ) ; <nl> + <nl> new file mode 100755 <nl> index 00000000000 . . bdd75c76ef8 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / multi_serial_mix . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import multiprocessing , time , pickle <nl> + import memcached_workload_common , serial_mix <nl> + from vcoptparse import * <nl> + <nl> + def child ( opts , log_path , load , save ) : <nl> + # This is run in a separate process <nl> + import sys <nl> + sys . stdout = sys . stderr = file ( log_path , " w " ) <nl> + if load is None : <nl> + clone , deleted = { } , set ( ) <nl> + else : <nl> + print " Loading from % r . . . " % load <nl> + with open ( load ) as f : <nl> + clone , deleted = pickle . load ( f ) <nl> + print " Starting test against server at % s : % d . . . " % opts [ " address " ] <nl> + with memcached_workload_common . make_memcache_connection ( opts ) as mc : <nl> + serial_mix . test ( opts , mc , clone , deleted ) <nl> + if save is not None : <nl> + print " Saving to % r . . . 
" % save <nl> + with open ( save , " w " ) as f : <nl> + pickle . dump ( ( clone , deleted ) , f ) <nl> + print " Done with test . " <nl> + <nl> + op = serial_mix . option_parser_for_serial_mix ( ) <nl> + op [ " num_testers " ] = IntFlag ( " - - num - testers " , 16 ) <nl> + op [ " load " ] = StringFlag ( " - - load " , None ) <nl> + op [ " save " ] = StringFlag ( " - - save " , None ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + shutdown_grace_period = 15 <nl> + <nl> + tester_log_dir = " multi_serial_mix_out " <nl> + if not os . path . isdir ( tester_log_dir ) : os . mkdir ( tester_log_dir ) <nl> + <nl> + processes = [ ] <nl> + try : <nl> + print " Starting % d child processes . . . " % opts [ " num_testers " ] <nl> + print " Writing output from child processes to % r " % tester_log_dir <nl> + <nl> + for id in xrange ( opts [ " num_testers " ] ) : <nl> + <nl> + log_path = os . path . join ( tester_log_dir , " % d . txt " % id ) <nl> + load_path = opts [ " load " ] + " _ % d " % id if opts [ " load " ] is not None else None <nl> + save_path = opts [ " save " ] + " _ % d " % id if opts [ " save " ] is not None else None <nl> + <nl> + opts2 = dict ( opts ) <nl> + opts2 [ " keysuffix " ] = " _ % d " % id # Prevent collisions between tests <nl> + <nl> + process = multiprocessing . Process ( target = child , args = ( opts2 , log_path , load_path , save_path ) ) <nl> + process . start ( ) <nl> + <nl> + processes . append ( ( process , id ) ) <nl> + <nl> + print " Waiting for child processes . . . " <nl> + <nl> + start_time = time . time ( ) <nl> + def time_remaining ( ) : <nl> + time_elapsed = time . time ( ) - start_time <nl> + # Give subprocesses lots of extra time <nl> + return opts [ " duration " ] * 2 - time_elapsed + 1 <nl> + <nl> + for ( process , id ) in processes : <nl> + tr = time_remaining ( ) <nl> + if tr < = 0 : tr = shutdown_grace_period <nl> + process . join ( tr ) <nl> + <nl> + stuck = sorted ( id for ( process , id ) in processes <nl> + if process . is_alive ( ) ) <nl> + failed = sorted ( id for ( process , id ) in processes <nl> + if not process . is_alive ( ) and process . exitcode ! = 0 ) <nl> + <nl> + if stuck or failed : <nl> + if len ( stuck ) = = opts [ " num_testers " ] : <nl> + raise ValueError ( " All % d processes did not finish in time . " % opts [ " num_testers " ] ) <nl> + elif len ( failed ) = = opts [ " num_testers " ] : <nl> + raise ValueError ( " All % d processes failed . " % opts [ " num_testers " ] ) <nl> + else : <nl> + raise ValueError ( " Of processes [ 1 . . . % d ] , the following did not finish in time : " \ <nl> + " % s and the following failed : % s " % ( opts [ " num_testers " ] , stuck , failed ) ) <nl> + <nl> + finally : <nl> + for ( process , id ) in processes : <nl> + if process . is_alive ( ) : <nl> + process . terminate ( ) <nl> + <nl> + print " Done . " <nl> new file mode 100755 <nl> index 00000000000 . . 7134426b262 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / pipeline . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import sys , os , random <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common , time <nl> + from vcoptparse import * <nl> + <nl> + # " I am a string " - > [ " I a " , " m a s " , " trin " , " g " ] <nl> + def rand_split ( string , nsub_strings ) : <nl> + cutoffs = random . 
sample ( range ( len ( string ) ) , nsub_strings ) ; <nl> + cutoffs . sort ( ) ; <nl> + cutoffs . insert ( 0 , 0 ) <nl> + cutoffs . append ( len ( string ) ) <nl> + strings = [ ] <nl> + for ( start , end ) in zip ( cutoffs [ 0 : len ( cutoffs ) - 1 ] , cutoffs [ 1 : len ( cutoffs ) ] ) : <nl> + strings . append ( string [ start : end ] ) <nl> + <nl> + return strings <nl> + <nl> + op = memcached_workload_common . option_parser_for_socket ( ) <nl> + op [ " chunk_size " ] = IntFlag ( " - - chunk - size " , 10 ) <nl> + op [ " num_ints " ] = IntFlag ( " - - num - ints " , 1000 ) <nl> + op [ " num_chunks " ] = IntFlag ( " - - num - chunks " , 50 ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_socket_connection ( opts ) as s : <nl> + <nl> + ints = range ( opts [ " num_ints " ] ) <nl> + <nl> + print " Set time " <nl> + <nl> + command_string = ' ' <nl> + for int in ints : <nl> + command_string + = ( " set " + str ( int ) + " 0 0 " + str ( len ( str ( int ) ) ) + " noreply \ r \ n " + str ( int ) + " \ r \ n " ) <nl> + <nl> + strings = rand_split ( command_string , opts [ " num_chunks " ] ) <nl> + for string in strings : <nl> + s . send ( string ) <nl> + <nl> + start = time . time ( ) <nl> + print " Get time " <nl> + <nl> + s . send ( " set foo 0 0 3 \ r \ nbar \ r \ n " ) <nl> + s . recv ( len ( " STORED \ r \ n " ) ) <nl> + <nl> + # pipeline some gets <nl> + command_string = ' ' <nl> + expected_response = ' ' <nl> + lengths = [ ] <nl> + for int in ints : <nl> + cmd = ( " get " + str ( int ) + " \ r \ n " ) <nl> + rsp = ( " VALUE " + str ( int ) + " 0 " + str ( len ( str ( int ) ) ) + " \ r \ n " + str ( int ) + " \ r \ nEND \ r \ n " ) <nl> + command_string + = cmd <nl> + expected_response + = rsp <nl> + lengths . append ( ( len ( cmd ) , len ( rsp ) ) ) <nl> + <nl> + sent_cmds = 0 <nl> + sent_bytes = 0 <nl> + response = ' ' <nl> + <nl> + foo = 0 <nl> + # print len ( command_string ) , len ( expected_response ) <nl> + while len ( response ) < len ( expected_response ) : # and response = = expected_response [ 0 : len ( response ) ] : <nl> + cur_cmds = sent_cmds <nl> + cur_bytes = sent_bytes <nl> + cmd = " " <nl> + while sent_bytes < len ( command_string ) and cur_cmds < sent_cmds + opts [ " chunk_size " ] : <nl> + chunk = command_string [ cur_bytes : cur_bytes + lengths [ cur_cmds ] [ 0 ] ] <nl> + cmd + = chunk <nl> + cur_bytes + = len ( chunk ) <nl> + cur_cmds + = 1 <nl> + <nl> + sent_bytes + = s . send ( cmd ) <nl> + <nl> + out = " " <nl> + expected = sum ( [ lengths [ i ] [ 1 ] for i in range ( sent_cmds , cur_cmds ) ] ) <nl> + while len ( out ) < expected : <nl> + out + = s . recv ( expected - len ( out ) ) <nl> + <nl> + sent_cmds = cur_cmds <nl> + response + = out <nl> + <nl> + print " Finished gets in % f seconds " % ( time . time ( ) - start ) <nl> + <nl> + if response ! = expected_response : <nl> + raise ValueError ( " Incorrect response : % r Expected : % r " % ( response , expected_response ) ) <nl> + <nl> + ' ' ' <nl> + print " Delete time " <nl> + command_string = ' ' <nl> + expected_response = ' ' <nl> + for int in ints : <nl> + command_string + = ( " delete " + str ( int ) + " \ r \ n " ) <nl> + expected_response + = " DELETED \ r \ n " <nl> + <nl> + strings = rand_split ( command_string , nChunks ) <nl> + for string in strings : <nl> + s . send ( string ) <nl> + <nl> + response = ' ' <nl> + while ( len ( response ) < len ( expected_response ) and response = = expected_response [ 0 : len ( response ) ] ) : <nl> + response + = s . 
recv ( len ( expected_response ) - len ( response ) ) <nl> + <nl> + if response ! = expected_response : <nl> + raise ValueError ( " Incorrect response : % r Expected : % r " % ( response , expected_response ) ) <nl> + ' ' ' <nl> + s . send ( " quit \ r \ n " ) <nl> new file mode 100755 <nl> index 00000000000 . . 6420888fc86 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / rget . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import os , sys , random , time <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , " common " ) ) ) <nl> + import memcached_workload_common <nl> + from line import * <nl> + <nl> + key_padding = ' ' . zfill ( 20 ) <nl> + def gen_key ( prefix , num ) : <nl> + return prefix + key_padding + str ( num ) . zfill ( 6 ) <nl> + <nl> + value_padding = ' ' . zfill ( 25 ) <nl> + large_value_padding = ' ' . zfill ( 2000 ) <nl> + def gen_value ( prefix , num ) : <nl> + if num % 5 = = 4 : <nl> + return prefix + large_value_padding + str ( num ) . zfill ( 6 ) <nl> + else : <nl> + return prefix + value_padding + str ( num ) . zfill ( 6 ) <nl> + <nl> + <nl> + def sock_readline ( sock_file ) : <nl> + ls = [ ] <nl> + while True : <nl> + l = sock_file . readline ( ) <nl> + ls . append ( l ) <nl> + if len ( l ) > = 2 and l [ - 2 : ] = = ' \ r \ n ' : <nl> + break <nl> + return ' ' . join ( ls ) <nl> + <nl> + value_line = line ( " ^ VALUE \ s + ( [ ^ \ s ] + ) \ s + ( \ d + ) \ s + ( \ d + ) \ r \ n $ " , [ ( ' key ' , ' s ' ) , ( ' flags ' , ' d ' ) , ( ' length ' , ' d ' ) ] ) <nl> + def is_sorted_output ( kvs ) : <nl> + k = None <nl> + for kv in kvs : <nl> + if not k : <nl> + k = kv [ ' key ' ] <nl> + continue <nl> + <nl> + if k > = kv [ ' key ' ] : <nl> + return False <nl> + <nl> + k = kv [ ' key ' ] <nl> + return True <nl> + <nl> + def get_results ( s ) : <nl> + res = [ ] <nl> + <nl> + f = s . makefile ( ) <nl> + while True : <nl> + l = sock_readline ( f ) <nl> + if l = = ' END \ r \ n ' : <nl> + break <nl> + val_def = value_line . parse_line ( l ) <nl> + if not val_def : <nl> + raise ValueError ( " received unexpected line from rget : % s " % l ) <nl> + val = sock_readline ( f ) . rstrip ( ) <nl> + if len ( val ) ! = val_def [ ' length ' ] : <nl> + raise ValueError ( " received value of unexpected length ( expected % d , got % d : ' % s ' ) " % ( val_def [ ' length ' ] , len ( val ) , val ) ) <nl> + <nl> + res . append ( { ' key ' : val_def [ ' key ' ] , ' value ' : val } ) <nl> + return res <nl> + <nl> + def check_results ( res , expected_count ) : <nl> + count = len ( res ) <nl> + if count < expected_count : <nl> + raise ValueError ( " received less rget results than expected ( expected : % d , got : % d ) " % ( expected_count , count ) ) <nl> + if count > expected_count : <nl> + raise ValueError ( " received more rget results than expected ( expected : % d , got : % d ) " % ( expected_count , count ) ) <nl> + if not is_sorted_output ( res ) : <nl> + raise ValueError ( " received unsorted rget output " ) <nl> + <nl> + op = memcached_workload_common . option_parser_for_socket ( ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + foo_count = 100 <nl> + fop_count = 1000 <nl> + max_results = foo_count + fop_count <nl> + <nl> + host , port = opts [ " address " ] <nl> + with memcached_workload_common . MemcacheConnection ( host , port ) as mc : <nl> + print " Creating test data " <nl> + mc . 
set ( ' z1 ' , ' bar ' , time = 1 ) # we expect it to expire before we check it later <nl> + for i in range ( 0 , foo_count ) : <nl> + mc . set ( gen_key ( ' foo ' , i ) , gen_value ( ' foo ' , i ) ) <nl> + for i in range ( 0 , fop_count ) : <nl> + mc . set ( gen_key ( ' fop ' , i ) , gen_value ( ' fop ' , i ) ) <nl> + <nl> + with memcached_workload_common . make_socket_connection ( opts ) as s : <nl> + print " Testing rget " <nl> + <nl> + print " Checking simple rget requests with open / closed boundaries " <nl> + s . send ( ' rget % s % s % d % d % d \ r \ n ' % ( gen_key ( ' foo ' , 0 ) , gen_key ( ' fop ' , 0 ) , 0 , 1 , max_results ) ) <nl> + res = get_results ( s ) <nl> + check_results ( res , foo_count ) <nl> + <nl> + s . send ( ' rget % s % s % d % d % d \ r \ n ' % ( gen_key ( ' foo ' , 0 ) , gen_key ( ' fop ' , 0 ) , 1 , 0 , max_results ) ) <nl> + res = get_results ( s ) <nl> + check_results ( res , foo_count - 1 + 1 ) <nl> + <nl> + s . send ( ' rget % s % s % d % d % d \ r \ n ' % ( gen_key ( ' foo ' , 0 ) , gen_key ( ' fop ' , 0 ) , 1 , 1 , max_results ) ) <nl> + res = get_results ( s ) <nl> + check_results ( res , foo_count - 1 ) <nl> + <nl> + print " Checking that rget works when the boundares are not real keys " <nl> + s . send ( ' rget % s % s % d % d % d \ r \ n ' % ( ' a ' , ' fop ' , 0 , 0 , max_results ) ) <nl> + res = get_results ( s ) <nl> + check_results ( res , foo_count ) <nl> + <nl> + print " Checking larger number of results " <nl> + s . send ( ' rget % s % s % d % d % d \ r \ n ' % ( ' a ' , gen_key ( ' goo ' , 0 ) , 0 , 1 , max_results ) ) <nl> + res = get_results ( s ) <nl> + check_results ( res , foo_count + fop_count ) <nl> + <nl> + print " Checking simple paging " <nl> + page_size = 13 <nl> + s . send ( ' rget % s % s % d % d % d \ r \ n ' % ( ' a ' , gen_key ( ' goo ' , 0 ) , 0 , 1 , page_size ) ) <nl> + res = get_results ( s ) <nl> + check_results ( res , page_size ) <nl> + <nl> + print " Checking contiguous paging " <nl> + page_size = 13 <nl> + res = [ ] <nl> + from_key = ' a ' <nl> + while True : <nl> + s . send ( ' rget % s % s % d % d % d \ r \ n ' % ( from_key , gen_key ( ' fop ' , 0 ) , 1 , 1 , page_size ) ) <nl> + cur_res = get_results ( s ) <nl> + res . extend ( cur_res ) <nl> + if len ( res ) < foo_count : <nl> + check_results ( cur_res , page_size ) <nl> + from_key = cur_res [ - 1 ] [ ' key ' ] <nl> + else : <nl> + check_results ( cur_res , len ( cur_res ) ) <nl> + break <nl> + check_results ( res , foo_count ) <nl> + <nl> + for i in range ( 1 , len ( res ) ) : <nl> + kv = res [ i ] <nl> + expected_key = gen_key ( ' foo ' , i ) <nl> + expected_value = gen_value ( ' foo ' , i ) <nl> + if kv [ ' key ' ] ! = expected_key : <nl> + raise ValueError ( " received wrong key ( expected : ' % s ' , got : ' % s ' ) " % ( expected_key , kv [ ' key ' ] ) ) <nl> + if kv [ ' value ' ] ! = expected_value : <nl> + raise ValueError ( " received wrong value ( expected : ' % s ' , got : ' % s ' ) " % ( expected_key , kv [ ' value ' ] ) ) <nl> + <nl> + print " Checking empty results being returned when no keys match " <nl> + s . send ( ' rget % s % s % d % d % d \ r \ n ' % ( ' a ' , ' b ' , 0 , 0 , max_results ) ) <nl> + res = get_results ( s ) <nl> + check_results ( res , 0 ) <nl> + <nl> + print " Checking that expired values are not returned " <nl> + time . sleep ( 1 ) <nl> + s . send ( ' rget z0 z2 1 1 100 \ r \ n ' ) <nl> + res = get_results ( s ) <nl> + check_results ( res , 0 ) <nl> new file mode 100755 <nl> index 00000000000 . . 
8eca84e4aab <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / rget_huge . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import os , sys , random , time <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , " common " ) ) ) <nl> + import memcached_workload_common <nl> + from line import * <nl> + from vcoptparse import * <nl> + <nl> + def sock_readline ( sock_file ) : <nl> + ls = [ ] <nl> + while True : <nl> + l = sock_file . readline ( ) <nl> + ls . append ( l ) <nl> + if len ( l ) > = 2 and l [ - 2 : ] = = ' \ r \ n ' : <nl> + break <nl> + return ' ' . join ( ls ) <nl> + <nl> + def expect_line ( sock_file , expected_line ) : <nl> + actual_line = sock_readline ( sock_file ) <nl> + if actual_line ! = expected_line : <nl> + raise ValueError ( " Expected % r ; got % r " % ( expected_line , actual_line ) ) <nl> + <nl> + op = memcached_workload_common . option_parser_for_socket ( ) <nl> + op [ " count " ] = IntFlag ( " - - count " , 100000 ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + print_interval = 1 <nl> + while print_interval * 100 < opts [ " count " ] : <nl> + print_interval * = 10 <nl> + <nl> + alphabet = " abcdefghijklmnopqrstuvwxyz " <nl> + pairs = [ ] <nl> + for i in range ( 0 , opts [ " count " ] ) : <nl> + key = random . choice ( alphabet ) + random . choice ( alphabet ) + random . choice ( alphabet ) + str ( i ) <nl> + value = ' x ' * ( 50 + 500 * i % 2 ) <nl> + pairs . append ( ( key , value ) ) <nl> + <nl> + with memcached_workload_common . make_socket_connection ( opts ) as s : <nl> + f = s . makefile ( ) <nl> + print " Creating test data " <nl> + for i , ( key , value ) in enumerate ( pairs ) : <nl> + if ( i + 1 ) % print_interval = = 0 : <nl> + s . send ( ' set % s 0 0 % d \ r \ n % s \ r \ n ' % ( key , len ( value ) , value ) ) <nl> + expect_line ( f , " STORED \ r \ n " ) <nl> + print i + 1 , <nl> + sys . stdout . flush ( ) <nl> + else : <nl> + s . send ( ' set % s 0 0 % d noreply \ r \ n % s \ r \ n ' % ( key , len ( value ) , value ) ) <nl> + print <nl> + print " Testing rget " <nl> + s . send ( ' rget null null - 1 - 1 % d \ r \ n ' % ( opts [ " count " ] * 2 ) ) <nl> + for i , ( key , value ) in enumerate ( sorted ( pairs ) ) : <nl> + expect_line ( f , " VALUE % s 0 % d \ r \ n " % ( key , len ( value ) ) ) <nl> + expect_line ( f , " % s \ r \ n " % value ) <nl> + if ( i + 1 ) % print_interval = = 0 : <nl> + print i + 1 , <nl> + sys . stdout . flush ( ) <nl> + print <nl> + expect_line ( f , " END \ r \ n " ) <nl> + <nl> new file mode 100755 <nl> index 00000000000 . . d1458f76bea <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / serial_mix . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import random , time , sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + from vcoptparse import * <nl> + <nl> + def random_key ( opts ) : <nl> + # The reason we have keysuffix is in case another test ( such as multi_serial_mix . py ) is using <nl> + # this as a subroutine but wants to make sure that random keys don ' t collide with other random <nl> + # keys . <nl> + suf = opts . get ( " keysuffix " , " " ) <nl> + return " " . join ( random . 
choice ( " abcdefghijklmnopqrstuvwxyz " ) <nl> + for i in xrange ( random . randint ( 1 , opts [ " keysize " ] - len ( suf ) ) ) ) + suf <nl> + <nl> + def random_value ( opts ) : <nl> + # Most of the time we want to use small values , but we also want to test large values <nl> + # sometimes . <nl> + if random . randint ( 0 , 10 ) = = 0 : <nl> + return random . randint ( 0 , opts [ " valuesize " ] ) * random . choice ( " ABCDEFGHIJKLMNOPQRSTUVWXYZ " ) <nl> + else : <nl> + return random . randint ( 0 , min ( 200 , opts [ " valuesize " ] ) ) * random . choice ( " ABCDEFGHIJKLMNOPQRSTUVWXYZ " ) <nl> + <nl> + def fail ( k , v , v2 ) : <nl> + raise ValueError ( " Key % r should have value % r , but had value % r . " % ( k , v , v2 ) ) <nl> + <nl> + def verify_all ( opts , mc , clone , deleted ) : <nl> + for key in clone : <nl> + value = mc . get ( key ) <nl> + if value ! = clone [ key ] : <nl> + fail ( key , clone [ key ] , value ) <nl> + for key in deleted : <nl> + value = mc . get ( key ) <nl> + if value is not None : <nl> + fail ( key , None , value ) <nl> + <nl> + def verify ( opts , mc , clone , deleted , key ) : <nl> + if not opts [ " thorough " ] : <nl> + # Check the specified key <nl> + value = mc . get ( key ) <nl> + if value ! = clone . get ( key , None ) : <nl> + fail ( key , clone . get ( key , None ) , value ) <nl> + else : <nl> + # Check allllll the keys and deleted keys <nl> + verify_all ( opts , mc , clone , deleted ) <nl> + <nl> + def random_action ( opts , mc , clone , deleted ) : <nl> + <nl> + what_to_do = random . random ( ) <nl> + <nl> + if what_to_do < 0 . 2 : <nl> + # Check a random key <nl> + if opts [ " thorough " ] : <nl> + # We check thoroughly after every test anyway <nl> + return <nl> + if not clone : return <nl> + verify ( opts , mc , clone , deleted , random . choice ( clone . keys ( ) ) ) <nl> + <nl> + elif what_to_do < 0 . 25 : <nl> + # Check a deleted or nonexistent key <nl> + if random . random ( ) < 0 . 5 and deleted : <nl> + # A deleted key <nl> + key = random . choice ( list ( deleted ) ) <nl> + else : <nl> + # A new key <nl> + key = random_key ( opts ) <nl> + verify ( opts , mc , clone , deleted , key ) <nl> + <nl> + # NOTE : Was : < 0 . 6 , see below <nl> + elif what_to_do < 0 . 95 : <nl> + # Set <nl> + if random . random ( ) < 0 . 3 and clone : <nl> + # An existing key <nl> + key = random . choice ( clone . keys ( ) ) <nl> + else : <nl> + # A new key <nl> + key = random_key ( opts ) <nl> + deleted . discard ( key ) <nl> + value = random_value ( opts ) <nl> + clone [ key ] = value <nl> + ok = mc . set ( key , value ) <nl> + if ok = = 0 : <nl> + raise ValueError ( " Could not set % r to % r . " % ( key , value ) ) <nl> + verify ( opts , mc , clone , deleted , key ) <nl> + <nl> + # NOTE ( daniel ) : Append / prepend seems to be broken in our server . <nl> + # I just needed a quick test to check the serializer , so I disabled it . <nl> + # <nl> + # elif what_to_do < 0 . 95 : <nl> + # # Append / prepend <nl> + # if not clone : return <nl> + # key = random . choice ( clone . keys ( ) ) <nl> + # # Make sure that the value we add isn ' t long enough to make the value larger than our <nl> + # # specified maximum value size <nl> + # value_to_pend = random_value ( opts ) [ : opts [ " valuesize " ] - len ( clone [ key ] ) ] <nl> + # if random . randint ( 1 , 2 ) = = 1 : <nl> + # # Append <nl> + # clone [ key ] + = value_to_pend <nl> + # ok = mc . 
append ( key , value_to_pend ) <nl> + # else : <nl> + # # Prepend <nl> + # clone [ key ] = value_to_pend + clone [ key ] <nl> + # ok = mc . prepend ( key , value_to_pend ) <nl> + # if ok = = 0 : <nl> + # raise ValueError ( " Could not append / prepend % r to key % r " % ( value_to_pend , key ) ) <nl> + # verify ( opts , mc , clone , deleted , key ) <nl> + <nl> + else : <nl> + # Delete <nl> + if not clone : return <nl> + key = random . choice ( clone . keys ( ) ) <nl> + del clone [ key ] <nl> + deleted . add ( key ) <nl> + ok = mc . delete ( key ) <nl> + if ok = = 0 : <nl> + raise ValueError ( " Could not delete % r . " % key ) <nl> + verify ( opts , mc , clone , deleted , key ) <nl> + <nl> + def test ( opts , mc , clone , deleted ) : <nl> + if opts [ " duration " ] = = " forever " : <nl> + try : <nl> + while True : <nl> + random_action ( opts , mc , clone , deleted ) <nl> + except KeyboardInterrupt : <nl> + pass <nl> + else : <nl> + start_time = time . time ( ) <nl> + while time . time ( ) < start_time + opts [ " duration " ] : <nl> + random_action ( opts , mc , clone , deleted ) <nl> + <nl> + def option_parser_for_serial_mix ( ) : <nl> + op = memcached_workload_common . option_parser_for_memcache ( ) <nl> + op [ " keysize " ] = IntFlag ( " - - keysize " , 250 ) <nl> + op [ " valuesize " ] = IntFlag ( " - - valuesize " , 10000 ) <nl> + op [ " thorough " ] = BoolFlag ( " - - thorough " ) <nl> + def int_or_forever_parser ( string ) : <nl> + if string = = " forever " : <nl> + return " forever " <nl> + else : <nl> + try : <nl> + return int ( string ) <nl> + except ValueError : <nl> + raise OptError ( " expected ' forever ' or integer , got % r " % string ) <nl> + op [ " duration " ] = ValueFlag ( " - - duration " , converter = int_or_forever_parser , default = 10 ) <nl> + return op <nl> + <nl> + if __name__ = = " __main__ " : <nl> + import pickle <nl> + <nl> + op = option_parser_for_serial_mix ( ) <nl> + op [ " load " ] = StringFlag ( " - - load " , None ) <nl> + op [ " save " ] = StringFlag ( " - - save " , None ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + if opts [ " load " ] is None : <nl> + clone , deleted = { } , set ( ) <nl> + else : <nl> + print " Loading from % r . . . " % opts [ " load " ] <nl> + with open ( opts [ " load " ] ) as f : <nl> + clone , deleted = pickle . load ( f ) <nl> + with memcached_workload_common . make_memcache_connection ( opts ) as mc : <nl> + test ( opts , mc , clone , deleted ) <nl> + if opts [ " save " ] is not None : <nl> + print " Saving to % r . . . " % opts [ " save " ] <nl> + with open ( opts [ " save " ] , " w " ) as f : <nl> + pickle . dump ( ( clone , deleted ) , f ) <nl> new file mode 100755 <nl> index 00000000000 . . 176428bdcd0 <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / simulate_web_ui . py <nl> <nl> + # ! / usr / bin / python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + import httplib , time <nl> + <nl> + # This isn ' t really a memcached workload . . . <nl> + <nl> + op = memcached_workload_common . option_parser_for_socket ( ) <nl> + opts = op . parse ( sys . argv ) <nl> + host , port = opts [ " address " ] <nl> + <nl> + def fetch ( resource , expect = [ 200 ] ) : <nl> + conn = httplib . HTTPConnection ( host , port ) <nl> + conn . request ( " GET " , resource ) <nl> + response = conn . 
getresponse ( ) <nl> + assert response . status in expect <nl> + <nl> + fetch ( " / " ) <nl> + <nl> + try : <nl> + while True : <nl> + fetch ( " / ajax " ) <nl> + fetch ( " / ajax / progress " ) <nl> + fetch ( " / ajax / log / _ ? max_length = 10 & min_timestamp = 0 " ) <nl> + for i in xrange ( 5 ) : <nl> + fetch ( " / ajax / stat " ) <nl> + time . sleep ( 1 ) <nl> + except KeyboardInterrupt : <nl> + pass <nl> \ No newline at end of file <nl> new file mode 100755 <nl> index 00000000000 . . 80e833c833e <nl> mmm / dev / null <nl> ppp b / test / memcached_workloads / unappend_unprepend . py <nl> <nl> + # ! / usr / bin / env python <nl> + # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + import sys , os <nl> + sys . path . append ( os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , os . path . pardir , ' common ' ) ) ) <nl> + import memcached_workload_common <nl> + <nl> + # TODO : This readline function is copied and pasted from big_values . py . <nl> + def readline ( s ) : <nl> + buf = " " <nl> + while not buf . endswith ( " \ r \ n " ) : <nl> + buf + = s . recv ( 1 ) <nl> + return buf <nl> + <nl> + # TODO : This expect function is copied and pasted from big_values . py . <nl> + def expect ( s , string ) : <nl> + msg = " " <nl> + while len ( msg ) < len ( string ) : <nl> + msg + = s . recv ( len ( string ) - len ( msg ) ) <nl> + if msg ! = string : <nl> + raise ValueError ( " Didn ' t get what we expected : expected % s , got % s " % ( string , msg ) ) ; <nl> + <nl> + def test_sizes ( s , cmd , lo , hi ) : <nl> + print ( " testing un % s with % d . . % d " % ( cmd , lo , hi ) ) <nl> + s . send ( " set x 0 0 % d \ r \ n " % lo + " a " * lo + " \ r \ n " ) <nl> + msg = readline ( s ) <nl> + if msg ! = " STORED \ r \ n " : <nl> + print ( " Server responded with ' % s ' , should have been STORED . " % msg ) <nl> + raise ValueError ( " Initial large value of size % d not set . Weird . " % lo ) <nl> + <nl> + # We send a malformed request , with an extra char ! <nl> + s . send ( " % s x 0 0 % d \ r \ n " % ( cmd , ( hi - lo ) ) + " b " * ( hi - lo ) + " b " + " \ r \ n " ) <nl> + <nl> + expect ( s , " CLIENT_ERROR bad data chunk \ r \ n " ) <nl> + <nl> + s . send ( " get x \ r \ n " ) <nl> + expect ( s , " VALUE x 0 % d \ r \ n " % lo ) <nl> + expect ( s , " a " * lo + " \ r \ n " ) <nl> + expect ( s , " END \ r \ n " ) <nl> + <nl> + op = memcached_workload_common . option_parser_for_socket ( ) <nl> + opts = op . parse ( sys . argv ) <nl> + <nl> + with memcached_workload_common . make_socket_connection ( opts ) as s : <nl> + <nl> + sizes = [ 1 , 100 , 300 , 1000 , 8000 , 700000 ] <nl> + <nl> + for ( lo , hi ) in [ ( x , y ) for x in sizes for y in sizes if x < y ] : <nl> + for cmd in [ " append " , " prepend " ] : <nl> + test_sizes ( s , cmd , lo , hi ) <nl>
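unappend_unprepend.py above probes a malformed request (one extra byte in the data chunk) and expects CLIENT_ERROR. The same probe also fits the Perl half of the suite; here is a sketch using the MemcachedTest helpers that noreply.t and multiversioning.t already use (the key name and sizes are illustrative, and the exact error line is assumed to match what the Python test expects):

    #!/usr/bin/perl
    use strict;
    use Test::More tests => 2;
    use FindBin qw($Bin);
    use lib "$Bin/lib";
    use MemcachedTest;

    my $server = new_memcached();
    my $sock   = $server->sock;

    # Declare 3 bytes but send 4: the chunk must be rejected.
    print $sock "set badchunk 0 0 3\r\nabcd\r\n";
    is(scalar <$sock>, "CLIENT_ERROR bad data chunk\r\n", "oversized chunk rejected");

    # And nothing should have been stored under the key.
    mem_get_is($sock, "badchunk", undef, "nothing stored after bad chunk");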
Revert " Removed the memcached_workloads directory . "
rethinkdb/rethinkdb
eeeeb46e3a9fbb7ee7b0f2269f8183b39439e78c
2014-01-28T18:55:53Z
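pipeline.py's rand_split above is what forces the server to see commands cut at arbitrary byte boundaries. The same idea as a Perl helper, in case one of the .t files ever needs it (this port is an assumption, not part of the suite; unlike the Python version it never yields an empty leading piece):

    use List::Util qw(shuffle);

    # "I am a string" -> e.g. ("I a", "m a s", "trin", "g")
    # Assumes 1 <= $n <= length($string).
    sub rand_split {
        my ($string, $n) = @_;
        # Pick n-1 distinct interior cut points, in ascending order.
        my @cuts = sort { $a <=> $b } (shuffle(1 .. length($string) - 1))[0 .. $n - 2];
        my @pieces;
        my $prev = 0;
        for my $cut (@cuts, length($string)) {
            push @pieces, substr($string, $prev, $cut - $prev);
            $prev = $cut;
        }
        return @pieces;
    }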
new file mode 100644 <nl> index 0000000000 . . 1da45d64f2 <nl> mmm / dev / null <nl> ppp b / code / online_challenges / src / codechef / VOTERS / README . md <nl> <nl> + # Problem Link : <nl> + [ VOTERS ] ( https : / / www . codechef . com / problems / VOTERS / ) <nl> + <nl> + # Description <nl> + As you might remember , the collector of Siruseri had ordered a complete revision of the Voters List . He knew that constructing the list of voters is a difficult task , prone to errors . Some voters may have been away on vacation , others may have moved during the enrollment and so on . <nl> + <nl> + To be as accurate as possible , he entrusted the task to three different officials . Each of them was to independently record the list of voters and send it to the collector . In Siruseri , everyone has an ID number and the list would only list the ID numbers of the voters and not their names . The officials were expected to arrange the ID numbers in ascending order in their lists . <nl> + <nl> + On receiving the lists , the Collector realised that there were discrepancies - the three lists were not identical . He decided to go with the majority . That is , he decided to construct the final list including only those ID numbers that appeared in at least 2 out of the 3 lists . For example if the three lists were <nl> + <nl> + 23 30 42 57 90 <nl> + 21 23 35 57 90 92 <nl> + 21 23 30 57 90 <nl> + then the final list compiled by the collector would be : <nl> + <nl> + 21 23 30 57 90 <nl> + The ID numbers 35 , 42 and 92 which appeared in only one list each do not figure in the final list . <nl> + <nl> + Your task is to help the collector by writing a program that produces the final list from the three given lists . <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 0000000000 . . b76abc476b <nl> mmm / dev / null <nl> ppp b / code / online_challenges / src / codechef / VOTERS / VOTERS . c <nl> <nl> + # include < stdio . h > <nl> + <nl> + int main ( ) <nl> + { <nl> + int n1 , n2 , n3 ; <nl> + scanf ( " % d % d % d " , & n1 , & n2 , & n3 ) ; <nl> + static int n [ 1000000 ] = { 0 } ; <nl> + int i ; <nl> + for ( i = 0 ; i < n1 ; + + i ) <nl> + { <nl> + int x ; <nl> + scanf ( " % d " , & x ) ; <nl> + + + n [ x ] ; <nl> + } <nl> + for ( i = 0 ; i < n2 ; + + i ) <nl> + { <nl> + int x ; <nl> + scanf ( " % d " , & x ) ; <nl> + + + n [ x ] ; <nl> + } <nl> + for ( i = 0 ; i < n3 ; + + i ) <nl> + { <nl> + int x ; <nl> + scanf ( " % d " , & x ) ; <nl> + + + n [ x ] ; <nl> + } <nl> + int count = 0 ; <nl> + for ( i = 0 ; i < 1000000 ; + + i ) <nl> + { <nl> + if ( n [ i ] > 1 ) <nl> + + + count ; <nl> + } <nl> + printf ( " % d \ n " , count ) ; <nl> + for ( i = 0 ; i < 1000000 ; + + i ) <nl> + { <nl> + if ( n [ i ] > 1 ) <nl> + printf ( " % d \ n " , i ) ; <nl> + } <nl> + return 0 ; <nl> + } <nl>
Codechef VOTERS solution in C
OpenGenus/cosmos
46089ec7dd1a8da40ca07dc737637fb628e9ef14
2020-03-29T16:50:07Z
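For reference, the counting technique this solution uses (tally each ID across all three reads, then emit IDs seen at least twice) can be written compactly with matching format specifiers and a bounds check. A minimal C++ sketch, assuming IDs fall below an upper bound MAX_ID; the constant and the merged-read loop are assumptions of this sketch, not part of the original submission.

#include <cstdio>
#include <vector>

int main() {
    const int MAX_ID = 1000000;  // assumed upper bound on voter IDs
    int n1, n2, n3;
    if (std::scanf("%d %d %d", &n1, &n2, &n3) != 3) return 1;
    std::vector<int> count(MAX_ID, 0);
    // Each list holds distinct IDs, so one merged pass over all three
    // reads gives, per ID, the number of lists it appears in.
    for (int i = 0; i < n1 + n2 + n3; ++i) {
        int x;
        if (std::scanf("%d", &x) == 1 && x >= 0 && x < MAX_ID) ++count[x];
    }
    int total = 0;
    for (int id = 0; id < MAX_ID; ++id)
        if (count[id] > 1) ++total;  // majority: present in >= 2 lists
    std::printf("%d\n", total);
    for (int id = 0; id < MAX_ID; ++id)
        if (count[id] > 1) std::printf("%d\n", id);  // ascending by construction
    return 0;
}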
mmm a / folly / synchronization / detail / AtomicUtils . h <nl> ppp b / folly / synchronization / detail / AtomicUtils . h <nl> inline std : : memory_order default_failure_memory_order ( <nl> return successMode ; <nl> } <nl> } <nl> + <nl> + inline char const * memory_order_to_str ( std : : memory_order mo ) { <nl> + switch ( mo ) { <nl> + case std : : memory_order_relaxed : <nl> + return " relaxed " ; <nl> + case std : : memory_order_consume : <nl> + return " consume " ; <nl> + case std : : memory_order_acquire : <nl> + return " acquire " ; <nl> + case std : : memory_order_release : <nl> + return " release " ; <nl> + case std : : memory_order_acq_rel : <nl> + return " acq_rel " ; <nl> + case std : : memory_order_seq_cst : <nl> + return " seq_cst " ; <nl> + } <nl> + } <nl> } / / namespace detail <nl> } / / namespace folly <nl> new file mode 100644 <nl> index 00000000000 . . 70c578dc148 <nl> mmm / dev / null <nl> ppp b / folly / test / BufferedAtomic . h <nl> <nl> + / * <nl> + * Copyright 2013 - present Facebook , Inc . <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < sstream > <nl> + <nl> + # include < folly / synchronization / detail / AtomicUtils . h > <nl> + # include < folly / test / DeterministicSchedule . h > <nl> + <nl> + namespace folly { <nl> + namespace test { <nl> + <nl> + template < typename T > <nl> + class RecordBuffer { <nl> + private : <nl> + struct Record { <nl> + Record ( DSchedTimestamp ts , DSchedThreadId tid , bool sc , T val ) <nl> + : acqRelTimestamp_ ( ts ) , storingThread_ ( tid ) , seqCst_ ( sc ) , val_ ( val ) { } <nl> + explicit Record ( T val ) : val_ ( val ) { } <nl> + Record ( ) = delete ; <nl> + <nl> + DSchedTimestamp acqRelTimestamp_ ; <nl> + DSchedThreadId storingThread_ ; <nl> + bool seqCst_ ; <nl> + T val_ ; <nl> + ThreadTimestamps acqRelOrder_ ; <nl> + ThreadTimestamps firstObservedOrder_ ; <nl> + } ; <nl> + <nl> + public : <nl> + RecordBuffer ( ) = default ; <nl> + <nl> + T load ( ThreadInfo & threadInfo , std : : memory_order mo , bool rmw ) { <nl> + DSchedThreadId tid = DeterministicSchedule : : getThreadId ( ) ; <nl> + return load ( tid , threadInfo , mo , rmw ) ; <nl> + } <nl> + <nl> + T load ( <nl> + DSchedThreadId tid , <nl> + ThreadInfo & threadInfo , <nl> + std : : memory_order mo , <nl> + bool rmw = false ) { <nl> + if ( ! rmw ) { <nl> + assert ( mo ! = std : : memory_order_release ) ; <nl> + assert ( mo ! = std : : memory_order_acq_rel ) ; <nl> + } <nl> + <nl> + if ( ! isInitialized ( ) ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + size_t oldestAllowed = <nl> + rmw ? 0 : getOldestAllowed ( mo , threadInfo . 
acqRelOrder_ ) ; <nl> + <nl> + size_t selected = DeterministicSchedule : : getRandNumber ( oldestAllowed + 1 ) ; <nl> + <nl> + FOLLY_TEST_DSCHED_VLOG ( <nl> + " buffered load , mo : " < < folly : : detail : : memory_order_to_str ( mo ) <nl> + < < " index " < < selected < < " / " < < oldestAllowed <nl> + < < " allowed . " <nl> + < < " current value : " < < loadDirect ( ) <nl> + < < " return value : " < < history_ [ selected ] . val_ ) ; <nl> + <nl> + Record & rec = history_ [ selected ] ; <nl> + DSchedTimestamp ts = threadInfo . acqRelOrder_ . advance ( tid ) ; <nl> + rec . firstObservedOrder_ . setIfNotPresent ( tid , ts ) ; <nl> + <nl> + bool synch = <nl> + ( mo = = std : : memory_order_acquire | | mo = = std : : memory_order_acq_rel | | <nl> + mo = = std : : memory_order_seq_cst ) ; <nl> + ThreadTimestamps & dst = <nl> + synch ? threadInfo . acqRelOrder_ : threadInfo . acqFenceOrder_ ; <nl> + dst . sync ( rec . acqRelOrder_ ) ; <nl> + <nl> + return rec . val_ ; <nl> + } <nl> + <nl> + T loadDirect ( ) const { <nl> + if ( ! isInitialized ( ) ) { <nl> + return 0 ; <nl> + } <nl> + return history_ [ 0 ] . val_ ; <nl> + } <nl> + <nl> + void storeDirect ( T val ) { <nl> + if ( isInitialized ( ) ) { <nl> + history_ [ 0 ] . val_ = val ; <nl> + } else { <nl> + history_ . emplace_front ( val ) ; <nl> + } <nl> + } <nl> + <nl> + void store ( ThreadInfo & threadInfo , T v , std : : memory_order mo , bool rmw ) { <nl> + DSchedThreadId tid = DeterministicSchedule : : getThreadId ( ) ; <nl> + store ( tid , threadInfo , v , mo , rmw ) ; <nl> + } <nl> + <nl> + void store ( <nl> + DSchedThreadId tid , <nl> + ThreadInfo & threadInfo , <nl> + T v , <nl> + std : : memory_order mo , <nl> + bool rmw = false ) { <nl> + if ( ! rmw ) { <nl> + assert ( mo ! = std : : memory_order_acquire ) ; <nl> + assert ( mo ! = std : : memory_order_acq_rel ) ; <nl> + assert ( mo ! = std : : memory_order_consume ) ; <nl> + } <nl> + <nl> + DSchedTimestamp ts = threadInfo . acqRelOrder_ . advance ( tid ) ; <nl> + bool preserve = isInitialized ( ) & & <nl> + ( rmw | | tid . val = = history_ . front ( ) . storingThread_ . val ) ; <nl> + bool sc = ( mo = = std : : memory_order_seq_cst ) ; <nl> + history_ . emplace_front ( ts , tid , sc , v ) ; <nl> + Record & rec = history_ . front ( ) ; <nl> + rec . firstObservedOrder_ . setIfNotPresent ( tid , ts ) ; <nl> + <nl> + bool synch = <nl> + ( mo = = std : : memory_order_release | | mo = = std : : memory_order_acq_rel | | <nl> + mo = = std : : memory_order_seq_cst ) ; <nl> + ThreadTimestamps & src = <nl> + synch ? threadInfo . acqRelOrder_ : threadInfo . relFenceOrder_ ; <nl> + if ( preserve ) { <nl> + rec . acqRelOrder_ = history_ . front ( ) . acqRelOrder_ ; <nl> + } <nl> + rec . acqRelOrder_ . sync ( src ) ; <nl> + if ( history_ . size ( ) > kMaxRecordBufferSize ) { <nl> + history_ . pop_back ( ) ; <nl> + } <nl> + } <nl> + <nl> + protected : <nl> + size_t getOldestAllowed ( <nl> + std : : memory_order mo , <nl> + const ThreadTimestamps & acqRelOrder ) { <nl> + assert ( isInitialized ( ) ) ; <nl> + for ( size_t i = 0 ; i < history_ . size ( ) - 1 ; i + + ) { <nl> + Record & rec = history_ [ i ] ; <nl> + if ( rec . seqCst_ & & ( mo = = std : : memory_order_seq_cst ) ) { <nl> + return i ; <nl> + } <nl> + <nl> + if ( acqRelOrder . atLeastAsRecentAs ( <nl> + rec . storingThread_ , rec . acqRelTimestamp_ ) ) { <nl> + return i ; <nl> + } <nl> + <nl> + if ( acqRelOrder . atLeastAsRecentAsAny ( rec . firstObservedOrder_ ) ) { <nl> + return i ; <nl> + } <nl> + } <nl> + return history_ . 
size ( ) - 1 ; <nl> + } <nl> + / / index 0 is newest , index size - 1 is oldest <nl> + std : : deque < Record > history_ ; <nl> + <nl> + private : <nl> + static constexpr size_t kMaxRecordBufferSize = 64 ; <nl> + <nl> + bool isInitialized ( ) const { <nl> + return ! history_ . empty ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + template < typename T > <nl> + struct BufferedAtomic { <nl> + BufferedAtomic ( ) { <nl> + DeterministicSchedule : : beforeSharedAccess ( ) ; <nl> + assert ( bufs . count ( this ) = = 0 ) ; <nl> + bufs [ this ] ; <nl> + DeterministicSchedule : : afterSharedAccess ( ) ; <nl> + } <nl> + ~ BufferedAtomic ( ) { <nl> + DeterministicSchedule : : beforeSharedAccess ( ) ; <nl> + assert ( bufs . count ( this ) = = 1 ) ; <nl> + bufs . erase ( this ) ; <nl> + DeterministicSchedule : : afterSharedAccess ( ) ; <nl> + } <nl> + BufferedAtomic ( BufferedAtomic < T > const & ) = delete ; <nl> + BufferedAtomic < T > & operator = ( BufferedAtomic < T > const & ) = delete ; <nl> + <nl> + using Modification = std : : function < T ( const T & ) > ; <nl> + <nl> + constexpr / * implicit * / BufferedAtomic ( T v ) noexcept { <nl> + DeterministicSchedule : : beforeSharedAccess ( ) ; <nl> + assert ( bufs . count ( this ) = = 0 ) ; <nl> + bufs [ this ] ; <nl> + doStore ( v , std : : memory_order_relaxed ) ; <nl> + DeterministicSchedule : : afterSharedAccess ( ) ; <nl> + } <nl> + <nl> + bool is_lock_free ( ) const noexcept { <nl> + return false ; <nl> + } <nl> + <nl> + bool compare_exchange_strong ( <nl> + T & v0 , <nl> + T v1 , <nl> + std : : memory_order mo = std : : memory_order_seq_cst ) noexcept { <nl> + return compare_exchange_strong ( <nl> + v0 , v1 , mo , folly : : detail : : default_failure_memory_order ( mo ) ) ; <nl> + } <nl> + bool compare_exchange_strong ( <nl> + T & expected , <nl> + T desired , <nl> + std : : memory_order success , <nl> + std : : memory_order failure ) noexcept { <nl> + return doCompareExchange ( expected , desired , success , failure , false ) ; <nl> + } <nl> + <nl> + bool compare_exchange_weak ( <nl> + T & v0 , <nl> + T v1 , <nl> + std : : memory_order mo = std : : memory_order_seq_cst ) noexcept { <nl> + return compare_exchange_weak ( <nl> + v0 , v1 , mo , : : folly : : detail : : default_failure_memory_order ( mo ) ) ; <nl> + } <nl> + bool compare_exchange_weak ( <nl> + T & expected , <nl> + T desired , <nl> + std : : memory_order success , <nl> + std : : memory_order failure ) noexcept { <nl> + return doCompareExchange ( expected , desired , success , failure , true ) ; <nl> + } <nl> + <nl> + T exchange ( T v , std : : memory_order mo = std : : memory_order_seq_cst ) noexcept { <nl> + Modification mod = [ & ] ( const T & / * prev * / ) { return v ; } ; <nl> + return doReadModifyWrite ( mod , mo ) ; <nl> + } <nl> + <nl> + / * implicit * / operator T ( ) const noexcept { <nl> + return doLoad ( std : : memory_order_seq_cst ) ; <nl> + } <nl> + <nl> + T load ( std : : memory_order mo = std : : memory_order_seq_cst ) const noexcept { <nl> + return doLoad ( mo ) ; <nl> + } <nl> + <nl> + T operator = ( T v ) noexcept { <nl> + doStore ( v , std : : memory_order_seq_cst ) ; <nl> + return v ; <nl> + } <nl> + <nl> + void store ( T v , std : : memory_order mo = std : : memory_order_seq_cst ) noexcept { <nl> + doStore ( v , mo ) ; <nl> + } <nl> + <nl> + T operator + + ( ) noexcept { <nl> + Modification mod = [ ] ( const T & prev ) { return prev + 1 ; } ; <nl> + return doReadModifyWrite ( mod , std : : memory_order_seq_cst ) + 1 ; <nl> + } <nl> + <nl> + T operator + + ( int / * 
postDummy * / ) noexcept { <nl> + Modification mod = [ ] ( const T & prev ) { return prev + 1 ; } ; <nl> + return doReadModifyWrite ( mod , std : : memory_order_seq_cst ) ; <nl> + } <nl> + <nl> + T operator - - ( ) noexcept { <nl> + Modification mod = [ ] ( const T & prev ) { return prev - 1 ; } ; <nl> + return doReadModifyWrite ( mod , std : : memory_order_seq_cst ) - 1 ; <nl> + } <nl> + <nl> + T operator - - ( int / * postDummy * / ) noexcept { <nl> + Modification mod = [ ] ( const T & prev ) { return prev - 1 ; } ; <nl> + return doReadModifyWrite ( mod , std : : memory_order_seq_cst ) ; <nl> + } <nl> + <nl> + T operator + = ( T v ) noexcept { <nl> + Modification mod = [ & ] ( const T & prev ) { return prev + v ; } ; <nl> + return doReadModifyWrite ( mod , std : : memory_order_seq_cst ) + v ; <nl> + } <nl> + <nl> + T fetch_add ( T v , std : : memory_order mo = std : : memory_order_seq_cst ) noexcept { <nl> + Modification mod = [ & ] ( const T & prev ) { return prev + v ; } ; <nl> + return doReadModifyWrite ( mod , mo ) ; <nl> + } <nl> + <nl> + T operator - = ( T v ) noexcept { <nl> + Modification mod = [ & ] ( const T & prev ) { return prev - v ; } ; <nl> + return doReadModifyWrite ( mod , std : : memory_order_seq_cst ) - v ; <nl> + } <nl> + <nl> + T fetch_sub ( T v , std : : memory_order mo = std : : memory_order_seq_cst ) noexcept { <nl> + Modification mod = [ & ] ( const T & prev ) { return prev - v ; } ; <nl> + return doReadModifyWrite ( mod , mo ) ; <nl> + } <nl> + <nl> + T operator & = ( T v ) noexcept { <nl> + Modification mod = [ & ] ( const T & prev ) { return prev & v ; } ; <nl> + return doReadModifyWrite ( mod , std : : memory_order_seq_cst ) & v ; <nl> + } <nl> + <nl> + T fetch_and ( T v , std : : memory_order mo = std : : memory_order_seq_cst ) noexcept { <nl> + Modification mod = [ & ] ( const T & prev ) { return prev & v ; } ; <nl> + return doReadModifyWrite ( mod , mo ) ; <nl> + } <nl> + <nl> + T operator | = ( T v ) noexcept { <nl> + Modification mod = [ & ] ( const T & prev ) { return prev | v ; } ; <nl> + return doReadModifyWrite ( mod , std : : memory_order_seq_cst ) | v ; <nl> + } <nl> + <nl> + T fetch_or ( T v , std : : memory_order mo = std : : memory_order_seq_cst ) noexcept { <nl> + Modification mod = [ & ] ( const T & prev ) { return prev | v ; } ; <nl> + return doReadModifyWrite ( mod , mo ) ; <nl> + } <nl> + <nl> + T operator ^ = ( T v ) noexcept { <nl> + Modification mod = [ & ] ( const T & prev ) { return prev ^ v ; } ; <nl> + return doReadModifyWrite ( mod , std : : memory_order_seq_cst ) ^ v ; <nl> + } <nl> + <nl> + T fetch_xor ( T v , std : : memory_order mo = std : : memory_order_seq_cst ) noexcept { <nl> + Modification mod = [ & ] ( const T & prev ) { return prev ^ v ; } ; <nl> + return doReadModifyWrite ( mod , mo ) ; <nl> + } <nl> + <nl> + private : <nl> + T doLoad ( std : : memory_order mo , bool rmw = false ) const { <nl> + / / Static destructors that outlive DSched instance may load atomics <nl> + if ( ! DeterministicSchedule : : isActive ( ) ) { <nl> + auto prev = prevUnguardedAccess . exchange ( std : : this_thread : : get_id ( ) ) ; <nl> + assert ( prev = = std : : thread : : id ( ) | | prev = = std : : this_thread : : get_id ( ) ) ; <nl> + return getBuf ( ) . loadDirect ( ) ; <nl> + } <nl> + ThreadInfo & threadInfo = DeterministicSchedule : : getCurrentThreadInfo ( ) ; <nl> + T rv = getBuf ( ) . 
load ( threadInfo , mo , rmw ) ; <nl> + return rv ; <nl> + } <nl> + <nl> + void doStore ( T val , std : : memory_order mo , bool rmw = false ) { <nl> + / / Static destructors that outlive DSched instance may store to atomics <nl> + if ( ! DeterministicSchedule : : isActive ( ) ) { <nl> + auto prev = prevUnguardedAccess . exchange ( std : : this_thread : : get_id ( ) ) ; <nl> + assert ( prev = = std : : thread : : id ( ) | | prev = = std : : this_thread : : get_id ( ) ) ; <nl> + getBuf ( ) . storeDirect ( val ) ; <nl> + return ; <nl> + } <nl> + ThreadInfo & threadInfo = DeterministicSchedule : : getCurrentThreadInfo ( ) ; <nl> + getBuf ( ) . store ( threadInfo , val , mo , rmw ) ; <nl> + FOLLY_TEST_DSCHED_VLOG ( <nl> + " \ tstore mo : " < < folly : : detail : : memory_order_to_str ( mo ) <nl> + < < " rmw : " < < rmw ) ; <nl> + } <nl> + <nl> + T doReadModifyWrite ( Modification mod , std : : memory_order mo ) { <nl> + T prev = doLoad ( mo , true ) ; <nl> + T next = mod ( prev ) ; <nl> + doStore ( next , mo , true ) ; <nl> + return prev ; <nl> + } <nl> + <nl> + bool doCompareExchange ( <nl> + T & expected , <nl> + T desired , <nl> + std : : memory_order success , <nl> + std : : memory_order failure , <nl> + bool spuriousFailures ) { <nl> + T current = getBuf ( ) . loadDirect ( ) ; <nl> + if ( current = = expected ) { <nl> + if ( ! spuriousFailures | | DeterministicSchedule : : getRandNumber ( 2 ) ) { <nl> + Modification mod = [ & ] ( const T & / * prev * / ) { return desired ; } ; <nl> + doReadModifyWrite ( mod , success ) ; <nl> + return true ; <nl> + } <nl> + } <nl> + expected = doLoad ( failure , true ) ; <nl> + assert ( expected = = current ) ; <nl> + return false ; <nl> + } <nl> + <nl> + RecordBuffer < T > & getBuf ( ) const { <nl> + assert ( bufs . count ( this ) = = 1 ) ; <nl> + return bufs . at ( this ) ; <nl> + } <nl> + <nl> + static std : : unordered_map < const BufferedAtomic < T > * , RecordBuffer < T > > bufs ; <nl> + mutable std : : atomic < std : : thread : : id > prevUnguardedAccess ; <nl> + } ; <nl> + <nl> + template < typename T > <nl> + std : : unordered_map < const BufferedAtomic < T > * , RecordBuffer < T > > <nl> + BufferedAtomic < T > : : bufs = <nl> + std : : unordered_map < const BufferedAtomic < T > * , RecordBuffer < T > > ( ) ; <nl> + } / / namespace test <nl> + <nl> + } / / namespace folly <nl> new file mode 100644 <nl> index 00000000000 . . c0c2c496857 <nl> mmm / dev / null <nl> ppp b / folly / test / BufferedAtomicTest . cpp <nl> <nl> + / * <nl> + * Copyright 2013 - present Facebook , Inc . <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include < folly / test / BufferedAtomic . h > <nl> + # include < folly / SingletonThreadLocal . h > <nl> + <nl> + # include < folly / portability / GFlags . h > <nl> + # include < folly / portability / GTest . 
h > <nl> + # include < random > <nl> + <nl> + using namespace folly : : test ; <nl> + using DSched = DeterministicSchedule ; <nl> + <nl> + template < typename T > <nl> + class RecordBufferTest : public RecordBuffer < T > { <nl> + public : <nl> + void assertOldestAllowed ( <nl> + size_t expected , <nl> + std : : memory_order mo , <nl> + const ThreadTimestamps & acqRelOrder ) { <nl> + size_t oldestAllowed = RecordBuffer < T > : : getOldestAllowed ( mo , acqRelOrder ) ; <nl> + ASSERT_EQ ( expected , RecordBuffer < T > : : history_ [ oldestAllowed ] . val_ ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct DSchedTimestampTest : public DSchedTimestamp { <nl> + explicit DSchedTimestampTest ( size_t v ) : DSchedTimestamp ( v ) { } <nl> + } ; <nl> + <nl> + TEST ( BufferedAtomic , basic ) { <nl> + RecordBufferTest < int > buf ; <nl> + DSchedThreadId tid ( 0 ) ; <nl> + ThreadInfo threadInfo ( tid ) ; <nl> + <nl> + ASSERT_TRUE ( <nl> + threadInfo . acqRelOrder_ . atLeastAsRecentAs ( tid , DSchedTimestampTest ( 1 ) ) ) ; <nl> + ASSERT_FALSE ( <nl> + threadInfo . acqRelOrder_ . atLeastAsRecentAs ( tid , DSchedTimestampTest ( 2 ) ) ) ; <nl> + <nl> + / / value stored is equal to ts at time of store <nl> + for ( int i = 2 ; i < 12 ; i + + ) { <nl> + buf . store ( tid , threadInfo , i , std : : memory_order_relaxed ) ; <nl> + } <nl> + <nl> + ASSERT_TRUE ( <nl> + threadInfo . acqRelOrder_ . atLeastAsRecentAs ( tid , DSchedTimestampTest ( 11 ) ) ) ; <nl> + ASSERT_FALSE ( <nl> + threadInfo . acqRelOrder_ . atLeastAsRecentAs ( tid , DSchedTimestampTest ( 12 ) ) ) ; <nl> + <nl> + ThreadTimestamps tts ; <nl> + buf . assertOldestAllowed ( 2 , std : : memory_order_relaxed , tts ) ; <nl> + <nl> + tts . setIfNotPresent ( tid , DSchedTimestampTest ( 8 ) ) ; <nl> + buf . assertOldestAllowed ( 8 , std : : memory_order_relaxed , tts ) ; <nl> + <nl> + tts . clear ( ) ; <nl> + tts . setIfNotPresent ( tid , DSchedTimestampTest ( 10 ) ) ; <nl> + buf . assertOldestAllowed ( 10 , std : : memory_order_relaxed , tts ) ; <nl> + <nl> + tts . clear ( ) ; <nl> + tts . setIfNotPresent ( tid , DSchedTimestampTest ( 115 ) ) ; <nl> + buf . assertOldestAllowed ( 11 , std : : memory_order_relaxed , tts ) ; <nl> + } <nl> + <nl> + TEST ( BufferedAtomic , seq_cst ) { <nl> + RecordBufferTest < int > buf ; <nl> + DSchedThreadId tid ( 0 ) ; <nl> + ThreadInfo threadInfo ( tid ) ; <nl> + <nl> + buf . store ( tid , threadInfo , 0 , std : : memory_order_relaxed ) ; <nl> + buf . store ( tid , threadInfo , 1 , std : : memory_order_seq_cst ) ; <nl> + buf . store ( tid , threadInfo , 2 , std : : memory_order_relaxed ) ; <nl> + <nl> + ThreadTimestamps tts ; <nl> + buf . assertOldestAllowed ( 0 , std : : memory_order_relaxed , tts ) ; <nl> + buf . assertOldestAllowed ( 0 , std : : memory_order_acquire , tts ) ; <nl> + buf . assertOldestAllowed ( 1 , std : : memory_order_seq_cst , tts ) ; <nl> + } <nl> + <nl> + TEST ( BufferedAtomic , transitive_sync ) { <nl> + RecordBufferTest < int > buf ; <nl> + DSchedThreadId tid0 ( 0 ) ; <nl> + DSchedThreadId tid1 ( 1 ) ; <nl> + DSchedThreadId tid2 ( 2 ) ; <nl> + ThreadInfo threadInfo0 ( tid0 ) ; <nl> + ThreadInfo threadInfo1 ( tid1 ) ; <nl> + ThreadInfo threadInfo2 ( tid2 ) ; <nl> + <nl> + buf . store ( tid0 , threadInfo0 , 0 , std : : memory_order_relaxed ) ; <nl> + buf . store ( tid0 , threadInfo0 , 1 , std : : memory_order_seq_cst ) ; <nl> + <nl> + int val = buf . load ( tid1 , threadInfo1 , std : : memory_order_seq_cst ) ; <nl> + ASSERT_EQ ( 1 , val ) ; <nl> + <nl> + buf . 
assertOldestAllowed ( <nl> + 0 , std : : memory_order_relaxed , threadInfo2 . acqRelOrder_ ) ; <nl> + threadInfo2 . acqRelOrder_ . sync ( threadInfo1 . acqRelOrder_ ) ; <nl> + buf . assertOldestAllowed ( <nl> + 1 , std : : memory_order_relaxed , threadInfo2 . acqRelOrder_ ) ; <nl> + } <nl> + <nl> + TEST ( BufferedAtomic , acq_rel ) { <nl> + RecordBufferTest < int > buf ; <nl> + DSchedThreadId tid0 ( 0 ) ; <nl> + DSchedThreadId tid1 ( 1 ) ; <nl> + ThreadInfo threadInfo0 ( tid0 ) ; <nl> + ThreadInfo threadInfo1 ( tid1 ) ; <nl> + <nl> + buf . store ( tid0 , threadInfo0 , 0 , std : : memory_order_relaxed ) ; <nl> + buf . store ( tid0 , threadInfo0 , 1 , std : : memory_order_release ) ; <nl> + while ( buf . load ( tid1 , threadInfo1 , std : : memory_order_relaxed ) = = 0 ) { <nl> + } <nl> + <nl> + ASSERT_TRUE ( threadInfo1 . acqFenceOrder_ . atLeastAsRecentAs ( <nl> + tid0 , DSchedTimestampTest ( 3 ) ) ) ; <nl> + ASSERT_FALSE ( threadInfo1 . acqFenceOrder_ . atLeastAsRecentAs ( <nl> + tid0 , DSchedTimestampTest ( 4 ) ) ) ; <nl> + ASSERT_FALSE ( <nl> + threadInfo1 . acqRelOrder_ . atLeastAsRecentAs ( tid0 , DSchedTimestampTest ( 1 ) ) ) ; <nl> + } <nl> + <nl> + TEST ( BufferedAtomic , atomic_buffer_thread_create_join_sync ) { <nl> + for ( int i = 0 ; i < 32 ; i + + ) { <nl> + DSched sched ( DSched : : uniform ( i ) ) ; <nl> + <nl> + DeterministicAtomicImpl < int , DeterministicSchedule , BufferedAtomic > x ; <nl> + <nl> + x . store ( 0 , std : : memory_order_relaxed ) ; <nl> + x . store ( 1 , std : : memory_order_relaxed ) ; <nl> + <nl> + std : : thread thread = DeterministicSchedule : : thread ( [ & ] ( ) { <nl> + ASSERT_EQ ( 1 , x . load ( std : : memory_order_relaxed ) ) ; <nl> + x . store ( 2 , std : : memory_order_relaxed ) ; <nl> + } ) ; <nl> + DeterministicSchedule : : join ( thread ) ; <nl> + <nl> + thread = DeterministicSchedule : : thread ( [ & ] ( ) { <nl> + ASSERT_EQ ( 2 , x . load ( std : : memory_order_relaxed ) ) ; <nl> + x . store ( 3 , std : : memory_order_relaxed ) ; <nl> + } ) ; <nl> + DeterministicSchedule : : join ( thread ) ; <nl> + <nl> + ASSERT_EQ ( 3 , x . load ( std : : memory_order_relaxed ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST ( BufferedAtomic , atomic_buffer_fence ) { <nl> + for ( int i = 0 ; i < 1024 ; i + + ) { <nl> + FOLLY_TEST_DSCHED_VLOG ( " seed : " < < i ) ; <nl> + DSched sched ( DSched : : uniform ( i ) ) ; <nl> + <nl> + DeterministicMutex mutex ; <nl> + mutex . lock ( ) ; <nl> + DeterministicAtomicImpl < int , DeterministicSchedule , BufferedAtomic > x ; <nl> + DeterministicAtomicImpl < int , DeterministicSchedule , BufferedAtomic > y ; <nl> + DeterministicAtomicImpl < int , DeterministicSchedule , BufferedAtomic > z ; <nl> + <nl> + x . store ( 0 , std : : memory_order_relaxed ) ; <nl> + y . store ( 0 , std : : memory_order_relaxed ) ; <nl> + z . store ( 0 , std : : memory_order_relaxed ) ; <nl> + <nl> + std : : thread threadA = DeterministicSchedule : : thread ( [ & ] ( ) { <nl> + x . store ( 1 , std : : memory_order_relaxed ) ; <nl> + <nl> + DeterministicSchedule : : atomic_thread_fence ( std : : memory_order_release ) ; <nl> + y . store ( 1 , std : : memory_order_relaxed ) ; <nl> + <nl> + mutex . lock ( ) ; <nl> + ASSERT_EQ ( 1 , z . load ( std : : memory_order_relaxed ) ) ; <nl> + mutex . unlock ( ) ; <nl> + } ) ; <nl> + std : : thread threadB = DeterministicSchedule : : thread ( [ & ] ( ) { <nl> + while ( y . load ( std : : memory_order_relaxed ) ! 
= 1 ) { <nl> + } <nl> + DeterministicSchedule : : atomic_thread_fence ( std : : memory_order_acquire ) ; <nl> + ASSERT_EQ ( 1 , x . load ( std : : memory_order_relaxed ) ) ; <nl> + } ) ; <nl> + DeterministicSchedule : : join ( threadB ) ; <nl> + z . store ( 1 , std : : memory_order_relaxed ) ; <nl> + mutex . unlock ( ) ; <nl> + DeterministicSchedule : : join ( threadA ) ; <nl> + } <nl> + } <nl> + <nl> + TEST ( BufferedAtomic , single_thread_unguarded_access ) { <nl> + DSched * sched = new DSched ( DSched : : uniform ( 0 ) ) ; <nl> + DeterministicAtomicImpl < int , DeterministicSchedule , BufferedAtomic > x ( 0 ) ; <nl> + delete sched ; <nl> + <nl> + x . store ( 1 ) ; <nl> + ASSERT_EQ ( 1 , x . load ( ) ) ; <nl> + } <nl> + <nl> + TEST ( BufferedAtomic , multiple_thread_unguarded_access ) { <nl> + DSched * sched = new DSched ( DSched : : uniform ( 0 ) ) ; <nl> + DeterministicAtomicImpl < int , DeterministicSchedule , BufferedAtomic > x ( 0 ) ; <nl> + delete sched ; <nl> + <nl> + / / simulate static construction / destruction or access to shared <nl> + / / DeterministicAtomic in pthread_setspecific callbacks after <nl> + / / DeterministicSchedule : : beforeThreadAccess ( ) has been run . <nl> + ASSERT_EQ ( 0 , x . load ( ) ) ; <nl> + auto t = std : : thread ( <nl> + [ & ] ( ) { ASSERT_DEATH ( x . store ( 1 ) , " prev = = std : : thread : : id ( ) " ) ; } ) ; <nl> + t . join ( ) ; <nl> + } <nl> mmm a / folly / test / DeterministicSchedule . cpp <nl> ppp b / folly / test / DeterministicSchedule . cpp <nl> namespace test { <nl> <nl> FOLLY_TLS sem_t * DeterministicSchedule : : tls_sem ; <nl> FOLLY_TLS DeterministicSchedule * DeterministicSchedule : : tls_sched ; <nl> - FOLLY_TLS unsigned DeterministicSchedule : : tls_threadId ; <nl> + FOLLY_TLS DSchedThreadId DeterministicSchedule : : tls_threadId ; <nl> thread_local AuxAct DeterministicSchedule : : tls_aux_act ; <nl> AuxChk DeterministicSchedule : : aux_chk ; <nl> <nl> static std : : unordered_map < <nl> <nl> static std : : mutex futexLock ; <nl> <nl> + void ThreadTimestamps : : sync ( const ThreadTimestamps & src ) { <nl> + if ( src . timestamps_ . size ( ) > timestamps_ . size ( ) ) { <nl> + timestamps_ . resize ( src . timestamps_ . size ( ) ) ; <nl> + } <nl> + for ( size_t i = 0 ; i < src . timestamps_ . size ( ) ; i + + ) { <nl> + timestamps_ [ i ] . sync ( src . timestamps_ [ i ] ) ; <nl> + } <nl> + } <nl> + <nl> + DSchedTimestamp ThreadTimestamps : : advance ( DSchedThreadId tid ) { <nl> + assert ( timestamps_ . size ( ) > tid . val ) ; <nl> + return timestamps_ [ tid . val ] . advance ( ) ; <nl> + } <nl> + <nl> + void ThreadTimestamps : : setIfNotPresent ( DSchedThreadId tid , DSchedTimestamp ts ) { <nl> + assert ( ts . initialized ( ) ) ; <nl> + if ( tid . val > = timestamps_ . size ( ) ) { <nl> + timestamps_ . resize ( tid . val + 1 ) ; <nl> + } <nl> + if ( ! timestamps_ [ tid . val ] . initialized ( ) ) { <nl> + timestamps_ [ tid . val ] . sync ( ts ) ; <nl> + } <nl> + } <nl> + <nl> + void ThreadTimestamps : : clear ( ) { <nl> + timestamps_ . clear ( ) ; <nl> + } <nl> + <nl> + bool ThreadTimestamps : : atLeastAsRecentAs ( DSchedThreadId tid , DSchedTimestamp ts ) <nl> + const { <nl> + / / It is not meaningful learn whether any instance is at least <nl> + / / as recent as timestamp 0 . <nl> + assert ( ts . initialized ( ) ) ; <nl> + if ( tid . val > = timestamps_ . size ( ) ) { <nl> + return false ; <nl> + } <nl> + return timestamps_ [ tid . val ] . 
atLeastAsRecentAs ( ts ) ; <nl> + } <nl> + <nl> + bool ThreadTimestamps : : atLeastAsRecentAsAny ( const ThreadTimestamps & src ) const { <nl> + size_t min = timestamps_ . size ( ) < src . timestamps_ . size ( ) <nl> + ? timestamps_ . size ( ) <nl> + : src . timestamps_ . size ( ) ; <nl> + for ( size_t i = 0 ; i < min ; i + + ) { <nl> + if ( src . timestamps_ [ i ] . initialized ( ) & & <nl> + timestamps_ [ i ] . atLeastAsRecentAs ( src . timestamps_ [ i ] ) ) { <nl> + return true ; <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + void ThreadSyncVar : : acquire ( ) { <nl> + ThreadInfo & threadInfo = DeterministicSchedule : : getCurrentThreadInfo ( ) ; <nl> + DSchedThreadId tid = DeterministicSchedule : : getThreadId ( ) ; <nl> + threadInfo . acqRelOrder_ . advance ( tid ) ; <nl> + threadInfo . acqRelOrder_ . sync ( order_ ) ; <nl> + } <nl> + <nl> + void ThreadSyncVar : : release ( ) { <nl> + ThreadInfo & threadInfo = DeterministicSchedule : : getCurrentThreadInfo ( ) ; <nl> + DSchedThreadId tid = DeterministicSchedule : : getThreadId ( ) ; <nl> + threadInfo . acqRelOrder_ . advance ( tid ) ; <nl> + order_ . sync ( threadInfo . acqRelOrder_ ) ; <nl> + } <nl> + <nl> + void ThreadSyncVar : : acq_rel ( ) { <nl> + ThreadInfo & threadInfo = DeterministicSchedule : : getCurrentThreadInfo ( ) ; <nl> + DSchedThreadId tid = DeterministicSchedule : : getThreadId ( ) ; <nl> + threadInfo . acqRelOrder_ . advance ( tid ) ; <nl> + threadInfo . acqRelOrder_ . sync ( order_ ) ; <nl> + order_ . sync ( threadInfo . acqRelOrder_ ) ; <nl> + } <nl> + <nl> DeterministicSchedule : : DeterministicSchedule ( <nl> const std : : function < size_t ( size_t ) > & scheduler ) <nl> - : scheduler_ ( scheduler ) , nextThreadId_ ( 1 ) , step_ ( 0 ) { <nl> + : scheduler_ ( scheduler ) , nextThreadId_ ( 0 ) , step_ ( 0 ) { <nl> assert ( tls_sem = = nullptr ) ; <nl> assert ( tls_sched = = nullptr ) ; <nl> assert ( tls_aux_act = = nullptr ) ; <nl> DeterministicSchedule : : DeterministicSchedule ( <nl> sem_init ( tls_sem , 0 , 1 ) ; <nl> sems_ . push_back ( tls_sem ) ; <nl> <nl> + tls_threadId = nextThreadId_ + + ; <nl> + threadInfoMap_ . emplace_back ( tls_threadId ) ; <nl> tls_sched = this ; <nl> } <nl> <nl> int DeterministicSchedule : : getcpu ( <nl> unsigned * cpu , <nl> unsigned * node , <nl> void * / * unused * / ) { <nl> - if ( ! tls_threadId & & tls_sched ) { <nl> - beforeSharedAccess ( ) ; <nl> - tls_threadId = tls_sched - > nextThreadId_ + + ; <nl> - afterSharedAccess ( ) ; <nl> - } <nl> if ( cpu ) { <nl> - * cpu = tls_threadId ; <nl> + * cpu = tls_threadId . val ; <nl> } <nl> if ( node ) { <nl> - * node = tls_threadId ; <nl> + * node = tls_threadId . val ; <nl> } <nl> return 0 ; <nl> } <nl> void DeterministicSchedule : : afterThreadCreate ( sem_t * sem ) { <nl> beforeSharedAccess ( ) ; <nl> if ( active_ . count ( std : : this_thread : : get_id ( ) ) = = 1 ) { <nl> started = true ; <nl> + tls_threadId = nextThreadId_ + + ; <nl> + assert ( tls_threadId . val = = threadInfoMap_ . size ( ) ) ; <nl> + threadInfoMap_ . emplace_back ( tls_threadId ) ; <nl> } <nl> afterSharedAccess ( ) ; <nl> } <nl> + atomic_thread_fence ( std : : memory_order_seq_cst ) ; <nl> } <nl> <nl> void DeterministicSchedule : : beforeThreadExit ( ) { <nl> assert ( tls_sched = = this ) ; <nl> + <nl> + atomic_thread_fence ( std : : memory_order_seq_cst ) ; <nl> beforeSharedAccess ( ) ; <nl> auto parent = joins_ . find ( std : : this_thread : : get_id ( ) ) ; <nl> if ( parent ! = joins_ . 
end ( ) ) { <nl> void DeterministicSchedule : : join ( std : : thread & child ) { <nl> } <nl> afterSharedAccess ( ) ; <nl> } <nl> + atomic_thread_fence ( std : : memory_order_seq_cst ) ; <nl> FOLLY_TEST_DSCHED_VLOG ( " joined " < < std : : hex < < child . get_id ( ) ) ; <nl> child . join ( ) ; <nl> } <nl> void DeterministicSchedule : : callAux ( bool success ) { <nl> } <nl> } <nl> <nl> + static std : : unordered_map < sem_t * , std : : unique_ptr < ThreadSyncVar > > semSyncVar ; <nl> + <nl> void DeterministicSchedule : : post ( sem_t * sem ) { <nl> beforeSharedAccess ( ) ; <nl> + if ( semSyncVar . count ( sem ) = = 0 ) { <nl> + semSyncVar [ sem ] = std : : make_unique < ThreadSyncVar > ( ) ; <nl> + } <nl> + semSyncVar [ sem ] - > release ( ) ; <nl> sem_post ( sem ) ; <nl> FOLLY_TEST_DSCHED_VLOG ( " sem_post ( " < < sem < < " ) " ) ; <nl> afterSharedAccess ( ) ; <nl> void DeterministicSchedule : : post ( sem_t * sem ) { <nl> <nl> bool DeterministicSchedule : : tryWait ( sem_t * sem ) { <nl> beforeSharedAccess ( ) ; <nl> + if ( semSyncVar . count ( sem ) = = 0 ) { <nl> + semSyncVar [ sem ] = std : : make_unique < ThreadSyncVar > ( ) ; <nl> + } <nl> + <nl> int rv = sem_trywait ( sem ) ; <nl> int e = rv = = 0 ? 0 : errno ; <nl> FOLLY_TEST_DSCHED_VLOG ( <nl> " sem_trywait ( " < < sem < < " ) = " < < rv < < " errno = " < < e ) ; <nl> + if ( rv = = 0 ) { <nl> + semSyncVar [ sem ] - > acq_rel ( ) ; <nl> + } else { <nl> + semSyncVar [ sem ] - > acquire ( ) ; <nl> + } <nl> + <nl> afterSharedAccess ( ) ; <nl> if ( rv = = 0 ) { <nl> return true ; <nl> void DeterministicSchedule : : wait ( sem_t * sem ) { <nl> } <nl> } <nl> <nl> + ThreadInfo & DeterministicSchedule : : getCurrentThreadInfo ( ) { <nl> + auto sched = tls_sched ; <nl> + assert ( sched ) ; <nl> + assert ( tls_threadId . val < sched - > threadInfoMap_ . size ( ) ) ; <nl> + return sched - > threadInfoMap_ [ tls_threadId . val ] ; <nl> + } <nl> + <nl> + void DeterministicSchedule : : atomic_thread_fence ( std : : memory_order mo ) { <nl> + if ( ! tls_sched ) { <nl> + std : : atomic_thread_fence ( mo ) ; <nl> + return ; <nl> + } <nl> + beforeSharedAccess ( ) ; <nl> + ThreadInfo & threadInfo = getCurrentThreadInfo ( ) ; <nl> + switch ( mo ) { <nl> + case std : : memory_order_relaxed : <nl> + assert ( false ) ; <nl> + break ; <nl> + case std : : memory_order_consume : <nl> + case std : : memory_order_acquire : <nl> + threadInfo . acqRelOrder_ . sync ( threadInfo . acqFenceOrder_ ) ; <nl> + break ; <nl> + case std : : memory_order_release : <nl> + threadInfo . relFenceOrder_ . sync ( threadInfo . acqRelOrder_ ) ; <nl> + break ; <nl> + case std : : memory_order_acq_rel : <nl> + threadInfo . acqRelOrder_ . sync ( threadInfo . acqFenceOrder_ ) ; <nl> + threadInfo . relFenceOrder_ . sync ( threadInfo . acqRelOrder_ ) ; <nl> + break ; <nl> + case std : : memory_order_seq_cst : <nl> + threadInfo . acqRelOrder_ . sync ( threadInfo . acqFenceOrder_ ) ; <nl> + threadInfo . acqRelOrder_ . sync ( tls_sched - > seqCstFenceOrder_ ) ; <nl> + tls_sched - > seqCstFenceOrder_ = threadInfo . acqRelOrder_ ; <nl> + threadInfo . relFenceOrder_ . sync ( threadInfo . 
acqRelOrder_ ) ; <nl> + break ; <nl> + } <nl> + FOLLY_TEST_DSCHED_VLOG ( " fence : " < < folly : : detail : : memory_order_to_str ( mo ) ) ; <nl> + afterSharedAccess ( ) ; <nl> + } <nl> + <nl> detail : : FutexResult futexWaitImpl ( <nl> const detail : : Futex < DeterministicAtomic > * futex , <nl> uint32_t expected , <nl> detail : : FutexResult futexWaitImpl ( <nl> " futexWait ( " < < futex < < " , " < < std : : hex < < expected < < " , . . , " <nl> < < std : : hex < < waitMask < < " ) beginning . . " ) ; <nl> futexLock . lock ( ) ; <nl> + / / load_direct avoids deadlock on inner call to beforeSharedAccess <nl> if ( futex - > load_direct ( ) = = expected ) { <nl> auto & queue = futexQueues [ futex ] ; <nl> queue . emplace_back ( waitMask , & awoken ) ; <nl> mmm a / folly / test / DeterministicSchedule . h <nl> ppp b / folly / test / DeterministicSchedule . h <nl> namespace test { <nl> using AuxAct = std : : function < void ( bool ) > ; <nl> using AuxChk = std : : function < void ( uint64_t ) > ; <nl> <nl> + struct DSchedThreadId { <nl> + unsigned val ; <nl> + explicit constexpr DSchedThreadId ( ) : val ( 0 ) { } <nl> + explicit constexpr DSchedThreadId ( unsigned v ) : val ( v ) { } <nl> + unsigned operator = ( unsigned v ) { <nl> + return val = v ; <nl> + } <nl> + } ; <nl> + <nl> + class DSchedTimestamp { <nl> + public : <nl> + constexpr explicit DSchedTimestamp ( ) : val_ ( 0 ) { } <nl> + DSchedTimestamp advance ( ) { <nl> + return DSchedTimestamp ( + + val_ ) ; <nl> + } <nl> + bool atLeastAsRecentAs ( const DSchedTimestamp & other ) const { <nl> + return val_ > = other . val_ ; <nl> + } <nl> + void sync ( const DSchedTimestamp & other ) { <nl> + val_ = std : : max ( val_ , other . val_ ) ; <nl> + } <nl> + bool initialized ( ) const { <nl> + return val_ > 0 ; <nl> + } <nl> + static constexpr DSchedTimestamp initial ( ) { <nl> + return DSchedTimestamp ( 1 ) ; <nl> + } <nl> + <nl> + protected : <nl> + constexpr explicit DSchedTimestamp ( size_t v ) : val_ ( v ) { } <nl> + <nl> + private : <nl> + size_t val_ ; <nl> + } ; <nl> + <nl> + class ThreadTimestamps { <nl> + public : <nl> + void sync ( const ThreadTimestamps & src ) ; <nl> + DSchedTimestamp advance ( DSchedThreadId tid ) ; <nl> + <nl> + void setIfNotPresent ( DSchedThreadId tid , DSchedTimestamp ts ) ; <nl> + void clear ( ) ; <nl> + bool atLeastAsRecentAs ( DSchedThreadId tid , DSchedTimestamp ts ) const ; <nl> + bool atLeastAsRecentAsAny ( const ThreadTimestamps & src ) const ; <nl> + <nl> + private : <nl> + std : : vector < DSchedTimestamp > timestamps_ ; <nl> + } ; <nl> + <nl> + struct ThreadInfo { <nl> + ThreadInfo ( ) = delete ; <nl> + explicit ThreadInfo ( DSchedThreadId tid ) { <nl> + acqRelOrder_ . setIfNotPresent ( tid , DSchedTimestamp : : initial ( ) ) ; <nl> + } <nl> + ThreadTimestamps acqRelOrder_ ; <nl> + ThreadTimestamps acqFenceOrder_ ; <nl> + ThreadTimestamps relFenceOrder_ ; <nl> + } ; <nl> + <nl> + class ThreadSyncVar { <nl> + public : <nl> + ThreadSyncVar ( ) = default ; <nl> + <nl> + void acquire ( ) ; <nl> + void release ( ) ; <nl> + void acq_rel ( ) ; <nl> + <nl> + private : <nl> + ThreadTimestamps order_ ; <nl> + } ; <nl> + <nl> / * * <nl> * DeterministicSchedule coordinates the inter - thread communication of a <nl> * set of threads under test , so that despite concurrency the execution is <nl> class DeterministicSchedule : boost : : noncopyable { <nl> template < typename Func , typename . . . Args > <nl> static inline std : : thread thread ( Func & & func , Args & & . . . 
args ) { <nl> / / TODO : maybe future versions of gcc will allow forwarding to thread <nl> + atomic_thread_fence ( std : : memory_order_seq_cst ) ; <nl> auto sched = tls_sched ; <nl> auto sem = sched ? sched - > beforeThreadCreate ( ) : nullptr ; <nl> auto child = std : : thread ( <nl> class DeterministicSchedule : boost : : noncopyable { <nl> / * * Add sem back into sems_ * / <nl> static void reschedule ( sem_t * sem ) ; <nl> <nl> + static bool isActive ( ) { <nl> + return tls_sched ! = nullptr ; <nl> + } <nl> + <nl> + static DSchedThreadId getThreadId ( ) { <nl> + assert ( tls_sched ! = nullptr ) ; <nl> + return tls_threadId ; <nl> + } <nl> + <nl> + static ThreadInfo & getCurrentThreadInfo ( ) ; <nl> + <nl> + static void atomic_thread_fence ( std : : memory_order mo ) ; <nl> + <nl> private : <nl> static FOLLY_TLS sem_t * tls_sem ; <nl> static FOLLY_TLS DeterministicSchedule * tls_sched ; <nl> - static FOLLY_TLS unsigned tls_threadId ; <nl> + static FOLLY_TLS DSchedThreadId tls_threadId ; <nl> static thread_local AuxAct tls_aux_act ; <nl> static AuxChk aux_chk ; <nl> <nl> class DeterministicSchedule : boost : : noncopyable { <nl> std : : vector < sem_t * > sems_ ; <nl> std : : unordered_set < std : : thread : : id > active_ ; <nl> std : : unordered_map < std : : thread : : id , sem_t * > joins_ ; <nl> + <nl> + std : : vector < ThreadInfo > threadInfoMap_ ; <nl> + ThreadTimestamps seqCstFenceOrder_ ; <nl> + <nl> unsigned nextThreadId_ ; <nl> / * step_ keeps count of shared accesses that correspond to user <nl> * synchronization steps ( atomic accesses for now ) . <nl> void atomic_notify_all ( const DeterministicAtomic < Integer > * ) { } <nl> struct DeterministicMutex { <nl> std : : mutex m ; <nl> std : : queue < sem_t * > waiters_ ; <nl> + ThreadSyncVar syncVar_ ; <nl> <nl> DeterministicMutex ( ) = default ; <nl> ~ DeterministicMutex ( ) = default ; <nl> struct DeterministicMutex { <nl> / / Wait to be scheduled by unlock <nl> DeterministicSchedule : : beforeSharedAccess ( ) ; <nl> } <nl> + if ( DeterministicSchedule : : isActive ( ) ) { <nl> + syncVar_ . acquire ( ) ; <nl> + } <nl> DeterministicSchedule : : afterSharedAccess ( ) ; <nl> } <nl> <nl> bool try_lock ( ) { <nl> DeterministicSchedule : : beforeSharedAccess ( ) ; <nl> bool rv = m . try_lock ( ) ; <nl> + if ( rv & & DeterministicSchedule : : isActive ( ) ) { <nl> + syncVar_ . acquire ( ) ; <nl> + } <nl> FOLLY_TEST_DSCHED_VLOG ( this < < " . try_lock ( ) - > " < < rv ) ; <nl> DeterministicSchedule : : afterSharedAccess ( ) ; <nl> return rv ; <nl> struct DeterministicMutex { <nl> <nl> void unlock ( ) { <nl> FOLLY_TEST_DSCHED_VLOG ( this < < " . unlock ( ) " ) ; <nl> - m . unlock ( ) ; <nl> DeterministicSchedule : : beforeSharedAccess ( ) ; <nl> + m . unlock ( ) ; <nl> + if ( DeterministicSchedule : : isActive ( ) ) { <nl> + syncVar_ . release ( ) ; <nl> + } <nl> if ( ! waiters_ . empty ( ) ) { <nl> sem_t * sem = waiters_ . front ( ) ; <nl> DeterministicSchedule : : reschedule ( sem ) ; <nl> mmm a / folly / test / DeterministicScheduleTest . cpp <nl> ppp b / folly / test / DeterministicScheduleTest . cpp <nl> TEST ( DeterministicSchedule , global_invariants ) { <nl> } <nl> } <nl> <nl> + struct DSchedTimestampTest : public DSchedTimestamp { <nl> + explicit DSchedTimestampTest ( size_t v ) : DSchedTimestamp ( v ) { } <nl> + } ; <nl> + <nl> + TEST ( DeterministicSchedule , thread_timestamps ) { <nl> + ThreadTimestamps tss ; <nl> + DSchedThreadId tid0 ( 0 ) ; <nl> + DSchedThreadId tid1 ( 1 ) ; <nl> + <nl> + ASSERT_FALSE ( tss . 
atLeastAsRecentAs ( tid0 , DSchedTimestampTest ( 1 ) ) ) ; <nl> + <nl> + tss . setIfNotPresent ( tid0 , DSchedTimestampTest ( 1 ) ) ; <nl> + ASSERT_TRUE ( tss . atLeastAsRecentAs ( tid0 , DSchedTimestampTest ( 1 ) ) ) ; <nl> + ASSERT_FALSE ( tss . atLeastAsRecentAs ( tid0 , DSchedTimestampTest ( 2 ) ) ) ; <nl> + ASSERT_FALSE ( tss . atLeastAsRecentAs ( tid1 , DSchedTimestampTest ( 1 ) ) ) ; <nl> + <nl> + tss . setIfNotPresent ( tid0 , DSchedTimestampTest ( 2 ) ) ; <nl> + ASSERT_FALSE ( tss . atLeastAsRecentAs ( tid0 , DSchedTimestampTest ( 2 ) ) ) ; <nl> + <nl> + auto ts = tss . advance ( tid0 ) ; <nl> + ASSERT_TRUE ( ts . atLeastAsRecentAs ( DSchedTimestampTest ( 2 ) ) ) ; <nl> + ASSERT_FALSE ( ts . atLeastAsRecentAs ( DSchedTimestampTest ( 3 ) ) ) ; <nl> + ASSERT_TRUE ( tss . atLeastAsRecentAs ( tid0 , DSchedTimestampTest ( 2 ) ) ) ; <nl> + ASSERT_FALSE ( tss . atLeastAsRecentAs ( tid1 , DSchedTimestampTest ( 1 ) ) ) ; <nl> + <nl> + ThreadTimestamps tss2 ; <nl> + tss2 . setIfNotPresent ( tid1 , DSchedTimestampTest ( 3 ) ) ; <nl> + ASSERT_FALSE ( tss2 . atLeastAsRecentAs ( tid1 , DSchedTimestampTest ( 4 ) ) ) ; <nl> + ASSERT_TRUE ( tss2 . atLeastAsRecentAs ( tid1 , DSchedTimestampTest ( 3 ) ) ) ; <nl> + <nl> + ASSERT_FALSE ( tss . atLeastAsRecentAsAny ( tss2 ) ) ; <nl> + tss . sync ( tss2 ) ; <nl> + ASSERT_TRUE ( tss . atLeastAsRecentAs ( tid1 , DSchedTimestampTest ( 3 ) ) ) ; <nl> + ASSERT_FALSE ( tss . atLeastAsRecentAs ( tid1 , DSchedTimestampTest ( 4 ) ) ) ; <nl> + <nl> + ThreadTimestamps tss3 ; <nl> + tss3 . setIfNotPresent ( tid1 , DSchedTimestampTest ( 4 ) ) ; <nl> + ASSERT_TRUE ( tss3 . atLeastAsRecentAsAny ( tss2 ) ) ; <nl> + ASSERT_FALSE ( tss2 . atLeastAsRecentAsAny ( tss3 ) ) ; <nl> + <nl> + ThreadTimestamps tss4 , tss5 ; <nl> + tss4 . setIfNotPresent ( DSchedThreadId ( 10 ) , DSchedTimestampTest ( 5 ) ) ; <nl> + tss5 . setIfNotPresent ( DSchedThreadId ( 11 ) , DSchedTimestampTest ( 5 ) ) ; <nl> + ASSERT_FALSE ( tss4 . atLeastAsRecentAsAny ( tss5 ) ) ; <nl> + ASSERT_FALSE ( tss5 . atLeastAsRecentAsAny ( tss4 ) ) ; <nl> + } <nl> + <nl> int main ( int argc , char * * argv ) { <nl> testing : : InitGoogleTest ( & argc , argv ) ; <nl> gflags : : ParseCommandLineFlags ( & argc , & argv , true ) ; <nl>
DeterministicSchedule : Introduce BufferedAtomic
facebook/folly
140f6df76e21262a761f84fd9e82b2b7117bd8ba
2018-11-30T17:09:46Z
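The ThreadTimestamps class this commit introduces is, at its core, a per-thread vector clock: advance ticks the caller's own component, sync takes a pairwise maximum, and atLeastAsRecentAs asks whether one thread has transitively observed another's action. A minimal standalone sketch of that idea, assuming dense thread ids starting at 0; the method names mirror the commit, but the struct itself is illustrative, not folly's implementation.

#include <algorithm>
#include <cstddef>
#include <vector>

// Minimal vector clock over dense thread ids.
struct VectorClock {
    std::vector<std::size_t> ts;  // ts[i] = latest known timestamp of thread i

    // Merge knowledge from another clock (pairwise max), the way a
    // release store / acquire load pair propagates ordering.
    void sync(const VectorClock& src) {
        if (src.ts.size() > ts.size()) ts.resize(src.ts.size(), 0);
        for (std::size_t i = 0; i < src.ts.size(); ++i)
            ts[i] = std::max(ts[i], src.ts[i]);
    }

    // A thread ticks its own component on each synchronizing action.
    std::size_t advance(std::size_t tid) {
        if (tid >= ts.size()) ts.resize(tid + 1, 0);
        return ++ts[tid];
    }

    // Has this clock (transitively) observed thread `tid` at time `t`?
    bool atLeastAsRecentAs(std::size_t tid, std::size_t t) const {
        return tid < ts.size() && ts[tid] >= t;
    }
};

In the commit, a relaxed load may legally return any buffered record that the reader's clock has not yet ordered before it; getOldestAllowed computes exactly that cut-off over the store history.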
mmm a / src / common / message - template . h <nl> ppp b / src / common / message - template . h <nl> namespace internal { <nl> T ( BigIntDivZero , " Division by zero " ) \ <nl> T ( BigIntNegativeExponent , " Exponent must be positive " ) \ <nl> T ( BigIntTooBig , " Maximum BigInt size exceeded " ) \ <nl> + T ( CantSetOptionXWhenYIsUsed , " Can ' t set option % when % is used " ) \ <nl> T ( DateRange , " Provided date is not in valid range . " ) \ <nl> T ( ExpectedLocation , \ <nl> " Expected letters optionally connected with underscores or hyphens for " \ <nl> mmm a / src / objects / js - date - time - format . cc <nl> ppp b / src / objects / js - date - time - format . cc <nl> MaybeHandle < JSDateTimeFormat > JSDateTimeFormat : : New ( <nl> / / iii . If p is not undefined , then <nl> / / 1 . Throw a TypeError exception . <nl> if ( skeleton . length ( ) > 0 ) { <nl> - THROW_NEW_ERROR ( isolate , <nl> - NewTypeError ( MessageTemplate : : kInvalid , <nl> - factory - > NewStringFromStaticChars ( " option " ) , <nl> - date_style ! = DateTimeStyle : : kUndefined <nl> - ? factory - > dateStyle_string ( ) <nl> - : factory - > timeStyle_string ( ) ) , <nl> - JSDateTimeFormat ) ; <nl> + std : : string prop ; <nl> + for ( const auto & item : GetPatternItems ( ) ) { <nl> + for ( const auto & pair : item . pairs ) { <nl> + if ( skeleton . find ( pair . pattern ) ! = std : : string : : npos ) { <nl> + prop . assign ( item . property ) ; <nl> + break ; <nl> + } <nl> + } <nl> + if ( ! prop . empty ( ) ) { <nl> + break ; <nl> + } <nl> + } <nl> + if ( prop . empty ( ) & & skeleton . find ( " S " ) ! = std : : string : : npos ) { <nl> + prop . assign ( " fractionalSecondDigits " ) ; <nl> + } <nl> + if ( ! prop . empty ( ) ) { <nl> + THROW_NEW_ERROR ( <nl> + isolate , <nl> + NewTypeError ( MessageTemplate : : kCantSetOptionXWhenYIsUsed , <nl> + factory - > NewStringFromAsciiChecked ( prop . c_str ( ) ) , <nl> + date_style ! = DateTimeStyle : : kUndefined <nl> + ? factory - > dateStyle_string ( ) <nl> + : factory - > timeStyle_string ( ) ) , <nl> + JSDateTimeFormat ) ; <nl> + } <nl> + UNREACHABLE ( ) ; <nl> } <nl> / / b . Let pattern be DateTimeStylePattern ( dateStyle , timeStyle , <nl> / / dataLocaleData , hc ) . <nl> mmm a / test / cctest / interpreter / bytecode_expectations / PrivateAccessorAccess . golden <nl> ppp b / test / cctest / interpreter / bytecode_expectations / PrivateAccessorAccess . 
golden <nl> bytecodes : [ <nl> B ( Mov ) , R ( this ) , R ( 0 ) , <nl> B ( Mov ) , R ( context ) , R ( 2 ) , <nl> / * 48 E > * / B ( CallRuntime ) , U16 ( Runtime : : kAddPrivateBrand ) , R ( 0 ) , U8 ( 3 ) , <nl> - / * 53 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 266 ) , <nl> + / * 53 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 267 ) , <nl> B ( Star ) , R ( 3 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 4 ) , <nl> bytecodes : [ <nl> B ( Mov ) , R ( this ) , R ( 0 ) , <nl> B ( Mov ) , R ( context ) , R ( 2 ) , <nl> / * 41 E > * / B ( CallRuntime ) , U16 ( Runtime : : kAddPrivateBrand ) , R ( 0 ) , U8 ( 3 ) , <nl> - / * 46 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 265 ) , <nl> + / * 46 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 266 ) , <nl> B ( Star ) , R ( 3 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 4 ) , <nl> bytecodes : [ <nl> B ( Mov ) , R ( this ) , R ( 0 ) , <nl> B ( Mov ) , R ( context ) , R ( 2 ) , <nl> / * 48 E > * / B ( CallRuntime ) , U16 ( Runtime : : kAddPrivateBrand ) , R ( 0 ) , U8 ( 3 ) , <nl> - / * 53 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 266 ) , <nl> + / * 53 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 267 ) , <nl> B ( Star ) , R ( 3 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 4 ) , <nl> bytecodes : [ <nl> B ( Mov ) , R ( this ) , R ( 0 ) , <nl> B ( Mov ) , R ( context ) , R ( 2 ) , <nl> / * 41 E > * / B ( CallRuntime ) , U16 ( Runtime : : kAddPrivateBrand ) , R ( 0 ) , U8 ( 3 ) , <nl> - / * 46 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 265 ) , <nl> + / * 46 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 266 ) , <nl> B ( Star ) , R ( 4 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 5 ) , <nl> mmm a / test / cctest / interpreter / bytecode_expectations / PrivateMethodAccess . golden <nl> ppp b / test / cctest / interpreter / bytecode_expectations / PrivateMethodAccess . golden <nl> bytecodes : [ <nl> B ( Mov ) , R ( this ) , R ( 0 ) , <nl> B ( Mov ) , R ( context ) , R ( 2 ) , <nl> / * 44 E > * / B ( CallRuntime ) , U16 ( Runtime : : kAddPrivateBrand ) , R ( 0 ) , U8 ( 3 ) , <nl> - / * 49 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 264 ) , <nl> + / * 49 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 265 ) , <nl> B ( Star ) , R ( 3 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 4 ) , <nl> bytecodes : [ <nl> B ( Mov ) , R ( this ) , R ( 0 ) , <nl> B ( Mov ) , R ( context ) , R ( 2 ) , <nl> / * 44 E > * / B ( CallRuntime ) , U16 ( Runtime : : kAddPrivateBrand ) , R ( 0 ) , U8 ( 3 ) , <nl> - / * 49 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 264 ) , <nl> + / * 49 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 265 ) , <nl> B ( Star ) , R ( 3 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 4 ) , <nl> mmm a / test / cctest / interpreter / bytecode_expectations / StaticPrivateMethodAccess . golden <nl> ppp b / test / cctest / interpreter / bytecode_expectations / StaticPrivateMethodAccess . 
golden <nl> bytecodes : [ <nl> B ( TestReferenceEqual ) , R ( this ) , <nl> B ( Mov ) , R ( this ) , R ( 1 ) , <nl> B ( JumpIfTrue ) , U8 ( 18 ) , <nl> - B ( Wide ) , B ( LdaSmi ) , I16 ( 262 ) , <nl> + B ( Wide ) , B ( LdaSmi ) , I16 ( 263 ) , <nl> B ( Star ) , R ( 2 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 3 ) , <nl> frame size : 2 <nl> parameter count : 1 <nl> bytecode array length : 16 <nl> bytecodes : [ <nl> - / * 56 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 264 ) , <nl> + / * 56 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 265 ) , <nl> B ( Star ) , R ( 0 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 1 ) , <nl> frame size : 2 <nl> parameter count : 1 <nl> bytecode array length : 16 <nl> bytecodes : [ <nl> - / * 56 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 264 ) , <nl> + / * 56 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 265 ) , <nl> B ( Star ) , R ( 0 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 1 ) , <nl> bytecodes : [ <nl> / * 94 E > * / B ( TestReferenceEqual ) , R ( this ) , <nl> B ( Mov ) , R ( this ) , R ( 0 ) , <nl> B ( JumpIfTrue ) , U8 ( 18 ) , <nl> - B ( Wide ) , B ( LdaSmi ) , I16 ( 262 ) , <nl> + B ( Wide ) , B ( LdaSmi ) , I16 ( 263 ) , <nl> B ( Star ) , R ( 2 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 3 ) , <nl> bytecodes : [ <nl> / * 109 E > * / B ( TestReferenceEqual ) , R ( this ) , <nl> B ( Mov ) , R ( this ) , R ( 1 ) , <nl> B ( JumpIfTrue ) , U8 ( 18 ) , <nl> - B ( Wide ) , B ( LdaSmi ) , I16 ( 263 ) , <nl> + B ( Wide ) , B ( LdaSmi ) , I16 ( 264 ) , <nl> B ( Star ) , R ( 3 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 4 ) , <nl> bytecodes : [ <nl> / * 133 E > * / B ( TestReferenceEqual ) , R ( this ) , <nl> B ( Mov ) , R ( this ) , R ( 0 ) , <nl> B ( JumpIfTrue ) , U8 ( 18 ) , <nl> - B ( Wide ) , B ( LdaSmi ) , I16 ( 262 ) , <nl> + B ( Wide ) , B ( LdaSmi ) , I16 ( 263 ) , <nl> B ( Star ) , R ( 2 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 3 ) , <nl> frame size : 2 <nl> parameter count : 1 <nl> bytecode array length : 16 <nl> bytecodes : [ <nl> - / * 60 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 266 ) , <nl> + / * 60 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 267 ) , <nl> B ( Star ) , R ( 0 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 1 ) , <nl> frame size : 2 <nl> parameter count : 1 <nl> bytecode array length : 16 <nl> bytecodes : [ <nl> - / * 53 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 265 ) , <nl> + / * 53 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 266 ) , <nl> B ( Star ) , R ( 0 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 1 ) , <nl> frame size : 2 <nl> parameter count : 1 <nl> bytecode array length : 16 <nl> bytecodes : [ <nl> - / * 60 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 266 ) , <nl> + / * 60 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 267 ) , <nl> B ( Star ) , R ( 0 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 1 ) , <nl> frame size : 3 <nl> parameter count : 1 <nl> bytecode array length : 16 <nl> bytecodes : [ <nl> - / * 46 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 265 ) , <nl> + / * 46 S > * / B ( Wide ) , B ( LdaSmi ) , I16 ( 266 ) , <nl> B ( Star ) , R ( 1 ) , <nl> B ( LdaConstant ) , U8 ( 0 ) , <nl> B ( Star ) , R ( 2 ) , <nl> mmm a / test / intl / regress - 10438 . js <nl> ppp b / test / intl / regress - 10438 . js <nl> assertThrows ( <nl> ( ) = > ( new Intl . 
DateTimeFormat ( <nl> " en " , { timeStyle : " short " , fractionalSecondDigits : 3 } ) ) , <nl> TypeError , <nl> - " Invalid option : timeStyle " ) ; <nl> + " Can ' t set option fractionalSecondDigits when timeStyle is used " ) ; <nl> <nl> assertThrows ( <nl> ( ) = > ( new Intl . DateTimeFormat ( <nl> " en " , { dateStyle : " short " , fractionalSecondDigits : 3 } ) ) , <nl> TypeError , <nl> - " Invalid option : dateStyle " ) ; <nl> + " Can ' t set option fractionalSecondDigits when dateStyle is used " ) ; <nl> mmm a / test / intl / regress - 10613 . js <nl> ppp b / test / intl / regress - 10613 . js <nl> let opt = { <nl> day : ' 2 - digit ' , <nl> hour : ' 2 - digit ' , <nl> minute : ' 2 - digit ' , <nl> + fractionalSecondDigits : 2 , <nl> } ; <nl> <nl> let keys = Object . keys ( opt ) ; <nl> testTimeStyle . timeStyle = ' long ' ; <nl> for ( key of keys ) { <nl> assertThrows ( <nl> ( ) = > new Intl . DateTimeFormat ( ' en ' , testDateStyle ) , <nl> - TypeError , " Invalid option : dateStyle " ) ; <nl> + TypeError , " Can ' t set option " + key + " when dateStyle is used " ) ; <nl> assertThrows ( <nl> ( ) = > new Intl . DateTimeFormat ( ' en ' , testTimeStyle ) , <nl> - TypeError , " Invalid option : timeStyle " ) ; <nl> + TypeError , " Can ' t set option " + key + " when timeStyle is used " ) ; <nl> testDateStyle [ key ] = undefined ; <nl> testTimeStyle [ key ] = undefined ; <nl> } <nl>
Use better error messages for dateStyle / timeStyle
v8/v8
5d988ea3269cd2ba3b708c92c5d93742753263e8
2020-09-11T11:26:50Z
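The improved error message works by scanning the ICU date-time skeleton for the pattern character contributed by each explicitly set option, then naming that option in the TypeError. A simplified, table-driven sketch of the lookup; the mapping entries here are an illustrative subset, not V8's full, locale-aware GetPatternItems table.

#include <string>
#include <utility>
#include <vector>

// Illustrative subset of pattern-character -> option-name mappings.
static const std::vector<std::pair<char, std::string>> kPatternProps = {
    {'y', "year"},   {'M', "month"},  {'d', "day"},
    {'H', "hour"},   {'h', "hour"},   {'m', "minute"},
    {'s', "second"}, {'S', "fractionalSecondDigits"},
};

// Return the first option responsible for a non-empty skeleton, so the
// error can say which option conflicts with dateStyle / timeStyle.
std::string conflicting_property(const std::string& skeleton) {
    for (const auto& [pattern, prop] : kPatternProps)
        if (skeleton.find(pattern) != std::string::npos) return prop;
    return "";
}

For instance, conflicting_property("yMd") yields "year", so the thrown TypeError can name the offending option instead of the generic "Invalid option : dateStyle" the old code produced.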
mmm a / dbms / src / Formats / CSVRowInputStream . cpp <nl> ppp b / dbms / src / Formats / CSVRowInputStream . cpp <nl> CSVRowInputStream : : CSVRowInputStream ( ReadBuffer & istr_ , const Block & header_ , <nl> data_types [ i ] = column_info . type ; <nl> column_indexes_by_names . emplace ( column_info . name , i ) ; <nl> } <nl> - <nl> - column_indexes_for_input_fields . reserve ( num_columns ) ; <nl> - read_columns . assign ( num_columns , false ) ; <nl> } <nl> <nl> - <nl> - void CSVRowInputStream : : setupAllColumnsByTableSchema ( ) <nl> - { <nl> - read_columns . assign ( header . columns ( ) , true ) ; <nl> - column_indexes_for_input_fields . resize ( header . columns ( ) ) ; <nl> - <nl> - for ( size_t i = 0 ; i < column_indexes_for_input_fields . size ( ) ; + + i ) <nl> - { <nl> - column_indexes_for_input_fields [ i ] = i ; <nl> - } <nl> - } <nl> - <nl> - <nl> + / / / Map an input file column to a table column , based on its name . <nl> void CSVRowInputStream : : addInputColumn ( const String & column_name ) <nl> { <nl> const auto column_it = column_indexes_by_names . find ( column_name ) ; <nl> void CSVRowInputStream : : addInputColumn ( const String & column_name ) <nl> column_indexes_for_input_fields . emplace_back ( column_index ) ; <nl> } <nl> <nl> - <nl> - void CSVRowInputStream : : fillUnreadColumnsWithDefaults ( MutableColumns & columns , RowReadExtension & row_read_extension ) <nl> - { <nl> - / / / It is safe to memorize this on the first run - the format guarantees this does not change <nl> - if ( unlikely ( row_num = = 1 ) ) <nl> - { <nl> - columns_to_fill_with_default_values . clear ( ) ; <nl> - for ( size_t index = 0 ; index < read_columns . size ( ) ; + + index ) <nl> - if ( read_columns [ index ] = = 0 ) <nl> - columns_to_fill_with_default_values . push_back ( index ) ; <nl> - } <nl> - <nl> - for ( const auto column_index : columns_to_fill_with_default_values ) <nl> - data_types [ column_index ] - > insertDefaultInto ( * columns [ column_index ] ) ; <nl> - <nl> - row_read_extension . read_columns = read_columns ; <nl> - } <nl> - <nl> - <nl> void CSVRowInputStream : : readPrefix ( ) <nl> { <nl> / / / In this format , we assume , that if first string field contain BOM as value , it will be written in quotes , <nl> void CSVRowInputStream : : readPrefix ( ) <nl> <nl> if ( with_names ) <nl> { <nl> + / / / This CSV file has a header row with column names . Depending on the <nl> + / / / settings , use it or skip it . <nl> if ( format_settings . with_names_use_header ) <nl> { <nl> - String column_name ; <nl> + / / / Look at the file header to see which columns we have there . <nl> + / / / The missing columns are filled with defaults . <nl> + read_columns . assign ( header . columns ( ) , false ) ; <nl> do <nl> { <nl> + String column_name ; <nl> skipWhitespacesAndTabs ( istr ) ; <nl> readCSVString ( column_name , istr , format_settings . csv ) ; <nl> skipWhitespacesAndTabs ( istr ) ; <nl> void CSVRowInputStream : : readPrefix ( ) <nl> while ( checkChar ( format_settings . csv . delimiter , istr ) ) ; <nl> <nl> skipDelimiter ( istr , format_settings . csv . delimiter , true ) ; <nl> + <nl> + for ( size_t column = 0 ; column < read_columns . size ( ) ; column + + ) <nl> + { <nl> + if ( ! read_columns [ column ] ) <nl> + { <nl> + have_always_default_columns = true ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + return ; <nl> } <nl> else <nl> { <nl> - setupAllColumnsByTableSchema ( ) ; <nl> - skipRow ( istr , format_settings . csv , column_indexes_for_input_fields . 
size ( ) ) ; <nl> + skipRow ( istr , format_settings . csv , header . columns ( ) ) ; <nl> } <nl> } <nl> - else <nl> + <nl> + / / / The default : map each column of the file to the column of the table with <nl> + / / / the same index . <nl> + read_columns . assign ( header . columns ( ) , true ) ; <nl> + column_indexes_for_input_fields . resize ( header . columns ( ) ) ; <nl> + <nl> + for ( size_t i = 0 ; i < column_indexes_for_input_fields . size ( ) ; + + i ) <nl> { <nl> - setupAllColumnsByTableSchema ( ) ; <nl> + column_indexes_for_input_fields [ i ] = i ; <nl> } <nl> } <nl> <nl> - <nl> + / * * If you change this function , don ' t forget to change its counterpart <nl> + * with extended error reporting : parseRowAndPrintDiagnosticInfo ( ) . <nl> + * / <nl> bool CSVRowInputStream : : read ( MutableColumns & columns , RowReadExtension & ext ) <nl> { <nl> if ( istr . eof ( ) ) <nl> bool CSVRowInputStream : : read ( MutableColumns & columns , RowReadExtension & ext ) <nl> <nl> updateDiagnosticInfo ( ) ; <nl> <nl> - String tmp ; <nl> - for ( size_t input_position = 0 ; input_position < column_indexes_for_input_fields . size ( ) ; + + input_position ) <nl> + / / / Track whether we have to fill any columns in this row with default <nl> + / / / values . If not , we return an empty column mask to the caller , so that <nl> + / / / it doesn ' t have to check it . <nl> + bool have_default_columns = have_always_default_columns ; <nl> + <nl> + const auto delimiter = format_settings . csv . delimiter ; <nl> + for ( size_t file_column = 0 ; file_column < column_indexes_for_input_fields . size ( ) ; + + file_column ) <nl> { <nl> - const auto & column_index = column_indexes_for_input_fields [ input_position ] ; <nl> - if ( column_index ) <nl> + const auto & table_column = column_indexes_for_input_fields [ file_column ] ; <nl> + const bool is_last_file_column = <nl> + file_column + 1 = = column_indexes_for_input_fields . size ( ) ; <nl> + <nl> + if ( table_column ) <nl> { <nl> - skipWhitespacesAndTabs ( istr ) ; <nl> - data_types [ * column_index ] - > deserializeAsTextCSV ( * columns [ * column_index ] , istr , format_settings ) ; <nl> - skipWhitespacesAndTabs ( istr ) ; <nl> + const auto & type = data_types [ * table_column ] ; <nl> + const bool at_delimiter = * istr . position ( ) = = delimiter ; <nl> + const bool at_last_column_line_end = is_last_file_column <nl> + & & ( * istr . position ( ) = = ' \ n ' | | * istr . position ( ) = = ' \ r ' <nl> + | | istr . eof ( ) ) ; <nl> + <nl> + if ( format_settings . csv . empty_as_default <nl> + & & ( at_delimiter | | at_last_column_line_end ) ) <nl> + { <nl> + / / / Treat empty unquoted column value as default value , if <nl> + / / / specified in the settings . Tuple columns might seem <nl> + / / / problematic , because they are never quoted but still contain <nl> + / / / commas , which might be also used as delimiters . However , <nl> + / / / they do not contain empty unquoted fields , so this check <nl> + / / / works for tuples as well . <nl> + read_columns [ * table_column ] = false ; <nl> + have_default_columns = true ; <nl> + } <nl> + else <nl> + { <nl> + / / / Read the column normally . <nl> + read_columns [ * table_column ] = true ; <nl> + skipWhitespacesAndTabs ( istr ) ; <nl> + type - > deserializeAsTextCSV ( * columns [ * table_column ] , istr , <nl> + format_settings ) ; <nl> + skipWhitespacesAndTabs ( istr ) ; <nl> + } <nl> } <nl> else <nl> { <nl> + / / / We never read this column from the file , just skip it . 
<nl> + String tmp ; <nl> readCSVString ( tmp , istr , format_settings . csv ) ; <nl> } <nl> <nl> - skipDelimiter ( istr , format_settings . csv . delimiter , input_position + 1 = = column_indexes_for_input_fields . size ( ) ) ; <nl> + skipDelimiter ( istr , delimiter , is_last_file_column ) ; <nl> } <nl> <nl> - fillUnreadColumnsWithDefaults ( columns , ext ) ; <nl> + if ( have_default_columns ) <nl> + { <nl> + for ( size_t i = 0 ; i < read_columns . size ( ) ; i + + ) <nl> + { <nl> + if ( ! read_columns [ i ] ) <nl> + { <nl> + / / / The column value for this row is going to be overwritten <nl> + / / / with default by the caller , but the general assumption is <nl> + / / / that the column size increases for each row , so we have <nl> + / / / to insert something . Since we do not care about the exact <nl> + / / / value , we do not have to use the default value specified by <nl> + / / / the data type , and can just use IColumn : : insertDefault ( ) . <nl> + columns [ i ] - > insertDefault ( ) ; <nl> + } <nl> + } <nl> + ext . read_columns = read_columns ; <nl> + } <nl> <nl> return true ; <nl> } <nl> bool OPTIMIZE ( 1 ) CSVRowInputStream : : parseRowAndPrintDiagnosticInfo ( MutableColumn <nl> { <nl> const char delimiter = format_settings . csv . delimiter ; <nl> <nl> - for ( size_t input_position = 0 ; input_position < column_indexes_for_input_fields . size ( ) ; + + input_position ) <nl> + for ( size_t file_column = 0 ; file_column < column_indexes_for_input_fields . size ( ) ; + + file_column ) <nl> { <nl> - if ( input_position = = 0 & & istr . eof ( ) ) <nl> + if ( file_column = = 0 & & istr . eof ( ) ) <nl> { <nl> out < < " < End of stream > \ n " ; <nl> return false ; <nl> } <nl> <nl> - if ( column_indexes_for_input_fields [ input_position ] . has_value ( ) ) <nl> + if ( column_indexes_for_input_fields [ file_column ] . has_value ( ) ) <nl> { <nl> - const auto & column_index = * column_indexes_for_input_fields [ input_position ] ; <nl> - const auto & current_column_type = data_types [ column_index ] ; <nl> - <nl> - out < < " Column " < < input_position < < " , " < < std : : string ( ( input_position < 10 ? 2 : input_position < 100 ? 1 : 0 ) , ' ' ) <nl> - < < " name : " < < header . safeGetByPosition ( column_index ) . name < < " , " < < std : : string ( max_length_of_column_name - header . safeGetByPosition ( column_index ) . name . size ( ) , ' ' ) <nl> + const auto & table_column = * column_indexes_for_input_fields [ file_column ] ; <nl> + const auto & current_column_type = data_types [ table_column ] ; <nl> + const bool is_last_file_column = <nl> + file_column + 1 = = column_indexes_for_input_fields . size ( ) ; <nl> + const bool at_delimiter = * istr . position ( ) = = delimiter ; <nl> + const bool at_last_column_line_end = is_last_file_column <nl> + & & ( * istr . position ( ) = = ' \ n ' | | * istr . position ( ) = = ' \ r ' <nl> + | | istr . eof ( ) ) ; <nl> + <nl> + out < < " Column " < < file_column < < " , " < < std : : string ( ( file_column < 10 ? 2 : file_column < 100 ? 1 : 0 ) , ' ' ) <nl> + < < " name : " < < header . safeGetByPosition ( table_column ) . name < < " , " < < std : : string ( max_length_of_column_name - header . safeGetByPosition ( table_column ) . name . size ( ) , ' ' ) <nl> < < " type : " < < current_column_type - > getName ( ) < < " , " < < std : : string ( max_length_of_data_type_name - current_column_type - > getName ( ) . size ( ) , ' ' ) ; <nl> <nl> - BufferBase : : Position prev_position = istr . 
position ( ) ; <nl> - BufferBase : : Position curr_position = istr . position ( ) ; <nl> - std : : exception_ptr exception ; <nl> - <nl> - try <nl> + if ( format_settings . csv . empty_as_default <nl> + & & ( at_delimiter | | at_last_column_line_end ) ) <nl> { <nl> - skipWhitespacesAndTabs ( istr ) ; <nl> - prev_position = istr . position ( ) ; <nl> - current_column_type - > deserializeAsTextCSV ( * columns [ column_index ] , istr , format_settings ) ; <nl> - curr_position = istr . position ( ) ; <nl> - skipWhitespacesAndTabs ( istr ) ; <nl> + columns [ table_column ] - > insertDefault ( ) ; <nl> } <nl> - catch ( . . . ) <nl> + else <nl> { <nl> - exception = std : : current_exception ( ) ; <nl> - } <nl> + BufferBase : : Position prev_position = istr . position ( ) ; <nl> + BufferBase : : Position curr_position = istr . position ( ) ; <nl> + std : : exception_ptr exception ; <nl> <nl> - if ( curr_position < prev_position ) <nl> - throw Exception ( " Logical error : parsing is non - deterministic . " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + try <nl> + { <nl> + skipWhitespacesAndTabs ( istr ) ; <nl> + prev_position = istr . position ( ) ; <nl> + current_column_type - > deserializeAsTextCSV ( * columns [ table_column ] , istr , format_settings ) ; <nl> + curr_position = istr . position ( ) ; <nl> + skipWhitespacesAndTabs ( istr ) ; <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + exception = std : : current_exception ( ) ; <nl> + } <nl> <nl> - if ( isNativeNumber ( current_column_type ) | | isDateOrDateTime ( current_column_type ) ) <nl> - { <nl> - / / / An empty string instead of a value . <nl> - if ( curr_position = = prev_position ) <nl> + if ( curr_position < prev_position ) <nl> + throw Exception ( " Logical error : parsing is non - deterministic . " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + if ( isNativeNumber ( current_column_type ) | | isDateOrDateTime ( current_column_type ) ) <nl> { <nl> - out < < " ERROR : text " ; <nl> - verbosePrintString ( prev_position , std : : min ( prev_position + 10 , istr . buffer ( ) . end ( ) ) , out ) ; <nl> - out < < " is not like " < < current_column_type - > getName ( ) < < " \ n " ; <nl> - return false ; <nl> + / / / An empty string instead of a value . <nl> + if ( curr_position = = prev_position ) <nl> + { <nl> + out < < " ERROR : text " ; <nl> + verbosePrintString ( prev_position , std : : min ( prev_position + 10 , istr . buffer ( ) . end ( ) ) , out ) ; <nl> + out < < " is not like " < < current_column_type - > getName ( ) < < " \ n " ; <nl> + return false ; <nl> + } <nl> } <nl> - } <nl> <nl> - out < < " parsed text : " ; <nl> - verbosePrintString ( prev_position , curr_position , out ) ; <nl> + out < < " parsed text : " ; <nl> + verbosePrintString ( prev_position , curr_position , out ) ; <nl> <nl> - if ( exception ) <nl> - { <nl> - if ( current_column_type - > getName ( ) = = " DateTime " ) <nl> - out < < " ERROR : DateTime must be in YYYY - MM - DD hh : mm : ss or NNNNNNNNNN ( unix timestamp , exactly 10 digits ) format . \ n " ; <nl> - else if ( current_column_type - > getName ( ) = = " Date " ) <nl> - out < < " ERROR : Date must be in YYYY - MM - DD format . \ n " ; <nl> - else <nl> - out < < " ERROR \ n " ; <nl> - return false ; <nl> - } <nl> + if ( exception ) <nl> + { <nl> + if ( current_column_type - > getName ( ) = = " DateTime " ) <nl> + out < < " ERROR : DateTime must be in YYYY - MM - DD hh : mm : ss or NNNNNNNNNN ( unix timestamp , exactly 10 digits ) format . 
\ n " ; <nl> + else if ( current_column_type - > getName ( ) = = " Date " ) <nl> + out < < " ERROR : Date must be in YYYY - MM - DD format . \ n " ; <nl> + else <nl> + out < < " ERROR \ n " ; <nl> + return false ; <nl> + } <nl> <nl> - out < < " \ n " ; <nl> + out < < " \ n " ; <nl> <nl> - if ( current_column_type - > haveMaximumSizeOfValue ( ) ) <nl> - { <nl> - if ( * curr_position ! = ' \ n ' & & * curr_position ! = ' \ r ' & & * curr_position ! = delimiter ) <nl> + if ( current_column_type - > haveMaximumSizeOfValue ( ) <nl> + & & * curr_position ! = ' \ n ' & & * curr_position ! = ' \ r ' <nl> + & & * curr_position ! = delimiter ) <nl> { <nl> out < < " ERROR : garbage after " < < current_column_type - > getName ( ) < < " : " ; <nl> verbosePrintString ( curr_position , std : : min ( curr_position + 10 , istr . buffer ( ) . end ( ) ) , out ) ; <nl> bool OPTIMIZE ( 1 ) CSVRowInputStream : : parseRowAndPrintDiagnosticInfo ( MutableColumn <nl> else <nl> { <nl> static const String skipped_column_str = " < SKIPPED COLUMN > " ; <nl> - out < < " Column " < < input_position < < " , " < < std : : string ( ( input_position < 10 ? 2 : input_position < 100 ? 1 : 0 ) , ' ' ) <nl> + out < < " Column " < < file_column < < " , " < < std : : string ( ( file_column < 10 ? 2 : file_column < 100 ? 1 : 0 ) , ' ' ) <nl> < < " name : " < < skipped_column_str < < " , " < < std : : string ( max_length_of_column_name - skipped_column_str . length ( ) , ' ' ) <nl> < < " type : " < < skipped_column_str < < " , " < < std : : string ( max_length_of_data_type_name - skipped_column_str . length ( ) , ' ' ) ; <nl> <nl> bool OPTIMIZE ( 1 ) CSVRowInputStream : : parseRowAndPrintDiagnosticInfo ( MutableColumn <nl> } <nl> <nl> / / / Delimiters <nl> - if ( input_position + 1 = = column_indexes_for_input_fields . size ( ) ) <nl> + if ( file_column + 1 = = column_indexes_for_input_fields . size ( ) ) <nl> { <nl> if ( istr . eof ( ) ) <nl> return false ; <nl> mmm a / dbms / src / Formats / CSVRowInputStream . h <nl> ppp b / dbms / src / Formats / CSVRowInputStream . h <nl> class CSVRowInputStream : public IRowInputStream <nl> { <nl> public : <nl> / * * with_names - in the first line the header with column names <nl> - * with_types - on the next line header with type names <nl> * / <nl> CSVRowInputStream ( ReadBuffer & istr_ , const Block & header_ , bool with_names_ , const FormatSettings & format_settings ) ; <nl> <nl> class CSVRowInputStream : public IRowInputStream <nl> using IndexesMap = std : : unordered_map < String , size_t > ; <nl> IndexesMap column_indexes_by_names ; <nl> <nl> + / / / Maps indexes of columns in the input file to indexes of table columns <nl> using OptionalIndexes = std : : vector < std : : optional < size_t > > ; <nl> OptionalIndexes column_indexes_for_input_fields ; <nl> <nl> + / / / Tracks which colums we have read in a single read ( ) call . <nl> + / / / For columns that are never read , it is initialized to false when we <nl> + / / / read the file header , and never changed afterwards . <nl> + / / / For other columns , it is updated on each read ( ) call . <nl> std : : vector < UInt8 > read_columns ; <nl> - std : : vector < size_t > columns_to_fill_with_default_values ; <nl> + <nl> + / / / Whether we have any columns that are not read from file at all , <nl> + / / / and must be always initialized with defaults . 
<nl> + bool have_always_default_columns = false ; <nl> <nl> void addInputColumn ( const String & column_name ) ; <nl> - void setupAllColumnsByTableSchema ( ) ; <nl> - void fillUnreadColumnsWithDefaults ( MutableColumns & columns , RowReadExtension & ext ) ; <nl> <nl> / / / For convenient diagnostics in case of an error . <nl> - <nl> size_t row_num = 0 ; <nl> <nl> / / / How many bytes were read , not counting those that are still in the buffer . <nl> mmm a / dbms / src / Formats / FormatFactory . cpp <nl> ppp b / dbms / src / Formats / FormatFactory . cpp <nl> BlockInputStreamPtr FormatFactory : : getInput ( const String & name , ReadBuffer & bu <nl> format_settings . csv . delimiter = settings . format_csv_delimiter ; <nl> format_settings . csv . allow_single_quotes = settings . format_csv_allow_single_quotes ; <nl> format_settings . csv . allow_double_quotes = settings . format_csv_allow_double_quotes ; <nl> + format_settings . csv . empty_as_default = settings . input_format_defaults_for_omitted_fields ; <nl> format_settings . values . interpret_expressions = settings . input_format_values_interpret_expressions ; <nl> format_settings . with_names_use_header = settings . input_format_with_names_use_header ; <nl> format_settings . skip_unknown_fields = settings . input_format_skip_unknown_fields ; <nl> mmm a / dbms / src / Formats / FormatSettings . h <nl> ppp b / dbms / src / Formats / FormatSettings . h <nl> struct FormatSettings <nl> char delimiter = ' , ' ; <nl> bool allow_single_quotes = true ; <nl> bool allow_double_quotes = true ; <nl> + bool empty_as_default = false ; <nl> } ; <nl> <nl> CSV csv ; <nl> mmm a / dbms / src / Formats / IRowInputStream . h <nl> ppp b / dbms / src / Formats / IRowInputStream . h <nl> struct RowReadExtension <nl> { <nl> / / / IRowInputStream . read ( ) output . It contains non zero for columns that actually read from the source and zero otherwise . <nl> / / / It ' s used to attach defaults for partially filled rows . <nl> + / / / Can be empty , this means that all columns are read . <nl> std : : vector < UInt8 > read_columns ; <nl> } ; <nl> <nl> mmm a / dbms / tests / queries / 0_stateless / 00301_csv . reference <nl> ppp b / dbms / tests / queries / 0_stateless / 00301_csv . reference <nl> Hello , world 123 2016 - 01 - 01 <nl> Hello , " world " 456 2016 - 01 - 02 <nl> Hello " world " 789 2016 - 01 - 03 <nl> Hello \ n world 100 2016 - 01 - 04 <nl> + default 1 2019 - 06 - 19 <nl> + default - eof 1 2019 - 06 - 19 <nl> 2016 - 01 - 01 01 : 02 : 03 1 <nl> 2016 - 01 - 02 01 : 02 : 03 2 <nl> 2017 - 08 - 15 13 : 15 : 01 3 <nl> mmm a / dbms / tests / queries / 0_stateless / 00301_csv . sh <nl> ppp b / dbms / tests / queries / 0_stateless / 00301_csv . sh <nl> CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> . $ CURDIR / . . / shell_config . 
sh <nl> <nl> $ CLICKHOUSE_CLIENT - - query = " DROP TABLE IF EXISTS csv " ; <nl> - $ CLICKHOUSE_CLIENT - - query = " CREATE TABLE csv ( s String , n UInt64 , d Date ) ENGINE = Memory " ; <nl> + $ CLICKHOUSE_CLIENT - - query = " CREATE TABLE csv ( s String , n UInt64 DEFAULT 1 , d Date DEFAULT ' 2019 - 06 - 19 ' ) ENGINE = Memory " ; <nl> <nl> - echo ' " Hello , world " , 123 , " 2016 - 01 - 01 " <nl> + printf ' " Hello , world " , 123 , " 2016 - 01 - 01 " <nl> " Hello , " " world " " " , " 456 " , 2016 - 01 - 02 , <nl> Hello " world " , 789 , 2016 - 01 - 03 <nl> " Hello <nl> - world " , 100 , 2016 - 01 - 04 , ' | $ CLICKHOUSE_CLIENT - - query = " INSERT INTO csv FORMAT CSV " ; <nl> + world " , 100 , 2016 - 01 - 04 , <nl> + default , , <nl> + default - eof , , ' | $ CLICKHOUSE_CLIENT - - input_format_defaults_for_omitted_fields - - query = " INSERT INTO csv FORMAT CSV " ; <nl> <nl> $ CLICKHOUSE_CLIENT - - query = " SELECT * FROM csv ORDER BY d " ; <nl> $ CLICKHOUSE_CLIENT - - query = " DROP TABLE csv " ; <nl> mmm a / docs / en / interfaces / formats . md <nl> ppp b / docs / en / interfaces / formats . md <nl> clickhouse - client - - format_csv_delimiter = " | " - - query = " INSERT INTO test . csv FORMA <nl> <nl> When parsing , all values can be parsed either with or without quotes . Both double and single quotes are supported . Rows can also be arranged without quotes . In this case , they are parsed up to the delimiter character or line feed ( CR or LF ) . In violation of the RFC , when parsing rows without quotes , the leading and trailing spaces and tabs are ignored . For the line feed , Unix ( LF ) , Windows ( CR LF ) and Mac OS Classic ( CR LF ) types are all supported . <nl> <nl> + Empty unquoted input values are replaced with default values for the respective <nl> + columns , if <nl> + [ input_format_defaults_for_omitted_fields ] ( . . / operations / settings / settings . md # session_settings - input_format_defaults_for_omitted_fields ) <nl> + is enabled . <nl> + <nl> ` NULL ` is formatted as ` \ N ` . <nl> <nl> The CSV format supports the output of totals and extremes the same way as ` TabSeparated ` . <nl> mmm a / docs / en / operations / settings / settings . md <nl> ppp b / docs / en / operations / settings / settings . md <nl> Ok . <nl> <nl> # # input_format_defaults_for_omitted_fields { # session_settings - input_format_defaults_for_omitted_fields } <nl> <nl> - Turns on / off the extended data exchange between a ClickHouse client and a ClickHouse server . This setting applies for ` INSERT ` queries . <nl> - <nl> - When executing the ` INSERT ` query , the ClickHouse client prepares data and sends it to the server for writing . The client gets the table structure from the server when preparing the data . In some cases , the client needs more information than the server sends by default . Turn on the extended data exchange with ` input_format_defaults_for_omitted_fields = 1 ` . <nl> - <nl> - When the extended data exchange is enabled , the server sends the additional metadata along with the table structure . The composition of the metadata depends on the operation . <nl> - <nl> - Operations where you may need the extended data exchange enabled : <nl> - <nl> - - Inserting data in [ JSONEachRow ] ( . . / . . / interfaces / formats . md # jsoneachrow ) format . <nl> - <nl> - For all other operations , ClickHouse doesn ' t apply the setting . <nl> + When performing ` INSERT ` queries , replace omitted input column values with <nl> + default values of the respective columns . 
This option only applies to <nl> + [ JSONEachRow ] ( . . / . . / interfaces / formats . md # jsoneachrow ) and <nl> + [ CSV ] ( . . / . . / interfaces / formats . md # csv ) formats . <nl> <nl> ! ! ! note " Note " <nl> - The extended data exchange functionality consumes additional computing resources on the server and can reduce performance . <nl> + When this option is enabled , extended table metadata are sent <nl> + from server to client . It consumes additional computing resources on the server <nl> + and can reduce performance . <nl> <nl> Possible values : <nl> <nl>
Treat empty cells in CSV as default values. ( )
ClickHouse/ClickHouse
4cc9f632a0ed84bdf9d8549a5c26bebccf5e622b
2019-06-20T12:46:36Z
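The heart of the ClickHouse change above is the empty-field check added to CSVRowInputStream::read(): before deserializing a column, the reader peeks at the current buffer position, and an unquoted field that starts at a delimiter (or, for the last file column, at CR, LF, or EOF) is treated as a default value. A minimal standalone sketch of that check, using plain pointers as a stand-in for ClickHouse's ReadBuffer (the helper name is hypothetical, not part of the codebase):

    // Sketch of the empty-unquoted-field test from the commit above.
    // 'pos'/'end' stand in for the ReadBuffer position, 'delimiter' for
    // format_settings.csv.delimiter.
    static bool isEmptyUnquotedField(const char * pos, const char * end,
                                     char delimiter, bool is_last_file_column)
    {
        const bool at_eof = (pos == end);
        const bool at_delimiter = !at_eof && *pos == delimiter;
        // The last column may also be terminated by CR, LF or end of input.
        const bool at_line_end = is_last_file_column
            && (at_eof || *pos == '\n' || *pos == '\r');
        return at_delimiter || at_line_end;
    }

    int main()
    {
        const char row[] = "default,,\n";
        // Second field of "default,,": the field starts right after the
        // first delimiter (index 8), which is another ',', so it is empty
        // and the column default is used instead.
        const char * second_field = row + 8;
        return isEmptyUnquotedField(second_field, row + sizeof(row) - 1,
                                    ',', /*is_last_file_column=*/false) ? 0 : 1;
    }

Fed the test row "default,," from 00301_csv.sh above, both the second and third fields are detected as empty, so the table defaults (n = 1, d = '2019-06-19') are inserted, which is exactly what the new reference output records.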
mmm a / addons / skin . estuary / xml / Variables . xml <nl> ppp b / addons / skin . estuary / xml / Variables . xml <nl> <nl> < value > $ INFO [ ListItem . Premiered , [ COLOR grey ] $ LOCALIZE [ 20416 ] : [ / COLOR ] , [ CR ] ] < / value > <nl> < / variable > <nl> < variable name = " FlagLabel " > <nl> - < value condition = " ListItem . IsPremiere " > [ B ] [ [ COLOR button_focus ] $ LOCALIZE [ 838 ] [ / COLOR ] [ / B ] < / value > <nl> + < value condition = " ListItem . IsPremiere " > [ B ] [ COLOR button_focus ] $ LOCALIZE [ 838 ] [ / COLOR ] [ / B ] < / value > <nl> < value condition = " ListItem . IsFinale " > [ B ] [ COLOR button_focus ] $ LOCALIZE [ 849 ] [ / COLOR ] [ / B ] < / value > <nl> < value condition = " ListItem . IsLive " > [ B ] [ COLOR button_focus ] $ LOCALIZE [ 839 ] [ / COLOR ] [ / B ] < / value > <nl> < value condition = " ListItem . IsNew " > [ B ] [ COLOR button_focus ] $ LOCALIZE [ 842 ] [ / COLOR ] [ / B ] < / value > <nl> < / variable > <nl> < variable name = " FlagDashLabel " > <nl> < value condition = " ListItem . IsPremiere + String . IsEmpty ( ListItem . Season ) + String . IsEmpty ( ListItem . Episode ) + String . IsEmpty ( ListItem . EpisodeName ) " > [ B ] [ COLOR button_focus ] $ LOCALIZE [ 838 ] [ / COLOR ] [ / B ] < / value > <nl> - < value condition = " ListItem . IsPremiere " > [ B ] [ [ COLOR button_focus ] $ LOCALIZE [ 838 ] [ / COLOR ] [ / B ] - < / value > <nl> + < value condition = " ListItem . IsPremiere " > [ B ] [ COLOR button_focus ] $ LOCALIZE [ 838 ] [ / COLOR ] [ / B ] - < / value > <nl> < value condition = " ListItem . IsFinale + String . IsEmpty ( ListItem . Season ) + String . IsEmpty ( ListItem . Episode ) + String . IsEmpty ( ListItem . EpisodeName ) " > [ B ] [ COLOR button_focus ] $ LOCALIZE [ 849 ] [ / COLOR ] [ / B ] < / value > <nl> < value condition = " ListItem . IsFinale " > [ B ] [ COLOR button_focus ] $ LOCALIZE [ 849 ] [ / COLOR ] [ / B ] - < / value > <nl> < value condition = " ListItem . IsLive + String . IsEmpty ( ListItem . Season ) + String . IsEmpty ( ListItem . Episode ) + String . IsEmpty ( ListItem . EpisodeName ) " > [ B ] [ COLOR button_focus ] $ LOCALIZE [ 839 ] [ / COLOR ] [ / B ] < / value > <nl>
Merge pull request from phunkyfish/new-live-bracket-fix
xbmc/xbmc
39f68151963c369a84f57c21a4e894c0409dc421
2020-08-09T12:07:51Z
mmm a / ruby / Gemfile . lock <nl> ppp b / ruby / Gemfile . lock <nl> <nl> PATH <nl> remote : . <nl> specs : <nl> - google - protobuf ( 3 . 0 . 0 . alpha . 4 ) <nl> + google - protobuf ( 3 . 0 . 0 . alpha . 4 . 0 ) <nl> <nl> GEM <nl> remote : https : / / rubygems . org / <nl> DEPENDENCIES <nl> rake - compiler <nl> rubygems - tasks <nl> test - unit <nl> + <nl> + BUNDLED WITH <nl> + 1 . 10 . 6 <nl> mmm a / ruby / ext / google / protobuf_c / encode_decode . c <nl> ppp b / ruby / ext / google / protobuf_c / encode_decode . c <nl> <nl> / / For more information , see : <nl> / / https : / / bugs . ruby - lang . org / issues / 11328 <nl> VALUE noleak_rb_str_cat ( VALUE rb_str , const char * str , long len ) { <nl> + char * p ; <nl> size_t oldlen = RSTRING_LEN ( rb_str ) ; <nl> rb_str_modify_expand ( rb_str , len ) ; <nl> - char * p = RSTRING_PTR ( rb_str ) ; <nl> + p = RSTRING_PTR ( rb_str ) ; <nl> memcpy ( p + oldlen , str , len ) ; <nl> rb_str_set_len ( rb_str , oldlen + len ) ; <nl> + return rb_str ; <nl> } <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl>
A very small fix to silence some warnings.
protocolbuffers/protobuf
14e2b4fa51285d480ac36589e11c18d6d82819ca
2015-09-28T15:56:14Z
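The two warnings this protobuf commit silences are classic C90 issues: the Ruby C extension is built as C89/C90, where every declaration must precede the first statement of its block (hence `char * p;` is hoisted above `rb_str_modify_expand`), and `noleak_rb_str_cat()` was declared to return a `VALUE` but had no return statement. A minimal sketch of the corrected shape, with a hypothetical growable buffer standing in for Ruby's string API (error handling omitted):

    #include <cstdlib>
    #include <cstring>

    struct Buf { char * data; size_t len; size_t cap; };

    // Mirrors the fixed noleak_rb_str_cat(): declaration first (the C90
    // rule), assignment only after the buffer may have been reallocated,
    // and an explicit return for the non-void return type.
    static Buf * buf_cat(Buf * b, const char * str, size_t len)
    {
        char * p;                     // declared before any statement
        size_t oldlen = b->len;
        if (b->len + len > b->cap) {  // grow, like rb_str_modify_expand
            b->cap = b->len + len;
            b->data = static_cast<char *>(realloc(b->data, b->cap));
        }
        p = b->data;                  // pointer taken only after the resize
        memcpy(p + oldlen, str, len);
        b->len = oldlen + len;
        return b;                     // the return the commit adds
    }

    int main()
    {
        Buf b = { nullptr, 0, 0 };
        buf_cat(&b, "abc", 3);
        free(b.data);
        return 0;
    }

Taking the pointer only after the resize also matters for correctness, not just for the warning: a reallocation can move the underlying storage, invalidating any pointer obtained earlier.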
mmm a / android / sdk / build . gradle <nl> ppp b / android / sdk / build . gradle <nl> buildscript { <nl> } <nl> } <nl> <nl> - <nl> allprojects { <nl> repositories { <nl> mavenCentral ( ) <nl>
* [android] test webhook
apache/incubator-weex
df729db8c2e9660e6377262508774f91dc69439d
2016-11-09T07:28:38Z
mmm a / src / compilation - info . cc <nl> ppp b / src / compilation - info . cc <nl> CompilationInfo : : CompilationInfo ( Zone * zone , Isolate * isolate , <nl> <nl> if ( FLAG_function_context_specialization ) MarkAsFunctionContextSpecializing ( ) ; <nl> if ( FLAG_turbo_splitting ) MarkAsSplittingEnabled ( ) ; <nl> + if ( ! FLAG_turbo_disable_switch_jump_table ) SetFlag ( kSwitchJumpTableEnabled ) ; <nl> <nl> / / Collect source positions for optimized code when profiling or if debugger <nl> / / is active , to be able to get more precise source positions at the price of <nl> mmm a / src / compilation - info . h <nl> ppp b / src / compilation - info . h <nl> class V8_EXPORT_PRIVATE CompilationInfo final { <nl> kBailoutOnUninitialized = 1 < < 9 , <nl> kLoopPeelingEnabled = 1 < < 10 , <nl> kUntrustedCodeMitigations = 1 < < 11 , <nl> + kSwitchJumpTableEnabled = 1 < < 12 , <nl> } ; <nl> <nl> / / TODO ( mtrofin ) : investigate if this might be generalized outside wasm , with <nl> class V8_EXPORT_PRIVATE CompilationInfo final { <nl> return GetFlag ( kUntrustedCodeMitigations ) ; <nl> } <nl> <nl> + bool switch_jump_table_enabled ( ) const { <nl> + return GetFlag ( kSwitchJumpTableEnabled ) ; <nl> + } <nl> + <nl> / / Code getters and setters . <nl> <nl> void SetCode ( Handle < Code > code ) { code_ = code ; } <nl> mmm a / src / compiler / arm / instruction - selector - arm . cc <nl> ppp b / src / compiler / arm / instruction - selector - arm . cc <nl> void InstructionSelector : : VisitSwitch ( Node * node , const SwitchInfo & sw ) { <nl> InstructionOperand value_operand = g . UseRegister ( node - > InputAt ( 0 ) ) ; <nl> <nl> / / Emit either ArchTableSwitch or ArchLookupSwitch . <nl> - static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> - size_t table_space_cost = 4 + sw . value_range ; <nl> - size_t table_time_cost = 3 ; <nl> - size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> - size_t lookup_time_cost = sw . case_count ; <nl> - if ( sw . case_count > 0 & & <nl> - table_space_cost + 3 * table_time_cost < = <nl> - lookup_space_cost + 3 * lookup_time_cost & & <nl> - sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> - sw . value_range < = kMaxTableSwitchValueRange ) { <nl> - InstructionOperand index_operand = value_operand ; <nl> - if ( sw . min_value ) { <nl> - index_operand = g . TempRegister ( ) ; <nl> - Emit ( kArmSub | AddressingModeField : : encode ( kMode_Operand2_I ) , <nl> - index_operand , value_operand , g . TempImmediate ( sw . min_value ) ) ; <nl> + if ( enable_switch_jump_table_ = = kEnableSwitchJumpTable ) { <nl> + static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> + size_t table_space_cost = 4 + sw . value_range ; <nl> + size_t table_time_cost = 3 ; <nl> + size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> + size_t lookup_time_cost = sw . case_count ; <nl> + if ( sw . case_count > 0 & & <nl> + table_space_cost + 3 * table_time_cost < = <nl> + lookup_space_cost + 3 * lookup_time_cost & & <nl> + sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> + sw . value_range < = kMaxTableSwitchValueRange ) { <nl> + InstructionOperand index_operand = value_operand ; <nl> + if ( sw . min_value ) { <nl> + index_operand = g . TempRegister ( ) ; <nl> + Emit ( kArmSub | AddressingModeField : : encode ( kMode_Operand2_I ) , <nl> + index_operand , value_operand , g . TempImmediate ( sw . min_value ) ) ; <nl> + } <nl> + / / Generate a table lookup . 
<nl> + return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> - / / Generate a table lookup . <nl> - return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> <nl> / / Generate a sequence of conditional jumps . <nl> mmm a / src / compiler / arm64 / instruction - selector - arm64 . cc <nl> ppp b / src / compiler / arm64 / instruction - selector - arm64 . cc <nl> void InstructionSelector : : VisitSwitch ( Node * node , const SwitchInfo & sw ) { <nl> InstructionOperand value_operand = g . UseRegister ( node - > InputAt ( 0 ) ) ; <nl> <nl> / / Emit either ArchTableSwitch or ArchLookupSwitch . <nl> - static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> - size_t table_space_cost = 4 + sw . value_range ; <nl> - size_t table_time_cost = 3 ; <nl> - size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> - size_t lookup_time_cost = sw . case_count ; <nl> - if ( sw . case_count > 0 & & <nl> - table_space_cost + 3 * table_time_cost < = <nl> - lookup_space_cost + 3 * lookup_time_cost & & <nl> - sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> - sw . value_range < = kMaxTableSwitchValueRange ) { <nl> - InstructionOperand index_operand = value_operand ; <nl> - if ( sw . min_value ) { <nl> - index_operand = g . TempRegister ( ) ; <nl> - Emit ( kArm64Sub32 , index_operand , value_operand , <nl> - g . TempImmediate ( sw . min_value ) ) ; <nl> + if ( enable_switch_jump_table_ = = kEnableSwitchJumpTable ) { <nl> + static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> + size_t table_space_cost = 4 + sw . value_range ; <nl> + size_t table_time_cost = 3 ; <nl> + size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> + size_t lookup_time_cost = sw . case_count ; <nl> + if ( sw . case_count > 0 & & <nl> + table_space_cost + 3 * table_time_cost < = <nl> + lookup_space_cost + 3 * lookup_time_cost & & <nl> + sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> + sw . value_range < = kMaxTableSwitchValueRange ) { <nl> + InstructionOperand index_operand = value_operand ; <nl> + if ( sw . min_value ) { <nl> + index_operand = g . TempRegister ( ) ; <nl> + Emit ( kArm64Sub32 , index_operand , value_operand , <nl> + g . TempImmediate ( sw . min_value ) ) ; <nl> + } <nl> + / / Generate a table lookup . <nl> + return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> - / / Generate a table lookup . <nl> - return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> <nl> / / Generate a sequence of conditional jumps . <nl> mmm a / src / compiler / ia32 / instruction - selector - ia32 . cc <nl> ppp b / src / compiler / ia32 / instruction - selector - ia32 . cc <nl> void InstructionSelector : : VisitSwitch ( Node * node , const SwitchInfo & sw ) { <nl> InstructionOperand value_operand = g . UseRegister ( node - > InputAt ( 0 ) ) ; <nl> <nl> / / Emit either ArchTableSwitch or ArchLookupSwitch . <nl> - static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> - size_t table_space_cost = 4 + sw . value_range ; <nl> - size_t table_time_cost = 3 ; <nl> - size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> - size_t lookup_time_cost = sw . case_count ; <nl> - if ( sw . case_count > 4 & & <nl> - table_space_cost + 3 * table_time_cost < = <nl> - lookup_space_cost + 3 * lookup_time_cost & & <nl> - sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> - sw . value_range < = kMaxTableSwitchValueRange ) { <nl> - InstructionOperand index_operand = value_operand ; <nl> - if ( sw . min_value ) { <nl> - index_operand = g . 
TempRegister ( ) ; <nl> - Emit ( kIA32Lea | AddressingModeField : : encode ( kMode_MRI ) , index_operand , <nl> - value_operand , g . TempImmediate ( - sw . min_value ) ) ; <nl> + if ( enable_switch_jump_table_ = = kEnableSwitchJumpTable ) { <nl> + static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> + size_t table_space_cost = 4 + sw . value_range ; <nl> + size_t table_time_cost = 3 ; <nl> + size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> + size_t lookup_time_cost = sw . case_count ; <nl> + if ( sw . case_count > 4 & & <nl> + table_space_cost + 3 * table_time_cost < = <nl> + lookup_space_cost + 3 * lookup_time_cost & & <nl> + sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> + sw . value_range < = kMaxTableSwitchValueRange ) { <nl> + InstructionOperand index_operand = value_operand ; <nl> + if ( sw . min_value ) { <nl> + index_operand = g . TempRegister ( ) ; <nl> + Emit ( kIA32Lea | AddressingModeField : : encode ( kMode_MRI ) , index_operand , <nl> + value_operand , g . TempImmediate ( - sw . min_value ) ) ; <nl> + } <nl> + / / Generate a table lookup . <nl> + return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> - / / Generate a table lookup . <nl> - return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> <nl> / / Generate a sequence of conditional jumps . <nl> mmm a / src / compiler / instruction - selector . cc <nl> ppp b / src / compiler / instruction - selector . cc <nl> InstructionSelector : : InstructionSelector ( <nl> Zone * zone , size_t node_count , Linkage * linkage , <nl> InstructionSequence * sequence , Schedule * schedule , <nl> SourcePositionTable * source_positions , Frame * frame , <nl> + EnableSwitchJumpTable enable_switch_jump_table , <nl> SourcePositionMode source_position_mode , Features features , <nl> EnableScheduling enable_scheduling , <nl> EnableSerialization enable_serialization ) <nl> InstructionSelector : : InstructionSelector ( <nl> scheduler_ ( nullptr ) , <nl> enable_scheduling_ ( enable_scheduling ) , <nl> enable_serialization_ ( enable_serialization ) , <nl> + enable_switch_jump_table_ ( enable_switch_jump_table ) , <nl> frame_ ( frame ) , <nl> instruction_selection_failed_ ( false ) { <nl> instructions_ . reserve ( node_count ) ; <nl> mmm a / src / compiler / instruction - selector . h <nl> ppp b / src / compiler / instruction - selector . 
h <nl> class V8_EXPORT_PRIVATE InstructionSelector final { <nl> enum SourcePositionMode { kCallSourcePositions , kAllSourcePositions } ; <nl> enum EnableScheduling { kDisableScheduling , kEnableScheduling } ; <nl> enum EnableSerialization { kDisableSerialization , kEnableSerialization } ; <nl> + enum EnableSwitchJumpTable { <nl> + kDisableSwitchJumpTable , <nl> + kEnableSwitchJumpTable <nl> + } ; <nl> <nl> InstructionSelector ( <nl> Zone * zone , size_t node_count , Linkage * linkage , <nl> InstructionSequence * sequence , Schedule * schedule , <nl> SourcePositionTable * source_positions , Frame * frame , <nl> + EnableSwitchJumpTable enable_switch_jump_table , <nl> SourcePositionMode source_position_mode = kCallSourcePositions , <nl> Features features = SupportedFeatures ( ) , <nl> EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling <nl> class V8_EXPORT_PRIVATE InstructionSelector final { <nl> InstructionScheduler * scheduler_ ; <nl> EnableScheduling enable_scheduling_ ; <nl> EnableSerialization enable_serialization_ ; <nl> + EnableSwitchJumpTable enable_switch_jump_table_ ; <nl> Frame * frame_ ; <nl> bool instruction_selection_failed_ ; <nl> } ; <nl> mmm a / src / compiler / mips / instruction - selector - mips . cc <nl> ppp b / src / compiler / mips / instruction - selector - mips . cc <nl> void InstructionSelector : : VisitSwitch ( Node * node , const SwitchInfo & sw ) { <nl> InstructionOperand value_operand = g . UseRegister ( node - > InputAt ( 0 ) ) ; <nl> <nl> / / Emit either ArchTableSwitch or ArchLookupSwitch . <nl> - static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> - size_t table_space_cost = 9 + sw . value_range ; <nl> - size_t table_time_cost = 3 ; <nl> - size_t lookup_space_cost = 2 + 2 * sw . case_count ; <nl> - size_t lookup_time_cost = sw . case_count ; <nl> - if ( sw . case_count > 0 & & <nl> - table_space_cost + 3 * table_time_cost < = <nl> - lookup_space_cost + 3 * lookup_time_cost & & <nl> - sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> - sw . value_range < = kMaxTableSwitchValueRange ) { <nl> - InstructionOperand index_operand = value_operand ; <nl> - if ( sw . min_value ) { <nl> - index_operand = g . TempRegister ( ) ; <nl> - Emit ( kMipsSub , index_operand , value_operand , <nl> - g . TempImmediate ( sw . min_value ) ) ; <nl> + if ( enable_switch_jump_table_ = = kEnableSwitchJumpTable ) { <nl> + static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> + size_t table_space_cost = 9 + sw . value_range ; <nl> + size_t table_time_cost = 3 ; <nl> + size_t lookup_space_cost = 2 + 2 * sw . case_count ; <nl> + size_t lookup_time_cost = sw . case_count ; <nl> + if ( sw . case_count > 0 & & <nl> + table_space_cost + 3 * table_time_cost < = <nl> + lookup_space_cost + 3 * lookup_time_cost & & <nl> + sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> + sw . value_range < = kMaxTableSwitchValueRange ) { <nl> + InstructionOperand index_operand = value_operand ; <nl> + if ( sw . min_value ) { <nl> + index_operand = g . TempRegister ( ) ; <nl> + Emit ( kMipsSub , index_operand , value_operand , <nl> + g . TempImmediate ( sw . min_value ) ) ; <nl> + } <nl> + / / Generate a table lookup . <nl> + return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> - / / Generate a table lookup . <nl> - return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> <nl> / / Generate a sequence of conditional jumps . <nl> mmm a / src / compiler / mips64 / instruction - selector - mips64 . 
cc <nl> ppp b / src / compiler / mips64 / instruction - selector - mips64 . cc <nl> void InstructionSelector : : VisitSwitch ( Node * node , const SwitchInfo & sw ) { <nl> InstructionOperand value_operand = g . UseRegister ( node - > InputAt ( 0 ) ) ; <nl> <nl> / / Emit either ArchTableSwitch or ArchLookupSwitch . <nl> - static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> - size_t table_space_cost = 10 + 2 * sw . value_range ; <nl> - size_t table_time_cost = 3 ; <nl> - size_t lookup_space_cost = 2 + 2 * sw . case_count ; <nl> - size_t lookup_time_cost = sw . case_count ; <nl> - if ( sw . case_count > 0 & & <nl> - table_space_cost + 3 * table_time_cost < = <nl> - lookup_space_cost + 3 * lookup_time_cost & & <nl> - sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> - sw . value_range < = kMaxTableSwitchValueRange ) { <nl> - InstructionOperand index_operand = value_operand ; <nl> - if ( sw . min_value ) { <nl> - index_operand = g . TempRegister ( ) ; <nl> - Emit ( kMips64Sub , index_operand , value_operand , <nl> - g . TempImmediate ( sw . min_value ) ) ; <nl> + if ( enable_switch_jump_table_ = = kEnableSwitchJumpTable ) { <nl> + static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> + size_t table_space_cost = 10 + 2 * sw . value_range ; <nl> + size_t table_time_cost = 3 ; <nl> + size_t lookup_space_cost = 2 + 2 * sw . case_count ; <nl> + size_t lookup_time_cost = sw . case_count ; <nl> + if ( sw . case_count > 0 & & <nl> + table_space_cost + 3 * table_time_cost < = <nl> + lookup_space_cost + 3 * lookup_time_cost & & <nl> + sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> + sw . value_range < = kMaxTableSwitchValueRange ) { <nl> + InstructionOperand index_operand = value_operand ; <nl> + if ( sw . min_value ) { <nl> + index_operand = g . TempRegister ( ) ; <nl> + Emit ( kMips64Sub , index_operand , value_operand , <nl> + g . TempImmediate ( sw . min_value ) ) ; <nl> + } <nl> + / / Generate a table lookup . <nl> + return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> - / / Generate a table lookup . <nl> - return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> <nl> / / Generate a sequence of conditional jumps . <nl> mmm a / src / compiler / pipeline . cc <nl> ppp b / src / compiler / pipeline . cc <nl> struct InstructionSelectionPhase { <nl> InstructionSelector selector ( <nl> temp_zone , data - > graph ( ) - > NodeCount ( ) , linkage , data - > sequence ( ) , <nl> data - > schedule ( ) , data - > source_positions ( ) , data - > frame ( ) , <nl> + data - > info ( ) - > switch_jump_table_enabled ( ) <nl> + ? InstructionSelector : : kEnableSwitchJumpTable <nl> + : InstructionSelector : : kDisableSwitchJumpTable , <nl> data - > info ( ) - > is_source_positions_enabled ( ) <nl> ? InstructionSelector : : kAllSourcePositions <nl> : InstructionSelector : : kCallSourcePositions , <nl> mmm a / src / compiler / ppc / instruction - selector - ppc . cc <nl> ppp b / src / compiler / ppc / instruction - selector - ppc . cc <nl> void InstructionSelector : : VisitSwitch ( Node * node , const SwitchInfo & sw ) { <nl> InstructionOperand value_operand = g . UseRegister ( node - > InputAt ( 0 ) ) ; <nl> <nl> / / Emit either ArchTableSwitch or ArchLookupSwitch . <nl> - static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> - size_t table_space_cost = 4 + sw . value_range ; <nl> - size_t table_time_cost = 3 ; <nl> - size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> - size_t lookup_time_cost = sw . 
case_count ; <nl> - if ( sw . case_count > 0 & & <nl> - table_space_cost + 3 * table_time_cost < = <nl> - lookup_space_cost + 3 * lookup_time_cost & & <nl> - sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> - sw . value_range < = kMaxTableSwitchValueRange ) { <nl> - InstructionOperand index_operand = value_operand ; <nl> - if ( sw . min_value ) { <nl> - index_operand = g . TempRegister ( ) ; <nl> - Emit ( kPPC_Sub , index_operand , value_operand , <nl> - g . TempImmediate ( sw . min_value ) ) ; <nl> + if ( enable_switch_jump_table_ = = kEnableSwitchJumpTable ) { <nl> + static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> + size_t table_space_cost = 4 + sw . value_range ; <nl> + size_t table_time_cost = 3 ; <nl> + size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> + size_t lookup_time_cost = sw . case_count ; <nl> + if ( sw . case_count > 0 & & <nl> + table_space_cost + 3 * table_time_cost < = <nl> + lookup_space_cost + 3 * lookup_time_cost & & <nl> + sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> + sw . value_range < = kMaxTableSwitchValueRange ) { <nl> + InstructionOperand index_operand = value_operand ; <nl> + if ( sw . min_value ) { <nl> + index_operand = g . TempRegister ( ) ; <nl> + Emit ( kPPC_Sub , index_operand , value_operand , <nl> + g . TempImmediate ( sw . min_value ) ) ; <nl> + } <nl> + / / Generate a table lookup . <nl> + return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> - / / Generate a table lookup . <nl> - return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> <nl> / / Generate a sequence of conditional jumps . <nl> mmm a / src / compiler / s390 / instruction - selector - s390 . cc <nl> ppp b / src / compiler / s390 / instruction - selector - s390 . cc <nl> void InstructionSelector : : VisitSwitch ( Node * node , const SwitchInfo & sw ) { <nl> InstructionOperand value_operand = g . UseRegister ( node - > InputAt ( 0 ) ) ; <nl> <nl> / / Emit either ArchTableSwitch or ArchLookupSwitch . <nl> - static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> - size_t table_space_cost = 4 + sw . value_range ; <nl> - size_t table_time_cost = 3 ; <nl> - size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> - size_t lookup_time_cost = sw . case_count ; <nl> - if ( sw . case_count > 0 & & <nl> - table_space_cost + 3 * table_time_cost < = <nl> - lookup_space_cost + 3 * lookup_time_cost & & <nl> - sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> - sw . value_range < = kMaxTableSwitchValueRange ) { <nl> - InstructionOperand index_operand = value_operand ; <nl> - if ( sw . min_value ) { <nl> - index_operand = g . TempRegister ( ) ; <nl> - Emit ( kS390_Lay | AddressingModeField : : encode ( kMode_MRI ) , index_operand , <nl> - value_operand , g . TempImmediate ( - sw . min_value ) ) ; <nl> - } <nl> + if ( enable_switch_jump_table_ = = kEnableSwitchJumpTable ) { <nl> + static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> + size_t table_space_cost = 4 + sw . value_range ; <nl> + size_t table_time_cost = 3 ; <nl> + size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> + size_t lookup_time_cost = sw . case_count ; <nl> + if ( sw . case_count > 0 & & <nl> + table_space_cost + 3 * table_time_cost < = <nl> + lookup_space_cost + 3 * lookup_time_cost & & <nl> + sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> + sw . value_range < = kMaxTableSwitchValueRange ) { <nl> + InstructionOperand index_operand = value_operand ; <nl> + if ( sw . 
min_value ) { <nl> + index_operand = g . TempRegister ( ) ; <nl> + Emit ( kS390_Lay | AddressingModeField : : encode ( kMode_MRI ) , index_operand , <nl> + value_operand , g . TempImmediate ( - sw . min_value ) ) ; <nl> + } <nl> # if V8_TARGET_ARCH_S390X <nl> InstructionOperand index_operand_zero_ext = g . TempRegister ( ) ; <nl> Emit ( kS390_Uint32ToUint64 , index_operand_zero_ext , index_operand ) ; <nl> void InstructionSelector : : VisitSwitch ( Node * node , const SwitchInfo & sw ) { <nl> / / Generate a table lookup . <nl> return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> + } <nl> <nl> / / Generate a sequence of conditional jumps . <nl> return EmitLookupSwitch ( sw , value_operand ) ; <nl> mmm a / src / compiler / x64 / instruction - selector - x64 . cc <nl> ppp b / src / compiler / x64 / instruction - selector - x64 . cc <nl> void InstructionSelector : : VisitSwitch ( Node * node , const SwitchInfo & sw ) { <nl> InstructionOperand value_operand = g . UseRegister ( node - > InputAt ( 0 ) ) ; <nl> <nl> / / Emit either ArchTableSwitch or ArchLookupSwitch . <nl> - static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> - size_t table_space_cost = 4 + sw . value_range ; <nl> - size_t table_time_cost = 3 ; <nl> - size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> - size_t lookup_time_cost = sw . case_count ; <nl> - if ( sw . case_count > 4 & & <nl> - table_space_cost + 3 * table_time_cost < = <nl> - lookup_space_cost + 3 * lookup_time_cost & & <nl> - sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> - sw . value_range < = kMaxTableSwitchValueRange ) { <nl> - InstructionOperand index_operand = g . TempRegister ( ) ; <nl> - if ( sw . min_value ) { <nl> - / / The leal automatically zero extends , so result is a valid 64 - bit index . <nl> - Emit ( kX64Lea32 | AddressingModeField : : encode ( kMode_MRI ) , index_operand , <nl> - value_operand , g . TempImmediate ( - sw . min_value ) ) ; <nl> - } else { <nl> - / / Zero extend , because we use it as 64 - bit index into the jump table . <nl> - Emit ( kX64Movl , index_operand , value_operand ) ; <nl> + if ( enable_switch_jump_table_ = = kEnableSwitchJumpTable ) { <nl> + static const size_t kMaxTableSwitchValueRange = 2 < < 16 ; <nl> + size_t table_space_cost = 4 + sw . value_range ; <nl> + size_t table_time_cost = 3 ; <nl> + size_t lookup_space_cost = 3 + 2 * sw . case_count ; <nl> + size_t lookup_time_cost = sw . case_count ; <nl> + if ( sw . case_count > 4 & & <nl> + table_space_cost + 3 * table_time_cost < = <nl> + lookup_space_cost + 3 * lookup_time_cost & & <nl> + sw . min_value > std : : numeric_limits < int32_t > : : min ( ) & & <nl> + sw . value_range < = kMaxTableSwitchValueRange ) { <nl> + InstructionOperand index_operand = g . TempRegister ( ) ; <nl> + if ( sw . min_value ) { <nl> + / / The leal automatically zero extends , so result is a valid 64 - bit <nl> + / / index . <nl> + Emit ( kX64Lea32 | AddressingModeField : : encode ( kMode_MRI ) , index_operand , <nl> + value_operand , g . TempImmediate ( - sw . min_value ) ) ; <nl> + } else { <nl> + / / Zero extend , because we use it as 64 - bit index into the jump table . <nl> + Emit ( kX64Movl , index_operand , value_operand ) ; <nl> + } <nl> + / / Generate a table lookup . <nl> + return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> - / / Generate a table lookup . <nl> - return EmitTableSwitch ( sw , index_operand ) ; <nl> } <nl> <nl> / / Generate a sequence of conditional jumps . <nl> mmm a / src / flag - definitions . 
h <nl> ppp b / src / flag - definitions . h <nl> DEFINE_BOOL ( untrusted_code_mitigations , V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS , <nl> " Enable mitigations for executing untrusted code " ) <nl> # undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS <nl> <nl> + DEFINE_BOOL ( turbo_disable_switch_jump_table , false , <nl> + " do not emit jump - tables in Turbofan " ) <nl> + DEFINE_IMPLICATION ( untrusted_code_mitigations , turbo_disable_switch_jump_table ) <nl> + <nl> / / Flags to help platform porters <nl> DEFINE_BOOL ( minimal , false , <nl> " simplifies execution model to make porting " <nl> mmm a / test / unittests / compiler / instruction - selector - unittest . cc <nl> ppp b / test / unittests / compiler / instruction - selector - unittest . cc <nl> InstructionSelectorTest : : Stream InstructionSelectorTest : : StreamBuilder : : Build ( <nl> SourcePositionTable source_position_table ( graph ( ) ) ; <nl> InstructionSelector selector ( test_ - > zone ( ) , node_count , & linkage , & sequence , <nl> schedule , & source_position_table , nullptr , <nl> + InstructionSelector : : kEnableSwitchJumpTable , <nl> source_position_mode , features , <nl> InstructionSelector : : kDisableScheduling ) ; <nl> selector . SelectInstructions ( ) ; <nl>
Reland "[turbofan] disable indirect jumps in Turbofan generated switches"
v8/v8
2778b46081c6db1090fad409923ec34089bfb910
2018-02-01T08:03:04Z
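All eight backends in the v8 commit wrap the same cost heuristic behind the new kEnableSwitchJumpTable mode: a switch is lowered to a jump table only when the table's space cost, weighted 3:1 against time, beats a chain of conditional jumps, and the value range stays within 2 << 16. As a standalone sketch (the struct is a stand-in for the real SwitchInfo; the cost constants are the ARM ones from the diff above):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    struct SwitchInfoSketch {
        size_t case_count;
        size_t value_range;   // max case value - min case value + 1
        int32_t min_value;
    };

    static bool useJumpTable(const SwitchInfoSketch & sw, bool jump_tables_enabled)
    {
        if (!jump_tables_enabled)   // --turbo-disable-switch-jump-table,
            return false;           // implied by untrusted_code_mitigations
        static const size_t kMaxTableSwitchValueRange = 2 << 16;
        size_t table_space_cost = 4 + sw.value_range;
        size_t table_time_cost = 3;
        size_t lookup_space_cost = 3 + 2 * sw.case_count;
        size_t lookup_time_cost = sw.case_count;
        return sw.case_count > 0
            && table_space_cost + 3 * table_time_cost
                   <= lookup_space_cost + 3 * lookup_time_cost
            && sw.min_value > std::numeric_limits<int32_t>::min()
            && sw.value_range <= kMaxTableSwitchValueRange;
    }

    int main()
    {
        SwitchInfoSketch dense{20, 20, 0};     // 20 consecutive cases
        SwitchInfoSketch sparse{3, 40000, 0};  // 3 cases over a wide range
        return (useJumpTable(dense, true) && !useJumpTable(sparse, true)) ? 0 : 1;
    }

With the new flag set, which untrusted_code_mitigations now implies, the guard short-circuits and every switch falls back to the conditional-jump sequence, avoiding the indirect jump that the reland's title refers to.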
mmm a / docs / docs / imaging . xml <nl> ppp b / docs / docs / imaging . xml <nl> <nl> < ? xml - stylesheet type = " text / xsl " href = " stylesheet . xsl " ? > <nl> <nl> < doc > <nl> - < title > Imaging < / title > <nl> + < title > Image Processing < / title > <nl> <nl> < ! - - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - - > <nl> <nl> mmm a / docs / docs / main_menu . xml <nl> ppp b / docs / docs / main_menu . xml <nl> <nl> < chm_sub > parsing . xml < / chm_sub > <nl> < / item > <nl> < item > <nl> - < name > Imaging < / name > <nl> + < name > Image Processing < / name > <nl> < link > imaging . html < / link > <nl> < chm_sub > imaging . xml < / chm_sub > <nl> < / item > <nl>
Renamed the Imaging page to Image Processing.
davisking/dlib
e419bb1c0f6b5f907fe037bb0c4df0956b70e1b7
2010-02-19T00:57:03Z
mmm a / utils / buildbot - release - notes . txt <nl> ppp b / utils / buildbot - release - notes . txt <nl> <nl> dependency order , so global variable initializers that cross module <nl> boundaries don ' t have undefined behavior or fragile link order dependencies . <nl> <nl> + * Swift has the start of an immutability model for value types . As part of this , <nl> + you can now declare immutable value bindings with a new ' let ' declaration , <nl> + which is semantically similar to defining a get - only property : <nl> + <nl> + let x = foo ( ) <nl> + print ( x ) / / ok <nl> + x = bar ( ) / / error : cannot modify an immutable value <nl> + swap ( & x , & y ) / / error : cannot pass an immutable value as @ inout parameter <nl> + <nl> + In the case of bindings of class type , the bound object itself is still <nl> + mutable , but you cannot change the binding . <nl> + <nl> + let r = Rocket ( ) <nl> + r . blastOff ( ) / / Ok , your rocket is mutable . <nl> + r = Rocket ( ) / / error : cannot modify an immutable binding . <nl> + <nl> + In addition to the ' let ' declaration itself , the iteration variable in <nl> + for - each loops , ' self ' on classes , and a few other minor things have switched <nl> + to immutable bindings . <nl> + <nl> + There is more to come here . <nl> + <nl> * A " map " method with the semantics of Haskell ' s " fmap " was added to <nl> Optional < T > . Map applies a function f : T - > U to any value stored in <nl> an Optional < T > , and returns an Optional < U > . So , <nl>
describe some basic 'let' stuff in the release notes.
apple/swift
ff158116a1a7f3dcbd92f4e0a4e8cce620d37ad6
2013-12-12T19:51:38Z
mmm a / addons / resource . language . en_gb / resources / strings . po <nl> ppp b / addons / resource . language . en_gb / resources / strings . po <nl> msgctxt " # 566 " <nl> msgid " Album artist " <nl> msgstr " " <nl> <nl> + # . generic " play count " label used in different places <nl> # : xbmc / playlists / SmartPlaylist . cpp <nl> # : addons / skin . estuary / xml / Variables . xml <nl> + # : xbmc / pvr / dialogs / GUIDialogPVRRecordingSettings . cpp <nl> msgctxt " # 567 " <nl> msgid " Play count " <nl> msgstr " " <nl> msgctxt " # 19067 " <nl> msgid " This event is already being recorded . " <nl> msgstr " " <nl> <nl> - # empty string with id 19068 <nl> + # . Label for recording settings dialog header <nl> + # : xbmc / pvr / dialogs / GUIDialogPVRRecordingSettings . cpp <nl> + msgctxt " # 19068 " <nl> + msgid " Recording settings " <nl> + msgstr " " <nl> <nl> # . Electronic program guide <nl> # : addons / skin . estuary / xml / Variables . xml <nl> msgctxt " # 19074 " <nl> msgid " Active " <nl> msgstr " " <nl> <nl> - # . Label of name edit in PVR timer settings dialog <nl> + # . Label of name edit in PVR timer and recording settings dialog <nl> + # : xbmc / pvr / dialogs / GUIDialogPVRRecordingSettings . cpp <nl> # : xbmc / pvr / dialogs / GUIDialogPVRTimerSettings . cpp <nl> msgctxt " # 19075 " <nl> msgid " Name " <nl> msgctxt " # 19096 " <nl> msgid " \ " Smart select \ " " <nl> msgstr " " <nl> <nl> - # . label of the help text for the timer name edit field in PVR timer settings dialog <nl> + # . label of the help text for the name edit field in PVR timer and recording settings dialog <nl> + # : xbmc / pvr / dialogs / GUIDialogPVRRecordingSettings . cpp <nl> # : xbmc / pvr / dialogs / GUIDialogPVRTimerSettings . cpp <nl> msgctxt " # 19097 " <nl> msgid " Enter the name for the timer " <nl> mmm a / xbmc / guilib / GUIWindowManager . cpp <nl> ppp b / xbmc / guilib / GUIWindowManager . cpp <nl> <nl> # include " pvr / dialogs / GUIDialogPVRGuideSearch . h " <nl> # include " pvr / dialogs / GUIDialogPVRRadioRDSInfo . h " <nl> # include " pvr / dialogs / GUIDialogPVRRecordingInfo . h " <nl> + # include " pvr / dialogs / GUIDialogPVRRecordingSettings . h " <nl> # include " pvr / dialogs / GUIDialogPVRTimerSettings . h " <nl> <nl> # include " video / dialogs / GUIDialogTeletext . h " <nl> void CGUIWindowManager : : CreateWindows ( ) <nl> Add ( new CGUIDialogPVRGuideSearch ) ; <nl> Add ( new CGUIDialogPVRChannelsOSD ) ; <nl> Add ( new CGUIDialogPVRChannelGuide ) ; <nl> + Add ( new CGUIDialogPVRRecordingSettings ) ; <nl> <nl> Add ( new CGUIDialogSelect ) ; <nl> Add ( new CGUIDialogMusicInfo ) ; <nl> bool CGUIWindowManager : : DestroyWindows ( ) <nl> DestroyWindow ( WINDOW_DIALOG_PVR_OSD_CHANNELS ) ; <nl> DestroyWindow ( WINDOW_DIALOG_PVR_CHANNEL_GUIDE ) ; <nl> DestroyWindow ( WINDOW_DIALOG_OSD_TELETEXT ) ; <nl> + DestroyWindow ( WINDOW_DIALOG_PVR_RECORDING_SETTING ) ; <nl> <nl> DestroyWindow ( WINDOW_DIALOG_TEXT_VIEWER ) ; <nl> DestroyWindow ( WINDOW_DIALOG_PLAY_EJECT ) ; <nl> mmm a / xbmc / guilib / WindowIDs . h <nl> ppp b / xbmc / guilib / WindowIDs . 
h <nl> <nl> # define WINDOW_DIALOG_PVR_OSD_CHANNELS ( WINDOW_DIALOG_PVR_ID_START + 8 ) <nl> # define WINDOW_DIALOG_PVR_CHANNEL_GUIDE ( WINDOW_DIALOG_PVR_ID_START + 9 ) <nl> # define WINDOW_DIALOG_PVR_RADIO_RDS_INFO ( WINDOW_DIALOG_PVR_ID_START + 10 ) <nl> - # define WINDOW_DIALOG_PVR_ID_END WINDOW_DIALOG_PVR_RADIO_RDS_INFO <nl> + # define WINDOW_DIALOG_PVR_RECORDING_SETTING ( WINDOW_DIALOG_PVR_ID_START + 11 ) <nl> + # define WINDOW_DIALOG_PVR_ID_END WINDOW_DIALOG_PVR_RECORDING_SETTING <nl> <nl> # define WINDOW_PVR_ID_START 10700 <nl> # define WINDOW_TV_CHANNELS ( WINDOW_PVR_ID_START ) <nl> mmm a / xbmc / pvr / PVRContextMenus . cpp <nl> ppp b / xbmc / pvr / PVRContextMenus . cpp <nl> namespace PVR <nl> DECL_STATICCONTEXTMENUITEM ( DeleteTimerRule ) ; <nl> DECL_CONTEXTMENUITEM ( EditTimer ) ; <nl> DECL_CONTEXTMENUITEM ( DeleteTimer ) ; <nl> + DECL_STATICCONTEXTMENUITEM ( EditRecording ) ; <nl> DECL_STATICCONTEXTMENUITEM ( RenameRecording ) ; <nl> DECL_CONTEXTMENUITEM ( DeleteRecording ) ; <nl> DECL_STATICCONTEXTMENUITEM ( UndeleteRecording ) ; <nl> namespace PVR <nl> return CServiceBroker : : GetPVRManager ( ) . GUIActions ( ) - > StopRecording ( item ) ; <nl> } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / Edit recording <nl> + <nl> + bool EditRecording : : IsVisible ( const CFileItem & item ) const <nl> + { <nl> + const CPVRRecordingPtr recording ( item . GetPVRRecordingInfoTag ( ) ) ; <nl> + if ( recording & & ! recording - > IsDeleted ( ) ) <nl> + return true ; <nl> + <nl> + return false ; <nl> + } <nl> + <nl> + bool EditRecording : : Execute ( const CFileItemPtr & item ) const <nl> + { <nl> + return CServiceBroker : : GetPVRManager ( ) . GUIActions ( ) - > EditRecording ( item ) ; <nl> + } <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / Rename recording <nl> <nl> namespace PVR <nl> std : : make_shared < CONTEXTMENUITEM : : DeleteTimer > ( ) , <nl> std : : make_shared < CONTEXTMENUITEM : : StartRecording > ( 264 ) , / * Record * / <nl> std : : make_shared < CONTEXTMENUITEM : : StopRecording > ( 19059 ) , / * Stop recording * / <nl> + std : : make_shared < CONTEXTMENUITEM : : EditRecording > ( 21450 ) , / * Edit * / <nl> std : : make_shared < CONTEXTMENUITEM : : RenameRecording > ( 118 ) , / * Rename * / <nl> std : : make_shared < CONTEXTMENUITEM : : DeleteRecording > ( ) , <nl> std : : make_shared < CONTEXTMENUITEM : : UndeleteRecording > ( 19290 ) , / * Undelete * / <nl> mmm a / xbmc / pvr / PVRGUIActions . cpp <nl> ppp b / xbmc / pvr / PVRGUIActions . cpp <nl> <nl> # include " pvr / dialogs / GUIDialogPVRGuideInfo . h " <nl> # include " pvr / dialogs / GUIDialogPVRChannelGuide . h " <nl> # include " pvr / dialogs / GUIDialogPVRRecordingInfo . h " <nl> + # include " pvr / dialogs / GUIDialogPVRRecordingSettings . h " <nl> # include " pvr / dialogs / GUIDialogPVRTimerSettings . h " <nl> # include " pvr / PVRDatabase . h " <nl> # include " pvr / PVRItem . h " <nl> namespace PVR <nl> bool DoRun ( const CFileItemPtr & item ) override { return CServiceBroker : : GetPVRManager ( ) . 
Recordings ( ) - > Undelete ( * item ) ; } <nl> } ; <nl> <nl> + class AsyncSetRecordingPlayCount : public AsyncRecordingAction <nl> + { <nl> + private : <nl> + bool DoRun ( const CFileItemPtr & item ) override <nl> + { <nl> + PVR_ERROR error ; <nl> + CServiceBroker : : GetPVRManager ( ) . Clients ( ) - > SetRecordingPlayCount ( * item - > GetPVRRecordingInfoTag ( ) , item - > GetPVRRecordingInfoTag ( ) - > GetLocalPlayCount ( ) , & error ) ; <nl> + return error = = PVR_ERROR_NO_ERROR ; <nl> + } <nl> + } ; <nl> + <nl> CPVRGUIActions : : CPVRGUIActions ( ) <nl> : m_bChannelScanRunning ( false ) , <nl> m_settings ( { <nl> namespace PVR <nl> CVariant { timer - > Title ( ) } ) ; <nl> } <nl> <nl> + bool CPVRGUIActions : : EditRecording ( const CFileItemPtr & item ) const <nl> + { <nl> + const CPVRRecordingPtr recording = CPVRItem ( item ) . GetRecording ( ) ; <nl> + if ( ! recording ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " CPVRGUIActions - % s - no recording ! " , __FUNCTION__ ) ; <nl> + return false ; <nl> + } <nl> + <nl> + CPVRRecordingPtr origRecording ( new CPVRRecording ) ; <nl> + origRecording - > Update ( * recording ) ; <nl> + <nl> + if ( ! ShowRecordingSettings ( recording ) ) <nl> + return false ; <nl> + <nl> + if ( origRecording - > m_strTitle ! = recording - > m_strTitle ) <nl> + { <nl> + if ( ! AsyncRenameRecording ( recording - > m_strTitle ) . Execute ( item ) ) <nl> + CLog : : Log ( LOGERROR , " CPVRGUIActions - % s - renaming recording failed ! " , __FUNCTION__ ) ; <nl> + } <nl> + <nl> + if ( origRecording - > GetLocalPlayCount ( ) ! = recording - > GetLocalPlayCount ( ) ) <nl> + { <nl> + if ( ! AsyncSetRecordingPlayCount ( ) . Execute ( item ) ) <nl> + CLog : : Log ( LOGERROR , " CPVRGUIActions - % s - setting recording playcount failed ! " , __FUNCTION__ ) ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> bool CPVRGUIActions : : RenameRecording ( const CFileItemPtr & item ) const <nl> { <nl> const CPVRRecordingPtr recording ( item - > GetPVRRecordingInfoTag ( ) ) ; <nl> namespace PVR <nl> return true ; <nl> } <nl> <nl> + bool CPVRGUIActions : : ShowRecordingSettings ( const CPVRRecordingPtr & recording ) const <nl> + { <nl> + CGUIDialogPVRRecordingSettings * pDlgInfo = g_windowManager . GetWindow < CGUIDialogPVRRecordingSettings > ( WINDOW_DIALOG_PVR_RECORDING_SETTING ) ; <nl> + if ( ! pDlgInfo ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " CPVRGUIActions - % s - unable to get WINDOW_DIALOG_PVR_RECORDING_SETTING ! " , __FUNCTION__ ) ; <nl> + return false ; <nl> + } <nl> + <nl> + pDlgInfo - > SetRecording ( recording ) ; <nl> + pDlgInfo - > Open ( ) ; <nl> + <nl> + return pDlgInfo - > IsConfirmed ( ) ; <nl> + } <nl> + <nl> std : : string CPVRGUIActions : : GetResumeLabel ( const CFileItem & item ) const <nl> { <nl> std : : string resumeString ; <nl> mmm a / xbmc / pvr / PVRGUIActions . h <nl> ppp b / xbmc / pvr / PVRGUIActions . h <nl> namespace PVR <nl> * / <nl> bool StopRecording ( const CFileItemPtr & item ) const ; <nl> <nl> + / * ! <nl> + * @ brief Open the recording settings dialog to edit a recording . <nl> + * @ param item containing the recording to edit . <nl> + * @ return true on success , false otherwise . <nl> + * / <nl> + bool EditRecording ( const CFileItemPtr & item ) const ; <nl> + <nl> / * ! <nl> * @ brief Rename a recording , showing a text input dialog . <nl> * @ param item containing a recording to rename . <nl> namespace PVR <nl> * / <nl> bool ConfirmDeleteAllRecordingsFromTrash ( ) const ; <nl> <nl> + / * ! 
<nl> + * @ brief Open the recording settings dialog . <nl> + * @ param recording containing the recording the settings shall be displayed for . <nl> + * @ return true , if the dialog was ended successfully , false otherwise . <nl> + * / <nl> + bool ShowRecordingSettings ( const CPVRRecordingPtr & recording ) const ; <nl> + <nl> / * ! <nl> * @ brief Check whether resume play is possible for a given item , display " resume from . . . " / " play from start " context menu in case . <nl> * @ param item containing a recording or an epg tag . <nl> mmm a / xbmc / pvr / dialogs / CMakeLists . txt <nl> ppp b / xbmc / pvr / dialogs / CMakeLists . txt <nl> set ( SOURCES GUIDialogPVRChannelManager . cpp <nl> GUIDialogPVRGuideSearch . cpp <nl> GUIDialogPVRRadioRDSInfo . cpp <nl> GUIDialogPVRRecordingInfo . cpp <nl> + GUIDialogPVRRecordingSettings . cpp <nl> GUIDialogPVRTimerSettings . cpp ) <nl> <nl> set ( HEADERS GUIDialogPVRChannelManager . h <nl> set ( HEADERS GUIDialogPVRChannelManager . h <nl> GUIDialogPVRGuideSearch . h <nl> GUIDialogPVRRadioRDSInfo . h <nl> GUIDialogPVRRecordingInfo . h <nl> + GUIDialogPVRRecordingSettings . h <nl> GUIDialogPVRTimerSettings . h ) <nl> <nl> core_add_library ( pvr_dialogs ) <nl> new file mode 100644 <nl> index 000000000000 . . 66b913d55628 <nl> mmm / dev / null <nl> ppp b / xbmc / pvr / dialogs / GUIDialogPVRRecordingSettings . cpp <nl> <nl> + / * <nl> + * Copyright ( C ) 2017 Team Kodi <nl> + * http : / / kodi . tv <nl> + * <nl> + * This Program is free software ; you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> + * any later version . <nl> + * <nl> + * This Program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with XBMC ; see the file COPYING . If not , see <nl> + * < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + # include " GUIDialogPVRRecordingSettings . h " <nl> + <nl> + # include " ServiceBroker . h " <nl> + # include " guilib / LocalizeStrings . h " <nl> + # include " settings / lib / Setting . h " <nl> + # include " settings / lib / SettingsManager . h " <nl> + # include " utils / Variant . h " <nl> + # include " utils / log . h " <nl> + <nl> + # include " pvr / PVRManager . h " <nl> + # include " pvr / addons / PVRClients . h " <nl> + # include " pvr / recordings / PVRRecording . h " <nl> + <nl> + using namespace PVR ; <nl> + <nl> + # define SETTING_RECORDING_NAME " recording . name " <nl> + # define SETTING_RECORDING_PLAYCOUNT " recording . playcount " <nl> + <nl> + CGUIDialogPVRRecordingSettings : : CGUIDialogPVRRecordingSettings ( ) : <nl> + CGUIDialogSettingsManualBase ( WINDOW_DIALOG_PVR_RECORDING_SETTING , " DialogSettings . xml " ) , <nl> + m_iPlayCount ( 0 ) <nl> + { <nl> + m_loadType = LOAD_EVERY_TIME ; <nl> + } <nl> + <nl> + void CGUIDialogPVRRecordingSettings : : SetRecording ( const CPVRRecordingPtr & recording ) <nl> + { <nl> + if ( ! 
recording ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " CGUIDialogPVRRecordingSettings : : SetRecording - no recording given " ) ; <nl> + return ; <nl> + } <nl> + <nl> + m_recording = recording ; <nl> + <nl> + / / Copy data we need from tag . Do not modify the tag itself until Save ( ) ! <nl> + m_strTitle = m_recording - > m_strTitle ; <nl> + } <nl> + <nl> + void CGUIDialogPVRRecordingSettings : : SetupView ( ) <nl> + { <nl> + CGUIDialogSettingsManualBase : : SetupView ( ) ; <nl> + SetHeading ( 19068 ) ; / / Recording settings <nl> + SET_CONTROL_HIDDEN ( CONTROL_SETTINGS_CUSTOM_BUTTON ) ; <nl> + SET_CONTROL_LABEL ( CONTROL_SETTINGS_OKAY_BUTTON , 186 ) ; / / OK <nl> + SET_CONTROL_LABEL ( CONTROL_SETTINGS_CANCEL_BUTTON , 222 ) ; / / Cancel <nl> + } <nl> + <nl> + void CGUIDialogPVRRecordingSettings : : InitializeSettings ( ) <nl> + { <nl> + CGUIDialogSettingsManualBase : : InitializeSettings ( ) ; <nl> + <nl> + const std : : shared_ptr < CSettingCategory > category = AddCategory ( " pvrrecordingsettings " , - 1 ) ; <nl> + if ( category = = nullptr ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " CGUIDialogPVRRecordingSettings : : InitializeSettings - Unable to add settings category " ) ; <nl> + return ; <nl> + } <nl> + <nl> + const std : : shared_ptr < CSettingGroup > group = AddGroup ( category ) ; <nl> + if ( group = = nullptr ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " CGUIDialogPVRRecordingSettings : : InitializeSettings - Unable to add settings group " ) ; <nl> + return ; <nl> + } <nl> + <nl> + std : : shared_ptr < CSetting > setting = nullptr ; <nl> + <nl> + / / Name <nl> + setting = AddEdit ( group , SETTING_RECORDING_NAME , 19075 , SettingLevel : : Basic , m_strTitle ) ; <nl> + setting - > SetEnabled ( CServiceBroker : : GetPVRManager ( ) . Clients ( ) - > SupportsRecordingsRename ( m_recording - > ClientID ( ) ) ) ; <nl> + <nl> + / / Play count <nl> + if ( CServiceBroker : : GetPVRManager ( ) . 
Clients ( ) - > SupportsRecordingPlayCount ( m_recording - > ClientID ( ) ) ) <nl> + setting = AddEdit ( group , SETTING_RECORDING_PLAYCOUNT , 567 , SettingLevel : : Basic , m_recording - > GetLocalPlayCount ( ) ) ; <nl> + } <nl> + <nl> + void CGUIDialogPVRRecordingSettings : : OnSettingChanged ( std : : shared_ptr < const CSetting > setting ) <nl> + { <nl> + if ( setting = = nullptr ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " CGUIDialogPVRRecordingSettings : : OnSettingChanged - No setting " ) ; <nl> + return ; <nl> + } <nl> + <nl> + CGUIDialogSettingsManualBase : : OnSettingChanged ( setting ) ; <nl> + <nl> + const std : : string & settingId = setting - > GetId ( ) ; <nl> + <nl> + if ( settingId = = SETTING_RECORDING_NAME ) <nl> + { <nl> + m_strTitle = std : : static_pointer_cast < const CSettingString > ( setting ) - > GetValue ( ) ; <nl> + } <nl> + else if ( settingId = = SETTING_RECORDING_PLAYCOUNT ) <nl> + { <nl> + m_iPlayCount = std : : static_pointer_cast < const CSettingInt > ( setting ) - > GetValue ( ) ; <nl> + } <nl> + } <nl> + <nl> + void CGUIDialogPVRRecordingSettings : : Save ( ) <nl> + { <nl> + / / Name <nl> + m_recording - > m_strTitle = m_strTitle ; <nl> + <nl> + / / Play count <nl> + m_recording - > SetLocalPlayCount ( m_iPlayCount ) ; <nl> + } <nl> + <nl> + void CGUIDialogPVRRecordingSettings : : AddCondition ( <nl> + std : : shared_ptr < CSetting > setting , const std : : string & identifier , SettingConditionCheck condition , <nl> + SettingDependencyType depType , const std : : string & settingId ) <nl> + { <nl> + GetSettingsManager ( ) - > AddCondition ( identifier , condition , this ) ; <nl> + CSettingDependency dep ( depType , GetSettingsManager ( ) ) ; <nl> + dep . And ( ) - > Add ( CSettingDependencyConditionPtr ( new CSettingDependencyCondition ( identifier , <nl> + " true " , <nl> + settingId , <nl> + false , <nl> + GetSettingsManager ( ) ) ) ) ; <nl> + SettingDependencies deps ( setting - > GetDependencies ( ) ) ; <nl> + deps . push_back ( dep ) ; <nl> + setting - > SetDependencies ( deps ) ; <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . d6e137837688 <nl> mmm / dev / null <nl> ppp b / xbmc / pvr / dialogs / GUIDialogPVRRecordingSettings . h <nl> <nl> + # pragma once <nl> + / * <nl> + * Copyright ( C ) 2017 Team Kodi <nl> + * http : / / kodi . tv <nl> + * <nl> + * This Program is free software ; you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation ; either version 2 , or ( at your option ) <nl> + * any later version . <nl> + * <nl> + * This Program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with XBMC ; see the file COPYING . If not , see <nl> + * < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + # include " settings / dialogs / GUIDialogSettingsManualBase . h " <nl> + <nl> + # include " settings / SettingConditions . h " <nl> + # include " settings / lib / SettingDependency . h " <nl> + <nl> + # include " pvr / PVRTypes . 
h " <nl> + <nl> + # include < string > <nl> + <nl> + class CSetting ; <nl> + <nl> + namespace PVR <nl> + { <nl> + class CGUIDialogPVRRecordingSettings : public CGUIDialogSettingsManualBase <nl> + { <nl> + public : <nl> + CGUIDialogPVRRecordingSettings ( ) ; <nl> + <nl> + void SetRecording ( const CPVRRecordingPtr & recording ) ; <nl> + <nl> + protected : <nl> + / / implementation of ISettingCallback <nl> + void OnSettingChanged ( std : : shared_ptr < const CSetting > setting ) override ; <nl> + <nl> + / / specialization of CGUIDialogSettingsBase <nl> + bool AllowResettingSettings ( ) const override { return false ; } <nl> + void Save ( ) override ; <nl> + void SetupView ( ) override ; <nl> + <nl> + / / specialization of CGUIDialogSettingsManualBase <nl> + void InitializeSettings ( ) override ; <nl> + <nl> + private : <nl> + void AddCondition ( std : : shared_ptr < CSetting > setting , const std : : string & identifier , <nl> + SettingConditionCheck condition , <nl> + SettingDependencyType depType , const std : : string & settingId ) ; <nl> + <nl> + CPVRRecordingPtr m_recording ; <nl> + std : : string m_strTitle ; <nl> + int m_iPlayCount ; <nl> + } ; <nl> + } / / namespace PVR <nl> mmm a / xbmc / pvr / recordings / PVRRecording . h <nl> ppp b / xbmc / pvr / recordings / PVRRecording . h <nl> namespace PVR <nl> * / <nl> bool IncrementPlayCount ( ) override ; <nl> <nl> + / * ! <nl> + * @ brief Set this recording ' s play count without transferring the value to the backend , even if it supports server - side play counts . <nl> + * @ param count play count . <nl> + * @ return True if play count was set successfully , false otherwise . <nl> + * / <nl> + bool SetLocalPlayCount ( int count ) { return CVideoInfoTag : : SetPlayCount ( count ) ; } <nl> + <nl> / * ! <nl> * @ brief Get this recording ' s local play count . The value will not be obtained from the backend , even if it supports server - side play counts . <nl> * @ return the play count . <nl>
[PVR] Introduce recording settings dialog. Some recording properties (more to come) can now be edited the same way as timers; a minimal sketch of the edit flow follows this record.
xbmc/xbmc
a2b3dd432a2d43336f581f6c1bb9682919bec546
2017-07-07T15:38:25Z
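The commit above follows a snapshot-and-sync pattern: CPVRGUIActions::EditRecording keeps a copy of the original tag, lets the settings dialog edit the recording on confirmation, then pushes only the fields that actually changed (title, play count) to the backend. The following is a rough, self-contained C++ sketch of that flow, not the Kodi API: Recording, FakeDialog, and Backend are hypothetical stand-ins invented for this illustration.

// Self-contained sketch (hypothetical types, not the Kodi API): mirrors the
// snapshot / edit / sync-changed-fields flow of CPVRGUIActions::EditRecording.
#include <iostream>
#include <string>

struct Recording {            // stand-in for CPVRRecording
    std::string title;
    int playCount = 0;
};

struct FakeDialog {           // stand-in for CGUIDialogPVRRecordingSettings
    bool Open(Recording& rec) {
        rec.title += " (edited)";   // user renames the recording
        rec.playCount = 1;          // user marks it as watched
        return true;                // user confirmed with OK
    }
};

struct Backend {              // stand-in for the PVR client update calls
    bool Rename(const Recording& r) { std::cout << "rename -> " << r.title << "\n"; return true; }
    bool SetPlayCount(const Recording& r) { std::cout << "playcount -> " << r.playCount << "\n"; return true; }
};

// Snapshot the tag, open the dialog, then sync only the changed fields,
// logging (not aborting) when a single backend update fails, matching the
// commit's error handling.
bool EditRecording(Recording& rec, Backend& backend) {
    const Recording orig = rec;                 // like origRecording->Update(*recording)
    if (!FakeDialog().Open(rec))                // like ShowRecordingSettings(recording)
        return false;
    if (orig.title != rec.title && !backend.Rename(rec))
        std::cerr << "renaming recording failed!\n";
    if (orig.playCount != rec.playCount && !backend.SetPlayCount(rec))
        std::cerr << "setting recording playcount failed!\n";
    return true;
}

int main() {
    Recording rec;
    rec.title = "News";
    Backend backend;
    return EditRecording(rec, backend) ? 0 : 1;
}

The snapshot makes change detection trivial and keeps the tag untouched until the dialog's Save() runs, so a cancelled dialog costs nothing and only genuinely edited fields generate backend traffic.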
mmm a / trunk / src / app / srs_app_http . cpp <nl> ppp b / trunk / src / app / srs_app_http . cpp <nl> using namespace std ; <nl> <nl> # define SRS_HTTP_DEFAULT_PAGE " index . html " <nl> <nl> - int srs_go_http_response_json ( ISrsGoHttpResponseWriter * w , string data ) <nl> + int srs_go_http_response_json ( ISrsHttpResponseWriter * w , string data ) <nl> { <nl> w - > header ( ) - > set_content_length ( data . length ( ) ) ; <nl> w - > header ( ) - > set_content_type ( " application / json " ) ; <nl> string srs_go_http_detect ( char * data , int size ) <nl> <nl> / / Error replies to the request with the specified error message and HTTP code . <nl> / / The error message should be plain text . <nl> - int srs_go_http_error ( ISrsGoHttpResponseWriter * w , int code , string error ) <nl> + int srs_go_http_error ( ISrsHttpResponseWriter * w , int code , string error ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> int srs_go_http_error ( ISrsGoHttpResponseWriter * w , int code , string error ) <nl> return ret ; <nl> } <nl> <nl> - SrsGoHttpHeader : : SrsGoHttpHeader ( ) <nl> + SrsHttpHeader : : SrsHttpHeader ( ) <nl> { <nl> } <nl> <nl> - SrsGoHttpHeader : : ~ SrsGoHttpHeader ( ) <nl> + SrsHttpHeader : : ~ SrsHttpHeader ( ) <nl> { <nl> } <nl> <nl> - void SrsGoHttpHeader : : set ( string key , string value ) <nl> + void SrsHttpHeader : : set ( string key , string value ) <nl> { <nl> headers [ key ] = value ; <nl> } <nl> <nl> - string SrsGoHttpHeader : : get ( string key ) <nl> + string SrsHttpHeader : : get ( string key ) <nl> { <nl> std : : string v ; <nl> <nl> string SrsGoHttpHeader : : get ( string key ) <nl> return v ; <nl> } <nl> <nl> - int64_t SrsGoHttpHeader : : content_length ( ) <nl> + int64_t SrsHttpHeader : : content_length ( ) <nl> { <nl> std : : string cl = get ( " Content - Length " ) ; <nl> <nl> int64_t SrsGoHttpHeader : : content_length ( ) <nl> return ( int64_t ) : : atof ( cl . c_str ( ) ) ; <nl> } <nl> <nl> - void SrsGoHttpHeader : : set_content_length ( int64_t size ) <nl> + void SrsHttpHeader : : set_content_length ( int64_t size ) <nl> { <nl> char buf [ 64 ] ; <nl> snprintf ( buf , sizeof ( buf ) , " % " PRId64 , size ) ; <nl> set ( " Content - Length " , buf ) ; <nl> } <nl> <nl> - string SrsGoHttpHeader : : content_type ( ) <nl> + string SrsHttpHeader : : content_type ( ) <nl> { <nl> return get ( " Content - Type " ) ; <nl> } <nl> <nl> - void SrsGoHttpHeader : : set_content_type ( string ct ) <nl> + void SrsHttpHeader : : set_content_type ( string ct ) <nl> { <nl> set ( " Content - Type " , ct ) ; <nl> } <nl> <nl> - void SrsGoHttpHeader : : write ( stringstream & ss ) <nl> + void SrsHttpHeader : : write ( stringstream & ss ) <nl> { <nl> std : : map < std : : string , std : : string > : : iterator it ; <nl> for ( it = headers . begin ( ) ; it ! = headers . 
end ( ) ; + + it ) { <nl> void SrsGoHttpHeader : : write ( stringstream & ss ) <nl> } <nl> } <nl> <nl> - ISrsGoHttpResponseWriter : : ISrsGoHttpResponseWriter ( ) <nl> + ISrsHttpResponseWriter : : ISrsHttpResponseWriter ( ) <nl> { <nl> } <nl> <nl> - ISrsGoHttpResponseWriter : : ~ ISrsGoHttpResponseWriter ( ) <nl> + ISrsHttpResponseWriter : : ~ ISrsHttpResponseWriter ( ) <nl> { <nl> } <nl> <nl> - ISrsGoHttpHandler : : ISrsGoHttpHandler ( ) <nl> + ISrsHttpHandler : : ISrsHttpHandler ( ) <nl> { <nl> entry = NULL ; <nl> } <nl> <nl> - ISrsGoHttpHandler : : ~ ISrsGoHttpHandler ( ) <nl> + ISrsHttpHandler : : ~ ISrsHttpHandler ( ) <nl> { <nl> } <nl> <nl> - SrsGoHttpRedirectHandler : : SrsGoHttpRedirectHandler ( string u , int c ) <nl> + SrsHttpRedirectHandler : : SrsHttpRedirectHandler ( string u , int c ) <nl> { <nl> url = u ; <nl> code = c ; <nl> } <nl> <nl> - SrsGoHttpRedirectHandler : : ~ SrsGoHttpRedirectHandler ( ) <nl> + SrsHttpRedirectHandler : : ~ SrsHttpRedirectHandler ( ) <nl> { <nl> } <nl> <nl> - int SrsGoHttpRedirectHandler : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsHttpRedirectHandler : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> / / TODO : FIXME : implements it . <nl> return ret ; <nl> } <nl> <nl> - SrsGoHttpNotFoundHandler : : SrsGoHttpNotFoundHandler ( ) <nl> + SrsHttpNotFoundHandler : : SrsHttpNotFoundHandler ( ) <nl> { <nl> } <nl> <nl> - SrsGoHttpNotFoundHandler : : ~ SrsGoHttpNotFoundHandler ( ) <nl> + SrsHttpNotFoundHandler : : ~ SrsHttpNotFoundHandler ( ) <nl> { <nl> } <nl> <nl> - int SrsGoHttpNotFoundHandler : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsHttpNotFoundHandler : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> return srs_go_http_error ( w , <nl> SRS_CONSTS_HTTP_NotFound , SRS_CONSTS_HTTP_NotFound_str ) ; <nl> } <nl> <nl> - SrsGoHttpFileServer : : SrsGoHttpFileServer ( string root_dir ) <nl> + SrsHttpFileServer : : SrsHttpFileServer ( string root_dir ) <nl> { <nl> dir = root_dir ; <nl> } <nl> <nl> - SrsGoHttpFileServer : : ~ SrsGoHttpFileServer ( ) <nl> + SrsHttpFileServer : : ~ SrsHttpFileServer ( ) <nl> { <nl> } <nl> <nl> - int SrsGoHttpFileServer : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsHttpFileServer : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> string upath = r - > path ( ) ; <nl> <nl> int SrsGoHttpFileServer : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * <nl> if ( ! srs_path_exists ( fullpath ) ) { <nl> srs_warn ( " http miss file = % s , pattern = % s , upath = % s " , <nl> fullpath . c_str ( ) , entry - > pattern . c_str ( ) , upath . c_str ( ) ) ; <nl> - return SrsGoHttpNotFoundHandler ( ) . serve_http ( w , r ) ; <nl> + return SrsHttpNotFoundHandler ( ) . serve_http ( w , r ) ; <nl> } <nl> srs_trace ( " http match file = % s , pattern = % s , upath = % s " , <nl> fullpath . c_str ( ) , entry - > pattern . c_str ( ) , upath . 
c_str ( ) ) ; <nl> int SrsGoHttpFileServer : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * <nl> return serve_file ( w , r , fullpath ) ; <nl> } <nl> <nl> - int SrsGoHttpFileServer : : serve_file ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , string fullpath ) <nl> + int SrsHttpFileServer : : serve_file ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , string fullpath ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> int SrsGoHttpFileServer : : serve_file ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * <nl> return w - > final_request ( ) ; <nl> } <nl> <nl> - int SrsGoHttpFileServer : : serve_flv_file ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , string fullpath ) <nl> + int SrsHttpFileServer : : serve_flv_file ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , string fullpath ) <nl> { <nl> std : : string start = r - > query_get ( " start " ) ; <nl> if ( start . empty ( ) ) { <nl> int SrsGoHttpFileServer : : serve_flv_file ( ISrsGoHttpResponseWriter * w , SrsHttpMess <nl> return serve_flv_stream ( w , r , fullpath , offset ) ; <nl> } <nl> <nl> - int SrsGoHttpFileServer : : serve_mp4_file ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , string fullpath ) <nl> + int SrsHttpFileServer : : serve_mp4_file ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , string fullpath ) <nl> { <nl> / / for flash to request mp4 range in query string . <nl> / / for example , http : / / digitalprimates . net / dash / DashTest . html ? url = http : / / dashdemo . edgesuite . net / digitalprimates / nexus / oops - 20120802 - manifest . mpd <nl> int SrsGoHttpFileServer : : serve_mp4_file ( ISrsGoHttpResponseWriter * w , SrsHttpMess <nl> return serve_mp4_stream ( w , r , fullpath , start , end ) ; <nl> } <nl> <nl> - int SrsGoHttpFileServer : : serve_flv_stream ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , string fullpath , int offset ) <nl> + int SrsHttpFileServer : : serve_flv_stream ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , string fullpath , int offset ) <nl> { <nl> return serve_file ( w , r , fullpath ) ; <nl> } <nl> <nl> - int SrsGoHttpFileServer : : serve_mp4_stream ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , string fullpath , int start , int end ) <nl> + int SrsHttpFileServer : : serve_mp4_stream ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , string fullpath , int start , int end ) <nl> { <nl> return serve_file ( w , r , fullpath ) ; <nl> } <nl> <nl> - int SrsGoHttpFileServer : : copy ( ISrsGoHttpResponseWriter * w , SrsFileReader * fs , SrsHttpMessage * r , int size ) <nl> + int SrsHttpFileServer : : copy ( ISrsHttpResponseWriter * w , SrsFileReader * fs , SrsHttpMessage * r , int size ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> int SrsGoHttpFileServer : : copy ( ISrsGoHttpResponseWriter * w , SrsFileReader * fs , Sr <nl> return ret ; <nl> } <nl> <nl> - SrsGoHttpMuxEntry : : SrsGoHttpMuxEntry ( ) <nl> + SrsHttpMuxEntry : : SrsHttpMuxEntry ( ) <nl> { <nl> enabled = true ; <nl> explicit_match = false ; <nl> handler = NULL ; <nl> } <nl> <nl> - SrsGoHttpMuxEntry : : ~ SrsGoHttpMuxEntry ( ) <nl> + SrsHttpMuxEntry : : ~ SrsHttpMuxEntry ( ) <nl> { <nl> srs_freep ( handler ) ; <nl> } <nl> <nl> - SrsGoHttpServeMux : : SrsGoHttpServeMux ( ) <nl> + SrsHttpServeMux : : SrsHttpServeMux ( ) <nl> { <nl> } <nl> <nl> - SrsGoHttpServeMux : : ~ SrsGoHttpServeMux ( ) <nl> + SrsHttpServeMux : : ~ SrsHttpServeMux ( ) <nl> { <nl> - std : : map < std : : string , SrsGoHttpMuxEntry * > : : iterator it ; <nl> + std : : map < std : : string , 
SrsHttpMuxEntry * > : : iterator it ; <nl> for ( it = entries . begin ( ) ; it ! = entries . end ( ) ; + + it ) { <nl> - SrsGoHttpMuxEntry * entry = it - > second ; <nl> + SrsHttpMuxEntry * entry = it - > second ; <nl> srs_freep ( entry ) ; <nl> } <nl> entries . clear ( ) ; <nl> SrsGoHttpServeMux : : ~ SrsGoHttpServeMux ( ) <nl> vhosts . clear ( ) ; <nl> } <nl> <nl> - int SrsGoHttpServeMux : : initialize ( ) <nl> + int SrsHttpServeMux : : initialize ( ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> / / TODO : FIXME : implements it . <nl> return ret ; <nl> } <nl> <nl> - int SrsGoHttpServeMux : : handle ( std : : string pattern , ISrsGoHttpHandler * handler ) <nl> + int SrsHttpServeMux : : handle ( std : : string pattern , ISrsHttpHandler * handler ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> int SrsGoHttpServeMux : : handle ( std : : string pattern , ISrsGoHttpHandler * handler ) <nl> } <nl> <nl> if ( entries . find ( pattern ) ! = entries . end ( ) ) { <nl> - SrsGoHttpMuxEntry * exists = entries [ pattern ] ; <nl> + SrsHttpMuxEntry * exists = entries [ pattern ] ; <nl> if ( exists - > explicit_match ) { <nl> ret = ERROR_HTTP_PATTERN_DUPLICATED ; <nl> srs_error ( " http : multiple registrations for % s . ret = % d " , pattern . c_str ( ) , ret ) ; <nl> int SrsGoHttpServeMux : : handle ( std : : string pattern , ISrsGoHttpHandler * handler ) <nl> } <nl> <nl> if ( true ) { <nl> - SrsGoHttpMuxEntry * entry = new SrsGoHttpMuxEntry ( ) ; <nl> + SrsHttpMuxEntry * entry = new SrsHttpMuxEntry ( ) ; <nl> entry - > explicit_match = true ; <nl> entry - > handler = handler ; <nl> entry - > pattern = pattern ; <nl> entry - > handler - > entry = entry ; <nl> <nl> if ( entries . find ( pattern ) ! = entries . end ( ) ) { <nl> - SrsGoHttpMuxEntry * exists = entries [ pattern ] ; <nl> + SrsHttpMuxEntry * exists = entries [ pattern ] ; <nl> srs_freep ( exists ) ; <nl> } <nl> entries [ pattern ] = entry ; <nl> int SrsGoHttpServeMux : : handle ( std : : string pattern , ISrsGoHttpHandler * handler ) <nl> / / It can be overridden by an explicit registration . <nl> if ( pattern ! = " / " & & ! pattern . empty ( ) & & pattern . at ( pattern . length ( ) - 1 ) = = ' / ' ) { <nl> std : : string rpattern = pattern . substr ( 0 , pattern . length ( ) - 1 ) ; <nl> - SrsGoHttpMuxEntry * entry = NULL ; <nl> + SrsHttpMuxEntry * entry = NULL ; <nl> <nl> / / free the exists not explicit entry <nl> if ( entries . find ( rpattern ) ! = entries . end ( ) ) { <nl> - SrsGoHttpMuxEntry * exists = entries [ rpattern ] ; <nl> + SrsHttpMuxEntry * exists = entries [ rpattern ] ; <nl> if ( ! exists - > explicit_match ) { <nl> entry = exists ; <nl> } <nl> int SrsGoHttpServeMux : : handle ( std : : string pattern , ISrsGoHttpHandler * handler ) <nl> if ( ! 
entry | | entry - > explicit_match ) { <nl> srs_freep ( entry ) ; <nl> <nl> - entry = new SrsGoHttpMuxEntry ( ) ; <nl> + entry = new SrsHttpMuxEntry ( ) ; <nl> entry - > explicit_match = false ; <nl> - entry - > handler = new SrsGoHttpRedirectHandler ( pattern , SRS_CONSTS_HTTP_MovedPermanently ) ; <nl> + entry - > handler = new SrsHttpRedirectHandler ( pattern , SRS_CONSTS_HTTP_MovedPermanently ) ; <nl> entry - > pattern = pattern ; <nl> entry - > handler - > entry = entry ; <nl> <nl> int SrsGoHttpServeMux : : handle ( std : : string pattern , ISrsGoHttpHandler * handler ) <nl> return ret ; <nl> } <nl> <nl> - int SrsGoHttpServeMux : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsHttpServeMux : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> - ISrsGoHttpHandler * h = NULL ; <nl> + ISrsHttpHandler * h = NULL ; <nl> if ( ( ret = find_handler ( r , & h ) ) ! = ERROR_SUCCESS ) { <nl> srs_error ( " find handler failed . ret = % d " , ret ) ; <nl> return ret ; <nl> int SrsGoHttpServeMux : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r <nl> return ret ; <nl> } <nl> <nl> - int SrsGoHttpServeMux : : find_handler ( SrsHttpMessage * r , ISrsGoHttpHandler * * ph ) <nl> + int SrsHttpServeMux : : find_handler ( SrsHttpMessage * r , ISrsHttpHandler * * ph ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> int SrsGoHttpServeMux : : find_handler ( SrsHttpMessage * r , ISrsGoHttpHandler * * ph ) <nl> } <nl> <nl> if ( * ph = = NULL ) { <nl> - * ph = new SrsGoHttpNotFoundHandler ( ) ; <nl> + * ph = new SrsHttpNotFoundHandler ( ) ; <nl> } <nl> <nl> return ret ; <nl> } <nl> <nl> - int SrsGoHttpServeMux : : match ( SrsHttpMessage * r , ISrsGoHttpHandler * * ph ) <nl> + int SrsHttpServeMux : : match ( SrsHttpMessage * r , ISrsHttpHandler * * ph ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> int SrsGoHttpServeMux : : match ( SrsHttpMessage * r , ISrsGoHttpHandler * * ph ) <nl> } <nl> <nl> int nb_matched = 0 ; <nl> - ISrsGoHttpHandler * h = NULL ; <nl> + ISrsHttpHandler * h = NULL ; <nl> <nl> - std : : map < std : : string , SrsGoHttpMuxEntry * > : : iterator it ; <nl> + std : : map < std : : string , SrsHttpMuxEntry * > : : iterator it ; <nl> for ( it = entries . begin ( ) ; it ! = entries . end ( ) ; + + it ) { <nl> std : : string pattern = it - > first ; <nl> - SrsGoHttpMuxEntry * entry = it - > second ; <nl> + SrsHttpMuxEntry * entry = it - > second ; <nl> <nl> if ( ! entry - > enabled ) { <nl> continue ; <nl> int SrsGoHttpServeMux : : match ( SrsHttpMessage * r , ISrsGoHttpHandler * * ph ) <nl> return ret ; <nl> } <nl> <nl> - bool SrsGoHttpServeMux : : path_match ( string pattern , string path ) <nl> + bool SrsHttpServeMux : : path_match ( string pattern , string path ) <nl> { <nl> if ( pattern . 
empty ( ) ) { <nl> return false ; <nl> bool SrsGoHttpServeMux : : path_match ( string pattern , string path ) <nl> return false ; <nl> } <nl> <nl> - SrsGoHttpResponseWriter : : SrsGoHttpResponseWriter ( SrsStSocket * io ) <nl> + SrsHttpResponseWriter : : SrsHttpResponseWriter ( SrsStSocket * io ) <nl> { <nl> skt = io ; <nl> - hdr = new SrsGoHttpHeader ( ) ; <nl> + hdr = new SrsHttpHeader ( ) ; <nl> header_wrote = false ; <nl> status = SRS_CONSTS_HTTP_OK ; <nl> content_length = - 1 ; <nl> SrsGoHttpResponseWriter : : SrsGoHttpResponseWriter ( SrsStSocket * io ) <nl> header_sent = false ; <nl> } <nl> <nl> - SrsGoHttpResponseWriter : : ~ SrsGoHttpResponseWriter ( ) <nl> + SrsHttpResponseWriter : : ~ SrsHttpResponseWriter ( ) <nl> { <nl> srs_freep ( hdr ) ; <nl> } <nl> <nl> - int SrsGoHttpResponseWriter : : final_request ( ) <nl> + int SrsHttpResponseWriter : : final_request ( ) <nl> { <nl> / / complete the chunked encoding . <nl> if ( content_length = = - 1 ) { <nl> int SrsGoHttpResponseWriter : : final_request ( ) <nl> return write ( NULL , 0 ) ; <nl> } <nl> <nl> - SrsGoHttpHeader * SrsGoHttpResponseWriter : : header ( ) <nl> + SrsHttpHeader * SrsHttpResponseWriter : : header ( ) <nl> { <nl> return hdr ; <nl> } <nl> <nl> - int SrsGoHttpResponseWriter : : write ( char * data , int size ) <nl> + int SrsHttpResponseWriter : : write ( char * data , int size ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> int SrsGoHttpResponseWriter : : write ( char * data , int size ) <nl> return ret ; <nl> } <nl> <nl> - void SrsGoHttpResponseWriter : : write_header ( int code ) <nl> + void SrsHttpResponseWriter : : write_header ( int code ) <nl> { <nl> if ( header_wrote ) { <nl> srs_warn ( " http : multiple write_header calls , code = % d " , code ) ; <nl> void SrsGoHttpResponseWriter : : write_header ( int code ) <nl> content_length = hdr - > content_length ( ) ; <nl> } <nl> <nl> - int SrsGoHttpResponseWriter : : send_header ( char * data , int size ) <nl> + int SrsHttpResponseWriter : : send_header ( char * data , int size ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> mmm a / trunk / src / app / srs_app_http . hpp <nl> ppp b / trunk / src / app / srs_app_http . hpp <nl> class SrsHttpUri ; <nl> class SrsHttpMessage ; <nl> class SrsFileReader ; <nl> class SrsSimpleBuffer ; <nl> - class SrsGoHttpMuxEntry ; <nl> - class ISrsGoHttpResponseWriter ; <nl> + class SrsHttpMuxEntry ; <nl> + class ISrsHttpResponseWriter ; <nl> <nl> / / http specification <nl> / / CR = < US - ASCII CR , carriage return ( 13 ) > <nl> class ISrsGoHttpResponseWriter ; <nl> # define __SRS_HTTP_TS_SEND_BUFFER_SIZE 4096 <nl> <nl> / / helper function : response in json format . <nl> - extern int srs_go_http_response_json ( ISrsGoHttpResponseWriter * w , std : : string data ) ; <nl> + extern int srs_go_http_response_json ( ISrsHttpResponseWriter * w , std : : string data ) ; <nl> <nl> / / state of message <nl> enum SrsHttpParseState { <nl> enum SrsHttpParseState { <nl> } ; <nl> <nl> / / A Header represents the key - value pairs in an HTTP header . <nl> - class SrsGoHttpHeader <nl> + class SrsHttpHeader <nl> { <nl> private : <nl> std : : map < std : : string , std : : string > headers ; <nl> public : <nl> - SrsGoHttpHeader ( ) ; <nl> - virtual ~ SrsGoHttpHeader ( ) ; <nl> + SrsHttpHeader ( ) ; <nl> + virtual ~ SrsHttpHeader ( ) ; <nl> public : <nl> / / Add adds the key , value pair to the header . <nl> / / It appends to any existing values associated with key . 
<nl> class SrsGoHttpHeader <nl> / / A ResponseWriter interface is used by an HTTP handler to <nl> / / construct an HTTP response . <nl> / / Usage 1 , response with specified length content : <nl> - / / ISrsGoHttpResponseWriter * w ; / / create or get response . <nl> + / / ISrsHttpResponseWriter * w ; / / create or get response . <nl> / / std : : string msg = " Hello , HTTP ! " ; <nl> / / w - > header ( ) - > set_content_type ( " text / plain ; charset = utf - 8 " ) ; <nl> / / w - > header ( ) - > set_content_length ( msg . length ( ) ) ; <nl> class SrsGoHttpHeader <nl> / / w - > write ( ( char * ) msg . data ( ) , ( int ) msg . length ( ) ) ; <nl> / / w - > final_request ( ) ; / / optional flush . <nl> / / Usage 2 , response with HTTP code only , zero content length . <nl> - / / ISrsGoHttpResponseWriter * w ; / / create or get response . <nl> + / / ISrsHttpResponseWriter * w ; / / create or get response . <nl> / / w - > header ( ) - > set_content_length ( 0 ) ; <nl> / / w - > write_header ( SRS_CONSTS_HTTP_OK ) ; <nl> / / w - > final_request ( ) ; <nl> / / Usage 3 , response in chunked encoding . <nl> - / / ISrsGoHttpResponseWriter * w ; / / create or get response . <nl> + / / ISrsHttpResponseWriter * w ; / / create or get response . <nl> / / std : : string msg = " Hello , HTTP ! " ; <nl> / / w - > header ( ) - > set_content_type ( " application / octet - stream " ) ; <nl> / / w - > write_header ( SRS_CONSTS_HTTP_OK ) ; <nl> class SrsGoHttpHeader <nl> / / w - > write ( ( char * ) msg . data ( ) , ( int ) msg . length ( ) ) ; <nl> / / w - > write ( ( char * ) msg . data ( ) , ( int ) msg . length ( ) ) ; <nl> / / w - > final_request ( ) ; / / required to end the chunked and flush . <nl> - class ISrsGoHttpResponseWriter <nl> + class ISrsHttpResponseWriter <nl> { <nl> public : <nl> - ISrsGoHttpResponseWriter ( ) ; <nl> - virtual ~ ISrsGoHttpResponseWriter ( ) ; <nl> + ISrsHttpResponseWriter ( ) ; <nl> + virtual ~ ISrsHttpResponseWriter ( ) ; <nl> public : <nl> / / when chunked mode , <nl> / / final the request to complete the chunked encoding . <nl> class ISrsGoHttpResponseWriter <nl> / / Header returns the header map that will be sent by WriteHeader . <nl> / / Changing the header after a call to WriteHeader ( or Write ) has <nl> / / no effect . <nl> - virtual SrsGoHttpHeader * header ( ) = 0 ; <nl> + virtual SrsHttpHeader * header ( ) = 0 ; <nl> <nl> / / Write writes the data to the connection as part of an HTTP reply . <nl> / / If WriteHeader has not yet been called , Write calls WriteHeader ( http . StatusOK ) <nl> class ISrsGoHttpResponseWriter <nl> / / and then return . Returning signals that the request is finished <nl> / / and that the HTTP server can move on to the next request on <nl> / / the connection . 
<nl> - class ISrsGoHttpHandler <nl> + class ISrsHttpHandler <nl> { <nl> public : <nl> - SrsGoHttpMuxEntry * entry ; <nl> + SrsHttpMuxEntry * entry ; <nl> public : <nl> - ISrsGoHttpHandler ( ) ; <nl> - virtual ~ ISrsGoHttpHandler ( ) ; <nl> + ISrsHttpHandler ( ) ; <nl> + virtual ~ ISrsHttpHandler ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) = 0 ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) = 0 ; <nl> } ; <nl> <nl> / / Redirect to a fixed URL <nl> - class SrsGoHttpRedirectHandler : public ISrsGoHttpHandler <nl> + class SrsHttpRedirectHandler : public ISrsHttpHandler <nl> { <nl> private : <nl> std : : string url ; <nl> int code ; <nl> public : <nl> - SrsGoHttpRedirectHandler ( std : : string u , int c ) ; <nl> - virtual ~ SrsGoHttpRedirectHandler ( ) ; <nl> + SrsHttpRedirectHandler ( std : : string u , int c ) ; <nl> + virtual ~ SrsHttpRedirectHandler ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> / / NotFound replies to the request with an HTTP 404 not found error . <nl> - class SrsGoHttpNotFoundHandler : public ISrsGoHttpHandler <nl> + class SrsHttpNotFoundHandler : public ISrsHttpHandler <nl> { <nl> public : <nl> - SrsGoHttpNotFoundHandler ( ) ; <nl> - virtual ~ SrsGoHttpNotFoundHandler ( ) ; <nl> + SrsHttpNotFoundHandler ( ) ; <nl> + virtual ~ SrsHttpNotFoundHandler ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> / / FileServer returns a handler that serves HTTP requests <nl> class SrsGoHttpNotFoundHandler : public ISrsGoHttpHandler <nl> / / To use the operating system ' s file system implementation , <nl> / / use http . Dir : <nl> / / <nl> - / / http . Handle ( " / " , SrsGoHttpFileServer ( " / tmp " ) ) <nl> - / / http . Handle ( " / " , SrsGoHttpFileServer ( " static - dir " ) ) <nl> - class SrsGoHttpFileServer : public ISrsGoHttpHandler <nl> + / / http . Handle ( " / " , SrsHttpFileServer ( " / tmp " ) ) <nl> + / / http . 
Handle ( " / " , SrsHttpFileServer ( " static - dir " ) ) <nl> + class SrsHttpFileServer : public ISrsHttpHandler <nl> { <nl> protected : <nl> std : : string dir ; <nl> public : <nl> - SrsGoHttpFileServer ( std : : string root_dir ) ; <nl> - virtual ~ SrsGoHttpFileServer ( ) ; <nl> + SrsHttpFileServer ( std : : string root_dir ) ; <nl> + virtual ~ SrsHttpFileServer ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> private : <nl> / * * <nl> * serve the file by specified path <nl> * / <nl> - virtual int serve_file ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath ) ; <nl> - virtual int serve_flv_file ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath ) ; <nl> - virtual int serve_mp4_file ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath ) ; <nl> + virtual int serve_file ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath ) ; <nl> + virtual int serve_flv_file ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath ) ; <nl> + virtual int serve_mp4_file ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath ) ; <nl> protected : <nl> / * * <nl> * when access flv file with x . flv ? start = xxx <nl> * / <nl> - virtual int serve_flv_stream ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath , int offset ) ; <nl> + virtual int serve_flv_stream ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath , int offset ) ; <nl> / * * <nl> * when access mp4 file with x . mp4 ? range = start - end <nl> * @ param start the start offset in bytes . <nl> * @ param end the end offset in bytes . - 1 to end of file . <nl> * @ remark response data in [ start , end ] . <nl> * / <nl> - virtual int serve_mp4_stream ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath , int start , int end ) ; <nl> + virtual int serve_mp4_stream ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath , int start , int end ) ; <nl> protected : <nl> / * * <nl> * copy the fs to response writer in size bytes . <nl> * / <nl> - virtual int copy ( ISrsGoHttpResponseWriter * w , SrsFileReader * fs , SrsHttpMessage * r , int size ) ; <nl> + virtual int copy ( ISrsHttpResponseWriter * w , SrsFileReader * fs , SrsHttpMessage * r , int size ) ; <nl> } ; <nl> <nl> / / the mux entry for server mux . <nl> - class SrsGoHttpMuxEntry <nl> + class SrsHttpMuxEntry <nl> { <nl> public : <nl> bool explicit_match ; <nl> - ISrsGoHttpHandler * handler ; <nl> + ISrsHttpHandler * handler ; <nl> std : : string pattern ; <nl> bool enabled ; <nl> public : <nl> - SrsGoHttpMuxEntry ( ) ; <nl> - virtual ~ SrsGoHttpMuxEntry ( ) ; <nl> + SrsHttpMuxEntry ( ) ; <nl> + virtual ~ SrsHttpMuxEntry ( ) ; <nl> } ; <nl> <nl> / / ServeMux is an HTTP request multiplexer . <nl> class SrsGoHttpMuxEntry <nl> / / ServeMux also takes care of sanitizing the URL request path , <nl> / / redirecting any request containing . or . . elements to an <nl> / / equivalent . - and . . - free URL . <nl> - class SrsGoHttpServeMux <nl> + class SrsHttpServeMux <nl> { <nl> private : <nl> / / the pattern handler . <nl> - std : : map < std : : string , SrsGoHttpMuxEntry * > entries ; <nl> + std : : map < std : : string , SrsHttpMuxEntry * > entries ; <nl> / / the vhost handler . 
<nl> - std : : map < std : : string , ISrsGoHttpHandler * > vhosts ; <nl> + std : : map < std : : string , ISrsHttpHandler * > vhosts ; <nl> public : <nl> - SrsGoHttpServeMux ( ) ; <nl> - virtual ~ SrsGoHttpServeMux ( ) ; <nl> + SrsHttpServeMux ( ) ; <nl> + virtual ~ SrsHttpServeMux ( ) ; <nl> public : <nl> / * * <nl> * initialize the http serve mux . <nl> class SrsGoHttpServeMux <nl> public : <nl> / / Handle registers the handler for the given pattern . <nl> / / If a handler already exists for pattern , Handle panics . <nl> - virtual int handle ( std : : string pattern , ISrsGoHttpHandler * handler ) ; <nl> - / / interface ISrsGoHttpHandler <nl> + virtual int handle ( std : : string pattern , ISrsHttpHandler * handler ) ; <nl> + / / interface ISrsHttpHandler <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> private : <nl> - virtual int find_handler ( SrsHttpMessage * r , ISrsGoHttpHandler * * ph ) ; <nl> - virtual int match ( SrsHttpMessage * r , ISrsGoHttpHandler * * ph ) ; <nl> + virtual int find_handler ( SrsHttpMessage * r , ISrsHttpHandler * * ph ) ; <nl> + virtual int match ( SrsHttpMessage * r , ISrsHttpHandler * * ph ) ; <nl> virtual bool path_match ( std : : string pattern , std : : string path ) ; <nl> } ; <nl> <nl> / * * <nl> * response writer use st socket <nl> * / <nl> - class SrsGoHttpResponseWriter : public ISrsGoHttpResponseWriter <nl> + class SrsHttpResponseWriter : public ISrsHttpResponseWriter <nl> { <nl> private : <nl> SrsStSocket * skt ; <nl> - SrsGoHttpHeader * hdr ; <nl> + SrsHttpHeader * hdr ; <nl> private : <nl> / / reply header has been ( logically ) written <nl> bool header_wrote ; <nl> class SrsGoHttpResponseWriter : public ISrsGoHttpResponseWriter <nl> / / logically written . <nl> bool header_sent ; <nl> public : <nl> - SrsGoHttpResponseWriter ( SrsStSocket * io ) ; <nl> - virtual ~ SrsGoHttpResponseWriter ( ) ; <nl> + SrsHttpResponseWriter ( SrsStSocket * io ) ; <nl> + virtual ~ SrsHttpResponseWriter ( ) ; <nl> public : <nl> virtual int final_request ( ) ; <nl> - virtual SrsGoHttpHeader * header ( ) ; <nl> + virtual SrsHttpHeader * header ( ) ; <nl> virtual int write ( char * data , int size ) ; <nl> virtual void write_header ( int code ) ; <nl> virtual int send_header ( char * data , int size ) ; <nl> mmm a / trunk / src / app / srs_app_http_api . cpp <nl> ppp b / trunk / src / app / srs_app_http_api . 
cpp <nl> SrsGoApiRoot : : ~ SrsGoApiRoot ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiRoot : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiRoot : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream ss ; <nl> <nl> SrsGoApiApi : : ~ SrsGoApiApi ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiApi : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiApi : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream ss ; <nl> <nl> SrsGoApiV1 : : ~ SrsGoApiV1 ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiV1 : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiV1 : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream ss ; <nl> <nl> SrsGoApiVersion : : ~ SrsGoApiVersion ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiVersion : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiVersion : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream ss ; <nl> <nl> SrsGoApiSummaries : : ~ SrsGoApiSummaries ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiSummaries : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiSummaries : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream ss ; <nl> srs_api_dump_summaries ( ss ) ; <nl> SrsGoApiRusages : : ~ SrsGoApiRusages ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiRusages : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * req ) <nl> + int SrsGoApiRusages : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * req ) <nl> { <nl> std : : stringstream ss ; <nl> <nl> SrsGoApiSelfProcStats : : ~ SrsGoApiSelfProcStats ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiSelfProcStats : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiSelfProcStats : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream ss ; <nl> <nl> SrsGoApiSystemProcStats : : ~ SrsGoApiSystemProcStats ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiSystemProcStats : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiSystemProcStats : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream ss ; <nl> <nl> SrsGoApiMemInfos : : ~ SrsGoApiMemInfos ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiMemInfos : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiMemInfos : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream ss ; <nl> <nl> SrsGoApiAuthors : : ~ SrsGoApiAuthors ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiAuthors : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiAuthors : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream ss ; <nl> <nl> SrsGoApiRequests : : ~ SrsGoApiRequests ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiRequests : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiRequests : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> SrsHttpMessage * req = r ; <nl> <nl> SrsGoApiVhosts : : ~ SrsGoApiVhosts ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiVhosts : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiVhosts : : serve_http 
( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream data ; <nl> SrsStatistic * stat = SrsStatistic : : instance ( ) ; <nl> SrsGoApiStreams : : ~ SrsGoApiStreams ( ) <nl> { <nl> } <nl> <nl> - int SrsGoApiStreams : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsGoApiStreams : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> std : : stringstream data ; <nl> SrsStatistic * stat = SrsStatistic : : instance ( ) ; <nl> int SrsGoApiStreams : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> return srs_go_http_response_json ( w , ss . str ( ) ) ; <nl> } <nl> <nl> - SrsHttpApi : : SrsHttpApi ( SrsServer * svr , st_netfd_t fd , SrsGoHttpServeMux * m ) <nl> + SrsHttpApi : : SrsHttpApi ( SrsServer * svr , st_netfd_t fd , SrsHttpServeMux * m ) <nl> : SrsConnection ( svr , fd ) <nl> { <nl> mux = m ; <nl> int SrsHttpApi : : do_cycle ( ) <nl> SrsAutoFree ( SrsHttpMessage , req ) ; <nl> <nl> / / ok , handle http request . <nl> - SrsGoHttpResponseWriter writer ( & skt ) ; <nl> + SrsHttpResponseWriter writer ( & skt ) ; <nl> if ( ( ret = process_request ( & writer , req ) ) ! = ERROR_SUCCESS ) { <nl> return ret ; <nl> } <nl> int SrsHttpApi : : do_cycle ( ) <nl> return ret ; <nl> } <nl> <nl> - int SrsHttpApi : : process_request ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsHttpApi : : process_request ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> mmm a / trunk / src / app / srs_app_http_api . hpp <nl> ppp b / trunk / src / app / srs_app_http_api . hpp <nl> class SrsHttpHandler ; <nl> # include < srs_app_http . hpp > <nl> <nl> / / for http root . <nl> - class SrsGoApiRoot : public ISrsGoHttpHandler <nl> + class SrsGoApiRoot : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiRoot ( ) ; <nl> virtual ~ SrsGoApiRoot ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiApi : public ISrsGoHttpHandler <nl> + class SrsGoApiApi : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiApi ( ) ; <nl> virtual ~ SrsGoApiApi ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiV1 : public ISrsGoHttpHandler <nl> + class SrsGoApiV1 : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiV1 ( ) ; <nl> virtual ~ SrsGoApiV1 ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiVersion : public ISrsGoHttpHandler <nl> + class SrsGoApiVersion : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiVersion ( ) ; <nl> virtual ~ SrsGoApiVersion ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiSummaries : public ISrsGoHttpHandler <nl> + class SrsGoApiSummaries : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiSummaries ( ) ; <nl> virtual ~ SrsGoApiSummaries ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , 
SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiRusages : public ISrsGoHttpHandler <nl> + class SrsGoApiRusages : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiRusages ( ) ; <nl> virtual ~ SrsGoApiRusages ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiSelfProcStats : public ISrsGoHttpHandler <nl> + class SrsGoApiSelfProcStats : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiSelfProcStats ( ) ; <nl> virtual ~ SrsGoApiSelfProcStats ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiSystemProcStats : public ISrsGoHttpHandler <nl> + class SrsGoApiSystemProcStats : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiSystemProcStats ( ) ; <nl> virtual ~ SrsGoApiSystemProcStats ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiMemInfos : public ISrsGoHttpHandler <nl> + class SrsGoApiMemInfos : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiMemInfos ( ) ; <nl> virtual ~ SrsGoApiMemInfos ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiAuthors : public ISrsGoHttpHandler <nl> + class SrsGoApiAuthors : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiAuthors ( ) ; <nl> virtual ~ SrsGoApiAuthors ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiRequests : public ISrsGoHttpHandler <nl> + class SrsGoApiRequests : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiRequests ( ) ; <nl> virtual ~ SrsGoApiRequests ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiVhosts : public ISrsGoHttpHandler <nl> + class SrsGoApiVhosts : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiVhosts ( ) ; <nl> virtual ~ SrsGoApiVhosts ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> - class SrsGoApiStreams : public ISrsGoHttpHandler <nl> + class SrsGoApiStreams : public ISrsHttpHandler <nl> { <nl> public : <nl> SrsGoApiStreams ( ) ; <nl> virtual ~ SrsGoApiStreams ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> class SrsHttpApi : public SrsConnection <nl> { <nl> private : <nl> SrsHttpParser * parser ; <nl> - SrsGoHttpServeMux * mux ; <nl> + SrsHttpServeMux * mux ; <nl> bool crossdomain_required ; <nl> public : <nl> - SrsHttpApi ( 
SrsServer * svr , st_netfd_t fd , SrsGoHttpServeMux * m ) ; <nl> + SrsHttpApi ( SrsServer * svr , st_netfd_t fd , SrsHttpServeMux * m ) ; <nl> virtual ~ SrsHttpApi ( ) ; <nl> public : <nl> virtual void kbps_resample ( ) ; <nl> class SrsHttpApi : public SrsConnection <nl> protected : <nl> virtual int do_cycle ( ) ; <nl> private : <nl> - virtual int process_request ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int process_request ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> # endif <nl> mmm a / trunk / src / app / srs_app_http_conn . cpp <nl> ppp b / trunk / src / app / srs_app_http_conn . cpp <nl> using namespace std ; <nl> # include < srs_app_pithy_print . hpp > <nl> <nl> SrsVodStream : : SrsVodStream ( string root_dir ) <nl> - : SrsGoHttpFileServer ( root_dir ) <nl> + : SrsHttpFileServer ( root_dir ) <nl> { <nl> } <nl> <nl> SrsVodStream : : ~ SrsVodStream ( ) <nl> { <nl> } <nl> <nl> - int SrsVodStream : : serve_flv_stream ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , string fullpath , int offset ) <nl> + int SrsVodStream : : serve_flv_stream ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , string fullpath , int offset ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> int SrsVodStream : : serve_flv_stream ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * <nl> return ret ; <nl> } <nl> <nl> - int SrsVodStream : : serve_mp4_stream ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , string fullpath , int start , int end ) <nl> + int SrsVodStream : : serve_mp4_stream ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , string fullpath , int start , int end ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> int SrsMp3StreamEncoder : : dump_cache ( SrsConsumer * consumer ) <nl> return cache - > dump_cache ( consumer ) ; <nl> } <nl> <nl> - SrsStreamWriter : : SrsStreamWriter ( ISrsGoHttpResponseWriter * w ) <nl> + SrsStreamWriter : : SrsStreamWriter ( ISrsHttpResponseWriter * w ) <nl> { <nl> writer = w ; <nl> } <nl> SrsLiveStream : : ~ SrsLiveStream ( ) <nl> srs_freep ( req ) ; <nl> } <nl> <nl> - int SrsLiveStream : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsLiveStream : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> void SrsHlsM3u8Stream : : set_m3u8 ( std : : string v ) <nl> m3u8 = v ; <nl> } <nl> <nl> - int SrsHlsM3u8Stream : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsHlsM3u8Stream : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> void SrsHlsTsStream : : set_ts ( std : : string v ) <nl> ts = v ; <nl> } <nl> <nl> - int SrsHlsTsStream : : serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsHlsTsStream : : serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> int SrsHttpServer : : mount_hls ( SrsRequest * r ) <nl> SrsHlsEntry * entry = hls [ r - > vhost ] ; <nl> <nl> / / TODO : FIXME : supports reload . <nl> - std : : map < std : : string , ISrsGoHttpHandler * > : : iterator it ; <nl> + std : : map < std : : string , ISrsHttpHandler * > : : iterator it ; <nl> for ( it = entry - > streams . begin ( ) ; it ! = entry - > streams . 
end ( ) ; + + it ) { <nl> - ISrsGoHttpHandler * stream = it - > second ; <nl> + ISrsHttpHandler * stream = it - > second ; <nl> stream - > entry - > enabled = true ; <nl> } <nl> <nl> int SrsHttpServer : : hls_update_m3u8 ( SrsRequest * r , string m3u8 ) <nl> mount = srs_string_replace ( mount , SRS_CONSTS_RTMP_DEFAULT_VHOST " / " , " / " ) ; <nl> <nl> if ( entry - > streams . find ( mount ) = = entry - > streams . end ( ) ) { <nl> - ISrsGoHttpHandler * he = new SrsHlsM3u8Stream ( ) ; <nl> + ISrsHttpHandler * he = new SrsHlsM3u8Stream ( ) ; <nl> entry - > streams [ mount ] = he ; <nl> <nl> if ( ( ret = mux . handle ( mount , he ) ) ! = ERROR_SUCCESS ) { <nl> int SrsHttpServer : : hls_update_ts ( SrsRequest * r , string uri , string ts ) <nl> mount + = uri ; <nl> <nl> if ( entry - > streams . find ( mount ) = = entry - > streams . end ( ) ) { <nl> - ISrsGoHttpHandler * he = new SrsHlsTsStream ( ) ; <nl> + ISrsHttpHandler * he = new SrsHlsTsStream ( ) ; <nl> entry - > streams [ mount ] = he ; <nl> <nl> if ( ( ret = mux . handle ( mount , he ) ) ! = ERROR_SUCCESS ) { <nl> void SrsHttpServer : : unmount_hls ( SrsRequest * r ) <nl> <nl> SrsHlsEntry * entry = hls [ r - > vhost ] ; <nl> <nl> - std : : map < std : : string , ISrsGoHttpHandler * > : : iterator it ; <nl> + std : : map < std : : string , ISrsHttpHandler * > : : iterator it ; <nl> for ( it = entry - > streams . begin ( ) ; it ! = entry - > streams . end ( ) ; + + it ) { <nl> - ISrsGoHttpHandler * stream = it - > second ; <nl> + ISrsHttpHandler * stream = it - > second ; <nl> stream - > entry - > enabled = false ; <nl> } <nl> } <nl> int SrsHttpConn : : do_cycle ( ) <nl> SrsAutoFree ( SrsHttpMessage , req ) ; <nl> <nl> / / ok , handle http request . <nl> - SrsGoHttpResponseWriter writer ( & skt ) ; <nl> + SrsHttpResponseWriter writer ( & skt ) ; <nl> if ( ( ret = process_request ( & writer , req ) ) ! = ERROR_SUCCESS ) { <nl> return ret ; <nl> } <nl> int SrsHttpConn : : do_cycle ( ) <nl> return ret ; <nl> } <nl> <nl> - int SrsHttpConn : : process_request ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) <nl> + int SrsHttpConn : : process_request ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> mmm a / trunk / src / app / srs_app_http_conn . hpp <nl> ppp b / trunk / src / app / srs_app_http_conn . hpp <nl> class SrsSharedPtrMessage ; <nl> * server will write flv header and sequence header , <nl> * then seek ( 10240 ) and response flv tag data . 
<nl> * / <nl> - class SrsVodStream : public SrsGoHttpFileServer <nl> + class SrsVodStream : public SrsHttpFileServer <nl> { <nl> public : <nl> SrsVodStream ( std : : string root_dir ) ; <nl> virtual ~ SrsVodStream ( ) ; <nl> protected : <nl> - virtual int serve_flv_stream ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath , int offset ) ; <nl> - virtual int serve_mp4_stream ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath , int start , int end ) ; <nl> + virtual int serve_flv_stream ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath , int offset ) ; <nl> + virtual int serve_mp4_stream ( ISrsHttpResponseWriter * w , SrsHttpMessage * r , std : : string fullpath , int start , int end ) ; <nl> } ; <nl> <nl> / * * <nl> class SrsMp3StreamEncoder : public ISrsStreamEncoder <nl> class SrsStreamWriter : public SrsFileWriter <nl> { <nl> private : <nl> - ISrsGoHttpResponseWriter * writer ; <nl> + ISrsHttpResponseWriter * writer ; <nl> public : <nl> - SrsStreamWriter ( ISrsGoHttpResponseWriter * w ) ; <nl> + SrsStreamWriter ( ISrsHttpResponseWriter * w ) ; <nl> virtual ~ SrsStreamWriter ( ) ; <nl> public : <nl> virtual int open ( std : : string file ) ; <nl> class SrsStreamWriter : public SrsFileWriter <nl> * the flv live stream supports access rtmp in flv over http . <nl> * srs will remux rtmp to flv streaming . <nl> * / <nl> - class SrsLiveStream : public ISrsGoHttpHandler <nl> + class SrsLiveStream : public ISrsHttpHandler <nl> { <nl> private : <nl> SrsRequest * req ; <nl> class SrsLiveStream : public ISrsGoHttpHandler <nl> SrsLiveStream ( SrsSource * s , SrsRequest * r , SrsStreamCache * c ) ; <nl> virtual ~ SrsLiveStream ( ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> private : <nl> virtual int streaming_send_messages ( ISrsStreamEncoder * enc , SrsSharedPtrMessage * * msgs , int nb_msgs ) ; <nl> } ; <nl> struct SrsLiveEntry <nl> / * * <nl> * the m3u8 stream handler . <nl> * / <nl> - class SrsHlsM3u8Stream : public ISrsGoHttpHandler <nl> + class SrsHlsM3u8Stream : public ISrsHttpHandler <nl> { <nl> private : <nl> std : : string m3u8 ; <nl> class SrsHlsM3u8Stream : public ISrsGoHttpHandler <nl> public : <nl> virtual void set_m3u8 ( std : : string v ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> / * * <nl> * the ts stream handler . <nl> * / <nl> - class SrsHlsTsStream : public ISrsGoHttpHandler <nl> + class SrsHlsTsStream : public ISrsHttpHandler <nl> { <nl> private : <nl> std : : string ts ; <nl> class SrsHlsTsStream : public ISrsGoHttpHandler <nl> public : <nl> virtual void set_ts ( std : : string v ) ; <nl> public : <nl> - virtual int serve_http ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int serve_http ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> / * * <nl> struct SrsHlsEntry <nl> <nl> / / key : the m3u8 / ts file path . <nl> / / value : the http handler . 
<nl> - std : : map < std : : string , ISrsGoHttpHandler * > streams ; <nl> + std : : map < std : : string , ISrsHttpHandler * > streams ; <nl> <nl> SrsHlsEntry ( ) ; <nl> } ; <nl> struct SrsHlsEntry <nl> class SrsHttpServer : public ISrsReloadHandler <nl> { <nl> public : <nl> - SrsGoHttpServeMux mux ; <nl> + SrsHttpServeMux mux ; <nl> / / the flv live streaming template . <nl> std : : map < std : : string , SrsLiveEntry * > flvs ; <nl> / / the hls live streaming template . <nl> class SrsHttpConn : public SrsConnection <nl> protected : <nl> virtual int do_cycle ( ) ; <nl> private : <nl> - virtual int process_request ( ISrsGoHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> + virtual int process_request ( ISrsHttpResponseWriter * w , SrsHttpMessage * r ) ; <nl> } ; <nl> <nl> # endif <nl> mmm a / trunk / src / app / srs_app_server . cpp <nl> ppp b / trunk / src / app / srs_app_server . cpp <nl> SrsServer : : SrsServer ( ) <nl> / / for some global instance is not ready now , <nl> / / new these objects in initialize instead . <nl> # ifdef SRS_AUTO_HTTP_API <nl> - http_api_mux = new SrsGoHttpServeMux ( ) ; <nl> + http_api_mux = new SrsHttpServeMux ( ) ; <nl> # endif <nl> # ifdef SRS_AUTO_HTTP_SERVER <nl> http_stream_mux = new SrsHttpServer ( ) ; <nl> mmm a / trunk / src / app / srs_app_server . hpp <nl> ppp b / trunk / src / app / srs_app_server . hpp <nl> CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> <nl> class SrsServer ; <nl> class SrsConnection ; <nl> - class SrsGoHttpServeMux ; <nl> + class SrsHttpServeMux ; <nl> class SrsHttpServer ; <nl> class SrsIngester ; <nl> class SrsHttpHeartbeat ; <nl> class SrsServer : virtual public ISrsReloadHandler <nl> { <nl> private : <nl> # ifdef SRS_AUTO_HTTP_API <nl> - SrsGoHttpServeMux * http_api_mux ; <nl> + SrsHttpServeMux * http_api_mux ; <nl> # endif <nl> # ifdef SRS_AUTO_HTTP_SERVER <nl> SrsHttpServer * http_stream_mux ; <nl>
refine http for srs, rename SrsGoHttp to SrsHttp.
ossrs/srs
04dc60ebe1dd2f7f2f83b5f54a715c75807f1957
2015-03-04T05:09:35Z
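The rename is mechanical, but it also pins down the contract every API endpoint implements: one serve_http(ISrsHttpResponseWriter*, SrsHttpMessage*) method on a handler registered with an SrsHttpServeMux. Below is a minimal sketch of a new handler under the renamed interface; SrsGoApiPing is hypothetical, and the write() call is an assumption about the ISrsHttpResponseWriter API rather than a confirmed signature.

// Hypothetical handler following the pattern of the SrsGoApi* classes above.
class SrsGoApiPing : public ISrsHttpHandler
{
public:
    SrsGoApiPing() {}
    virtual ~SrsGoApiPing() {}
public:
    virtual int serve_http(ISrsHttpResponseWriter* w, SrsHttpMessage* r)
    {
        int ret = ERROR_SUCCESS;
        std::string body = "pong";
        // write() is assumed here; check the writer interface for the
        // actual method used to emit a response body.
        w->write((char*)body.data(), (int)body.length());
        return ret;
    }
};

Registration would mirror the mux.handle(mount, handler) calls visible in the HLS code above, e.g. mux.handle("/api/v1/ping", new SrsGoApiPing()).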
mmm a / atom / browser / net / url_request_buffer_job . cc <nl> ppp b / atom / browser / net / url_request_buffer_job . cc <nl> <nl> <nl> # include " atom / common / atom_constants . h " <nl> # include " base / strings / string_number_conversions . h " <nl> + # include " base / strings / utf_string_conversions . h " <nl> + # include " net / base / mime_util . h " <nl> # include " net / base / net_errors . h " <nl> <nl> namespace atom { <nl> <nl> + namespace { <nl> + <nl> + std : : string GetExtFromURL ( const GURL & url ) { <nl> + std : : string spec = url . spec ( ) ; <nl> + size_t index = spec . find_last_of ( ' . ' ) ; <nl> + if ( index = = std : : string : : npos | | index = = spec . size ( ) ) <nl> + return std : : string ( ) ; <nl> + return spec . substr ( index + 1 , spec . size ( ) - index - 1 ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> URLRequestBufferJob : : URLRequestBufferJob ( <nl> net : : URLRequest * request , net : : NetworkDelegate * network_delegate ) <nl> : JsAsker < net : : URLRequestSimpleJob > ( request , network_delegate ) , <nl> void URLRequestBufferJob : : StartAsync ( std : : unique_ptr < base : : Value > options ) { <nl> options - > GetAsBinary ( & binary ) ; <nl> } <nl> <nl> + if ( mime_type_ . empty ( ) ) { <nl> + std : : string ext = GetExtFromURL ( request ( ) - > url ( ) ) ; <nl> + # if defined ( OS_WIN ) <nl> + net : : GetWellKnownMimeTypeFromExtension ( base : : UTF8ToUTF16 ( ext ) , & mime_type_ ) ; <nl> + # else <nl> + net : : GetWellKnownMimeTypeFromExtension ( ext , & mime_type_ ) ; <nl> + # endif <nl> + } <nl> + <nl> if ( ! binary ) { <nl> NotifyStartError ( net : : URLRequestStatus ( <nl> net : : URLRequestStatus : : FAILED , net : : ERR_NOT_IMPLEMENTED ) ) ; <nl> mmm a / lib / browser / chrome - extension . js <nl> ppp b / lib / browser / chrome - extension . js <nl> app . once ( ' ready ' , function ( ) { <nl> if ( err ) { <nl> return callback ( - 6 ) / / FILE_NOT_FOUND <nl> } else { <nl> - return callback ( { mimeType : ' text / html ' , data : content } ) <nl> + return callback ( content ) <nl> } <nl> } ) <nl> } <nl>
Set default mimeType for BufferJob
electron/electron
f4fe60d126f9b86bca7dd7bfe99f49c81359cda5
2016-05-28T13:36:22Z
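The interesting part of this change is GetExtFromURL: when a protocol handler returns a bare buffer without a mimeType, the job now falls back to the well-known MIME type for the URL's extension. The helper is simple enough to test in isolation; the sketch below restates it over a plain std::string instead of GURL, so it compiles without the Chromium net stack. Note that because it scans the whole spec, a query string such as ?v=1.2 would win over the path extension, which matches the diff's behavior.

// Standalone restatement of GetExtFromURL above, using std::string in
// place of GURL so it can be compiled and tested on its own.
#include <iostream>
#include <string>

std::string GetExtFromSpec(const std::string& spec) {
  size_t index = spec.find_last_of('.');
  if (index == std::string::npos || index == spec.size())
    return std::string();
  return spec.substr(index + 1, spec.size() - index - 1);
}

int main() {
  std::cout << GetExtFromSpec("app://bundle/main.js") << "\n";  // prints "js"
  std::cout << GetExtFromSpec("app://bundle/README") << "\n";   // prints ""
  return 0;
}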
mmm a / bench / sim / sim . py <nl> ppp b / bench / sim / sim . py <nl> <nl> import socket <nl> import random <nl> <nl> - nthreads = 8 <nl> + nthreads = 500 <nl> nrequests = 5000 <nl> ip = ' 192 . 168 . 2 . 5 ' <nl> port = 8080 <nl> mmm a / src / event_queue . cc <nl> ppp b / src / event_queue . cc <nl> <nl> # include < signal . h > <nl> # include < strings . h > <nl> # include < new > <nl> + # include < algorithm > <nl> # include " config . hpp " <nl> # include " utils . hpp " <nl> # include " event_queue . hpp " <nl> <nl> / / TODO : report event queue statistics . <nl> <nl> + / / Some forward declarations <nl> + void queue_init_timer ( event_queue_t * event_queue , time_t secs ) ; <nl> + void queue_stop_timer ( event_queue_t * event_queue ) ; <nl> + <nl> void process_aio_notify ( event_queue_t * self ) { <nl> int res , nevents ; <nl> eventfd_t nevents_total ; <nl> void process_aio_notify ( event_queue_t * self ) { <nl> io_event events [ MAX_IO_EVENT_PROCESSING_BATCH_SIZE ] ; <nl> <nl> do { <nl> - / / Grab the events <nl> - nevents = io_getevents ( self - > aio_context , 1 , MAX_IO_EVENT_PROCESSING_BATCH_SIZE , <nl> + / / Grab the events . Note : we need to make sure we don ' t read <nl> + / / more than nevents_total , otherwise we risk reading an io <nl> + / / event and getting an eventfd for this read event later due <nl> + / / to the way the kernel is structured . Better avoid this <nl> + / / complexity ( hence std : : min below ) . <nl> + nevents = io_getevents ( self - > aio_context , 0 , std : : min ( ( int ) nevents_total , MAX_IO_EVENT_PROCESSING_BATCH_SIZE ) , <nl> events , NULL ) ; <nl> check ( " Waiting for AIO event failed " , nevents < 1 ) ; <nl> <nl> void * epoll_handler ( void * arg ) { <nl> / / epoll_wait might return with EINTR in some cases ( in <nl> / / particular under GDB ) , we just need to retry . <nl> if ( res = = - 1 & & errno = = EINTR ) { <nl> - if ( self - > dying ) <nl> - break ; <nl> - else <nl> + if ( ! self - > dying ) <nl> continue ; <nl> } <nl> + <nl> + / / See if we need to quit <nl> + if ( self - > dying ) <nl> + break ; <nl> + <nl> check ( " Waiting for epoll events failed " , res = = - 1 ) ; <nl> <nl> for ( int i = 0 ; i < res ; i + + ) { <nl> void create_event_queue ( event_queue_t * event_queue , int queue_id , event_handler_ <nl> CPU_SET ( queue_id % ncpus , & mask ) ; <nl> res = pthread_setaffinity_np ( event_queue - > epoll_thread , sizeof ( cpu_set_t ) , & mask ) ; <nl> check ( " Could not set thread affinity " , res ! = 0 ) ; <nl> + <nl> + / / Start the timer <nl> + queue_init_timer ( event_queue , TIMER_TICKS_IN_SECS ) ; <nl> } <nl> <nl> void destroy_event_queue ( event_queue_t * event_queue ) { <nl> void destroy_event_queue ( event_queue_t * event_queue ) { <nl> res = pthread_kill ( event_queue - > epoll_thread , SIGTERM ) ; <nl> check ( " Could not send kill signal to epoll thread " , res ! = 0 ) ; <nl> <nl> - / / Wait for the threads to die <nl> + / / Wait for the poll thread to die <nl> res = pthread_join ( event_queue - > epoll_thread , NULL ) ; <nl> check ( " Could not join with epoll thread " , res ! = 0 ) ; <nl> <nl> void destroy_event_queue ( event_queue_t * event_queue ) { <nl> queue_stop_timer ( event_queue ) ; <nl> <nl> / / Cleanup resources <nl> - close ( event_queue - > aio_notify_fd ) ; <nl> - close ( event_queue - > epoll_fd ) ; <nl> - io_destroy ( event_queue - > aio_context ) ; <nl> + res = close ( event_queue - > aio_notify_fd ) ; <nl> + check ( " Could not close aio_notify_fd " , res ! 
= 0 ) ; <nl> + <nl> + res = close ( event_queue - > epoll_fd ) ; <nl> + check ( " Could not close epoll_fd " , res ! = 0 ) ; <nl> + <nl> + res = io_destroy ( event_queue - > aio_context ) ; <nl> + check ( " Could not destroy aio_context " , res ! = 0 ) ; <nl> + <nl> ( & event_queue - > alloc ) - > ~ objectheap_alloc_t ( ) ; <nl> } <nl> <nl> mmm a / src / event_queue . hpp <nl> ppp b / src / event_queue . hpp <nl> void queue_watch_resource ( event_queue_t * event_queue , resource_t resource , <nl> event_op_t event_op , void * state ) ; <nl> void queue_forget_resource ( event_queue_t * event_queue , resource_t resource ) ; <nl> <nl> - void queue_init_timer ( event_queue_t * event_queue , time_t secs ) ; <nl> - void queue_stop_timer ( event_queue_t * event_queue ) ; <nl> - <nl> # endif / / __EVENT_QUEUE_HPP__ <nl> <nl> mmm a / src / main . cc <nl> ppp b / src / main . cc <nl> int main ( int argc , char * argv [ ] ) <nl> process_socket ( newsockfd , & worker_pool ) ; <nl> } <nl> <nl> - / / Cleanup the resources <nl> - destroy_worker_pool ( & worker_pool ) ; <nl> + / / Stop accepting connections <nl> res = shutdown ( sockfd , SHUT_RDWR ) ; <nl> check ( " Could not shutdown main socket " , res = = - 1 ) ; <nl> res = close ( sockfd ) ; <nl> check ( " Could not close main socket " , res ! = 0 ) ; <nl> + / / Clean up the rest <nl> + destroy_worker_pool ( & worker_pool ) ; <nl> res = close ( ( int ) ( long ) worker_pool . data ) ; <nl> check ( " Could not close served file " , res ! = 0 ) ; <nl> printf ( " Server offline \ n " ) ; <nl> mmm a / src / worker_pool . cc <nl> ppp b / src / worker_pool . cc <nl> void create_worker_pool ( worker_pool_t * worker_pool , event_handler_t event_handle <nl> worker_pool - > workers = ( event_queue_t * ) malloc ( sizeof ( event_queue_t ) * workers ) ; <nl> for ( int i = 0 ; i < workers ; i + + ) { <nl> create_event_queue ( & worker_pool - > workers [ i ] , i , event_handler , worker_pool ) ; <nl> - queue_init_timer ( & worker_pool - > workers [ i ] , TIMER_TICKS_IN_SECS ) ; <nl> } <nl> worker_pool - > active_worker = 0 ; <nl> / / TODO : consider creating lower priority threads to standby in <nl> void create_worker_pool ( worker_pool_t * worker_pool , event_handler_t event_handle <nl> <nl> void destroy_worker_pool ( worker_pool_t * worker_pool ) { <nl> for ( int i = 0 ; i < worker_pool - > nworkers ; i + + ) { <nl> - queue_stop_timer ( & worker_pool - > workers [ i ] ) ; <nl> destroy_event_queue ( & worker_pool - > workers [ i ] ) ; <nl> } <nl> free ( worker_pool - > workers ) ; <nl>
Fixes to the server so it doesn't lock up under various high-stress conditions
rethinkdb/rethinkdb
296306a87c964f4991715452f246deda9cf90635
2009-11-30T10:36:18Z
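The io_getevents change is the subtle one: the eventfd counter read first says how many completions have been signalled, and the kernel may already have more completions queued than that. Capping the read at std::min((int)nevents_total, ...) guarantees an event is never consumed before its eventfd increment has been observed, which would otherwise leave a stale wakeup behind. A minimal sketch of the capped-read pattern follows (Linux, compiles as C++ with -laio; drain_completions and the surrounding setup are illustrative, not the rethinkdb code).

// Assumes an eventfd registered for AIO completion notification.
#include <libaio.h>
#include <sys/eventfd.h>
#include <algorithm>

static const int kBatch = 64;

void drain_completions(io_context_t ctx, int notify_fd) {
    eventfd_t pending = 0;
    if (eventfd_read(notify_fd, &pending) != 0)
        return;  // nothing signalled yet (non-blocking fd)
    while (pending > 0) {
        io_event events[kBatch];
        // Never ask for more than the counter says has been signalled.
        int want = std::min((int)pending, kBatch);
        int n = io_getevents(ctx, 0, want, events, NULL);
        if (n <= 0)
            break;
        // ... dispatch events[0..n-1] to their callbacks here ...
        pending -= n;
    }
}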
mmm a / src / butil / files / dir_reader_unix . h <nl> ppp b / src / butil / files / dir_reader_unix . h <nl> class DirReaderUnix { <nl> if ( IGNORE_EINTR ( close ( fd_ ) ) ) <nl> RAW_LOG ( ERROR , " Failed to close directory handle " ) ; <nl> } <nl> - if ( NULL ! = dir_ ) <nl> - { <nl> + if ( NULL ! = dir_ ) { <nl> closedir ( dir_ ) ; <nl> } <nl> } <nl> class DirReaderUnix { <nl> / / Move to the next entry returning false if the iteration is complete . <nl> bool Next ( ) { <nl> int err = readdir_r ( dir_ , & entry_ , & current_ ) ; <nl> - if ( 0 ! = err | | NULL = = current_ ) <nl> - { <nl> + if ( 0 ! = err | | NULL = = current_ ) { <nl> return false ; <nl> } <nl> return true ; <nl>
code style fix
apache/incubator-brpc
aa78fce0af0e95702e07d3bde45bf18894e12f5d
2018-11-05T06:45:32Z
mmm a / arangod / Scheduler / SocketTask . cpp <nl> ppp b / arangod / Scheduler / SocketTask . cpp <nl> SocketTask : : SocketTask ( arangodb : : EventLoop loop , <nl> double keepAliveTimeout , bool skipInit = false ) <nl> : Task ( loop , " SocketTask " ) , <nl> _connectionStatistics ( nullptr ) , <nl> - _connectionInfo ( connectionInfo ) , <nl> + _connectionInfo ( std : : move ( connectionInfo ) ) , <nl> _readBuffer ( TRI_UNKNOWN_MEM_ZONE , READ_BLOCK_SIZE + 1 , false ) , <nl> _writeBuffer ( nullptr , nullptr ) , <nl> _peer ( std : : move ( socket ) ) , <nl>
move connection info
arangodb/arangodb
945edacf3f37b867b43cb3822b3352f691e5b9b6
2017-02-03T16:43:57Z
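This one-line change is the standard sink-argument idiom: connectionInfo is already taken by value, so moving it into the member turns the second copy into a move, and callers passing an rvalue pay for no copy at all. A minimal self-contained illustration (ConnectionInfo here is a stand-in struct, not the ArangoDB class):

#include <string>
#include <utility>

struct ConnectionInfo {
  std::string clientAddress;  // stand-in payload that is expensive to copy
};

class SocketTask {
 public:
  // Take by value, then move into the member: lvalue callers copy once,
  // rvalue callers move all the way through.
  explicit SocketTask(ConnectionInfo connectionInfo)
      : _connectionInfo(std::move(connectionInfo)) {}

 private:
  ConnectionInfo _connectionInfo;
};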
mmm a / src / preamble . js <nl> ppp b / src / preamble . js <nl> function addRunDependency ( id ) { <nl> if ( id ) { <nl> assert ( ! runDependencyTracking [ id ] ) ; <nl> runDependencyTracking [ id ] = 1 ; <nl> + # if ASSERTIONS <nl> if ( runDependencyWatcher = = = null & & typeof setInterval ! = = ' undefined ' ) { <nl> / / Check for missing dependencies every few seconds <nl> runDependencyWatcher = setInterval ( function ( ) { <nl> function addRunDependency ( id ) { <nl> if ( shown ) { <nl> Module . printErr ( ' ( end of list ) ' ) ; <nl> } <nl> - } , 6000 ) ; <nl> + } , 10000 ) ; <nl> } <nl> + # endif <nl> } else { <nl> Module . printErr ( ' warning : run dependency added without ID ' ) ; <nl> } <nl>
show awaited run dependencies only in ASSERTIONS builds
emscripten-core/emscripten
d5e550b1763ef2e3f4e6fbfc609784a3c75adecf
2013-05-22T01:31:23Z
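The same gating technique in C++ terms, as a rough sketch: the periodic still-waiting report is compiled only into assertion-enabled builds, with NDEBUG standing in for emscripten's ASSERTIONS define, so release builds never even start the watcher. Names here are illustrative.

#include <cstdio>
#include <map>
#include <string>

static std::map<std::string, int> runDependencyTracking;

void reportPendingRunDependencies() {
#ifndef NDEBUG  // stands in for emscripten's ASSERTIONS flag
    std::fprintf(stderr, "still waiting on run dependencies:\n");
    for (const auto& dep : runDependencyTracking)
        std::fprintf(stderr, "  dependency: %s\n", dep.first.c_str());
#endif
    // In NDEBUG builds the body compiles to nothing, mirroring how the
    // #if ASSERTIONS block drops the setInterval watcher entirely.
}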
mmm a / src / builtins / arm / builtins - arm . cc <nl> ppp b / src / builtins / arm / builtins - arm . cc <nl> void Builtins : : Generate_CallOrConstructForwardVarargs ( MacroAssembler * masm , <nl> __ Jump ( code , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> <nl> - / / static <nl> - / / The CSA macro " BranchIfCanUseFastCallFunction " should be used to determine <nl> - / / whether a JSFunction can be called using this stub . <nl> - void Builtins : : Generate_FastCallFunction ( MacroAssembler * masm ) { <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - r0 : the number of arguments ( not including the receiver ) <nl> - / / - - r1 : the function to call ( checked to be a JSFunction ) <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - __ AssertFunction ( r1 ) ; <nl> - __ ldr ( cp , FieldMemOperand ( r1 , JSFunction : : kContextOffset ) ) ; <nl> - <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - r0 : the number of arguments ( not including the receiver ) <nl> - / / - - r1 : the function to call ( checked to be a JSFunction ) <nl> - / / - - cp : the function context . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / On function call , call into the debugger if necessary . <nl> - __ CheckDebugHook ( r1 , no_reg , ParameterCount ( r0 ) , ParameterCount ( r0 ) ) ; <nl> - __ LoadRoot ( r3 , Heap : : kUndefinedValueRootIndex ) ; <nl> - <nl> - Register code = kJavaScriptCallCodeStartRegister ; <nl> - __ ldr ( code , FieldMemOperand ( r1 , JSFunction : : kCodeOffset ) ) ; <nl> - __ add ( code , code , Operand ( Code : : kHeaderSize - kHeapObjectTag ) ) ; <nl> - __ Jump ( code ) ; <nl> - } <nl> - <nl> / / static <nl> void Builtins : : Generate_CallFunction ( MacroAssembler * masm , <nl> ConvertReceiverMode mode ) { <nl> mmm a / src / builtins / arm64 / builtins - arm64 . cc <nl> ppp b / src / builtins / arm64 / builtins - arm64 . cc <nl> void Builtins : : Generate_CallOrConstructForwardVarargs ( MacroAssembler * masm , <nl> __ Jump ( code , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> <nl> - / / static <nl> - / / The CSA macro " BranchIfCanUseFastCallFunction " should be used to determine <nl> - / / whether a JSFunction can be called using this stub . <nl> - void Builtins : : Generate_FastCallFunction ( MacroAssembler * masm ) { <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - x0 : the number of arguments ( not including the receiver ) <nl> - / / - - x1 : the function to call ( checked to be a JSFunction ) <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - __ AssertFunction ( x1 ) ; <nl> - __ Ldr ( cp , FieldMemOperand ( x1 , JSFunction : : kContextOffset ) ) ; <nl> - <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - x0 : the number of arguments ( not including the receiver ) <nl> - / / - - x1 : the function to call ( checked to be a JSFunction ) <nl> - / / - - cp : the function context . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / On function call , call into the debugger if necessary . 
<nl> - __ Mov ( x2 , x0 ) ; <nl> - __ CheckDebugHook ( x1 , no_reg , ParameterCount ( x0 ) , ParameterCount ( x2 ) ) ; <nl> - __ LoadRoot ( x3 , Heap : : kUndefinedValueRootIndex ) ; <nl> - <nl> - Register code = kJavaScriptCallCodeStartRegister ; <nl> - __ Ldr ( code , FieldMemOperand ( x1 , JSFunction : : kCodeOffset ) ) ; <nl> - __ Add ( code , code , Operand ( Code : : kHeaderSize - kHeapObjectTag ) ) ; <nl> - __ Jump ( code ) ; <nl> - } <nl> - <nl> / / static <nl> void Builtins : : Generate_CallFunction ( MacroAssembler * masm , <nl> ConvertReceiverMode mode ) { <nl> mmm a / src / builtins / base . tq <nl> ppp b / src / builtins / base . tq <nl> extern macro Call ( <nl> extern macro Call ( <nl> Context , Callable , Object , Object , Object , Object , Object , Object ) : Object ; <nl> <nl> - extern macro FastCall ( Context , Callable , Object , Object ) : Object ; <nl> - <nl> extern macro ExtractFixedArray ( FixedArrayBase , Smi , Smi , Smi ) : FixedArrayBase ; <nl> extern macro ExtractFixedArray ( FixedArrayBase , Smi , Smi , Smi , <nl> constexpr ExtractFixedArrayFlags ) : FixedArrayBase ; <nl> macro NumberIsNaN ( number : Number ) : bool { <nl> } <nl> <nl> extern macro BranchIfToBooleanIsTrue ( Object ) : never labels Taken , NotTaken ; <nl> - extern macro BranchIfCanUseFastCallFunction ( HeapObject , int32 ) : <nl> - never labels Taken , <nl> - NotTaken ; <nl> <nl> macro ToBoolean ( obj : Object ) : bool { <nl> if ( BranchIfToBooleanIsTrue ( obj ) ) { <nl> mmm a / src / builtins / builtins - call - gen . cc <nl> ppp b / src / builtins / builtins - call - gen . cc <nl> void Builtins : : Generate_Call_ReceiverIsAny ( MacroAssembler * masm ) { <nl> Generate_Call ( masm , ConvertReceiverMode : : kAny ) ; <nl> } <nl> <nl> - void Builtins : : Generate_FastCallFunction_ReceiverIsNullOrUndefined ( <nl> - MacroAssembler * masm ) { <nl> - Generate_FastCallFunction ( masm ) ; <nl> - } <nl> - <nl> void Builtins : : Generate_CallVarargs ( MacroAssembler * masm ) { <nl> Generate_CallOrConstructVarargs ( masm , masm - > isolate ( ) - > builtins ( ) - > Call ( ) ) ; <nl> } <nl> void CallOrConstructBuiltinsAssembler : : CallOrConstructWithArrayLike ( <nl> } <nl> } <nl> <nl> - / / Takes a FixedArray of doubles and creates a new FixedArray with those <nl> - / / doubles boxed as HeapNumbers , then tail calls CallVarargs / ConstructVarargs <nl> - / / depending on whether { new_target } was passed . <nl> + / / Takes a FixedArray of doubles and creates a new FixedArray with those doubles <nl> + / / boxed as HeapNumbers , then tail calls CallVarargs / ConstructVarargs depending <nl> + / / on whether { new_target } was passed . <nl> void CallOrConstructBuiltinsAssembler : : CallOrConstructDoubleVarargs ( <nl> TNode < Object > target , SloppyTNode < Object > new_target , <nl> TNode < FixedDoubleArray > elements , TNode < Int32T > length , <nl> mmm a / src / builtins / builtins - definitions . h <nl> ppp b / src / builtins / builtins - definitions . h <nl> namespace internal { <nl> ASM ( Call_ReceiverIsNotNullOrUndefined ) \ <nl> ASM ( Call_ReceiverIsAny ) \ <nl> \ <nl> - ASM ( FastCallFunction_ReceiverIsNullOrUndefined ) \ <nl> - \ <nl> / * ES6 section 9 . 5 . 12 [ [ Call ] ] ( thisArgument , argumentsList ) * / \ <nl> TFC ( CallProxy , CallTrampoline , 1 ) \ <nl> ASM ( CallVarargs ) \ <nl> mmm a / src / builtins / builtins . h <nl> ppp b / src / builtins / builtins . 
h <nl> class Builtins { <nl> static void Generate_CallFunction ( MacroAssembler * masm , <nl> ConvertReceiverMode mode ) ; <nl> <nl> - static void Generate_FastCallFunction ( MacroAssembler * masm ) ; <nl> - <nl> static void Generate_CallBoundFunctionImpl ( MacroAssembler * masm ) ; <nl> <nl> static void Generate_Call ( MacroAssembler * masm , ConvertReceiverMode mode ) ; <nl> mmm a / src / builtins / ia32 / builtins - ia32 . cc <nl> ppp b / src / builtins / ia32 / builtins - ia32 . cc <nl> void Builtins : : Generate_CallOrConstructForwardVarargs ( MacroAssembler * masm , <nl> __ Jump ( code , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> <nl> - / / static <nl> - / / The CSA macro " BranchIfCanUseFastCallFunction " should be used to determine <nl> - / / whether a JSFunction can be called using this stub . <nl> - void Builtins : : Generate_FastCallFunction ( MacroAssembler * masm ) { <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - eax : the number of arguments ( not including the receiver ) <nl> - / / - - edi : the function to call ( checked to be a JSFunction ) <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - __ AssertFunction ( edi ) ; <nl> - __ mov ( esi , FieldOperand ( edi , JSFunction : : kContextOffset ) ) ; <nl> - <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - eax : the number of arguments ( not including the receiver ) <nl> - / / - - edi : the function to call ( checked to be a JSFunction ) <nl> - / / - - esi : the function context . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / On function call , call into the debugger if necessary . <nl> - __ CheckDebugHook ( edi , no_reg , ParameterCount ( eax ) , ParameterCount ( eax ) ) ; <nl> - __ mov ( edx , __ isolate ( ) - > factory ( ) - > undefined_value ( ) ) ; <nl> - <nl> - static_assert ( kJavaScriptCallCodeStartRegister = = ecx , " ABI mismatch " ) ; <nl> - __ mov ( ecx , FieldOperand ( edi , JSFunction : : kCodeOffset ) ) ; <nl> - __ add ( ecx , Immediate ( Code : : kHeaderSize - kHeapObjectTag ) ) ; <nl> - __ jmp ( ecx ) ; <nl> - } <nl> - <nl> / / static <nl> void Builtins : : Generate_CallFunction ( MacroAssembler * masm , <nl> ConvertReceiverMode mode ) { <nl> mmm a / src / builtins / mips / builtins - mips . cc <nl> ppp b / src / builtins / mips / builtins - mips . cc <nl> void Builtins : : Generate_CallOrConstructForwardVarargs ( MacroAssembler * masm , <nl> __ Jump ( code , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> <nl> - / / static <nl> - / / The CSA macro " BranchIfCanUseFastCallFunction " should be used to determine <nl> - / / whether a JSFunction can be called using this stub . <nl> - void Builtins : : Generate_FastCallFunction ( MacroAssembler * masm ) { <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - a0 : the number of arguments ( not including the receiver ) <nl> - / / - - a1 : the function to call ( checked to be a JSFunction ) <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - __ AssertFunction ( a1 ) ; <nl> - __ lw ( cp , FieldMemOperand ( a1 , JSFunction : : kContextOffset ) ) ; <nl> - <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - a0 : the number of arguments ( not including the receiver ) <nl> - / / - - a1 : the function to call ( checked to be a JSFunction ) <nl> - / / - - cp : the function context . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / On function call , call into the debugger if necessary . 
<nl> - __ CheckDebugHook ( a1 , no_reg , ParameterCount ( a0 ) , ParameterCount ( a0 ) ) ; <nl> - __ LoadRoot ( a3 , Heap : : kUndefinedValueRootIndex ) ; <nl> - <nl> - Register code = kJavaScriptCallCodeStartRegister ; <nl> - __ lw ( code , FieldMemOperand ( a1 , JSFunction : : kCodeOffset ) ) ; <nl> - __ Addu ( code , code , Code : : kHeaderSize - kHeapObjectTag ) ; <nl> - __ Jump ( code ) ; <nl> - } <nl> - <nl> / / static <nl> void Builtins : : Generate_CallFunction ( MacroAssembler * masm , <nl> ConvertReceiverMode mode ) { <nl> mmm a / src / builtins / mips64 / builtins - mips64 . cc <nl> ppp b / src / builtins / mips64 / builtins - mips64 . cc <nl> void Builtins : : Generate_CallOrConstructForwardVarargs ( MacroAssembler * masm , <nl> __ Jump ( code , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> <nl> - / / static <nl> - / / The CSA macro " BranchIfCanUseFastCallFunction " should be used to determine <nl> - / / whether a JSFunction can be called using this stub . <nl> - void Builtins : : Generate_FastCallFunction ( MacroAssembler * masm ) { <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - a0 : the number of arguments ( not including the receiver ) <nl> - / / - - a1 : the function to call ( checked to be a JSFunction ) <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - __ AssertFunction ( a1 ) ; <nl> - __ Ld ( cp , FieldMemOperand ( a1 , JSFunction : : kContextOffset ) ) ; <nl> - <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - a0 : the number of arguments ( not including the receiver ) <nl> - / / - - a1 : the function to call ( checked to be a JSFunction ) <nl> - / / - - cp : the function context . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / On function call , call into the debugger if necessary . <nl> - __ CheckDebugHook ( a1 , no_reg , ParameterCount ( a0 ) , ParameterCount ( a0 ) ) ; <nl> - __ LoadRoot ( a3 , Heap : : kUndefinedValueRootIndex ) ; <nl> - <nl> - Register code = kJavaScriptCallCodeStartRegister ; <nl> - __ Ld ( code , FieldMemOperand ( a1 , JSFunction : : kCodeOffset ) ) ; <nl> - __ Daddu ( code , code , Operand ( Code : : kHeaderSize - kHeapObjectTag ) ) ; <nl> - __ Jump ( code ) ; <nl> - } <nl> - <nl> / / static <nl> void Builtins : : Generate_CallFunction ( MacroAssembler * masm , <nl> ConvertReceiverMode mode ) { <nl> mmm a / src / builtins / ppc / builtins - ppc . cc <nl> ppp b / src / builtins / ppc / builtins - ppc . cc <nl> void Builtins : : Generate_CallOrConstructForwardVarargs ( MacroAssembler * masm , <nl> __ Jump ( code , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> <nl> - / / static <nl> - / / The CSA macro " BranchIfCanUseFastCallFunction " should be used to determine <nl> - / / whether a JSFunction can be called using this stub . <nl> - void Builtins : : Generate_FastCallFunction ( MacroAssembler * masm ) { <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - r3 : the number of arguments ( not including the receiver ) <nl> - / / - - r4 : the function to call ( checked to be a JSFunction ) <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - __ AssertFunction ( r4 ) ; <nl> - __ LoadP ( cp , FieldMemOperand ( r4 , JSFunction : : kContextOffset ) ) ; <nl> - <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - r3 : the number of arguments ( not including the receiver ) <nl> - / / - - r4 : the function to call ( checked to be a JSFunction ) <nl> - / / - - cp : the function context . 
<nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / On function call , call into the debugger if necessary . <nl> - __ CheckDebugHook ( r4 , no_reg , ParameterCount ( r3 ) , ParameterCount ( r3 ) ) ; <nl> - __ LoadRoot ( r6 , Heap : : kUndefinedValueRootIndex ) ; <nl> - <nl> - Register code = kJavaScriptCallCodeStartRegister ; <nl> - __ LoadP ( code , FieldMemOperand ( r4 , JSFunction : : kCodeOffset ) ) ; <nl> - __ addi ( code , code , Operand ( Code : : kHeaderSize - kHeapObjectTag ) ) ; <nl> - __ JumpToJSEntry ( code ) ; <nl> - } <nl> - <nl> / / static <nl> void Builtins : : Generate_CallFunction ( MacroAssembler * masm , <nl> ConvertReceiverMode mode ) { <nl> mmm a / src / builtins / s390 / builtins - s390 . cc <nl> ppp b / src / builtins / s390 / builtins - s390 . cc <nl> void Builtins : : Generate_CallOrConstructForwardVarargs ( MacroAssembler * masm , <nl> __ Jump ( code , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> <nl> - / / static <nl> - / / The CSA macro " BranchIfCanUseFastCallFunction " should be used to determine <nl> - / / whether a JSFunction can be called using this stub . <nl> - void Builtins : : Generate_FastCallFunction ( MacroAssembler * masm ) { <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - r2 : the number of arguments ( not including the receiver ) <nl> - / / - - r3 : the function to call ( checked to be a JSFunction ) <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - __ AssertFunction ( r3 ) ; <nl> - __ LoadP ( cp , FieldMemOperand ( r3 , JSFunction : : kContextOffset ) ) ; <nl> - <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - r2 : the number of arguments ( not including the receiver ) <nl> - / / - - r3 : the function to call ( checked to be a JSFunction ) <nl> - / / - - cp : the function context . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / On function call , call into the debugger if necessary . <nl> - __ CheckDebugHook ( r3 , no_reg , ParameterCount ( r2 ) , ParameterCount ( r2 ) ) ; <nl> - __ LoadRoot ( r5 , Heap : : kUndefinedValueRootIndex ) ; <nl> - <nl> - Register code = kJavaScriptCallCodeStartRegister ; <nl> - __ LoadP ( code , FieldMemOperand ( r3 , JSFunction : : kCodeOffset ) ) ; <nl> - __ AddP ( code , code , Operand ( Code : : kHeaderSize - kHeapObjectTag ) ) ; <nl> - __ JumpToJSEntry ( code ) ; <nl> - } <nl> - <nl> / / static <nl> void Builtins : : Generate_CallFunction ( MacroAssembler * masm , <nl> ConvertReceiverMode mode ) { <nl> mmm a / src / builtins / x64 / builtins - x64 . cc <nl> ppp b / src / builtins / x64 / builtins - x64 . cc <nl> void Builtins : : Generate_CallOrConstructForwardVarargs ( MacroAssembler * masm , <nl> __ Jump ( code , RelocInfo : : CODE_TARGET ) ; <nl> } <nl> <nl> - / / static <nl> - / / The CSA macro " BranchIfCanUseFastCallFunction " should be used to determine <nl> - / / whether a JSFunction can be called using this stub . 
<nl> - void Builtins : : Generate_FastCallFunction ( MacroAssembler * masm ) { <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - rax : the number of arguments ( not including the receiver ) <nl> - / / - - rdi : the function to call ( checked to be a JSFunction ) <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - __ AssertFunction ( rdi ) ; <nl> - __ movp ( rsi , FieldOperand ( rdi , JSFunction : : kContextOffset ) ) ; <nl> - <nl> - / / mmmmmmmmm - - S t a t e mmmmmmmmmmmm - <nl> - / / - - rax : the number of arguments ( not including the receiver ) <nl> - / / - - rdi : the function to call ( checked to be a JSFunction ) <nl> - / / - - rsi : the function context . <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / On function call , call into the debugger if necessary . <nl> - __ CheckDebugHook ( rdi , no_reg , ParameterCount ( rax ) , ParameterCount ( rax ) ) ; <nl> - __ LoadRoot ( rdx , Heap : : kUndefinedValueRootIndex ) ; <nl> - <nl> - __ movp ( rcx , FieldOperand ( rdi , JSFunction : : kCodeOffset ) ) ; <nl> - __ addp ( rcx , Immediate ( Code : : kHeaderSize - kHeapObjectTag ) ) ; <nl> - __ jmp ( rcx ) ; <nl> - } <nl> - <nl> / / static <nl> void Builtins : : Generate_CallFunction ( MacroAssembler * masm , <nl> ConvertReceiverMode mode ) { <nl> mmm a / src / code - stub - assembler . cc <nl> ppp b / src / code - stub - assembler . cc <nl> TNode < Map > CodeStubAssembler : : LoadJSArrayElementsMap ( <nl> LoadContextElement ( native_context , Context : : ArrayMapIndex ( kind ) ) ) ; <nl> } <nl> <nl> - void CodeStubAssembler : : BranchIfCanUseFastCallFunction ( <nl> - TNode < HeapObject > callable , TNode < Int32T > actualParameterCount , <nl> - Label * if_true , Label * if_false ) { <nl> - GotoIfNot ( IsJSFunction ( callable ) , if_false ) ; <nl> - <nl> - TNode < JSFunction > function = CAST ( callable ) ; <nl> - TNode < SharedFunctionInfo > sfi = LoadSharedFunctionInfo ( function ) ; <nl> - TNode < Word32T > flags = UncheckedCast < Word32T > ( LoadObjectField ( <nl> - sfi , SharedFunctionInfo : : kFlagsOffset , MachineType : : Uint32 ( ) ) ) ; <nl> - <nl> - GotoIf ( IsSetWord32 < SharedFunctionInfo : : IsClassConstructorBit > ( flags ) , <nl> - if_false ) ; <nl> - <nl> - / / Receiver needs to be converted for non - native sloppy mode functions . <nl> - GotoIfNot ( IsSetWord32 ( flags , SharedFunctionInfo : : IsNativeBit : : kMask | <nl> - SharedFunctionInfo : : IsStrictBit : : kMask ) , <nl> - if_false ) ; <nl> - <nl> - Branch ( Word32Equal ( actualParameterCount , LoadFormalParameterCount ( sfi ) ) , <nl> - if_true , if_false ) ; <nl> - } <nl> - <nl> - TNode < BoolT > CodeStubAssembler : : CanUseFastCallFunction ( <nl> - TNode < HeapObject > callable , TNode < Int32T > actualParameterCount ) { <nl> - Label if_true ( this ) , if_false ( this ) , done ( this ) ; <nl> - TVARIABLE ( BoolT , result ) ; <nl> - BranchIfCanUseFastCallFunction ( callable , actualParameterCount , & if_true , <nl> - & if_false ) ; <nl> - BIND ( & if_true ) ; <nl> - result = Int32TrueConstant ( ) ; <nl> - Goto ( & done ) ; <nl> - <nl> - BIND ( & if_false ) ; <nl> - result = Int32FalseConstant ( ) ; <nl> - Goto ( & done ) ; <nl> - <nl> - BIND ( & done ) ; <nl> - return result . 
value ( ) ; <nl> - } <nl> - <nl> - TNode < SharedFunctionInfo > CodeStubAssembler : : LoadSharedFunctionInfo ( <nl> - TNode < JSFunction > function ) { <nl> - return CAST ( LoadObjectField ( function , JSFunction : : kSharedFunctionInfoOffset ) ) ; <nl> - } <nl> - <nl> - TNode < Int32T > CodeStubAssembler : : LoadFormalParameterCount ( <nl> - TNode < SharedFunctionInfo > sfi ) { <nl> - return UncheckedCast < Int32T > ( <nl> - LoadObjectField ( sfi , SharedFunctionInfo : : kFormalParameterCountOffset , <nl> - MachineType : : Uint16 ( ) ) ) ; <nl> - } <nl> - <nl> TNode < BoolT > CodeStubAssembler : : IsGeneratorFunction ( <nl> TNode < JSFunction > function ) { <nl> TNode < SharedFunctionInfo > const shared_function_info = <nl> mmm a / src / code - stub - assembler . h <nl> ppp b / src / code - stub - assembler . h <nl> class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler : : CodeAssembler { <nl> callable , receiver , args . . . ) ) ; <nl> } <nl> <nl> - / / Checks whether we can use " FastCall " instead of " Call " when calling <nl> - / / a JSFunction . This can be used for builtins where the user provides a <nl> - / / callback . The callback doesn ' t change during execution of the builtin , so <nl> - / / a lot of the checks that " Call " does can be done once upfront . <nl> - / / <nl> - / / These checks need to be kept in - sync with the " FastCall " and " Call * " stubs . <nl> - void BranchIfCanUseFastCallFunction ( TNode < HeapObject > callable , <nl> - TNode < Int32T > actualParameterCount , <nl> - Label * if_true , Label * if_false ) ; <nl> - <nl> - / / Uses the above function to simply return { true } or { false } , used in a <nl> - / / CSA_SLOW_ASSERT . <nl> - TNode < BoolT > CanUseFastCallFunction ( TNode < HeapObject > callable , <nl> - TNode < Int32T > actualParameterCount ) ; <nl> - <nl> - template < class . . . TArgs > <nl> - TNode < Object > FastCall ( TNode < Context > context , TNode < Object > callable , <nl> - TArgs . . . args ) { <nl> - CSA_SLOW_ASSERT ( this , CanUseFastCallFunction ( <nl> - CAST ( callable ) , Int32Constant ( sizeof . . . ( TArgs ) ) ) ) ; <nl> - <nl> - Callable call ( isolate ( ) - > builtins ( ) - > builtin_handle ( <nl> - Builtins : : kFastCallFunction_ReceiverIsNullOrUndefined ) , <nl> - CallTrampolineDescriptor { } ) ; <nl> - return UncheckedCast < Object > ( <nl> - CallJS ( call , context , callable , UndefinedConstant ( ) , args . . . ) ) ; <nl> - } <nl> - <nl> template < class A , class F , class G > <nl> TNode < A > Select ( SloppyTNode < BoolT > condition , const F & true_body , <nl> const G & false_body ) { <nl> class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler : : CodeAssembler { <nl> TNode < Map > LoadJSArrayElementsMap ( SloppyTNode < Int32T > kind , <nl> SloppyTNode < Context > native_context ) ; <nl> <nl> - TNode < SharedFunctionInfo > LoadSharedFunctionInfo ( TNode < JSFunction > function ) ; <nl> - TNode < Int32T > LoadFormalParameterCount ( TNode < SharedFunctionInfo > sfi ) ; <nl> - <nl> TNode < BoolT > IsGeneratorFunction ( TNode < JSFunction > function ) ; <nl> TNode < BoolT > HasPrototypeProperty ( TNode < JSFunction > function , TNode < Map > map ) ; <nl> void GotoIfPrototypeRequiresRuntimeLookup ( TNode < JSFunction > function , <nl> mmm a / test / js - perf - test / ArraySort / sort - base . js <nl> ppp b / test / js - perf - test / ArraySort / sort - base . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . 
<nl> <nl> - " use strict " ; <nl> - <nl> const kArraySize = 4000 ; <nl> let template_array = [ ] ; <nl> <nl> deleted file mode 100644 <nl> index bfd4038483e . . 00000000000 <nl> mmm a / test / mjsunit / array - sort - fast - call - builtin . js <nl> ppp / dev / null <nl> <nl> - / / Copyright 2018 the V8 project authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE file . <nl> - <nl> - / / Exercises the check that determines whether to use the <nl> - / / " FastCallFunction_ " stub when calling the comparison function . <nl> - <nl> - ( function TestClassConstructorAsCmpFn ( ) { <nl> - class FooBar { } ; <nl> - assertThrows ( ( ) = > [ 1 , 2 ] . sort ( FooBar ) ) ; <nl> - } ) ( ) ; <nl> - <nl> - <nl> - const globalThis = this ; <nl> - ( function TestGlobalProxyIsSetAsReceiverWhenSloppy ( ) { <nl> - [ 1 , 2 ] . sort ( ( a , b ) = > { <nl> - assertSame ( globalThis , this ) ; <nl> - return a - b ; <nl> - } ) ; <nl> - } ) ( ) ; <nl> - <nl> - <nl> - ( function TestReceiverIsUndefinedWhenStrict ( ) { <nl> - " use strict " ; <nl> - <nl> - [ 1 , 2 ] . sort ( ( a , b ) = > { <nl> - assertSame ( undefined , this ) ; <nl> - return a - b ; <nl> - } ) ; <nl> - } ) ( ) ; <nl> - <nl> - <nl> - ( function TestBoundFunctionAsCmpFn ( ) { <nl> - const object = { foo : " bar " } ; <nl> - <nl> - function cmpfn ( a , b ) { <nl> - assertSame ( this , object ) ; <nl> - assertSame ( this . foo , " bar " ) ; <nl> - return a - b ; <nl> - } ; <nl> - <nl> - const bound_cmpfn = cmpfn . bind ( object ) ; <nl> - [ 1 , 2 ] . sort ( bound_cmpfn ) ; <nl> - } ) ( ) ; <nl> mmm a / third_party / v8 / builtins / array - sort . tq <nl> ppp b / third_party / v8 / builtins / array - sort . tq <nl> module array { <nl> return v ; <nl> } <nl> <nl> - builtin FastSortCompareUserFn ( <nl> - context : Context , comparefn : Object , x : Object , y : Object ) : Number { <nl> - assert ( comparefn ! = Undefined ) ; <nl> - const cmpfn : Callable = UnsafeCast < Callable > ( comparefn ) ; <nl> - <nl> - / / a . Let v be ? ToNumber ( ? Call ( comparefn , undefined , x , y ) ) . <nl> - const v : Number = ToNumber_Inline ( context , FastCall ( context , cmpfn , x , y ) ) ; <nl> - <nl> - / / b . If v is NaN , return + 0 . <nl> - if ( NumberIsNaN ( v ) ) return 0 ; <nl> - <nl> - / / c . return v . <nl> - return v ; <nl> - } <nl> - <nl> builtin CanUseSameAccessor < ElementsAccessor : type > ( <nl> context : Context , receiver : JSReceiver , initialReceiverMap : Object , <nl> initialReceiverLength : Number ) : Boolean { <nl> module array { <nl> CanUseSameAccessor < GenericElementsAccessor > ; <nl> } <nl> <nl> - / / If no comparison function was provided , the default lexicographic compare <nl> - / / is used . Otherwise we try to use a faster JS call by eliding some checks . <nl> - macro InitializeSortCompareFn ( sortState : FixedArray , comparefnObj : Object ) { <nl> - if ( comparefnObj = = Undefined ) { <nl> - sortState [ kSortComparePtrIdx ] = SortCompareDefault ; <nl> - return ; <nl> - } <nl> - <nl> - assert ( TaggedIsNotSmi ( comparefnObj ) ) ; <nl> - assert ( IsCallable ( UnsafeCast < HeapObject > ( comparefnObj ) ) ) ; <nl> - <nl> - sortState [ kSortComparePtrIdx ] = <nl> - BranchIfCanUseFastCallFunction ( <nl> - UnsafeCast < HeapObject > ( comparefnObj ) , 2 ) ? 
<nl> - FastSortCompareUserFn : <nl> - SortCompareUserFn ; <nl> - } <nl> - <nl> macro ArrayTimSortImpl ( context : Context , sortState : FixedArray , length : Smi ) <nl> labels Bailout { <nl> InitializeSortState ( sortState ) ; <nl> module array { <nl> let map : Map = obj . map ; <nl> <nl> const sortState : FixedArray = AllocateZeroedFixedArray ( kSortStateSize ) ; <nl> - InitializeSortCompareFn ( sortState , comparefnObj ) ; <nl> <nl> sortState [ kReceiverIdx ] = obj ; <nl> sortState [ kUserCmpFnIdx ] = comparefnObj ; <nl> + sortState [ kSortComparePtrIdx ] = <nl> + comparefnObj ! = Undefined ? SortCompareUserFn : SortCompareDefault ; <nl> sortState [ kInitialReceiverMapIdx ] = map ; <nl> sortState [ kBailoutStatusIdx ] = kSuccess ; <nl> <nl>
Revert "[builtins] Add FastCallFunction builtin that elides some checks"
v8/v8
74320a1b9211dbcf93d16f90afdc38655a99a68c
2018-09-14T11:09:08Z
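What the revert removes is the predicate deciding when the fast call stub was safe; the deleted mjsunit test spells out the cases it guarded (class constructors must throw when called, sloppy-mode callbacks see the global proxy as receiver, strict ones see undefined, bound functions keep their bound receiver). Restated as plain C++ over a stand-in struct, the deleted BranchIfCanUseFastCallFunction logic was:

// Plain restatement of the deleted CSA predicate; FunctionInfo is a
// stand-in for SharedFunctionInfo, not a V8 type.
struct FunctionInfo {
  bool is_js_function;
  bool is_class_constructor;
  bool is_native;
  bool is_strict;
  int formal_parameter_count;
};

bool CanUseFastCallFunction(const FunctionInfo& f, int actual_parameter_count) {
  if (!f.is_js_function) return false;
  if (f.is_class_constructor) return false;        // must throw, not be called
  if (!f.is_native && !f.is_strict) return false;  // receiver needs conversion
  // No arguments adaption on the fast path.
  return actual_parameter_count == f.formal_parameter_count;
}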
mmm a / BUILD . gn <nl> ppp b / BUILD . gn <nl> config ( " internal_config " ) { <nl> <nl> include_dirs = [ " . " ] <nl> <nl> - if ( component_mode = = " shared_library " ) { <nl> + if ( is_component_build ) { <nl> defines = [ <nl> " V8_SHARED " , <nl> " BUILDING_V8_SHARED " , <nl> if ( current_toolchain = = snapshot_toolchain ) { <nl> # Public targets <nl> # <nl> <nl> - if ( component_mode = = " shared_library " ) { <nl> + if ( is_component_build ) { <nl> component ( " v8 " ) { <nl> sources = [ <nl> " src / v8dll - main . cc " , <nl> if ( component_mode = = " shared_library " ) { <nl> if ( v8_use_snapshot & & v8_use_external_startup_data ) { <nl> deps = [ <nl> " : v8_base " , <nl> + ] <nl> + public_deps = [ <nl> " : v8_external_snapshot " , <nl> ] <nl> } else if ( v8_use_snapshot ) { <nl> if ( ( current_toolchain = = host_toolchain & & v8_toolset_for_d8 = = " host " ) | | <nl> sources + = [ " src / d8 - windows . cc " ] <nl> } <nl> <nl> - if ( component_mode ! = " shared_library " ) { <nl> + if ( ! is_component_build ) { <nl> sources + = [ <nl> " src / d8 - debug . cc " , <nl> " $ target_gen_dir / d8 - js . cc " , <nl>
Make v8 snapshot public in component build.
v8/v8
4cf578a1ea7e9d69412de8b957d6b8cd3329730e
2015-06-09T08:36:16Z
new file mode 100644 <nl> index 000000000000 . . 3492c33151e4 <nl> mmm / dev / null <nl> ppp b / include / swift / AST / TypeDifferenceVisitor . h <nl> <nl> + / / = = = mmm TypeDifferenceVisitor . h - Visitor for pairs of types mmm * - C + + - * - = = = / / <nl> + / / <nl> + / / This source file is part of the Swift . org open source project <nl> + / / <nl> + / / Copyright ( c ) 2014 - 2017 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See https : / / swift . org / LICENSE . txt for license information <nl> + / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + / / <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / <nl> + / / This file defines TypeDifferenceVisitor , a visitor which finds <nl> + / / differences between canonical types . <nl> + / / <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + <nl> + # ifndef SWIFT_AST_TYPEDIFFERENCEVISITOR_H <nl> + # define SWIFT_AST_TYPEDIFFERENCEVISITOR_H <nl> + <nl> + # include " swift / AST / SILLayout . h " <nl> + # include " swift / AST / Types . h " <nl> + <nl> + namespace swift { <nl> + <nl> + / / TODO : maybe have a version of this that works on non - canonical types <nl> + <nl> + template < class Impl , class RetTy , class . . . Args > <nl> + class CanTypePairVisitor { <nl> + public : <nl> + / / Provide default implementations that chain to the base class . <nl> + # define ABSTRACT_TYPE ( CLASS , PARENT ) \ <nl> + RetTy visit # # CLASS # # Type ( Can # # CLASS # # Type type1 , \ <nl> + Can # # CLASS # # Type type2 , \ <nl> + Args . . . args ) { \ <nl> + return static_cast < Impl & > ( * this ) \ <nl> + . visit # # PARENT ( type1 , type2 , std : : forward < Args > ( args ) . . . ) ; \ <nl> + } <nl> + # define TYPE ( CLASS , PARENT ) ABSTRACT_TYPE ( CLASS , PARENT ) <nl> + # define ABSTRACT_SUGARED_TYPE ( CLASS , PARENT ) <nl> + # define SUGARED_TYPE ( CLASS , PARENT ) <nl> + / / Don ' t allow unchecked types by default , but allow visitors to opt - in to <nl> + / / handling them . <nl> + # define UNCHECKED_TYPE ( CLASS , PARENT ) \ <nl> + RetTy visit # # CLASS # # Type ( Can # # CLASS # # Type type1 , \ <nl> + Can # # CLASS # # Type type2 , \ <nl> + Args . . . args ) { \ <nl> + llvm_unreachable ( " unchecked type " ) ; \ <nl> + } <nl> + # include " swift / AST / TypeNodes . def " <nl> + } ; <nl> + <nl> + / / / A CRTP class for finding differences between types . <nl> + / / / <nl> + / / / The visitors all short - circuit as soon as one returns true . <nl> + / / / <nl> + / / / visitDifferentTypes ( ) <nl> + template < class Impl > <nl> + class CanTypeDifferenceVisitor : public CanTypePairVisitor < Impl , bool > { <nl> + protected : <nl> + Impl & asImpl ( ) { return static_cast < Impl & > ( * this ) ; } <nl> + public : <nl> + / / / Two component types differ . <nl> + bool visitDifferentComponentTypes ( CanType type1 , CanType type2 ) { <nl> + asImpl ( ) . visitDifferentTypes ( type1 , type2 ) ; <nl> + <nl> + / / Short - circuit by default . <nl> + return true ; <nl> + } <nl> + <nl> + / / / Two types differ in non - type structure , like a convention or a label . <nl> + / / / Generally , you can ' t usefully recover when this is called ; it always <nl> + / / / needs to return true . <nl> + bool visitDifferentTypeStructure ( CanType type1 , CanType type2 ) { <nl> + asImpl ( ) . 
visitDifferentTypes ( type1 , type2 ) ; <nl> + return true ; <nl> + } <nl> + <nl> + / / / Inform the subclass that a difference was detected . <nl> + void visitDifferentTypes ( CanType type1 , CanType type2 ) { } <nl> + <nl> + bool visit ( CanType type1 , CanType type2 ) { <nl> + if ( type1 = = type2 ) <nl> + return false ; <nl> + <nl> + if ( type1 - > getKind ( ) ! = type2 - > getKind ( ) ) <nl> + return asImpl ( ) . visitDifferentComponentTypes ( type1 , type2 ) ; <nl> + <nl> + switch ( type1 - > getKind ( ) ) { <nl> + # define SUGARED_TYPE ( CLASS , PARENT ) \ <nl> + case TypeKind : : CLASS : <nl> + # define TYPE ( CLASS , PARENT ) <nl> + # include " swift / AST / TypeNodes . def " <nl> + llvm_unreachable ( " non - canonical type " ) ; <nl> + <nl> + # define SUGARED_TYPE ( CLASS , PARENT ) <nl> + # define TYPE ( CLASS , PARENT ) \ <nl> + case TypeKind : : CLASS : \ <nl> + return asImpl ( ) . visit # # CLASS # # Type ( cast < CLASS # # Type > ( type1 ) , \ <nl> + cast < CLASS # # Type > ( type2 ) ) ; <nl> + # include " swift / AST / TypeNodes . def " <nl> + } <nl> + llvm_unreachable ( " Not reachable , all cases handled " ) ; <nl> + } <nl> + <nl> + / / In the type - specific visitors , we know that we have <nl> + / / non - identical types . <nl> + <nl> + / / These types are singleton and can ' t actually differ . <nl> + # define SINGLETON_TYPE ( TYPE ) \ <nl> + bool visit # # TYPE ( Can # # TYPE type1 , Can # # TYPE type2 ) { \ <nl> + llvm_unreachable ( " singleton type that wasn ' t identical " ) ; \ <nl> + } <nl> + SINGLETON_TYPE ( BuiltinIntegerLiteralType ) <nl> + SINGLETON_TYPE ( BuiltinRawPointerType ) <nl> + SINGLETON_TYPE ( BuiltinNativeObjectType ) <nl> + SINGLETON_TYPE ( BuiltinBridgeObjectType ) <nl> + SINGLETON_TYPE ( BuiltinUnsafeValueBufferType ) <nl> + SINGLETON_TYPE ( SILTokenType ) <nl> + # undef SINGLETON_TYPE <nl> + <nl> + bool visitBuiltinIntegerType ( CanBuiltinIntegerType type1 , <nl> + CanBuiltinIntegerType type2 ) { <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + } <nl> + <nl> + bool visitBuiltinFloatType ( CanBuiltinFloatType type1 , <nl> + CanBuiltinFloatType type2 ) { <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + } <nl> + <nl> + bool visitBuiltinVectorType ( CanBuiltinVectorType type1 , <nl> + CanBuiltinVectorType type2 ) { <nl> + if ( type1 - > getNumElements ( ) ! = type2 - > getNumElements ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + return asImpl ( ) . visit ( type1 . getElementType ( ) , type2 . getElementType ( ) ) ; <nl> + } <nl> + <nl> + bool visitTupleType ( CanTupleType type1 , CanTupleType type2 ) { <nl> + return visitComponentArray ( type1 , type2 , <nl> + type1 - > getElements ( ) , type2 - > getElements ( ) ) ; <nl> + } <nl> + <nl> + bool visitComponent ( CanType type1 , CanType type2 , <nl> + const TupleTypeElt & elt1 , const TupleTypeElt & elt2 ) { <nl> + if ( elt1 . getName ( ) ! = elt2 . getName ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + return asImpl ( ) . visit ( CanType ( elt1 . getType ( ) ) , CanType ( elt2 . getType ( ) ) ) ; <nl> + } <nl> + <nl> + bool visitReferenceStorageType ( CanReferenceStorageType type1 , <nl> + CanReferenceStorageType type2 ) { <nl> + return asImpl ( ) . visit ( type1 . getReferentType ( ) , type2 . 
getReferentType ( ) ) ; <nl> + } <nl> + <nl> + bool visitUnboundGenericType ( CanUnboundGenericType type1 , <nl> + CanUnboundGenericType type2 ) { <nl> + assert ( type1 - > getDecl ( ) ! = type2 - > getDecl ( ) ) ; <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + } <nl> + <nl> + bool visitNominalType ( CanNominalType type1 , CanNominalType type2 ) { <nl> + assert ( type1 - > getDecl ( ) ! = type2 - > getDecl ( ) ) ; <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + } <nl> + <nl> + bool visitBoundGenericType ( CanBoundGenericType type1 , <nl> + CanBoundGenericType type2 ) { <nl> + if ( type1 - > getDecl ( ) ! = type2 - > getDecl ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + <nl> + return visitComponentArray ( type1 , type2 , <nl> + type1 . getGenericArgs ( ) , type2 . getGenericArgs ( ) ) ; <nl> + } <nl> + <nl> + bool visitAnyMetatypeType ( CanAnyMetatypeType type1 , <nl> + CanAnyMetatypeType type2 ) { <nl> + if ( type1 - > hasRepresentation ( ) ! = type2 - > hasRepresentation ( ) | | <nl> + ( type1 - > hasRepresentation ( ) & & <nl> + type1 - > getRepresentation ( ) ! = type2 - > getRepresentation ( ) ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + <nl> + return asImpl ( ) . visit ( type1 . getInstanceType ( ) , type2 . getInstanceType ( ) ) ; <nl> + } <nl> + <nl> + bool visitModuleType ( CanModuleType type1 , CanModuleType type2 ) { <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + } <nl> + <nl> + bool visitDynamicSelfType ( CanDynamicSelfType type1 , <nl> + CanDynamicSelfType type2 ) { <nl> + return asImpl ( ) . visit ( type1 . getSelfType ( ) , type2 . getSelfType ( ) ) ; <nl> + } <nl> + <nl> + bool visitSubstitutableType ( CanSubstitutableType type1 , <nl> + CanSubstitutableType type2 ) { <nl> + return asImpl ( ) . visitDifferentComponentTypes ( type1 , type2 ) ; <nl> + } <nl> + <nl> + bool visitDependentMemberType ( CanDependentMemberType type1 , <nl> + CanDependentMemberType type2 ) { <nl> + if ( type1 - > getName ( ) ! = type2 - > getName ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + return asImpl ( ) . visit ( type1 . getBase ( ) , type2 . getBase ( ) ) ; <nl> + } <nl> + <nl> + bool visitGenericFunctionType ( CanGenericFunctionType type1 , <nl> + CanGenericFunctionType type2 ) { <nl> + if ( type1 . getGenericSignature ( ) ! = type2 . getGenericSignature ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + <nl> + return asImpl ( ) . visitAnyFunctionType ( type1 , type2 ) ; <nl> + } <nl> + <nl> + bool visitAnyFunctionType ( CanAnyFunctionType type1 , <nl> + CanAnyFunctionType type2 ) { <nl> + if ( type1 - > getExtInfo ( ) ! = type2 - > getExtInfo ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + <nl> + if ( asImpl ( ) . visit ( type1 . getResult ( ) , type2 . getResult ( ) ) ) <nl> + return true ; <nl> + <nl> + return visitComponentArray ( type1 , type2 , <nl> + type1 . getParams ( ) , type2 . getParams ( ) ) ; <nl> + } <nl> + <nl> + bool visitComponent ( CanType type1 , CanType type2 , <nl> + AnyFunctionType : : CanParam param1 , <nl> + AnyFunctionType : : CanParam param2 ) { <nl> + if ( param1 . getLabel ( ) ! = param2 . getLabel ( ) | | <nl> + param1 . getParameterFlags ( ) ! = param2 . getParameterFlags ( ) ) <nl> + return asImpl ( ) . 
visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + return asImpl ( ) . visit ( param1 . getPlainType ( ) , param2 . getPlainType ( ) ) ; <nl> + } <nl> + <nl> + bool visitSILFunctionType ( CanSILFunctionType type1 , <nl> + CanSILFunctionType type2 ) { <nl> + return ( asImpl ( ) . visitSILFunctionTypeStructure ( type1 , type2 ) | | <nl> + asImpl ( ) . visitSILFunctionTypeSubstitutions ( type1 , type2 ) | | <nl> + asImpl ( ) . visitSILFunctionTypeComponents ( type1 , type2 ) ) ; <nl> + } <nl> + <nl> + bool visitSILFunctionTypeStructure ( CanSILFunctionType type1 , <nl> + CanSILFunctionType type2 ) { <nl> + if ( type1 - > getExtInfo ( ) ! = type2 - > getExtInfo ( ) | | <nl> + type1 - > getCoroutineKind ( ) ! = type2 - > getCoroutineKind ( ) | | <nl> + type1 - > getInvocationGenericSignature ( ) <nl> + ! = type2 - > getInvocationGenericSignature ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + return false ; <nl> + } <nl> + <nl> + bool visitSILFunctionTypeSubstitutions ( CanSILFunctionType type1 , <nl> + CanSILFunctionType type2 ) { <nl> + return asImpl ( ) . visitOptSubstitutionMap ( type1 , type2 , <nl> + type1 - > getPatternSubstitutions ( ) , <nl> + type2 - > getPatternSubstitutions ( ) ) <nl> + | | asImpl ( ) . visitOptSubstitutionMap ( type1 , type2 , <nl> + type1 - > getInvocationSubstitutions ( ) , <nl> + type2 - > getInvocationSubstitutions ( ) ) ; <nl> + } <nl> + <nl> + bool visitSILFunctionTypeComponents ( CanSILFunctionType type1 , <nl> + CanSILFunctionType type2 ) { <nl> + return visitComponentArray ( type1 , type2 , <nl> + type1 - > getParameters ( ) , type2 - > getParameters ( ) ) <nl> + | | visitComponentArray ( type1 , type2 , <nl> + type1 - > getResults ( ) , type2 - > getResults ( ) ) <nl> + | | visitComponentArray ( type1 , type2 , <nl> + type1 - > getYields ( ) , type2 - > getYields ( ) ) ; <nl> + } <nl> + <nl> + bool visitComponent ( CanType type1 , CanType type2 , <nl> + SILParameterInfo param1 , SILParameterInfo param2 ) { <nl> + if ( param1 . getConvention ( ) ! = param2 . getConvention ( ) | | <nl> + param1 . getDifferentiability ( ) ! = param2 . getDifferentiability ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + <nl> + return asImpl ( ) . visit ( param1 . getInterfaceType ( ) , <nl> + param2 . getInterfaceType ( ) ) ; <nl> + } <nl> + <nl> + bool visitComponent ( CanType type1 , CanType type2 , <nl> + SILResultInfo result1 , SILResultInfo result2 ) { <nl> + if ( result1 . getConvention ( ) ! = result2 . getConvention ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + <nl> + return asImpl ( ) . visit ( result1 . getInterfaceType ( ) , <nl> + result2 . getInterfaceType ( ) ) ; <nl> + } <nl> + <nl> + bool visitComponent ( CanType type1 , CanType type2 , <nl> + SILYieldInfo yield1 , SILYieldInfo yield2 ) { <nl> + if ( yield1 . getConvention ( ) ! = yield2 . getConvention ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + <nl> + return asImpl ( ) . visit ( yield1 . getInterfaceType ( ) , <nl> + yield2 . getInterfaceType ( ) ) ; <nl> + } <nl> + <nl> + bool visitSILBoxType ( CanSILBoxType type1 , CanSILBoxType type2 ) { <nl> + return ( asImpl ( ) . visitSILLayout ( type1 , type2 , <nl> + type1 - > getLayout ( ) , type2 - > getLayout ( ) ) | | <nl> + asImpl ( ) . 
visitOptSubstitutionMap ( type1 , type2 , <nl> + type1 - > getSubstitutions ( ) , <nl> + type2 - > getSubstitutions ( ) ) ) ; <nl> + } <nl> + <nl> + bool visitSILLayout ( CanType type1 , CanType type2 , <nl> + SILLayout * layout1 , SILLayout * layout2 ) { <nl> + if ( layout1 - > getGenericSignature ( ) ! = layout2 - > getGenericSignature ( ) | | <nl> + layout1 - > isMutable ( ) ! = layout2 - > isMutable ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + <nl> + return visitComponentArray ( type1 , type2 , <nl> + layout1 - > getFields ( ) , layout2 - > getFields ( ) ) ; <nl> + } <nl> + <nl> + bool visitComponent ( CanType type1 , CanType type2 , <nl> + const SILField & field1 , const SILField & field2 ) { <nl> + if ( field1 . isMutable ( ) ! = field2 . isMutable ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + return asImpl ( ) . visit ( field1 . getLoweredType ( ) , field2 . getLoweredType ( ) ) ; <nl> + } <nl> + <nl> + bool visitSILBlockStorageType ( CanSILBlockStorageType type1 , <nl> + CanSILBlockStorageType type2 ) { <nl> + return asImpl ( ) . visit ( type1 - > getCaptureType ( ) , type2 - > getCaptureType ( ) ) ; <nl> + } <nl> + <nl> + bool visitProtocolCompositionType ( CanProtocolCompositionType type1 , <nl> + CanProtocolCompositionType type2 ) { <nl> + return visitComponentArray ( type1 , type2 , <nl> + type1 - > getMembers ( ) , type2 - > getMembers ( ) ) ; <nl> + } <nl> + <nl> + bool visitLValueType ( CanLValueType type1 , CanLValueType type2 ) { <nl> + return asImpl ( ) . visit ( type1 . getObjectType ( ) , type2 . getObjectType ( ) ) ; <nl> + } <nl> + <nl> + bool visitInOutType ( CanInOutType type1 , CanInOutType type2 ) { <nl> + return asImpl ( ) . visit ( type1 . getObjectType ( ) , type2 . getObjectType ( ) ) ; <nl> + } <nl> + <nl> + bool visitErrorType ( CanErrorType type1 , CanErrorType type2 ) { <nl> + return false ; <nl> + } <nl> + <nl> + bool visitOptSubstitutionMap ( CanType type1 , CanType type2 , <nl> + SubstitutionMap subs1 , SubstitutionMap subs2 ) { <nl> + if ( ( bool ) subs1 ! = ( bool ) subs2 ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + if ( subs1 ) <nl> + return asImpl ( ) . visitSubstitutionMap ( type1 , type2 , subs1 , subs2 ) ; <nl> + return false ; <nl> + } <nl> + <nl> + bool visitSubstitutionMap ( CanType type1 , CanType type2 , <nl> + SubstitutionMap subs1 , SubstitutionMap subs2 ) { <nl> + if ( CanGenericSignature ( subs1 . getGenericSignature ( ) ) <nl> + ! = CanGenericSignature ( subs2 . getGenericSignature ( ) ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + <nl> + return visitComponentArray ( type1 , type2 , <nl> + subs1 . getReplacementTypes ( ) , <nl> + subs2 . getReplacementTypes ( ) ) ; <nl> + } <nl> + <nl> + private : <nl> + bool visitComponent ( CanType type1 , CanType type2 , <nl> + Type componentType1 , Type componentType2 ) { <nl> + return asImpl ( ) . visit ( CanType ( componentType1 ) , CanType ( componentType2 ) ) ; <nl> + } <nl> + <nl> + template < class T > <nl> + bool visitComponentArray ( CanType type1 , CanType type2 , T array1 , T array2 ) { <nl> + if ( array1 . size ( ) ! = array2 . size ( ) ) <nl> + return asImpl ( ) . visitDifferentTypeStructure ( type1 , type2 ) ; <nl> + <nl> + for ( auto i : indices ( array1 ) ) { <nl> + if ( asImpl ( ) . 
visitComponent ( type1 , type2 , array1 [ i ] , array2 [ i ] ) ) <nl> + return true ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + } ; <nl> + <nl> + } <nl> + <nl> + # endif <nl> \ No newline at end of file <nl> mmm a / include / swift / AST / Types . h <nl> ppp b / include / swift / AST / Types . h <nl> class SILParameterInfo { <nl> return SILParameterInfo ( type , getConvention ( ) , getDifferentiability ( ) ) ; <nl> } <nl> <nl> + / / / Return a version of this parameter info with the convention replaced . <nl> + SILParameterInfo getWithConvention ( ParameterConvention c ) const { <nl> + return SILParameterInfo ( getInterfaceType ( ) , c , getDifferentiability ( ) ) ; <nl> + } <nl> + <nl> / / / Transform this SILParameterInfo by applying the user - provided <nl> / / / function to its type . <nl> / / / <nl> class SILParameterInfo { <nl> - > getCanonicalType ( ) ) ; <nl> } <nl> <nl> + / / / Treating this parameter info as a component of the given function <nl> + / / / type , apply any substitutions from the function type to it to <nl> + / / / get a substituted version of it , as you would get from <nl> + / / / SILFunctionType : : getUnsubstitutedType . <nl> + SILParameterInfo getUnsubstituted ( SILModule & M , <nl> + const SILFunctionType * fnType ) const { <nl> + return getWithInterfaceType ( getArgumentType ( M , fnType ) ) ; <nl> + } <nl> + <nl> void profile ( llvm : : FoldingSetNodeID & id ) { <nl> id . AddPointer ( getInterfaceType ( ) . getPointer ( ) ) ; <nl> id . AddInteger ( ( unsigned ) getConvention ( ) ) ; <nl> class SILResultInfo { <nl> return SILResultInfo ( type , getConvention ( ) ) ; <nl> } <nl> <nl> + / / / Return a version of this result info with the convention replaced . <nl> + SILResultInfo getWithConvention ( ResultConvention c ) const { <nl> + return SILResultInfo ( getInterfaceType ( ) , c ) ; <nl> + } <nl> + <nl> / / Does this result convention require indirect storage ? This reflects a <nl> / / SILFunctionType ' s formal ( immutable ) conventions , as opposed to the <nl> / / transient SIL conventions that dictate SILValue types . <nl> class SILResultInfo { <nl> - > getCanonicalType ( ) ) ; <nl> } <nl> <nl> + / / / Treating this result info as a component of the given function <nl> + / / / type , apply any substitutions from the function type to it to <nl> + / / / get a substituted version of it , as you would get from <nl> + / / / SILFunctionType : : getUnsubstitutedType . <nl> + SILResultInfo getUnsubstituted ( SILModule & M , <nl> + const SILFunctionType * fnType ) const { <nl> + return getWithInterfaceType ( getReturnValueType ( M , fnType ) ) ; <nl> + } <nl> + <nl> void profile ( llvm : : FoldingSetNodeID & id ) { <nl> id . AddPointer ( TypeAndConvention . getOpaqueValue ( ) ) ; <nl> } <nl> class SILYieldInfo : public SILParameterInfo { <nl> return SILYieldInfo ( type , getConvention ( ) ) ; <nl> } <nl> <nl> + / / / Return a version of this yield info with the convention replaced . 
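The getWithConvention / getUnsubstituted pair added to SILParameterInfo and SILResultInfo above, and to SILYieldInfo just below, is designed to compose when a pass rebuilds a component list against its enclosing function type. A minimal sketch of that composition under stated assumptions: demoteOwnedParams is a hypothetical helper invented for illustration, not part of this patch, and the usual includes (swift/AST/Types.h, llvm/ADT/SmallVector.h) are assumed.

  // Hypothetical pass-local helper: strip the enclosing type's
  // pattern substitutions out of each parameter (the same component
  // view SILFunctionType::getUnsubstitutedType produces), then
  // demote owned conventions to guaranteed.
  static void demoteOwnedParams(SILModule &M, CanSILFunctionType fnTy,
                                SmallVectorImpl<SILParameterInfo> &out) {
    for (SILParameterInfo param : fnTy->getParameters()) {
      param = param.getUnsubstituted(M, fnTy);
      if (param.getConvention() == ParameterConvention::Direct_Owned)
        param = param.getWithConvention(
            ParameterConvention::Direct_Guaranteed);
      out.push_back(param);
    }
  }

createSpecializedType in lib/SILOptimizer/Utils/Generics.cpp is rewritten to exactly this shape later in this patch.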
<nl> + SILYieldInfo getWithConvention ( YieldConvention c ) const { <nl> + return SILYieldInfo ( getInterfaceType ( ) , c ) ; <nl> + } <nl> + <nl> template < typename F > <nl> SILYieldInfo map ( const F & fn ) const { <nl> return getWithInterfaceType ( fn ( getInterfaceType ( ) ) ) ; <nl> class SILYieldInfo : public SILParameterInfo { <nl> return getWithInterfaceType ( getInterfaceType ( ) - > mapTypeOutOfContext ( ) <nl> - > getCanonicalType ( ) ) ; <nl> } <nl> + <nl> + CanType getYieldValueType ( SILModule & M , <nl> + const SILFunctionType * fnType ) const { <nl> + return getArgumentType ( M , fnType ) ; <nl> + } <nl> + <nl> + / / / Treating this yield info as a component of the given function <nl> + / / / type , apply any substitutions from the function type to it to <nl> + / / / get a substituted version of it , as you would get from <nl> + / / / SILFunctionType : : getUnsubstitutedType . <nl> + SILYieldInfo getUnsubstituted ( SILModule & M , <nl> + const SILFunctionType * fnType ) const { <nl> + return getWithInterfaceType ( getYieldValueType ( M , fnType ) ) ; <nl> + } <nl> } ; <nl> <nl> / / / SILCoroutineKind - What kind of coroutine is this SILFunction ? <nl> class SILFunctionType final : public TypeBase , public llvm : : FoldingSetNode , <nl> CanSILFunctionType <nl> withPatternSubstitutions ( SubstitutionMap subs ) const ; <nl> <nl> + / / / Create a SILFunctionType with the same structure as this one , <nl> + / / / but replacing the invocation generic signature and pattern <nl> + / / / substitutions . This type must either be polymorphic or have <nl> + / / / pattern substitutions , and the substitution signature must <nl> + / / / match ` getSubstGenericSignature ( ) ` . <nl> + CanSILFunctionType <nl> + withPatternSpecialization ( CanGenericSignature sign , <nl> + SubstitutionMap subs , <nl> + ProtocolConformanceRef witnessConformance = <nl> + ProtocolConformanceRef ( ) ) const ; <nl> + <nl> class ABICompatibilityCheckResult { <nl> friend class SILFunctionType ; <nl> <nl> mmm a / include / swift / SIL / SILFunction . h <nl> ppp b / include / swift / SIL / SILFunction . h <nl> class SILFunction <nl> return SILFunctionConventions ( LoweredType , getModule ( ) ) ; <nl> } <nl> <nl> + SILFunctionConventions getConventionsInContext ( ) const { <nl> + auto fnType = getLoweredFunctionTypeInContext ( getTypeExpansionContext ( ) ) ; <nl> + return SILFunctionConventions ( fnType , getModule ( ) ) ; <nl> + } <nl> + <nl> SILProfiler * getProfiler ( ) const { return Profiler ; } <nl> <nl> SILFunction * getDynamicallyReplacedFunction ( ) const { <nl> mmm a / include / swift / SIL / SILType . h <nl> ppp b / include / swift / SIL / SILType . h <nl> class SILType { <nl> bool hasArchetype ( ) const { <nl> return getASTType ( ) - > hasArchetype ( ) ; <nl> } <nl> + <nl> + / / / True if the type involves any opaque archetypes . <nl> + bool hasOpaqueArchetype ( ) const { <nl> + return getASTType ( ) - > hasOpaqueArchetype ( ) ; <nl> + } <nl> <nl> / / / Returns the ASTContext for the referenced Swift type . <nl> ASTContext & getASTContext ( ) const { <nl> mmm a / lib / AST / ASTMangler . cpp <nl> ppp b / lib / AST / ASTMangler . cpp <nl> void ASTMangler : : appendImplFunctionType ( SILFunctionType * fn ) { <nl> OpArgs . push_back ( ' G ' ) ; <nl> break ; <nl> } <nl> + <nl> + auto outerGenericSig = CurGenericSignature ; <nl> + CurGenericSignature = fn - > getSubstGenericSignature ( ) ; <nl> <nl> / / Mangle the parameters . 
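The mangler code below threads CurGenericSignature by hand, saving the outer signature and restoring it at several explicit points, which is easy to break when a new exit path is added. llvm::SaveAndRestore from llvm/Support/SaveAndRestore.h expresses the same discipline scope-safely; a hedged sketch of that alternative, not what the patch itself does:

  #include "llvm/Support/SaveAndRestore.h"

  // Mangle against an inner signature; the guard writes the saved
  // value back on every exit path from the scope.
  void withInnerSignature(CanGenericSignature &currentSig,
                          CanGenericSignature innerSig) {
    llvm::SaveAndRestore<CanGenericSignature> guard(currentSig, innerSig);
    // ... append component types against innerSig here ...
  } // currentSig reverts to its previous value here.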
<nl> for ( auto param : fn - > getParameters ( ) ) { <nl> void ASTMangler : : appendImplFunctionType ( SILFunctionType * fn ) { <nl> OpArgs . push_back ( getResultConvention ( error . getConvention ( ) ) ) ; <nl> appendType ( error . getInterfaceType ( ) ) ; <nl> } <nl> + <nl> if ( auto sig = fn - > getInvocationGenericSignature ( ) ) { <nl> appendGenericSignature ( sig ) ; <nl> + CurGenericSignature = outerGenericSig ; <nl> } <nl> if ( auto subs = fn - > getInvocationSubstitutions ( ) ) { <nl> appendFlatGenericArgs ( subs ) ; <nl> void ASTMangler : : appendImplFunctionType ( SILFunctionType * fn ) { <nl> } <nl> if ( auto subs = fn - > getPatternSubstitutions ( ) ) { <nl> appendGenericSignature ( subs . getGenericSignature ( ) ) ; <nl> + CurGenericSignature = <nl> + fn - > getInvocationGenericSignature ( ) <nl> + ? fn - > getInvocationGenericSignature ( ) <nl> + : outerGenericSig ; <nl> appendFlatGenericArgs ( subs ) ; <nl> appendRetroactiveConformances ( subs , Mod ) ; <nl> + CurGenericSignature = outerGenericSig ; <nl> } <nl> <nl> OpArgs . push_back ( ' _ ' ) ; <nl> mmm a / lib / AST / ASTPrinter . cpp <nl> ppp b / lib / AST / ASTPrinter . cpp <nl> class TypePrinter : public TypeVisitor < TypePrinter > { <nl> } <nl> sub - > Printer < < " ) - > " ; <nl> <nl> - unsigned totalResults = <nl> - T - > getNumYields ( ) + T - > getNumResults ( ) + unsigned ( T - > hasErrorResult ( ) ) ; <nl> - <nl> - if ( totalResults ! = 1 ) <nl> + bool parenthesizeResults = mustParenthesizeResults ( T ) ; <nl> + if ( parenthesizeResults ) <nl> sub - > Printer < < " ( " ; <nl> <nl> first = true ; <nl> class TypePrinter : public TypeVisitor < TypePrinter > { <nl> T - > getErrorResult ( ) . getInterfaceType ( ) . print ( sub - > Printer , subOptions ) ; <nl> } <nl> <nl> - if ( totalResults ! = 1 ) <nl> + if ( parenthesizeResults ) <nl> sub - > Printer < < " ) " ; <nl> } ( ) ; <nl> <nl> class TypePrinter : public TypeVisitor < TypePrinter > { <nl> } <nl> } <nl> <nl> + static bool mustParenthesizeResults ( SILFunctionType * T ) { <nl> + / / If we don ' t have exactly one result , we must parenthesize . <nl> + unsigned totalResults = <nl> + T - > getNumYields ( ) + T - > getNumResults ( ) + unsigned ( T - > hasErrorResult ( ) ) ; <nl> + if ( totalResults ! = 1 ) <nl> + return true ; <nl> + <nl> + / / If we have substitutions , we must parenthesize if the single <nl> + / / result is a function type . <nl> + if ( ! T - > hasPatternSubstitutions ( ) & & ! T - > hasInvocationSubstitutions ( ) ) <nl> + return false ; <nl> + if ( T - > getNumResults ( ) = = 1 ) <nl> + return isa < SILFunctionType > ( T - > getResults ( ) [ 0 ] . getInterfaceType ( ) ) ; <nl> + if ( T - > getNumYields ( ) = = 1 ) <nl> + return isa < SILFunctionType > ( T - > getYields ( ) [ 0 ] . getInterfaceType ( ) ) ; <nl> + return isa < SILFunctionType > ( T - > getErrorResult ( ) . getInterfaceType ( ) ) ; <nl> + } <nl> + <nl> void visitSILBlockStorageType ( SILBlockStorageType * T ) { <nl> Printer < < " @ block_storage " ; <nl> printWithParensIfNotSimple ( T - > getCaptureType ( ) ) ; <nl> mmm a / lib / AST / Type . cpp <nl> ppp b / lib / AST / Type . cpp <nl> SILFunctionType : : withPatternSubstitutions ( SubstitutionMap subs ) const { <nl> getWitnessMethodConformanceOrInvalid ( ) ) ; <nl> } <nl> <nl> + CanSILFunctionType <nl> + SILFunctionType : : withPatternSpecialization ( CanGenericSignature sig , <nl> + SubstitutionMap subs , <nl> + ProtocolConformanceRef <nl> + witnessConformance ) const { <nl> + assert ( ! 
hasInvocationSubstitutions ( ) ) ; <nl> + subs = subs . getCanonical ( ) ; <nl> + assert ( ! subs | | CanGenericSignature ( subs . getGenericSignature ( ) ) <nl> + = = getSubstGenericSignature ( ) ) ; <nl> + return SILFunctionType : : get ( sig , <nl> + getExtInfo ( ) , getCoroutineKind ( ) , <nl> + getCalleeConvention ( ) , <nl> + getParameters ( ) , getYields ( ) , getResults ( ) , <nl> + getOptionalErrorResult ( ) , <nl> + subs , SubstitutionMap ( ) , <nl> + const_cast < SILFunctionType * > ( this ) - > getASTContext ( ) , <nl> + witnessConformance ) ; <nl> + } <nl> + <nl> SourceLoc swift : : extractNearestSourceLoc ( Type ty ) { <nl> if ( auto nominal = ty - > getAnyNominal ( ) ) <nl> return extractNearestSourceLoc ( nominal ) ; <nl> mmm a / lib / Frontend / ModuleInterfaceBuilder . cpp <nl> ppp b / lib / Frontend / ModuleInterfaceBuilder . cpp <nl> bool ModuleInterfaceBuilder : : collectDepsForSerialization ( <nl> bool ModuleInterfaceBuilder : : buildSwiftModuleInternal ( <nl> StringRef OutPath , bool ShouldSerializeDeps , <nl> std : : unique_ptr < llvm : : MemoryBuffer > * ModuleBuffer ) { <nl> + <nl> + auto outerPrettyStackState = llvm : : SavePrettyStackState ( ) ; <nl> + <nl> bool SubError = false ; <nl> bool RunSuccess = llvm : : CrashRecoveryContext ( ) . RunSafelyOnThread ( [ & ] { <nl> + / / Pretend we ' re on the original thread for pretty - stack - trace purposes . <nl> + auto savedInnerPrettyStackState = llvm : : SavePrettyStackState ( ) ; <nl> + llvm : : RestorePrettyStackState ( outerPrettyStackState ) ; <nl> + SWIFT_DEFER { <nl> + llvm : : RestorePrettyStackState ( savedInnerPrettyStackState ) ; <nl> + } ; <nl> + <nl> / / Note that we don ' t assume cachePath is the same as the Clang <nl> / / module cache path at this point . <nl> if ( ! moduleCachePath . empty ( ) ) <nl> mmm a / lib / SIL / SILFunctionType . cpp <nl> ppp b / lib / SIL / SILFunctionType . cpp <nl> class SubstFunctionTypeCollector { <nl> public : <nl> TypeConverter & TC ; <nl> TypeExpansionContext Expansion ; <nl> + CanGenericSignature GenericSig ; <nl> bool Enabled ; <nl> <nl> SmallVector < GenericTypeParamType * , 4 > substGenericParams ; <nl> class SubstFunctionTypeCollector { <nl> SmallVector < ProtocolConformanceRef , 4 > substConformances ; <nl> <nl> SubstFunctionTypeCollector ( TypeConverter & TC , TypeExpansionContext context , <nl> - bool enabled ) <nl> - : TC ( TC ) , Expansion ( context ) , Enabled ( enabled ) { <nl> + CanGenericSignature genericSig , bool enabled ) <nl> + : TC ( TC ) , Expansion ( context ) , GenericSig ( genericSig ) , Enabled ( enabled ) { <nl> } <nl> SubstFunctionTypeCollector ( const SubstFunctionTypeCollector & ) = delete ; <nl> <nl> class SubstFunctionTypeCollector { <nl> } <nl> <nl> if ( upperBoundSuperclass ) { <nl> + upperBoundSuperclass = upperBoundSuperclass - > mapTypeOutOfContext ( ) ; <nl> substRequirements . push_back ( <nl> Requirement ( RequirementKind : : Superclass , param , upperBoundSuperclass ) ) ; <nl> } <nl> class SubstFunctionTypeCollector { <nl> <nl> auto origContextType = origType . getType ( ) ; <nl> <nl> - / / TODO : If the substituted type is a subclass of the abstraction pattern <nl> - / / type , then bail out . This should only come up when lowering override <nl> - / / types for vtable entries , where we don ' t currently use substituted <nl> - / / function types . <nl> - <nl> + / / If the substituted type is a subclass of the abstraction pattern <nl> + / / type , build substitutions for any type parameters in it . 
This only <nl> + / / comes up when lowering override types for vtable entries . <nl> auto areDifferentClasses = [ ] ( Type a , Type b ) - > bool { <nl> if ( auto dynA = a - > getAs < DynamicSelfType > ( ) ) { <nl> a = dynA - > getSelfType ( ) ; <nl> class SubstFunctionTypeCollector { <nl> return false ; <nl> } ; <nl> <nl> + bool substituteBindingsInSubstType = false ; <nl> if ( areDifferentClasses ( substType , origContextType ) ) { <nl> - return substType ; <nl> + substituteBindingsInSubstType = true ; <nl> } <nl> if ( auto substMeta = dyn_cast < MetatypeType > ( substType ) ) { <nl> if ( auto origMeta = dyn_cast < MetatypeType > ( origContextType ) ) { <nl> if ( areDifferentClasses ( substMeta - > getInstanceType ( ) , <nl> origMeta - > getInstanceType ( ) ) ) { <nl> - return substType ; <nl> + substituteBindingsInSubstType = true ; <nl> } <nl> } <nl> } <nl> + <nl> + CanGenericSignature origSig = origType . getGenericSignature ( ) ; <nl> + if ( substituteBindingsInSubstType ) { <nl> + origContextType = substType ; <nl> + origSig = GenericSig ; <nl> + } <nl> <nl> if ( ! origContextType - > hasTypeParameter ( ) <nl> & & ! origContextType - > hasArchetype ( ) ) { <nl> class SubstFunctionTypeCollector { <nl> <nl> / / Extract structural substitutions . <nl> if ( origContextType - > hasTypeParameter ( ) ) <nl> - origContextType = origType . getGenericSignature ( ) - > getGenericEnvironment ( ) <nl> + origContextType = origSig - > getGenericEnvironment ( ) <nl> - > mapTypeIntoContext ( origContextType ) <nl> - - > getCanonicalType ( origType . getGenericSignature ( ) ) ; <nl> + - > getCanonicalType ( origSig ) ; <nl> <nl> auto result = origContextType <nl> - > substituteBindingsTo ( substType , <nl> static CanSILFunctionType getSILFunctionType ( <nl> if ( ! TC . Context . LangOpts . EnableSubstSILFunctionTypesForFunctionValues ) <nl> return false ; <nl> <nl> + / / We always use substituted function types for coroutines that are <nl> + / / being lowered in the context of another coroutine , which is to say , <nl> + / / for class override thunks . This is required to make the yields <nl> + / / match in abstraction to the base method ' s yields , which is necessary <nl> + / / to make the extracted continuation - function signatures match . <nl> + if ( constant ! = origConstant & & getAsCoroutineAccessor ( constant ) ) <nl> + return true ; <nl> + <nl> / / We don ' t currently use substituted function types for generic function <nl> / / type lowering , though we should for generic methods on classes and <nl> / / protocols . <nl> static CanSILFunctionType getSILFunctionType ( <nl> rep = = SILFunctionTypeRepresentation : : Thin ) ; <nl> } ( ) ; <nl> <nl> - SubstFunctionTypeCollector subst ( TC , expansionContext , <nl> + SubstFunctionTypeCollector subst ( TC , expansionContext , genericSig , <nl> shouldBuildSubstFunctionType ) ; <nl> <nl> / / Destructure the input tuple type . <nl> class SILTypeSubstituter : <nl> / / <nl> / / There are two caveats here . The first is that we haven ' t yet <nl> / / written all the code that would be necessary in order to handle <nl> - / / invocation substitutions everywhere , so we only build those if <nl> - / / there are pattern substitutions on a polymorphic type , which is <nl> - / / something that we only currently generate in narrow cases . <nl> - / / Instead we substitute the generic arguments into the components <nl> - / / and build a type with no invocation signature . <nl> + / / invocation substitutions everywhere , and so we never build those . 
<nl> + / / Instead , we substitute into the pattern substitutions if present , <nl> + / / or the components if not , and build a type with no invocation <nl> + / / signature . As a special case , when substituting a coroutine type , <nl> + / / we build pattern substitutions instead of substituting the <nl> + / / component types in order to preserve the original yield structure , <nl> + / / which factors into the continuation function ABI . <nl> / / <nl> / / The second is that this function is also used when substituting <nl> / / opaque archetypes . In this case , we may need to substitute <nl> class SILTypeSubstituter : <nl> / / Otherwise , we shouldn ' t substitute any components except <nl> / / when substituting opaque archetypes . <nl> <nl> + / / If we ' re doing a generic application , and there are pattern <nl> + / / substitutions , substitute into the pattern substitutions ; or if <nl> + / / it ' s a coroutine , build pattern substitutions ; or else , fall <nl> + / / through to substitute the component types as discussed above . <nl> + if ( isGenericApplication ) { <nl> + if ( patternSubs | | origType - > isCoroutine ( ) ) { <nl> + CanSILFunctionType substType = origType ; <nl> + if ( typeExpansionContext . shouldLookThroughOpaqueTypeArchetypes ( ) ) { <nl> + substType = <nl> + origType - > substituteOpaqueArchetypes ( TC , typeExpansionContext ) ; <nl> + } <nl> + <nl> + SubstitutionMap subs ; <nl> + if ( patternSubs ) { <nl> + subs = substSubstitutions ( patternSubs ) ; <nl> + } else { <nl> + subs = SubstitutionMap : : get ( sig , Subst , Conformances ) ; <nl> + } <nl> + auto witnessConformance = substWitnessConformance ( origType ) ; <nl> + substType = substType - > withPatternSpecialization ( nullptr , subs , <nl> + witnessConformance ) ; <nl> + <nl> + return substType ; <nl> + } <nl> + / / else fall down to component substitution <nl> + <nl> / / If we ' re substituting opaque archetypes , and there are pattern <nl> / / substitutions present , just substitute those and preserve the <nl> / / basic structure in the component types . Otherwise , fall through <nl> / / to substitute the component types . <nl> - if ( shouldSubstituteOpaqueArchetypes ) { <nl> - if ( patternSubs ) { <nl> - patternSubs = substOpaqueTypes ( patternSubs ) ; <nl> - return origType - > withPatternSubstitutions ( patternSubs ) ; <nl> - } <nl> - / / else fall down to component substitution <nl> - <nl> - / / If we ' re doing a generic application , and there are pattern <nl> - / / substitutions , build invocation substitutions and substitute <nl> - / / opaque types in the pattern subs . Otherwise , fall through <nl> - / / to substitue the component types as discussed above . 
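Concretely, where the deleted branch below layered invocation substitutions on top of pattern substitutions, the new code folds a generic application into a single pattern-specialization step. A simplified sketch of the resulting shape, with the opaque-archetype lookthrough and the surrounding checks elided; applyGenericArgs is an illustrative name, not part of this patch:

  // Applying a substitution map to a polymorphic coroutine type:
  // the yield structure is preserved verbatim, and subs becomes the
  // pattern substitutions of a now non-polymorphic function type.
  CanSILFunctionType applyGenericArgs(CanSILFunctionType origTy,
                                      SubstitutionMap subs,
                                      ProtocolConformanceRef witness) {
    return origTy->withPatternSpecialization(/*sig*/ nullptr, subs,
                                             witness);
  }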
<nl> - } else if ( isGenericApplication ) { <nl> + } else if ( shouldSubstituteOpaqueArchetypes ) { <nl> if ( patternSubs ) { <nl> patternSubs = substOpaqueTypes ( patternSubs ) ; <nl> - auto substType = origType - > withPatternSubstitutions ( patternSubs ) ; <nl> - <nl> - auto invocationSubs = SubstitutionMap : : get ( sig , Subst , Conformances ) ; <nl> - substType = substType - > withInvocationSubstitutions ( invocationSubs ) ; <nl> - <nl> - return substType ; <nl> + auto witnessConformance = substWitnessConformance ( origType ) ; <nl> + return origType - > withPatternSpecialization ( sig , patternSubs , <nl> + witnessConformance ) ; <nl> } <nl> / / else fall down to component substitution <nl> <nl> class SILTypeSubstituter : <nl> auto substType = origType ; <nl> if ( patternSubs ) { <nl> patternSubs = substOpaqueTypes ( patternSubs ) ; <nl> - substType = substType - > withPatternSubstitutions ( patternSubs ) ; <nl> + auto witnessConformance = substWitnessConformance ( origType ) ; <nl> + substType = substType - > withPatternSpecialization ( sig , patternSubs , <nl> + witnessConformance ) ; <nl> } <nl> return substType ; <nl> } <nl> class SILTypeSubstituter : <nl> / / into those and don ' t touch the component types . <nl> } else if ( patternSubs ) { <nl> patternSubs = substSubstitutions ( patternSubs ) ; <nl> - return origType - > withPatternSubstitutions ( patternSubs ) ; <nl> + auto witnessConformance = substWitnessConformance ( origType ) ; <nl> + return origType - > withPatternSpecialization ( nullptr , patternSubs , <nl> + witnessConformance ) ; <nl> } <nl> <nl> / / Otherwise , we need to substitute component types . <nl> class SILTypeSubstituter : <nl> substYields . push_back ( substInterface ( origYield ) ) ; <nl> } <nl> <nl> - ProtocolConformanceRef witnessMethodConformance ; <nl> - if ( auto conformance = origType - > getWitnessMethodConformanceOrInvalid ( ) ) { <nl> - assert ( origType - > getExtInfo ( ) . hasSelfParam ( ) ) ; <nl> - auto selfType = origType - > getSelfParameter ( ) . getInterfaceType ( ) ; <nl> - <nl> - / / The Self type can be nested in a few layers of metatypes ( etc . ) . <nl> - while ( auto metatypeType = dyn_cast < MetatypeType > ( selfType ) ) { <nl> - auto next = metatypeType . getInstanceType ( ) ; <nl> - if ( next = = selfType ) <nl> - break ; <nl> - selfType = next ; <nl> - } <nl> - <nl> - witnessMethodConformance = <nl> - conformance . subst ( selfType , Subst , Conformances ) ; <nl> - <nl> - / / Substitute the underlying conformance of opaque type archetypes if we <nl> - / / should look through opaque archetypes . <nl> - if ( typeExpansionContext . shouldLookThroughOpaqueTypeArchetypes ( ) ) { <nl> - SubstOptions substOptions ( None ) ; <nl> - auto substType = selfType . subst ( Subst , Conformances , substOptions ) <nl> - - > getCanonicalType ( ) ; <nl> - if ( substType - > hasOpaqueArchetype ( ) ) { <nl> - witnessMethodConformance = substOpaqueTypesWithUnderlyingTypes ( <nl> - witnessMethodConformance , substType , typeExpansionContext ) ; <nl> - } <nl> - } <nl> - } <nl> + auto witnessMethodConformance = substWitnessConformance ( origType ) ; <nl> <nl> / / The substituted type is no longer generic , so it ' d never be <nl> / / pseudogeneric . <nl> class SILTypeSubstituter : <nl> TC . Context , witnessMethodConformance ) ; <nl> } <nl> <nl> + ProtocolConformanceRef substWitnessConformance ( CanSILFunctionType origType ) { <nl> + auto conformance = origType - > getWitnessMethodConformanceOrInvalid ( ) ; <nl> + if ( ! 
conformance ) return conformance ; <nl> + <nl> + assert ( origType - > getExtInfo ( ) . hasSelfParam ( ) ) ; <nl> + auto selfType = origType - > getSelfParameter ( ) . getInterfaceType ( ) ; <nl> + <nl> + / / The Self type can be nested in a few layers of metatypes ( etc . ) . <nl> + while ( auto metatypeType = dyn_cast < MetatypeType > ( selfType ) ) { <nl> + auto next = metatypeType . getInstanceType ( ) ; <nl> + if ( next = = selfType ) <nl> + break ; <nl> + selfType = next ; <nl> + } <nl> + <nl> + auto substConformance = <nl> + conformance . subst ( selfType , Subst , Conformances ) ; <nl> + <nl> + / / Substitute the underlying conformance of opaque type archetypes if we <nl> + / / should look through opaque archetypes . <nl> + if ( typeExpansionContext . shouldLookThroughOpaqueTypeArchetypes ( ) ) { <nl> + SubstOptions substOptions ( None ) ; <nl> + auto substType = selfType . subst ( Subst , Conformances , substOptions ) <nl> + - > getCanonicalType ( ) ; <nl> + if ( substType - > hasOpaqueArchetype ( ) ) { <nl> + substConformance = substOpaqueTypesWithUnderlyingTypes ( <nl> + substConformance , substType , typeExpansionContext ) ; <nl> + } <nl> + } <nl> + <nl> + return substConformance ; <nl> + } <nl> + <nl> SILType subst ( SILType type ) { <nl> return SILType : : getPrimitiveType ( visit ( type . getASTType ( ) ) , <nl> type . getCategory ( ) ) ; <nl> class SILTypeSubstituter : <nl> } <nl> <nl> AbstractionPattern abstraction ( Sig , origType ) ; <nl> - / / If we looked through an opaque archetype to a function type we need to <nl> - / / use the function type ' s abstraction . <nl> - if ( isa < OpaqueTypeArchetypeType > ( origType ) & & <nl> - isa < AnyFunctionType > ( substType ) ) <nl> - abstraction = AbstractionPattern ( Sig , substType ) ; <nl> - <nl> return TC . getLoweredRValueType ( typeExpansionContext , abstraction , <nl> substType ) ; <nl> } <nl> mmm a / lib / SIL / SILVerifier . cpp <nl> ppp b / lib / SIL / SILVerifier . cpp <nl> class SILVerifier : public SILVerifierBase < SILVerifier > { <nl> <nl> SILVerifier ( const SILFunction & F , bool SingleFunction = true ) <nl> : M ( F . getModule ( ) . getSwiftModule ( ) ) , F ( F ) , <nl> - fnConv ( F . getLoweredFunctionType ( ) , F . getModule ( ) ) , <nl> + fnConv ( F . getConventionsInContext ( ) ) , <nl> TC ( F . getModule ( ) . Types ) , OpenedArchetypes ( & F ) , Dominance ( nullptr ) , <nl> InstNumbers ( numInstsInFunction ( F ) ) , <nl> DEBlocks ( & F ) , SingleFunction ( SingleFunction ) { <nl> class SILVerifier : public SILVerifierBase < SILVerifier > { <nl> } <nl> <nl> if ( subs . getGenericSignature ( ) - > getCanonicalSignature ( ) ! = <nl> - fnTy - > getSubstGenericSignature ( ) - > getCanonicalSignature ( ) ) { <nl> + fnTy - > getInvocationGenericSignature ( ) - > getCanonicalSignature ( ) ) { <nl> llvm : : dbgs ( ) < < " substitution map ' s generic signature : " ; <nl> subs . 
getGenericSignature ( ) - > print ( llvm : : dbgs ( ) ) ; <nl> llvm : : dbgs ( ) < < " \ n " ; <nl> llvm : : dbgs ( ) < < " callee ' s generic signature : " ; <nl> - fnTy - > getSubstGenericSignature ( ) - > print ( llvm : : dbgs ( ) ) ; <nl> + fnTy - > getInvocationGenericSignature ( ) - > print ( llvm : : dbgs ( ) ) ; <nl> llvm : : dbgs ( ) < < " \ n " ; <nl> require ( false , <nl> " Substitution map does not match callee in apply instruction " ) ; <nl> class SILVerifier : public SILVerifierBase < SILVerifier > { <nl> void checkThrowInst ( ThrowInst * TI ) { <nl> LLVM_DEBUG ( TI - > print ( llvm : : dbgs ( ) ) ) ; <nl> <nl> - CanSILFunctionType fnType = <nl> - F . getLoweredFunctionTypeInContext ( F . getTypeExpansionContext ( ) ) ; <nl> - require ( fnType - > hasErrorResult ( ) , <nl> + require ( fnConv . funcTy - > hasErrorResult ( ) , <nl> " throw in function that doesn ' t have an error result " ) ; <nl> <nl> SILType functionResultType = <nl> class SILVerifier : public SILVerifierBase < SILVerifier > { <nl> } <nl> <nl> void checkYieldInst ( YieldInst * YI ) { <nl> - CanSILFunctionType fnType = <nl> - F . getLoweredFunctionTypeInContext ( F . getTypeExpansionContext ( ) ) <nl> - - > getUnsubstitutedType ( F . getModule ( ) ) ; <nl> - require ( fnType - > isCoroutine ( ) , <nl> + require ( fnConv . funcTy - > isCoroutine ( ) , <nl> " yield in non - coroutine function " ) ; <nl> <nl> auto yieldedValues = YI - > getYieldedValues ( ) ; <nl> - auto yieldInfos = fnType - > getYields ( ) ; <nl> + auto yieldInfos = fnConv . funcTy - > getYields ( ) ; <nl> require ( yieldedValues . size ( ) = = yieldInfos . size ( ) , <nl> " wrong number of yielded values for function " ) ; <nl> for ( auto i : indices ( yieldedValues ) ) { <nl> class SILVerifier : public SILVerifierBase < SILVerifier > { <nl> <nl> for ( auto result : fnConv . getIndirectSILResults ( ) ) { <nl> assert ( fnConv . isSILIndirect ( result ) ) ; <nl> - check ( " result " , fnConv . getSILType ( result ) ) ; <nl> + check ( " indirect result " , fnConv . getSILType ( result ) ) ; <nl> } <nl> for ( auto param : F . getLoweredFunctionType ( ) - > getParameters ( ) ) { <nl> check ( " parameter " , fnConv . getSILType ( param ) ) ; <nl> mmm a / lib / SIL / TypeLowering . cpp <nl> ppp b / lib / SIL / TypeLowering . cpp <nl> <nl> # include " swift / AST / Pattern . h " <nl> # include " swift / AST / PrettyStackTrace . h " <nl> # include " swift / AST / PropertyWrappers . h " <nl> + # include " swift / AST / TypeDifferenceVisitor . h " <nl> # include " swift / AST / Types . h " <nl> # include " swift / ClangImporter / ClangModule . h " <nl> # include " swift / SIL / PrettyStackTrace . h " <nl> TypeConverter : : checkForABIDifferences ( SILModule & M , <nl> return ABIDifference : : NeedsThunk ; <nl> } <nl> <nl> + namespace { <nl> + class HaveDifferentAbstractStructure <nl> + : public CanTypeDifferenceVisitor < HaveDifferentAbstractStructure > { <nl> + public : <nl> + / / Treat any sort of abstract type as equivalent . 
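HaveDifferentAbstractStructure, whose body follows, customizes the new CanTypeDifferenceVisitor through CRTP: the base visitor walks two canonical types in lockstep and stops at the first structural difference, and a subclass overrides only the hooks it cares about. A minimal sketch of that contract; DifferenceProbe is a hypothetical subclass invented for illustration:

  // The default visitDifferentTypeStructure notifies the subclass
  // through the visitDifferentTypes hook and then returns true,
  // which terminates the walk.
  class DifferenceProbe
      : public CanTypeDifferenceVisitor<DifferenceProbe> {
  public:
    bool foundDifference = false;
    void visitDifferentTypes(CanType type1, CanType type2) {
      foundDifference = true;
    }
  };

  // Usage: DifferenceProbe().visit(a, b) returns true iff the two
  // canonical types differ structurally.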
<nl> + static bool isAbstract ( CanType type ) { <nl> + return ( isa < SubstitutableType > ( type ) | | isa < DependentMemberType > ( type ) ) ; <nl> + } ; <nl> + <nl> + / / We can fast - path some of these checks by providing these two overrides : <nl> + bool visitSubstitutableType ( CanSubstitutableType type1 , <nl> + CanSubstitutableType type2 ) { <nl> + return false ; <nl> + } <nl> + bool visitDependentMemberType ( CanDependentMemberType type1 , <nl> + CanDependentMemberType type2 ) { <nl> + return false ; <nl> + } <nl> + <nl> + / / We also need to handle the general case where we have different <nl> + / / kinds of substitutable types . <nl> + bool visitDifferentComponentTypes ( CanType type1 , CanType type2 ) { <nl> + / / This is a difference only if both types aren ' t abstract . <nl> + return ! ( isAbstract ( type1 ) & & isAbstract ( type2 ) ) ; <nl> + } <nl> + <nl> + / / Change the rules used for SIL function types to only consider <nl> + / / the basic structure , not any substitutions . <nl> + bool visitSILFunctionType ( CanSILFunctionType type1 , <nl> + CanSILFunctionType type2 ) { <nl> + return visitSILFunctionTypeStructure ( type1 , type2 ) <nl> + | | visitSILFunctionTypeComponents ( type1 , type2 ) ; <nl> + } <nl> + } ; <nl> + } <nl> + <nl> + static bool haveDifferentAbstractStructure ( CanType type1 , CanType type2 ) { <nl> + return HaveDifferentAbstractStructure ( ) . visit ( type1 , type2 ) ; <nl> + } <nl> + <nl> + static TypeConverter : : ABIDifference <nl> + checkForABIDifferencesInYield ( TypeConverter & TC , SILModule & M , <nl> + SILFunctionType * fnTy1 , SILYieldInfo yield1 , <nl> + SILFunctionType * fnTy2 , SILYieldInfo yield2 ) { <nl> + / / Require the interface types to have the same basic abstract <nl> + / / structure , ignoring any substitutions from the function type . <nl> + / / This structure is what determines the signature of the continuation <nl> + / / function . <nl> + if ( haveDifferentAbstractStructure ( yield1 . getInterfaceType ( ) , <nl> + yield2 . getInterfaceType ( ) ) ) <nl> + return TypeConverter : : ABIDifference : : NeedsThunk ; <nl> + <nl> + / / Also make sure that the actual yield types match in ABI . <nl> + return TC . checkForABIDifferences ( M , yield1 . getSILStorageType ( M , fnTy1 ) , <nl> + yield2 . getSILStorageType ( M , fnTy2 ) ) ; <nl> + } <nl> + <nl> TypeConverter : : ABIDifference <nl> TypeConverter : : checkFunctionForABIDifferences ( SILModule & M , <nl> SILFunctionType * fnTy1 , <nl> TypeConverter : : checkFunctionForABIDifferences ( SILModule & M , <nl> if ( yield1 . getConvention ( ) ! = yield2 . getConvention ( ) ) <nl> return ABIDifference : : NeedsThunk ; <nl> <nl> - if ( checkForABIDifferences ( M , <nl> - yield1 . getSILStorageType ( M , fnTy1 ) , <nl> - yield2 . getSILStorageType ( M , fnTy2 ) , <nl> - / * thunk iuos * / fnTy1 - > getLanguage ( ) = = SILFunctionLanguage : : Swift ) <nl> + if ( checkForABIDifferencesInYield ( * this , M , fnTy1 , yield1 , fnTy2 , yield2 ) <nl> ! = ABIDifference : : CompatibleRepresentation ) <nl> return ABIDifference : : NeedsThunk ; <nl> } <nl> mmm a / lib / SILGen / SILGenPoly . cpp <nl> ppp b / lib / SILGen / SILGenPoly .
cpp <nl> namespace { <nl> SILLocation Loc ; <nl> ArrayRef < ManagedValue > Inputs ; <nl> SmallVectorImpl < ManagedValue > & Outputs ; <nl> + CanSILFunctionType OutputTypesFuncTy ; <nl> ArrayRef < SILParameterInfo > OutputTypes ; <nl> public : <nl> TranslateArguments ( SILGenFunction & SGF , SILLocation loc , <nl> ArrayRef < ManagedValue > inputs , <nl> SmallVectorImpl < ManagedValue > & outputs , <nl> + CanSILFunctionType outputTypesFuncTy , <nl> ArrayRef < SILParameterInfo > outputTypes ) <nl> : SGF ( SGF ) , Loc ( loc ) , Inputs ( inputs ) , Outputs ( outputs ) , <nl> - OutputTypes ( outputTypes ) { } <nl> + OutputTypesFuncTy ( outputTypesFuncTy ) , OutputTypes ( outputTypes ) { } <nl> <nl> void translate ( AbstractionPattern inputOrigFunctionType , <nl> AnyFunctionType : : CanParamArrayRef inputSubstTypes , <nl> namespace { <nl> " Output is not a tuple and is not opaque ? " ) ; <nl> <nl> auto outputTy = SGF . getSILType ( claimNextOutputType ( ) , <nl> - CanSILFunctionType ( ) ) ; <nl> + OutputTypesFuncTy ) ; <nl> auto & outputTL = SGF . getTypeLowering ( outputTy ) ; <nl> if ( SGF . silConv . useLoweredAddresses ( ) ) { <nl> auto temp = SGF . emitTemporary ( Loc , outputTL ) ; <nl> namespace { <nl> auto & loweredTL = SGF . getTypeLowering ( outputOrigType , outputTupleType ) ; <nl> auto loweredTy = loweredTL . getLoweredType ( ) ; <nl> auto optionalTy = SGF . getSILType ( claimNextOutputType ( ) , <nl> - CanSILFunctionType ( ) ) ; <nl> + OutputTypesFuncTy ) ; <nl> auto someDecl = SGF . getASTContext ( ) . getOptionalSomeDecl ( ) ; <nl> if ( loweredTL . isLoadable ( ) | | ! SGF . silConv . useLoweredAddresses ( ) ) { <nl> auto payload = <nl> namespace { <nl> CanType outputSubstType , <nl> ManagedValue input , <nl> SILParameterInfo result ) { <nl> + auto resultTy = SGF . getSILType ( result , OutputTypesFuncTy ) ; <nl> / / Easy case : we want to pass exactly this value . <nl> - if ( input . getType ( ) = = SGF . getSILType ( result , CanSILFunctionType ( ) ) ) { <nl> + if ( input . getType ( ) = = resultTy ) { <nl> switch ( result . getConvention ( ) ) { <nl> case ParameterConvention : : Direct_Owned : <nl> case ParameterConvention : : Indirect_In : <nl> namespace { <nl> case ParameterConvention : : Direct_Unowned : <nl> translateIntoOwned ( inputOrigType , inputSubstType , outputOrigType , <nl> outputSubstType , input ) ; <nl> - assert ( Outputs . back ( ) . getType ( ) = = SGF . getSILType ( result , <nl> - CanSILFunctionType ( ) ) ) ; <nl> + assert ( Outputs . back ( ) . getType ( ) = = resultTy ) ; <nl> return ; <nl> case ParameterConvention : : Direct_Guaranteed : <nl> translateIntoGuaranteed ( inputOrigType , inputSubstType , outputOrigType , <nl> namespace { <nl> case ParameterConvention : : Indirect_In : { <nl> if ( SGF . silConv . useLoweredAddresses ( ) ) { <nl> translateIndirect ( inputOrigType , inputSubstType , outputOrigType , <nl> - outputSubstType , input , <nl> - SGF . getSILType ( result , CanSILFunctionType ( ) ) ) ; <nl> + outputSubstType , input , resultTy ) ; <nl> return ; <nl> } <nl> translateIntoOwned ( inputOrigType , inputSubstType , outputOrigType , <nl> outputSubstType , input ) ; <nl> - assert ( Outputs . back ( ) . getType ( ) = = <nl> - SGF . getSILType ( result , CanSILFunctionType ( ) ) ) ; <nl> + assert ( Outputs . back ( ) . getType ( ) = = resultTy ) ; <nl> return ; <nl> } <nl> case ParameterConvention : : Indirect_In_Guaranteed : { <nl> if ( SGF . silConv . 
useLoweredAddresses ( ) ) { <nl> translateIndirect ( inputOrigType , inputSubstType , outputOrigType , <nl> - outputSubstType , input , <nl> - SGF . getSILType ( result , CanSILFunctionType ( ) ) ) ; <nl> + outputSubstType , input , resultTy ) ; <nl> return ; <nl> } <nl> translateIntoGuaranteed ( inputOrigType , inputSubstType , outputOrigType , <nl> outputSubstType , input ) ; <nl> - assert ( Outputs . back ( ) . getType ( ) = = <nl> - SGF . getSILType ( result , CanSILFunctionType ( ) ) ) ; <nl> + assert ( Outputs . back ( ) . getType ( ) = = resultTy ) ; <nl> return ; <nl> } <nl> case ParameterConvention : : Indirect_Inout : <nl> namespace { <nl> CanType outputSubstType , <nl> ManagedValue input , <nl> SILParameterInfo result ) { <nl> + auto resultTy = SGF . getSILType ( result , OutputTypesFuncTy ) ; <nl> assert ( input . isLValue ( ) ) ; <nl> - if ( input . getType ( ) = = SGF . getSILType ( result , CanSILFunctionType ( ) ) ) { <nl> + if ( input . getType ( ) = = resultTy ) { <nl> Outputs . push_back ( input ) ; <nl> return ; <nl> } <nl> <nl> / / Create a temporary of the right type . <nl> - auto & temporaryTL = SGF . getTypeLowering ( result . getInterfaceType ( ) ) ; <nl> + auto & temporaryTL = SGF . getTypeLowering ( resultTy ) ; <nl> auto temporary = SGF . emitTemporary ( Loc , temporaryTL ) ; <nl> <nl> / / Take ownership of the input value . This leaves the input l - value <nl> static void translateYields ( SILGenFunction & SGF , SILLocation loc , <nl> / / Translate the yields as if they were arguments . <nl> SmallVector < ManagedValue , 4 > outerMVs ; <nl> TranslateArguments translator ( SGF , loc , innerMVs , outerMVs , <nl> + CanSILFunctionType ( ) , <nl> outerLoweredTypesAsParameters ) ; <nl> <nl> translator . translate ( innerInfos . getOrigTypes ( ) , innerInfos . getSubstTypes ( ) , <nl> static void buildThunkBody ( SILGenFunction & SGF , SILLocation loc , <nl> / / other direction ( the thunk receives an Int like a T , and passes it <nl> / / like a normal Int when calling the inner function ) . <nl> SmallVector < ManagedValue , 8 > args ; <nl> - TranslateArguments ( SGF , loc , params , args , argTypes ) <nl> + TranslateArguments ( SGF , loc , params , args , fnType , argTypes ) <nl> . translate ( outputOrigType , <nl> outputSubstType . getParams ( ) , <nl> inputOrigType , <nl> buildThunkSignature ( SILGenFunction & SGF , <nl> / / If there ' s no opened existential , we just inherit the generic environment <nl> / / from the parent function . <nl> if ( openedExistential = = nullptr ) { <nl> - auto genericSig = SGF . F . getLoweredFunctionType ( ) - > getSubstGenericSignature ( ) ; <nl> + auto genericSig = <nl> + SGF . F . getLoweredFunctionType ( ) - > getInvocationGenericSignature ( ) ; <nl> genericEnv = SGF . F . getGenericEnvironment ( ) ; <nl> interfaceSubs = SGF . F . getForwardingSubstitutionMap ( ) ; <nl> contextSubs = interfaceSubs ; <nl> buildThunkSignature ( SILGenFunction & SGF , <nl> int depth = 0 ; <nl> GenericSignature baseGenericSig ; <nl> if ( inheritGenericSig ) { <nl> - if ( auto genericSig = SGF . F . getLoweredFunctionType ( ) - > getSubstGenericSignature ( ) ) { <nl> + if ( auto genericSig = <nl> + SGF . F . getLoweredFunctionType ( ) - > getInvocationGenericSignature ( ) ) { <nl> baseGenericSig = genericSig ; <nl> depth = genericSig - > getGenericParams ( ) . back ( ) - > getDepth ( ) + 1 ; <nl> } <nl> buildThunkSignature ( SILGenFunction & SGF , <nl> / / Calculate substitutions to map the caller ' s archetypes to the thunk ' s <nl> / / archetypes . 
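The systematic switch in this file from getSubstGenericSignature() to getInvocationGenericSignature() matters once function types carry pattern substitutions, because the two accessors then diverge: withPatternSpecialization in lib/AST/Type.cpp above asserts that the pattern substitutions' signature is exactly getSubstGenericSignature(). A hedged illustration of the distinction:

  // For a specialized coroutine type whose components are written
  // against <T> with pattern substitutions T := Int:
  void describe(CanSILFunctionType fnTy) {
    // The signature callers supply substitutions for; empty once
    // the type has been fully specialized.
    auto invocationSig = fnTy->getInvocationGenericSignature();
    // The signature the parameter/yield/result components are
    // expressed against; with pattern substitutions present this
    // is the pattern signature, not the invocation signature.
    auto componentSig = fnTy->getSubstGenericSignature();
    (void)invocationSig; (void)componentSig;
  }

Thunk-building code wants the invocation signature here, since the forwarding substitution maps it constructs are formed against it.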
<nl> if ( auto calleeGenericSig = SGF . F . getLoweredFunctionType ( ) <nl> - - > getSubstGenericSignature ( ) ) { <nl> + - > getInvocationGenericSignature ( ) ) { <nl> contextSubs = SubstitutionMap : : get ( <nl> calleeGenericSig , <nl> [ & ] ( SubstitutableType * type ) - > Type { <nl> SILGenFunction : : emitVTableThunk ( SILDeclRef base , <nl> } <nl> <nl> auto subs = getForwardingSubstitutionMap ( ) ; <nl> - if ( auto genericSig = derivedFTy - > getSubstGenericSignature ( ) ) { <nl> + if ( auto genericSig = derivedFTy - > getInvocationGenericSignature ( ) ) { <nl> subs = SubstitutionMap : : get ( genericSig , subs ) ; <nl> <nl> derivedFTy = <nl> SILGenFunction : : emitVTableThunk ( SILDeclRef base , <nl> <nl> / / Reabstract the arguments . <nl> TranslateArguments ( * this , loc , thunkArgs , substArgs , <nl> - derivedFTy - > getParameters ( ) ) <nl> + derivedFTy , derivedFTy - > getParameters ( ) ) <nl> . translate ( inputOrigType , <nl> inputSubstType . getParams ( ) , <nl> outputOrigType , <nl> void SILGenFunction : : emitProtocolWitness ( AbstractionPattern reqtOrigTy , <nl> AbstractionPattern witnessOrigTy ( witnessInfo . LoweredType ) ; <nl> TranslateArguments ( * this , loc , <nl> origParams , witnessParams , <nl> - witnessUnsubstTy - > getParameters ( ) ) <nl> + witnessUnsubstTy , witnessUnsubstTy - > getParameters ( ) ) <nl> . translate ( reqtOrigTy , <nl> reqtSubstParams , <nl> witnessOrigTy , <nl> mmm a / lib / SILOptimizer / Utils / Generics . cpp <nl> ppp b / lib / SILOptimizer / Utils / Generics . cpp <nl> createSpecializedType ( CanSILFunctionType SubstFTy , SILModule & M ) const { <nl> <nl> unsigned IndirectResultIdx = 0 ; <nl> for ( SILResultInfo RI : SubstFTy - > getResults ( ) ) { <nl> + RI = RI . getUnsubstituted ( M , SubstFTy ) ; <nl> if ( RI . isFormalIndirect ( ) ) { <nl> bool isTrivial = TrivialArgs . test ( IndirectResultIdx ) ; <nl> if ( isFormalResultConverted ( IndirectResultIdx + + ) ) { <nl> createSpecializedType ( CanSILFunctionType SubstFTy , SILModule & M ) const { <nl> auto C = ( isTrivial <nl> ? ResultConvention : : Unowned <nl> : ResultConvention : : Owned ) ; <nl> - SpecializedResults . push_back ( SILResultInfo ( RI . getReturnValueType ( M , SubstFTy ) , C ) ) ; <nl> + SpecializedResults . push_back ( RI . getWithConvention ( C ) ) ; <nl> continue ; <nl> } <nl> } <nl> createSpecializedType ( CanSILFunctionType SubstFTy , SILModule & M ) const { <nl> } <nl> unsigned ParamIdx = 0 ; <nl> for ( SILParameterInfo PI : SubstFTy - > getParameters ( ) ) { <nl> + PI = PI . getUnsubstituted ( M , SubstFTy ) ; <nl> bool isTrivial = TrivialArgs . test ( param2ArgIndex ( ParamIdx ) ) ; <nl> if ( ! isParamConverted ( ParamIdx + + ) ) { <nl> / / No conversion : re - use the original , substituted parameter info . <nl> createSpecializedType ( CanSILFunctionType SubstFTy , SILModule & M ) const { <nl> C = ParameterConvention : : Direct_Owned ; <nl> } <nl> } <nl> - SpecializedParams . push_back ( SILParameterInfo ( PI . getArgumentType ( M , SubstFTy ) , C ) ) ; <nl> + SpecializedParams . push_back ( PI . getWithConvention ( C ) ) ; <nl> } <nl> for ( SILYieldInfo YI : SubstFTy - > getYields ( ) ) { <nl> - / / For now , always just use the original , substituted parameter info . <nl> - SpecializedYields . push_back ( YI ) ; <nl> + / / For now , always re - use the original , substituted yield info . <nl> + SpecializedYields . push_back ( YI . 
getUnsubstituted ( M , SubstFTy ) ) ; <nl> } <nl> <nl> auto Signature = SubstFTy - > isPolymorphic ( ) <nl> mmm a / lib / Sema / TypeCheckType . cpp <nl> ppp b / lib / Sema / TypeCheckType . cpp <nl> Type TypeResolver : : resolveSILFunctionType ( FunctionTypeRepr * repr , <nl> return ErrorType : : get ( Context ) ; <nl> <nl> Type selfType = params . back ( ) . getInterfaceType ( ) ; <nl> + if ( patternSubs ) <nl> + selfType = selfType . subst ( patternSubs ) ; <nl> if ( invocationSubs ) { <nl> selfType = selfType . subst ( invocationSubs ) ; <nl> } <nl> mmm a / test / SILGen / apply_abstraction_nested . swift <nl> ppp b / test / SILGen / apply_abstraction_nested . swift <nl> struct X : P { } <nl> var a = X ( ) <nl> ( a ~ > bar ) ( ( ) ) <nl> <nl> - / / CHECK : [ [ CHAINED_FUNC : % . * ] ] = apply { { % . * } } < X , ( ) , ( ) > ( { { % . * } } , { { % . * } } ) : $ @ convention ( thin ) < τ_0_0 , τ_0_1 , τ_0_2 where τ_0_0 : P > ( @ inout τ_0_0 , @ noescape @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 , τ_0_2 > ( @ inout τ_0_0 ) - > @ owned @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 ) - > @ out τ_0_1 for < τ_0_1 , τ_0_2 > for < τ_0_0 , τ_0_1 , τ_0_2 > ) - > @ owned @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 ) - > @ out τ_0_1 for < τ_0_1 , τ_0_2 > <nl> + / / CHECK : [ [ CHAINED_FUNC : % . * ] ] = apply { { % . * } } < X , ( ) , ( ) > ( { { % . * } } , { { % . * } } ) : $ @ convention ( thin ) < τ_0_0 , τ_0_1 , τ_0_2 where τ_0_0 : P > ( @ inout τ_0_0 , @ noescape @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 , τ_0_2 > ( @ inout τ_0_0 ) - > ( @ owned @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 ) - > @ out τ_0_1 for < τ_0_1 , τ_0_2 > ) for < τ_0_0 , τ_0_1 , τ_0_2 > ) - > @ owned @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 ) - > @ out τ_0_1 for < τ_0_1 , τ_0_2 > <nl> / / CHECK : [ [ CHAINED_FUNC_CONV : % . * ] ] = convert_function [ [ CHAINED_FUNC ] ] : $ @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 ) - > @ out τ_0_1 for < ( ) , ( ) > to $ @ callee_guaranteed ( @ in_guaranteed ( ) ) - > @ out ( ) <nl> / / CHECK : [ [ REABSTRACT : % . * ] ] = function_ref @ $ sytytIegnr_Ieg_TR <nl> / / CHECK : [ [ CHAINED_FUNC_REABSTRACTED : % . * ] ] = partial_apply [ callee_guaranteed ] [ [ REABSTRACT ] ] ( [ [ CHAINED_FUNC_CONV ] ] ) <nl> new file mode 100644 <nl> index 000000000000 . . 3e53ec257b61 <nl> mmm / dev / null <nl> ppp b / test / SILGen / coroutine_subst_function_types . 
swift <nl> <nl> + / / RUN : % target - swift - emit - silgen - module - name mod % s | % FileCheck % s <nl> + <nl> + class Generic < T > { <nl> + init ( ) { preconditionFailure ( " death " ) } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s3mod7GenericC7genericxvM : $ @ yield_once @ convention ( method ) < T > ( @ guaranteed Generic < T > ) - > @ yields @ inout T <nl> + var generic : T <nl> + <nl> + / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s3mod7GenericC15genericFunctionxycvM : $ @ yield_once @ convention ( method ) < T > ( @ guaranteed Generic < T > ) - > @ yields @ inout @ callee_guaranteed @ substituted < τ_0_0 > ( ) - > @ out τ_0_0 for < T > <nl> + var genericFunction : ( ) - > T <nl> + <nl> + / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s3mod7GenericC09returningB0xqd___tcluiM : $ @ yield_once @ convention ( method ) < T > < U > ( @ in_guaranteed U , @ guaranteed Generic < T > ) - > @ yields @ inout T <nl> + subscript < U > ( returningGeneric i : U ) - > T { <nl> + get { return generic } <nl> + set { } <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s3mod7GenericC012returningOwnB0qd__qd___tcluiM : $ @ yield_once @ convention ( method ) < T > < U > ( @ in_guaranteed U , @ guaranteed Generic < T > ) - > @ yields @ inout U { <nl> + subscript < U > ( returningOwnGeneric i : U ) - > U { <nl> + get { return i } <nl> + set { } <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s3mod7GenericC12complexTuplexSg_SDySSxGtvM : $ @ yield_once @ convention ( method ) < T > ( @ guaranteed Generic < T > ) - > @ yields @ inout ( Optional < T > , Dictionary < String , T > ) <nl> + var complexTuple : ( T ? , [ String : T ] ) <nl> + } <nl> + <nl> + class ConcreteWithInt : Generic < Int > { <nl> + override init ( ) { preconditionFailure ( " death " ) } <nl> + <nl> + / / The concrete implementations . Not actually important . <nl> + / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s3mod15ConcreteWithIntC7genericSivM : $ @ yield_once @ convention ( method ) ( @ guaranteed ConcreteWithInt ) - > @ yields @ inout Int <nl> + / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s3mod15ConcreteWithIntC15genericFunctionSiycvM : $ @ yield_once @ convention ( method ) ( @ guaranteed ConcreteWithInt ) - > @ yields @ inout @ callee_guaranteed ( ) - > Int <nl> + / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s3mod15ConcreteWithIntC16returningGenericSix_tcluiM : $ @ yield_once @ convention ( method ) < U > ( @ in_guaranteed U , @ guaranteed ConcreteWithInt ) - > @ yields @ inout Int <nl> + / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s3mod15ConcreteWithIntC19returningOwnGenericxx_tcluiM : $ @ yield_once @ convention ( method ) < U > ( @ in_guaranteed U , @ guaranteed ConcreteWithInt ) - > @ yields @ inout U <nl> + / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s3mod15ConcreteWithIntC12complexTupleSiSg_SDySSSiGtvM : $ @ yield_once @ convention ( method ) ( @ guaranteed ConcreteWithInt ) - > @ yields @ inout ( Optional < Int > , Dictionary < String , Int > ) <nl> + <nl> + / / The override thunks . Note that the yields all exactly match the <nl> + / / original methods above in terms of where archetypes / type parameters <nl> + / / appear . 
<nl> + <nl> + / / CHECK - LABEL : sil private [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntC7genericSivMAA7GenericCADxvMTV : $ @ yield_once @ convention ( method ) @ substituted < τ_0_0 > ( @ guaranteed ConcreteWithInt ) - > @ yields @ inout τ_0_0 for < Int > <nl> + override var generic : Int { <nl> + get { super . generic } <nl> + set { } <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil private [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntC15genericFunctionSiycvMAA7GenericCADxycvMTV : $ @ yield_once @ convention ( method ) @ substituted < τ_0_0 > ( @ guaranteed ConcreteWithInt ) - > ( @ yields @ inout @ callee_guaranteed @ substituted < τ_0_0 > ( ) - > @ out τ_0_0 for < τ_0_0 > ) for < Int > <nl> + override var genericFunction : ( ) - > Int { <nl> + get { super . genericFunction } <nl> + set { } <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil private [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntC16returningGenericSix_tcluiMAA0F0CADxqd___tcluiMTV : $ @ yield_once @ convention ( method ) < τ_0_0 > @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 , @ guaranteed ConcreteWithInt ) - > @ yields @ inout τ_0_1 for < τ_0_0 , Int > <nl> + override subscript < U > ( returningGeneric i : U ) - > Int { <nl> + get { return 0 } <nl> + set { } <nl> + } <nl> + <nl> + / / This one doesn ' t need a thunk . <nl> + override subscript < U > ( returningOwnGeneric i : U ) - > U { <nl> + get { return i } <nl> + set { } <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil private [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntC12complexTupleSiSg_SDySSSiGtvMAA7GenericCADxSg_SDySSxGtvMTV : $ @ yield_once @ convention ( method ) @ substituted < τ_0_0 , τ_0_1 > ( @ guaranteed ConcreteWithInt ) - > @ yields @ inout ( Optional < τ_0_0 > , Dictionary < String , τ_0_1 > ) for < Int , Int > <nl> + override var complexTuple : ( Int ? , [ String : Int ] ) { <nl> + get { super . complexTuple } <nl> + set { } <nl> + } <nl> + } <nl> + <nl> + protocol ProtoWithAssoc { <nl> + associatedtype Assoc <nl> + <nl> + @ _borrowed <nl> + var generic : Assoc { get set } <nl> + <nl> + @ _borrowed <nl> + var genericFunction : ( ) - > Assoc { get set } <nl> + <nl> + @ _borrowed <nl> + subscript < U > ( returningGeneric i : U ) - > Assoc { get set } <nl> + <nl> + @ _borrowed <nl> + subscript < U > ( returningOwnGeneric i : U ) - > U { get set } <nl> + <nl> + @ _borrowed <nl> + var complexTuple : ( Assoc ? , [ String : Assoc ] ) { get set } <nl> + } <nl> + extension ConcreteWithInt : ProtoWithAssoc { <nl> + / / The unsubstituted yields here should match the natural <nl> + / / abstractions for the protocol . 
<nl> + <nl> + / / var generic <nl> + / / CHECK - LABEL : sil private [ transparent ] [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntCAA05ProtoC5AssocA2aDP7generic0F0QzvrTW : $ @ yield_once @ convention ( witness_method : ProtoWithAssoc ) @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 ) - > @ yields @ in_guaranteed τ_0_1 for < ConcreteWithInt , Int > <nl> + / / CHECK - LABEL : sil private [ transparent ] [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntCAA05ProtoC5AssocA2aDP7generic0F0QzvMTW : $ @ yield_once @ convention ( witness_method : ProtoWithAssoc ) @ substituted < τ_0_0 , τ_0_1 > ( @ inout τ_0_0 ) - > @ yields @ inout τ_0_1 for < ConcreteWithInt , Int > <nl> + <nl> + / / var genericFunction <nl> + / / CHECK - LABEL : sil private [ transparent ] [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntCAA05ProtoC5AssocA2aDP15genericFunction0F0QzycvrTW : $ @ yield_once @ convention ( witness_method : ProtoWithAssoc ) @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 ) - > ( @ yields @ guaranteed @ callee_guaranteed @ substituted < τ_0_0 > ( ) - > @ out τ_0_0 for < τ_0_1 > ) for < ConcreteWithInt , Int > <nl> + / / CHECK - LABEL : sil private [ transparent ] [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntCAA05ProtoC5AssocA2aDP15genericFunction0F0QzycvMTW : $ @ yield_once @ convention ( witness_method : ProtoWithAssoc ) @ substituted < τ_0_0 , τ_0_1 > ( @ inout τ_0_0 ) - > ( @ yields @ inout @ callee_guaranteed @ substituted < τ_0_0 > ( ) - > @ out τ_0_0 for < τ_0_1 > ) for < ConcreteWithInt , Int > <nl> + <nl> + / / subscript ( returningGeneric : ) <nl> + / / CHECK - LABEL : sil private [ transparent ] [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntCAA05ProtoC5AssocA2aDP16returningGeneric0F0Qzqd___tcluirTW : $ @ yield_once @ convention ( witness_method : ProtoWithAssoc ) < τ_0_0 > @ substituted < τ_0_0 , τ_0_1 , τ_0_2 > ( @ in_guaranteed τ_0_0 , @ in_guaranteed τ_0_1 ) - > @ yields @ in_guaranteed τ_0_2 for < τ_0_0 , ConcreteWithInt , Int > <nl> + / / CHECK - LABEL : sil private [ transparent ] [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntCAA05ProtoC5AssocA2aDP16returningGeneric0F0Qzqd___tcluiMTW : $ @ yield_once @ convention ( witness_method : ProtoWithAssoc ) < τ_0_0 > @ substituted < τ_0_0 , τ_0_1 , τ_0_2 > ( @ in_guaranteed τ_0_0 , @ inout τ_0_1 ) - > @ yields @ inout τ_0_2 for < τ_0_0 , ConcreteWithInt , Int > <nl> + <nl> + / / subscript ( returningOwnGeneric : ) <nl> + / / CHECK - LABEL : sil private [ transparent ] [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntCAA05ProtoC5AssocA2aDP19returningOwnGenericqd__qd___tcluirTW : $ @ yield_once @ convention ( witness_method : ProtoWithAssoc ) < τ_0_0 > @ substituted < τ_0_0 , τ_0_1 , τ_0_2 > ( @ in_guaranteed τ_0_0 , @ in_guaranteed τ_0_1 ) - > @ yields @ in_guaranteed τ_0_2 for < τ_0_0 , ConcreteWithInt , τ_0_0 > <nl> + / / CHECK - LABEL : sil private [ transparent ] [ thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntCAA05ProtoC5AssocA2aDP19returningOwnGenericqd__qd___tcluiMTW : $ @ yield_once @ convention ( witness_method : ProtoWithAssoc ) < τ_0_0 > @ substituted < τ_0_0 , τ_0_1 , τ_0_2 > ( @ in_guaranteed τ_0_0 , @ inout τ_0_1 ) - > @ yields @ inout τ_0_2 for < τ_0_0 , ConcreteWithInt , τ_0_0 > <nl> + <nl> + / / var complexTuple <nl> + / / CHECK - LABEL : sil shared [ ossa ] @ $ s3mod15ConcreteWithIntC12complexTupleSiSg_SDySSSiGtvr : $ @ yield_once @ convention ( method ) ( @ guaranteed ConcreteWithInt ) - > ( @ yields Optional < Int > , @ yields @ guaranteed Dictionary < String , Int > ) <nl> + / / CHECK - LABEL : sil private [ transparent ] [ 
thunk ] [ ossa ] @ $ s3mod15ConcreteWithIntCAA05ProtoC5AssocA2aDP12complexTuple0F0QzSg_SDySSAHGtvMTW : $ @ yield_once @ convention ( witness_method : ProtoWithAssoc ) @ substituted < τ_0_0 , τ_0_1 , τ_0_2 > ( @ inout τ_0_0 ) - > @ yields @ inout ( Optional < τ_0_1 > , Dictionary < String , τ_0_2 > ) for < ConcreteWithInt , Int , Int > <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil_vtable ConcreteWithInt { <nl> + / / CHECK : # Generic . generic ! modify . 1 : < T > ( Generic < T > ) - > ( ) - > ( ) : @ $ s3mod15ConcreteWithIntC7genericSivMAA7GenericCADxvMTV [ override ] <nl> + / / CHECK : # Generic . genericFunction ! modify . 1 : < T > ( Generic < T > ) - > ( ) - > ( ) : @ $ s3mod15ConcreteWithIntC15genericFunctionSiycvMAA7GenericCADxycvMTV [ override ] <nl> + / / CHECK : # Generic . subscript ! modify . 1 : < T > < U > ( Generic < T > ) - > ( U ) - > ( ) : @ $ s3mod15ConcreteWithIntC16returningGenericSix_tcluiMAA0F0CADxqd___tcluiMTV [ override ] <nl> + / / CHECK : # Generic . subscript ! modify . 1 : < T > < U > ( Generic < T > ) - > ( U ) - > ( ) : @ $ s3mod15ConcreteWithIntC19returningOwnGenericxx_tcluiM [ override ] <nl> + / / CHECK : # Generic . complexTuple ! modify . 1 : < T > ( Generic < T > ) - > ( ) - > ( ) : @ $ s3mod15ConcreteWithIntC12complexTupleSiSg_SDySSSiGtvMAA7GenericCADxSg_SDySSxGtvMTV [ override ] <nl> + / / CHECK : } <nl> mmm a / test / SILGen / function_type_lowering . swift <nl> ppp b / test / SILGen / function_type_lowering . swift <nl> func c < T , U , V > ( _ x : ( V ) - > T , _ : U ) { } <nl> / / CHECK - LABEL : sil { { . * } } 003Hca { { . * } } : $ @ convention ( thin ) < T > ( @ noescape @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 ) - > @ out τ_0_1 for < T , T > ) - > ( ) <nl> func ç < T > ( _ x : ( T ) - > T ) { } <nl> <nl> - / / CHECK - LABEL : sil { { . * } } returnsThrowing { { . * } } : $ @ convention ( thin ) < T , U , V > ( @ noescape @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 , τ_0_2 > ( @ in_guaranteed τ_0_0 ) - > @ owned @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 ) - > ( @ out τ_0_1 , @ error Error ) for < τ_0_1 , τ_0_2 > for < T , U , V > ) - > ( ) <nl> + / / CHECK - LABEL : sil { { . * } } returnsThrowing { { . * } } : $ @ convention ( thin ) < T , U , V > ( @ noescape @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 , τ_0_2 > ( @ in_guaranteed τ_0_0 ) - > ( @ owned @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 > ( @ in_guaranteed τ_0_0 ) - > ( @ out τ_0_1 , @ error Error ) for < τ_0_1 , τ_0_2 > ) for < T , U , V > ) - > ( ) { <nl> func returnsThrowing < T , U , V > ( _ x : ( T ) - > ( U ) throws - > V ) { } <nl> <nl> <nl> func z < T : P > ( _ : ( SP < T > ) - > Void ) { } <nl> <nl> struct SCP < T : P , U : CP < T > > { } <nl> <nl> - / / CHECK - LABEL : sil { { . * } } 2z2 { { . * } } : $ @ convention ( thin ) < T , U where T : P , U : CP < T > > ( @ noescape @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 where τ_0_0 : P , τ_0_1 : CP < T > , τ_0_1 : _NativeClass > ( SCP < τ_0_0 , τ_0_1 > ) - > ( ) for < T , U > ) - > ( ) <nl> + / / CHECK - LABEL : sil { { . * } } 2z2 { { . * } } : $ @ convention ( thin ) < T , U where T : P , U : CP < T > > ( @ noescape @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 where τ_0_0 : P , τ_0_1 : CP < τ_0_0 > , τ_0_1 : _NativeClass > ( SCP < τ_0_0 , τ_0_1 > ) - > ( ) for < T , U > ) - > ( ) <nl> func z2 < T : P , U : CP < T > > ( _ : ( SCP < T , U > ) - > Void ) { } <nl> - / / CHECK - LABEL : sil { { . * } } 3z2a { { . 
* } } : $ @ convention ( thin ) < T , U where T : AnyObject , T : P , U : CP < T > > ( @ noescape @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 where τ_0_0 : _RefCountedObject , τ_0_0 : P , τ_0_1 : CP < T > , τ_0_1 : _NativeClass > ( SCP < τ_0_0 , τ_0_1 > ) - > ( ) for < T , U > ) - > ( ) <nl> + <nl> + / / CHECK - LABEL : sil { { . * } } 3z2a { { . * } } : $ @ convention ( thin ) < T , U where T : AnyObject , T : P , U : CP < T > > ( @ noescape @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 where τ_0_0 : _RefCountedObject , τ_0_0 : P , τ_0_1 : CP < τ_0_0 > , τ_0_1 : _NativeClass > ( SCP < τ_0_0 , τ_0_1 > ) - > ( ) for < T , U > ) - > ( ) <nl> func z2a < T : P & AnyObject , U : CP < T > > ( _ : ( SCP < T , U > ) - > Void ) { } <nl> <nl> / / CHECK - LABEL : sil { { . * } } 2z3 { { . * } } : $ @ convention ( thin ) < T , U where T : P , U : CP < T > > ( @ noescape @ callee_guaranteed @ substituted < τ_0_0 , τ_0_1 where τ_0_0 : _RefCountedObject , τ_0_1 : _RefCountedObject > ( S < τ_0_0 , τ_0_1 > ) - > ( ) for < U , U > ) - > ( ) <nl> mmm a / test / SILGen / multi_file . swift <nl> ppp b / test / SILGen / multi_file . swift <nl> class HasComputedProperty : ProtocolWithProperty { <nl> } <nl> } <nl> / / CHECK - LABEL : sil hidden [ transparent ] [ ossa ] @ $ s10multi_file19HasComputedPropertyC3fooSivM : $ @ yield_once @ convention ( method ) ( @ guaranteed HasComputedProperty ) - > @ yields @ inout Int { <nl> - / / CHECK - LABEL : sil private [ transparent ] [ thunk ] [ ossa ] @ $ s10multi_file19HasComputedPropertyCAA012ProtocolWithE0A2aDP3fooSivMTW : $ @ yield_once @ convention ( witness_method : ProtocolWithProperty ) ( @ inout HasComputedProperty ) - > @ yields @ inout Int { <nl> + / / CHECK - LABEL : sil private [ transparent ] [ thunk ] [ ossa ] @ $ s10multi_file19HasComputedPropertyCAA012ProtocolWithE0A2aDP3fooSivMTW : $ @ yield_once @ convention ( witness_method : ProtocolWithProperty ) @ substituted < τ_0_0 > ( @ inout τ_0_0 ) - > @ yields @ inout Int for < HasComputedProperty > { <nl>
Merge pull request from rjmccall / accessor - subst - function - types
apple/swift
81ba9fd2230d3023eda98d3ea3982829280c72f5
2020-03-10T17:05:28Z
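The diff above tests SILGen's yield-once `modify` coroutines: an accessor yields a mutable slot to its caller exactly once, the caller mutates it in place, and control then resumes in the accessor so it can finish up. As a loose illustration of that control flow (not of Swift's actual semantics), here is a sketch in Python using a yield-once context manager; the `Box` type and its `modify` method are invented for this example.

```python
from contextlib import contextmanager

class Box:
    """Wraps a value and exposes it through a yield-once 'modify' coroutine."""
    def __init__(self, value):
        self._value = value

    @contextmanager
    def modify(self):
        # Yield a one-element list as a stand-in for an inout slot; the
        # caller mutates slot[0] while this coroutine is suspended.
        slot = [self._value]
        yield slot
        # Control resumes here exactly once; write the result back.
        self._value = slot[0]

b = Box(1)
with b.modify() as slot:
    slot[0] += 41   # in-place mutation, as a modify accessor would allow
assert b._value == 42
```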
mmm a / tensorflow / lite / python / convert . py <nl> ppp b / tensorflow / lite / python / convert . py <nl> def toco_convert_protos ( model_flags_str , <nl> fp_toco . write ( toco_flags_str ) <nl> fp_input . write ( input_data_str ) <nl> debug_info_str = debug_info_str if debug_info_str else " " <nl> - fp_debug . write ( debug_info_str ) <nl> + # if debug_info_str contains a " string value " , then the call to <nl> + # fp_debug . write ( debug_info_str ) will fail with the following error <nl> + # <nl> + # TypeError : a bytes - like object is required , not ' str ' <nl> + # <nl> + # Some of the subtests within the " convert_test " unit - test fail <nl> + # with the error shown above . So watch out for that scenario and <nl> + # convert debug_info_str to bytes where needed <nl> + if isinstance ( debug_info_str , str ) : <nl> + fp_debug . write ( debug_info_str . encode ( " utf - 8 " ) ) <nl> + else : <nl> + fp_debug . write ( debug_info_str ) <nl> <nl> # Reserve an output file <nl> with _tempfile . NamedTemporaryFile ( delete = False ) as fp : <nl> mmm a / tensorflow / python / compiler / xla / BUILD <nl> ppp b / tensorflow / python / compiler / xla / BUILD <nl> cuda_py_test ( <nl> ] , <nl> tags = [ <nl> " no_mac " , <nl> + " no_rocm " , # XLA support is not enabled on the ROCm platform <nl> " no_windows " , <nl> ] , <nl> xla_enable_strict_auto_jit = True , <nl> mmm a / tensorflow / python / keras / distribute / BUILD <nl> ppp b / tensorflow / python / keras / distribute / BUILD <nl> distribute_py_test ( <nl> shard_count = 5 , <nl> tags = [ <nl> " multi_and_single_gpu " , <nl> + " no_rocm " , # times out on ROCm <nl> " no_windows_gpu " , <nl> " notsan " , <nl> ] , <nl> distribute_py_test ( <nl> shard_count = 19 , <nl> tags = [ <nl> " multi_and_single_gpu " , <nl> + " no_rocm " , # times out on ROCm <nl> " no_windows_gpu " , <nl> # TODO ( b / 134764123 ) : Re - enable this test . <nl> " notap " , <nl> distribute_py_test ( <nl> shard_count = 4 , <nl> tags = [ <nl> " multi_and_single_gpu " , <nl> + " no_rocm " , # times out on ROCm <nl> " no_windows_gpu " , <nl> " notsan " , <nl> ] , <nl> distribute_py_test ( <nl> shard_count = 8 , <nl> tags = [ <nl> " multi_and_single_gpu " , <nl> + " no_rocm " , # times out on ROCm <nl> " no_windows_gpu " , <nl> " notsan " , <nl> ] , <nl>
Merge pull request from ROCmSoftwarePlatform : google_upstream_no_rocm_updates_190711
tensorflow/tensorflow
59ee7f9138482d85cd93c004aca961bea35820c7
2019-07-24T09:47:39Z
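The convert.py hunk above works around a `str`-versus-`bytes` mismatch: `NamedTemporaryFile` opens in binary mode by default, so writing a `str` raises `TypeError`. A minimal reproduction of the failure and of the guarded write from the patch (the sample string is arbitrary):

```python
import tempfile

debug_info_str = "some debug info"  # may arrive as str or bytes

with tempfile.NamedTemporaryFile() as fp_debug:  # binary mode ('w+b') by default
    try:
        fp_debug.write(debug_info_str)
    except TypeError as e:
        print(e)  # a bytes-like object is required, not 'str'

    # The guarded form used by the patch: encode only when needed.
    if isinstance(debug_info_str, str):
        fp_debug.write(debug_info_str.encode("utf-8"))
    else:
        fp_debug.write(debug_info_str)
```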
mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> tf_cc_test ( <nl> " / / tensorflow / core / kernels : cast_op " , <nl> " / / tensorflow / core / kernels : concat_op " , <nl> " / / tensorflow / core / kernels : identity_op " , <nl> + " / / tensorflow / core / kernels : immutable_constant_op " , <nl> " / / tensorflow / core / kernels : matmul_op " , <nl> " / / third_party / eigen3 " , <nl> ] , <nl> mmm a / tensorflow / core / common_runtime / constant_folding . cc <nl> ppp b / tensorflow / core / common_runtime / constant_folding . cc <nl> void FindConstantFoldableNodes ( const Graph * graph , <nl> std : : vector < Node * > & nodes = * nodes_result ; <nl> bool internal_node_inserted = false ; <nl> / / Walk the nodes in data flow order <nl> - ReverseDFS ( * graph , nullptr , <nl> - [ & nodes , & node_set , & internal_node_inserted , opts , <nl> - flib_def ] ( Node * n ) { <nl> - if ( n - > IsConstant ( ) ) { <nl> - / / Constants with no control inputs ( except from _SOURCE node ) <nl> - / / are definitely constant foldable . <nl> - if ( n - > in_edges ( ) . size ( ) = = 0 | | <nl> - ( n - > in_edges ( ) . size ( ) = = 1 & & <nl> - ( * n - > in_edges ( ) . begin ( ) ) - > src ( ) - > IsSource ( ) ) ) { <nl> - node_set . insert ( n ) ; <nl> - nodes . push_back ( n ) ; <nl> - } <nl> - } else if ( IsConstantFoldable ( flib_def , n , opts . consider ) ) { <nl> - / / Check whether the set of this node ' s in_nodes is completely <nl> - / / included in the set of constant foldable nodes . If true , <nl> - / / then this node is also constant foldable . <nl> - bool all_parents_constant = true ; <nl> - for ( const Node * parent : n - > in_nodes ( ) ) { <nl> - if ( node_set . count ( parent ) = = 0 & & ! parent - > IsSource ( ) ) { <nl> - all_parents_constant = false ; <nl> - break ; <nl> - } <nl> - } <nl> - if ( all_parents_constant ) { <nl> - node_set . insert ( n ) ; <nl> - nodes . push_back ( n ) ; <nl> - internal_node_inserted = true ; <nl> - } <nl> - } <nl> - } ) ; <nl> + ReverseDFS ( * graph , nullptr , [ & nodes , & node_set , & internal_node_inserted , opts , <nl> + flib_def ] ( Node * n ) { <nl> + if ( n - > IsConstant ( ) ) { <nl> + / / Constants with no control inputs ( except from _SOURCE node ) <nl> + / / are definitely constant foldable . <nl> + if ( n - > in_edges ( ) . size ( ) = = 0 | | <nl> + ( n - > in_edges ( ) . size ( ) = = 1 & & <nl> + ( * n - > in_edges ( ) . begin ( ) ) - > src ( ) - > IsSource ( ) ) ) { <nl> + node_set . insert ( n ) ; <nl> + nodes . push_back ( n ) ; <nl> + } <nl> + } else if ( IsConstantFoldable ( flib_def , n , opts . consider ) ) { <nl> + / / Check whether the set of this node ' s in_nodes is completely <nl> + / / included in the set of constant foldable nodes . If true , <nl> + / / then this node is also constant foldable . <nl> + bool all_parents_constant = true ; <nl> + for ( const Node * parent : n - > in_nodes ( ) ) { <nl> + if ( node_set . count ( parent ) = = 0 & & ! parent - > IsSource ( ) ) { <nl> + all_parents_constant = false ; <nl> + break ; <nl> + } <nl> + } <nl> + if ( all_parents_constant ) { <nl> + node_set . insert ( n ) ; <nl> + nodes . push_back ( n ) ; <nl> + internal_node_inserted = true ; <nl> + } <nl> + } <nl> + } ) ; <nl> / / If we have inserted just leaf level nodes , then there is nothing to fold . <nl> if ( ! internal_node_inserted ) { <nl> nodes . clear ( ) ; <nl> int64 UniqueConstantId ( ) { <nl> return id . 
fetch_add ( 1 ) ; <nl> } <nl> <nl> - Device * GetCPUDevice ( ) { <nl> - static mutex mu ; <nl> - static Device * device GUARDED_BY ( mu ) = nullptr ; <nl> - mutex_lock l ( mu ) ; <nl> - if ( ! device ) { <nl> - std : : vector < Device * > devices ; <nl> - Status s = DeviceFactory : : GetFactory ( DEVICE_CPU ) <nl> - - > CreateDevices ( SessionOptions { } , " " , & devices ) ; <nl> - if ( s . ok ( ) & & devices . size ( ) > 0 ) { <nl> - device = devices [ 0 ] ; <nl> - } <nl> + std : : unique_ptr < Device > GetCPUDevice ( Env * env ) { <nl> + std : : vector < Device * > devices ; <nl> + SessionOptions session_options ; <nl> + session_options . env = env ; <nl> + Status s = DeviceFactory : : GetFactory ( DEVICE_CPU ) <nl> + - > CreateDevices ( session_options , " " , & devices ) ; <nl> + if ( s . ok ( ) & & devices . size ( ) > 0 ) { <nl> + return std : : unique_ptr < Device > ( devices [ 0 ] ) ; <nl> } <nl> - return device ; <nl> + return nullptr ; <nl> } <nl> <nl> - thread : : ThreadPool * GetThreadPool ( ) { <nl> + thread : : ThreadPool * GetThreadPool ( Env * env ) { <nl> static thread : : ThreadPool * thread_pool = <nl> - new thread : : ThreadPool ( Env : : Default ( ) , " Compute " , 1 ) ; <nl> + new thread : : ThreadPool ( env , " Compute " , 1 ) ; <nl> return thread_pool ; <nl> } <nl> <nl> bool ReplaceTensorWithConstant ( Graph * graph , Device * partition_device , <nl> } <nl> <nl> bool DoConstantFolding ( const ConstantFoldingOptions & opts , <nl> - FunctionLibraryRuntime * function_library , <nl> + FunctionLibraryRuntime * function_library , Env * env , <nl> Device * partition_device , Graph * graph ) { <nl> DumpGraph ( " Before " , graph ) ; <nl> - Device * device = GetCPUDevice ( ) ; <nl> - thread : : ThreadPool * thread_pool = GetThreadPool ( ) ; <nl> + std : : unique_ptr < Device > device = GetCPUDevice ( env ) ; <nl> + thread : : ThreadPool * thread_pool = GetThreadPool ( env ) ; <nl> if ( ! device | | ! thread_pool ) { <nl> VLOG ( 1 ) < < " Cannot find a device and / or a thread pool to do constant " <nl> " folding on " ; <nl> bool DoConstantFolding ( const ConstantFoldingOptions & opts , <nl> thread_pool - > Schedule ( c ) ; <nl> } ; <nl> LocalExecutorParams params ; <nl> - params . device = device ; <nl> + params . device = device . get ( ) ; <nl> params . function_library = function_library ; <nl> - params . create_kernel = [ device , constant_graph ] ( const NodeDef & ndef , <nl> - OpKernel * * kernel ) { <nl> - return CreateNonCachedKernel ( device , nullptr , ndef , <nl> + params . create_kernel = [ & device , constant_graph ] ( const NodeDef & ndef , <nl> + OpKernel * * kernel ) { <nl> + return CreateNonCachedKernel ( device . get ( ) , nullptr , ndef , <nl> constant_graph - > versions ( ) . producer ( ) , kernel ) ; <nl> } ; <nl> params . delete_kernel = [ ] ( OpKernel * kernel ) { delete kernel ; } ; <nl> mmm a / tensorflow / core / common_runtime / constant_folding . h <nl> ppp b / tensorflow / core / common_runtime / constant_folding . h <nl> namespace tensorflow { <nl> / / assumed to execute . <nl> / / Returns true if and only if " graph " has been mutated . <nl> bool DoConstantFolding ( const ConstantFoldingOptions & opts , <nl> - FunctionLibraryRuntime * function_library , <nl> + FunctionLibraryRuntime * function_library , Env * env , <nl> Device * partition_device , Graph * graph ) ; <nl> <nl> typedef std : : pair < Node * , int > NodeAndOutput ; <nl> mmm a / tensorflow / core / common_runtime / constant_folding_test . 
cc <nl> ppp b / tensorflow / core / common_runtime / constant_folding_test . cc <nl> limitations under the License . <nl> <nl> # include " tensorflow / core / common_runtime / constant_folding . h " <nl> <nl> + # include " tensorflow / cc / ops / standard_ops . h " <nl> # include " tensorflow / core / common_runtime / device_factory . h " <nl> # include " tensorflow / core / common_runtime / device_mgr . h " <nl> # include " tensorflow / core / framework / function_testlib . h " <nl> class ConstantFoldingTest : public : : testing : : Test { <nl> <nl> TEST_F ( ConstantFoldingTest , Basic ) { <nl> SIMPLE_GRAPH ; <nl> - EXPECT_TRUE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , nullptr , g ) ) ; <nl> + EXPECT_TRUE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , <nl> + Env : : Default ( ) , nullptr , g ) ) ; <nl> <nl> / / Nodes s1 and s2 now should now have a constant input <nl> EXPECT_EQ ( 1 , s1 - > num_inputs ( ) ) ; <nl> TEST_F ( ConstantFoldingTest , ConsiderFunction ) { <nl> ConstantFoldingOptions opts ; <nl> / / Do not allow constant folding of m2 <nl> opts . consider = [ m2 ] ( const Node * n ) { return m2 ! = n ; } ; <nl> - EXPECT_TRUE ( DoConstantFolding ( opts , nullptr , nullptr , g ) ) ; <nl> + EXPECT_TRUE ( DoConstantFolding ( opts , nullptr , Env : : Default ( ) , nullptr , g ) ) ; <nl> <nl> / / Node s1 now should now have a constant input <nl> EXPECT_EQ ( 1 , s1 - > num_inputs ( ) ) ; <nl> TEST_F ( ConstantFoldingTest , TestNoReplaceAnotherConstant ) { <nl> g - > AddControlEdge ( g - > source_node ( ) , d ) ; <nl> Node * s3 = test : : graph : : Send ( g , d , " d " , " sender " , 0 , " receiver " ) ; <nl> g - > AddControlEdge ( s3 , g - > sink_node ( ) ) ; <nl> - EXPECT_TRUE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , nullptr , g ) ) ; <nl> + EXPECT_TRUE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , <nl> + Env : : Default ( ) , nullptr , g ) ) ; <nl> <nl> / / Nodes s3 should still have d as input <nl> EXPECT_EQ ( 1 , s3 - > num_inputs ( ) ) ; <nl> TEST_F ( ConstantFoldingTest , TwoOutputs ) { <nl> g - > AddControlEdge ( b0 , g - > sink_node ( ) ) ; <nl> g - > AddControlEdge ( b1 , g - > sink_node ( ) ) ; <nl> <nl> - EXPECT_TRUE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , nullptr , g ) ) ; <nl> + EXPECT_TRUE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , <nl> + Env : : Default ( ) , nullptr , g ) ) ; <nl> EXPECT_EQ ( 1 , b0 - > num_inputs ( ) ) ; <nl> ExpectNodeEqual < int > ( * ( b0 - > in_nodes ( ) . begin ( ) ) , { 0 , 1 } , { 2 } ) ; <nl> EXPECT_EQ ( 1 , b1 - > num_inputs ( ) ) ; <nl> TEST_F ( ConstantFoldingTest , TwoOutputsFoldOneOutput ) { <nl> <nl> ConstantFoldingOptions opts ; <nl> opts . consider = [ b1_ident ] ( const Node * n ) { return b1_ident ! = n ; } ; <nl> - EXPECT_TRUE ( DoConstantFolding ( opts , nullptr , nullptr , g ) ) ; <nl> + EXPECT_TRUE ( DoConstantFolding ( opts , nullptr , Env : : Default ( ) , nullptr , g ) ) ; <nl> / / 0th output of b should have been folded . <nl> EXPECT_EQ ( 1 , b0 - > num_inputs ( ) ) ; <nl> ExpectNodeEqual < int > ( * ( b0 - > in_nodes ( ) . begin ( ) ) , { 0 , 1 } , { 2 } ) ; <nl> TEST_F ( ConstantFoldingTest , TestNoReplaceOnGPU ) { <nl> g - > AddControlEdge ( send , g - > sink_node ( ) ) ; <nl> <nl> / / No ops should be replaced , as there is no kernel for BFLOAT16 on GPU . 
<nl> - EXPECT_FALSE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , device , g ) ) ; <nl> + EXPECT_FALSE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , <nl> + Env : : Default ( ) , device , g ) ) ; <nl> <nl> / / But constant folding should have replaced the cast op with a constant when <nl> / / running on CPU . <nl> - EXPECT_TRUE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , nullptr , g ) ) ; <nl> + EXPECT_TRUE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , <nl> + Env : : Default ( ) , nullptr , g ) ) ; <nl> <nl> for ( auto d : devices ) { <nl> delete d ; <nl> TEST_F ( ConstantFoldingTest , TestNoReplaceLargeConstant ) { <nl> g - > AddControlEdge ( concat_send , g - > sink_node ( ) ) ; <nl> <nl> / / The above concat should not have been constant folded . <nl> - EXPECT_FALSE ( <nl> - DoConstantFolding ( ConstantFoldingOptions { } , nullptr , nullptr , g ) ) ; <nl> + EXPECT_FALSE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , <nl> + Env : : Default ( ) , nullptr , g ) ) ; <nl> } <nl> <nl> TEST_F ( ConstantFoldingTest , TestNoReplaceFunctionCall ) { <nl> TEST_F ( ConstantFoldingTest , TestNoReplaceFunctionCall ) { <nl> g - > AddControlEdge ( times_two_send , g - > sink_node ( ) ) ; <nl> <nl> / / The above function call should not have been constant folded . <nl> - EXPECT_FALSE ( <nl> - DoConstantFolding ( ConstantFoldingOptions { } , nullptr , nullptr , g ) ) ; <nl> + EXPECT_FALSE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , <nl> + Env : : Default ( ) , nullptr , g ) ) ; <nl> <nl> g_ = nullptr ; <nl> } <nl> <nl> + namespace { <nl> + <nl> + const char kTestMemRegionName [ ] = " test : / / test " ; <nl> + <nl> + class TestReadOnlyMemoryRegion : public : : tensorflow : : ReadOnlyMemoryRegion { <nl> + public : <nl> + ~ TestReadOnlyMemoryRegion ( ) override = default ; <nl> + TestReadOnlyMemoryRegion ( const void * data , uint64 length ) <nl> + : data_ ( data ) , length_ ( length ) { } <nl> + const void * data ( ) override { return data_ ; } <nl> + uint64 length ( ) override { return length_ ; } <nl> + <nl> + protected : <nl> + const void * data_ ; <nl> + uint64 length_ ; <nl> + } ; <nl> + <nl> + class TestTFFileSystem : public : : tensorflow : : NullFileSystem { <nl> + public : <nl> + TestTFFileSystem ( ) <nl> + : : : tensorflow : : NullFileSystem ( ) , <nl> + data_tensor_ ( test : : AsTensor < double > ( { 1 . , 2 . , 3 . , 4 . } , { 2 , 2 } ) ) { } <nl> + <nl> + : : tensorflow : : Status NewReadOnlyMemoryRegionFromFile ( <nl> + const string & fname , <nl> + std : : unique_ptr < : : tensorflow : : ReadOnlyMemoryRegion > * result ) override { <nl> + if ( fname ! = kTestMemRegionName ) { <nl> + return : : tensorflow : : errors : : Unimplemented ( <nl> + " NewReadOnlyMemoryRegionFromFile unimplemented " ) ; <nl> + } <nl> + const : : tensorflow : : StringPiece sp = data_tensor_ . tensor_data ( ) ; <nl> + * result = std : : unique_ptr < : : tensorflow : : ReadOnlyMemoryRegion > ( <nl> + new TestReadOnlyMemoryRegion ( sp . data ( ) , sp . size ( ) ) ) ; <nl> + return : : tensorflow : : Status : : OK ( ) ; <nl> + } <nl> + <nl> + protected : <nl> + : : tensorflow : : Tensor data_tensor_ ; <nl> + } ; <nl> + <nl> + / / A test TF environment that checks that the environment was used . 
<nl> + class TestTFEnvironment : public : : tensorflow : : EnvWrapper { <nl> + public : <nl> + using tf_base = : : tensorflow : : EnvWrapper ; <nl> + TestTFEnvironment ( ) : : : tensorflow : : EnvWrapper ( Default ( ) ) { } <nl> + : : tensorflow : : Status GetFileSystemForFile ( <nl> + const string & fname , : : tensorflow : : FileSystem * * result ) override { <nl> + was_used_ = true ; <nl> + if ( fname = = " test : / / test " ) { <nl> + * result = & test_filesystem_ ; <nl> + return : : tensorflow : : Status : : OK ( ) ; <nl> + } <nl> + return tf_base : : GetFileSystemForFile ( fname , result ) ; <nl> + } <nl> + bool was_used ( ) const { return was_used_ ; } <nl> + <nl> + protected : <nl> + TestTFFileSystem test_filesystem_ ; <nl> + bool was_used_ = false ; <nl> + } ; <nl> + } / / namespace <nl> + <nl> + TEST_F ( ConstantFoldingTest , TestImmutableConst ) { <nl> + Reset ( ) ; <nl> + Graph * g = g_ . get ( ) ; <nl> + Scope root = Scope : : NewRootScope ( ) ; <nl> + <nl> + auto a = ops : : ImmutableConst ( root , DT_DOUBLE , { 2 , 2 } , kTestMemRegionName ) ; <nl> + auto b = ops : : Const < double > ( root , { 1 . 0 , 2 . 0 , 3 . 0 , 4 . 0 } , { 2 , 2 } ) ; <nl> + auto c = ops : : RandomGamma ( root , { 2 , 2 } , 2 . 0 ) ; <nl> + auto result1 = ops : : MatMul ( root , a , b ) ; <nl> + auto result2 = ops : : MatMul ( root , result1 , c ) ; <nl> + TF_ASSERT_OK ( root . ToGraph ( g ) ) ; <nl> + TestTFEnvironment test_env ; <nl> + EXPECT_FALSE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , <nl> + Env : : Default ( ) , nullptr , g ) ) ; <nl> + EXPECT_TRUE ( DoConstantFolding ( ConstantFoldingOptions { } , nullptr , & test_env , <nl> + nullptr , g ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / common_runtime / direct_session . cc <nl> ppp b / tensorflow / core / common_runtime / direct_session . cc <nl> Status DirectSession : : GetOrCreateExecutors ( <nl> <nl> ek - > items . resize ( ek - > items . size ( ) + 1 ) ; <nl> auto * item = & ( ek - > items . back ( ) ) ; <nl> - item - > flib . reset ( <nl> - NewFunctionLibraryRuntime ( device_mgr_ . get ( ) , device , graph_def_version , <nl> - ek - > flib_def . get ( ) , optimizer_opts ) ) ; <nl> + item - > flib . reset ( NewFunctionLibraryRuntime ( <nl> + device_mgr_ . get ( ) , options_ . env , device , graph_def_version , <nl> + ek - > flib_def . get ( ) , optimizer_opts ) ) ; <nl> <nl> LocalExecutorParams params ; <nl> params . device = device ; <nl> Status DirectSession : : GetOrCreateExecutors ( <nl> params . node_outputs_cb = node_outputs_callback_ ; <nl> <nl> partition_graph = iter - > second . release ( ) ; <nl> - optimizer . Optimize ( lib , device , & partition_graph ) ; <nl> + optimizer . Optimize ( lib , options_ . env , device , & partition_graph ) ; <nl> <nl> / / EXPERIMENTAL : tfdb inserts debug nodes ( i . e . , probes ) to the graph <nl> if ( ! run_state_args - > debug_tensor_watches . empty ( ) ) { <nl> mmm a / tensorflow / core / common_runtime / function . cc <nl> ppp b / tensorflow / core / common_runtime / function . 
cc <nl> static const FunctionLibraryRuntime : : Handle kInvalidHandle = - 1 ; <nl> <nl> class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime { <nl> public : <nl> - FunctionLibraryRuntimeImpl ( const DeviceMgr * dmgr , Device * device , <nl> + FunctionLibraryRuntimeImpl ( const DeviceMgr * dmgr , Env * env , Device * device , <nl> int graph_def_version , <nl> const FunctionLibraryDefinition * lib_def , <nl> const OptimizerOptions & optimizer_options ) ; <nl> class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime { <nl> } <nl> <nl> Device * device ( ) override { return device_ ; } <nl> + Env * env ( ) override { return env_ ; } <nl> <nl> private : <nl> typedef FunctionLibraryRuntimeImpl ME ; <nl> <nl> const DeviceMgr * const device_mgr_ ; <nl> Device * const device_ ; <nl> + Env * const env_ ; <nl> const int graph_def_version_ ; <nl> const FunctionLibraryDefinition * const lib_def_ ; <nl> GraphOptimizer optimizer_ ; <nl> class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime { <nl> } ; <nl> <nl> FunctionLibraryRuntimeImpl : : FunctionLibraryRuntimeImpl ( <nl> - const DeviceMgr * dmgr , Device * device , int graph_def_version , <nl> + const DeviceMgr * dmgr , Env * env , Device * device , int graph_def_version , <nl> const FunctionLibraryDefinition * lib_def , <nl> const OptimizerOptions & optimizer_options ) <nl> : device_mgr_ ( dmgr ) , <nl> device_ ( device ) , <nl> + env_ ( env ) , <nl> graph_def_version_ ( graph_def_version ) , <nl> lib_def_ ( lib_def ) , <nl> optimizer_ ( optimizer_options ) { <nl> void OptimizeGraph ( FunctionLibraryRuntime * lib , Graph * * g ) { <nl> opts . set_do_function_inlining ( true ) ; <nl> opts . set_do_constant_folding ( true ) ; <nl> GraphOptimizer optimizer ( opts ) ; <nl> - optimizer . Optimize ( lib , lib - > device ( ) , g ) ; <nl> + optimizer . Optimize ( lib , lib - > env ( ) , lib - > device ( ) , g ) ; <nl> } <nl> <nl> Status FunctionLibraryRuntimeImpl : : CreateItem ( Handle handle , Item * * item ) { <nl> Status FunctionLibraryRuntimeImpl : : CreateItem ( Handle handle , Item * * item ) { <nl> Graph * g = new Graph ( lib_def_ ) ; <nl> CopyGraph ( * fbody - > graph , g ) ; <nl> <nl> - optimizer_ . Optimize ( this , device ( ) , & g ) ; <nl> + optimizer_ . Optimize ( this , env ( ) , device ( ) , & g ) ; <nl> auto s = EnsureMemoryTypes ( DeviceType ( device ( ) - > device_type ( ) ) , <nl> device ( ) - > name ( ) , g ) ; <nl> if ( ! s . ok ( ) ) { <nl> bool FunctionLibraryRuntimeImpl : : IsStateful ( const string & func ) { <nl> } <nl> <nl> FunctionLibraryRuntime * NewFunctionLibraryRuntime ( <nl> - const DeviceMgr * dmgr , Device * device , int graph_def_version , <nl> + const DeviceMgr * dmgr , Env * env , Device * device , int graph_def_version , <nl> const FunctionLibraryDefinition * lib_def , <nl> const OptimizerOptions & optimizer_options ) { <nl> - return new FunctionLibraryRuntimeImpl ( dmgr , device , graph_def_version , <nl> + return new FunctionLibraryRuntimeImpl ( dmgr , env , device , graph_def_version , <nl> lib_def , optimizer_options ) ; <nl> } <nl> <nl> mmm a / tensorflow / core / common_runtime / function . h <nl> ppp b / tensorflow / core / common_runtime / function . h <nl> namespace tensorflow { <nl> / / " lib_def " . The caller must ensure " device " and " lib_def " outlives <nl> / / the returned object . 
<nl> FunctionLibraryRuntime * NewFunctionLibraryRuntime ( <nl> - const DeviceMgr * device_mgr , Device * device , int graph_def_version , <nl> - const FunctionLibraryDefinition * lib_def , <nl> + const DeviceMgr * device_mgr , Env * env , Device * device , <nl> + int graph_def_version , const FunctionLibraryDefinition * lib_def , <nl> const OptimizerOptions & optimizer_options ) ; <nl> <nl> / / FunctionLibraryRuntime : : GetFunctionBody returns a description of an <nl> mmm a / tensorflow / core / common_runtime / function_test . cc <nl> ppp b / tensorflow / core / common_runtime / function_test . cc <nl> class FunctionLibraryRuntimeTest : public : : testing : : Test { <nl> lib_def_ = new FunctionLibraryDefinition ( OpRegistry : : Global ( ) , proto ) ; <nl> delete lib_ ; <nl> OptimizerOptions opts ; <nl> - lib_ = NewFunctionLibraryRuntime ( nullptr , device_ , TF_GRAPH_DEF_VERSION , <nl> - lib_def_ , opts ) ; <nl> + lib_ = NewFunctionLibraryRuntime ( nullptr , Env : : Default ( ) , device_ , <nl> + TF_GRAPH_DEF_VERSION , lib_def_ , opts ) ; <nl> } <nl> <nl> Status Run ( const string & name , InstantiateAttrValueSlice attrs , <nl> mmm a / tensorflow / core / common_runtime / graph_optimizer . cc <nl> ppp b / tensorflow / core / common_runtime / graph_optimizer . cc <nl> GraphOptimizer : : GraphOptimizer ( const OptimizerOptions & opts ) : opts_ ( opts ) { <nl> <nl> GraphOptimizer : : ~ GraphOptimizer ( ) { } <nl> <nl> - void GraphOptimizer : : Optimize ( FunctionLibraryRuntime * runtime , Device * device , <nl> - Graph * * graph ) { <nl> + void GraphOptimizer : : Optimize ( FunctionLibraryRuntime * runtime , Env * env , <nl> + Device * device , Graph * * graph ) { <nl> Graph * g = * graph ; <nl> for ( const Node * n : g - > nodes ( ) ) { <nl> if ( n - > IsControlFlow ( ) ) { <nl> void GraphOptimizer : : Optimize ( FunctionLibraryRuntime * runtime , Device * device , <nl> <nl> if ( opts_ . do_constant_folding ( ) ) { <nl> ConstantFoldingOptions cf_opts ; <nl> - if ( DoConstantFolding ( cf_opts , runtime , device , g ) ) { <nl> + if ( DoConstantFolding ( cf_opts , runtime , env , device , g ) ) { <nl> RemoveDeadNodes ( g ) ; <nl> DumpGraph ( " ConstFolding " , g ) ; <nl> changed = true ; <nl> mmm a / tensorflow / core / common_runtime / graph_optimizer . h <nl> ppp b / tensorflow / core / common_runtime / graph_optimizer . h <nl> limitations under the License . <nl> # include " tensorflow / core / framework / function . h " <nl> # include " tensorflow / core / graph / graph . h " <nl> # include " tensorflow / core / lib / core / status . h " <nl> + # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / core / protobuf / config . pb . h " <nl> <nl> namespace tensorflow { <nl> class GraphOptimizer { <nl> / / ' device ' is device on which the ' graph ' will execute . It ' s passed to the <nl> / / optimizers so that they can respect constraints if any , that should be <nl> / / respected . <nl> - void Optimize ( FunctionLibraryRuntime * runtime , Device * device , Graph * * graph ) ; <nl> + void Optimize ( FunctionLibraryRuntime * runtime , Env * env , Device * device , <nl> + Graph * * graph ) ; <nl> <nl> private : <nl> OptimizerOptions opts_ ; <nl> mmm a / tensorflow / core / distributed_runtime / graph_mgr . cc <nl> ppp b / tensorflow / core / distributed_runtime / graph_mgr . cc <nl> Status GraphMgr : : InitItem ( const string & session , const GraphDef & gdef , <nl> <nl> / / Function library runtime . 
<nl> unit - > lib = NewFunctionLibraryRuntime ( <nl> - worker_env_ - > device_mgr , unit - > device , def - > versions ( ) . producer ( ) , <nl> - item - > lib_def , graph_options . optimizer_options ( ) ) ; <nl> + worker_env_ - > device_mgr , worker_env_ - > env , unit - > device , <nl> + def - > versions ( ) . producer ( ) , item - > lib_def , <nl> + graph_options . optimizer_options ( ) ) ; <nl> <nl> / / Construct the root executor for the subgraph . <nl> params . device = unit - > device ; <nl> Status GraphMgr : : InitItem ( const string & session , const GraphDef & gdef , <nl> } <nl> } ; <nl> <nl> - optimizer . Optimize ( lib , params . device , & subgraph ) ; <nl> + optimizer . Optimize ( lib , worker_env_ - > env , params . device , & subgraph ) ; <nl> s = EnsureMemoryTypes ( DeviceType ( unit - > device - > device_type ( ) ) , <nl> unit - > device - > name ( ) , subgraph ) ; <nl> if ( ! s . ok ( ) ) { <nl> mmm a / tensorflow / core / framework / function . h <nl> ppp b / tensorflow / core / framework / function . h <nl> limitations under the License . <nl> # include " tensorflow / core / framework / op . h " <nl> # include " tensorflow / core / framework / selective_registration . h " <nl> # include " tensorflow / core / framework / types . h " <nl> + # include " tensorflow / core / platform / env . h " <nl> # include " tensorflow / core / platform / macros . h " <nl> # include " tensorflow / core / platform / protobuf . h " <nl> <nl> class FunctionLibraryRuntime { <nl> / / Returns the function library definition that backs this runtime . <nl> virtual const FunctionLibraryDefinition * GetFunctionLibraryDefinition ( ) <nl> const = 0 ; <nl> + <nl> + / / Return the environment on which the function executes . <nl> + virtual Env * env ( ) = 0 ; <nl> } ; <nl> <nl> / / To register a gradient function for a builtin op , one should use <nl>
Using Session ' s environment in optimization functions
tensorflow/tensorflow
bfb577a15b055abf9a239a1114dfe1bd26c67234
2016-08-11T16:18:20Z
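The commit above threads an explicit `Env*` through `DoConstantFolding`, `GraphOptimizer::Optimize`, and `NewFunctionLibraryRuntime` instead of reaching for `Env::Default()`, which is what lets `TestTFEnvironment` verify that its custom file system was consulted. A minimal Python sketch of the same dependency-injection move, with all names hypothetical:

```python
class DefaultEnv:
    """Stand-in for the process-wide default environment."""
    def get_filesystem(self, fname: str) -> str:
        return "posix"

class TestEnv(DefaultEnv):
    """Test double that records whether it was consulted."""
    def __init__(self):
        self.was_used = False

    def get_filesystem(self, fname: str) -> str:
        self.was_used = True
        if fname.startswith("test://"):
            return "in-memory"
        return super().get_filesystem(fname)

# Before the change the folding pass used a global default; afterwards the
# caller passes the environment in, so tests can substitute their own.
def do_constant_folding(graph, env: DefaultEnv) -> str:
    return env.get_filesystem("test://test")

env = TestEnv()
assert do_constant_folding(graph=None, env=env) == "in-memory"
assert env.was_used
```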
mmm a / arangod / GeneralServer / RestHandlerFactory . cpp <nl> ppp b / arangod / GeneralServer / RestHandlerFactory . cpp <nl> RestHandler * RestHandlerFactory : : createHandler ( <nl> path . find ( " / _admin / cluster / health " ) = = std : : string : : npos & & <nl> path . find ( " / _admin / server / role " ) = = std : : string : : npos & & <nl> path . find ( " / _admin / server / availability " ) = = std : : string : : npos & & <nl> + path . find ( " / _admin / status " ) = = std : : string : : npos & & <nl> path . find ( " / _api / agency / agency - callbacks " ) = = std : : string : : npos & & <nl> path . find ( " / _api / cluster / " ) = = std : : string : : npos & & <nl> path . find ( " / _api / replication " ) = = std : : string : : npos & & <nl>
allow accessing " / _admin / status " on follower ( )
arangodb/arangodb
85b89bc8fe8a4ad59ce2a0cbf71a7a21ef82ec04
2018-06-01T12:40:43Z
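The one-line change above adds `/_admin/status` to a hand-rolled allowlist of path substrings a follower may serve, implemented as a chain of `path.find(...) == npos` checks. The same intent, sketched as a substring allowlist (paths copied from the hunk; the helper name is invented):

```python
FOLLOWER_ALLOWED = [
    "/_admin/cluster/health",
    "/_admin/server/role",
    "/_admin/server/availability",
    "/_admin/status",              # newly allowed by this commit
    "/_api/agency/agency-callbacks",
    "/_api/cluster/",
    "/_api/replication",
]

def follower_may_serve(path: str) -> bool:
    # Mirrors the substring matching of std::string::find in the hunk.
    return any(allowed in path for allowed in FOLLOWER_ALLOWED)

assert follower_may_serve("/_db/_system/_admin/status")
assert not follower_may_serve("/_api/version")
```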
mmm a / dbms / src / Formats / CapnProtoRowInputStream . cpp <nl> ppp b / dbms / src / Formats / CapnProtoRowInputStream . cpp <nl> void CapnProtoRowInputStream : : createActions ( const NestedFieldList & sortedFields <nl> { <nl> / / The field list here flattens Nested elements into multiple arrays <nl> / / In order to map Nested types in Cap ' nProto back , they need to be collected <nl> - actions . back ( ) . columns . push_back ( field . pos ) ; <nl> + / / Since the field names are sorted , the order of field positions must be preserved <nl> + / / For example , if the fields are { b @ 0 : Text , a @ 1 : Text } , the ` a ` would come first <nl> + / / even though its position is second . <nl> + auto & columns = actions . back ( ) . columns ; <nl> + auto it = std : : upper_bound ( columns . cbegin ( ) , columns . cend ( ) , field . pos ) ; <nl> + columns . insert ( it , field . pos ) ; <nl> } <nl> else <nl> { <nl>
Merge pull request from vavrusa / fix - capnp - array - struct - mismatch
ClickHouse/ClickHouse
8c9f13e726c0d5fd70b2272040a4ee640faa854a
2018-10-04T23:07:08Z
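The fix above keeps each action's column list ordered by field position even though Cap'n Proto hands fields over sorted by name; `std::upper_bound` plus `insert` is the sorted-insertion idiom. The same invariant in Python via `bisect.insort`, using the `{b @0, a @1}` example from the comment:

```python
import bisect

# Fields arrive sorted by name, so `a` (position 1) precedes `b` (position 0).
fields_sorted_by_name = [("a", 1), ("b", 0)]

columns = []
for _name, pos in fields_sorted_by_name:
    bisect.insort(columns, pos)   # insert while keeping positions ordered

assert columns == [0, 1]          # position order preserved despite name order
```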
mmm a / src / openalpr / segmentation / charactersegmenter . cpp <nl> ppp b / src / openalpr / segmentation / charactersegmenter . cpp <nl> CharacterSegmenter : : CharacterSegmenter ( PipelineData * pipeline_data ) <nl> pipeline_data - > clearThresholds ( ) ; <nl> pipeline_data - > thresholds = produceThresholds ( pipeline_data - > crop_gray , config ) ; <nl> <nl> - <nl> + / / TODO : Perhaps a bilateral filter would be better here . <nl> medianBlur ( pipeline_data - > crop_gray , pipeline_data - > crop_gray , 3 ) ; <nl> <nl> if ( this - > config - > debugCharSegmenter ) <nl> CharacterSegmenter : : CharacterSegmenter ( PipelineData * pipeline_data ) <nl> / / imgDbgGeneral . push_back ( bordered ) ; <nl> / / } <nl> <nl> - <nl> + <nl> <nl> for ( uint lineidx = 0 ; lineidx < pipeline_data - > textLines . size ( ) ; lineidx + + ) <nl> { <nl> this - > top = pipeline_data - > textLines [ lineidx ] . topLine ; <nl> this - > bottom = pipeline_data - > textLines [ lineidx ] . bottomLine ; <nl> <nl> - <nl> float avgCharHeight = pipeline_data - > textLines [ lineidx ] . lineHeight ; <nl> float height_to_width_ratio = pipeline_data - > config - > charHeightMM / pipeline_data - > config - > charWidthMM ; <nl> float avgCharWidth = avgCharHeight / height_to_width_ratio ; <nl> - / / float avgCharWidth = median ( charWidths . data ( ) , charWidths . size ( ) ) ; <nl> <nl> - / / removeSmallContours ( pipeline_data - > thresholds , charAnalysis - > allTextContours , avgCharWidth , avgCharHeight ) ; <nl> + removeSmallContours ( pipeline_data - > thresholds , avgCharHeight , pipeline_data - > textLines [ lineidx ] ) ; <nl> <nl> / / Do the histogram analysis to figure out char regions <nl> <nl> vector < Rect > CharacterSegmenter : : get1DHits ( Mat img , int yOffset ) <nl> return hits ; <nl> } <nl> <nl> - / / void CharacterSegmenter : : removeSmallContours ( vector < Mat > thresholds , vector < TextContours > contours , float avgCharWidth , float avgCharHeight ) <nl> - / / { <nl> - / / / / const float MIN_CHAR_AREA = 0 . 02 * avgCharWidth * avgCharHeight ; / / To clear out the tiny specks <nl> - / / const float MIN_CONTOUR_HEIGHT = 0 . 3 * avgCharHeight ; <nl> - / / <nl> - / / for ( uint i = 0 ; i < thresholds . size ( ) ; i + + ) <nl> - / / { <nl> - / / for ( uint c = 0 ; c < contours [ i ] . contours . size ( ) ; c + + ) <nl> - / / { <nl> - / / if ( contours [ i ] . contours [ c ] . size ( ) = = 0 ) <nl> - / / continue ; <nl> - / / <nl> - / / Rect mr = boundingRect ( contours [ i ] . contours [ c ] ) ; <nl> - / / if ( mr . height < MIN_CONTOUR_HEIGHT ) <nl> - / / { <nl> - / / / / Erase it <nl> - / / drawContours ( thresholds [ i ] , contours [ i ] . contours , c , Scalar ( 0 , 0 , 0 ) , - 1 ) ; <nl> - / / continue ; <nl> - / / } <nl> - / / } <nl> - / / } <nl> - / / } <nl> + void CharacterSegmenter : : removeSmallContours ( vector < Mat > thresholds , float avgCharHeight , TextLine textLine ) <nl> + { <nl> + / / const float MIN_CHAR_AREA = 0 . 02 * avgCharWidth * avgCharHeight ; / / To clear out the tiny specks <nl> + const float MIN_CONTOUR_HEIGHT = 0 . 3 * avgCharHeight ; <nl> + <nl> + Mat textLineMask = Mat : : zeros ( thresholds [ 0 ] . size ( ) , CV_8U ) ; <nl> + fillConvexPoly ( textLineMask , textLine . linePolygon . data ( ) , textLine . linePolygon . size ( ) , Scalar ( 255 , 255 , 255 ) ) ; <nl> + <nl> + for ( uint i = 0 ; i < thresholds . 
size ( ) ; i + + ) <nl> + { <nl> + vector < vector < Point > > contours ; <nl> + vector < Vec4i > hierarchy ; <nl> + Mat thresholdsCopy = Mat : : zeros ( thresholds [ i ] . size ( ) , thresholds [ i ] . type ( ) ) ; <nl> + <nl> + thresholds [ i ] . copyTo ( thresholdsCopy , textLineMask ) ; <nl> + findContours ( thresholdsCopy , contours , hierarchy , CV_RETR_TREE , CV_CHAIN_APPROX_SIMPLE ) ; <nl> + <nl> + for ( uint c = 0 ; c < contours . size ( ) ; c + + ) <nl> + { <nl> + if ( contours [ c ] . size ( ) = = 0 ) <nl> + continue ; <nl> + <nl> + Rect mr = boundingRect ( contours [ c ] ) ; <nl> + if ( mr . height < MIN_CONTOUR_HEIGHT ) <nl> + { <nl> + / / Erase it <nl> + drawContours ( thresholds [ i ] , contours , c , Scalar ( 0 , 0 , 0 ) , - 1 ) ; <nl> + continue ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> <nl> vector < Rect > CharacterSegmenter : : combineCloseBoxes ( vector < Rect > charBoxes , float biggestCharWidth ) <nl> { <nl> mmm a / src / openalpr / segmentation / charactersegmenter . h <nl> ppp b / src / openalpr / segmentation / charactersegmenter . h <nl> class CharacterSegmenter <nl> <nl> cv : : Mat getCharBoxMask ( cv : : Mat img_threshold , std : : vector < cv : : Rect > charBoxes ) ; <nl> <nl> - void removeSmallContours ( std : : vector < cv : : Mat > thresholds , std : : vector < TextContours > contours , float avgCharWidth , float avgCharHeight ) ; <nl> + void removeSmallContours ( std : : vector < cv : : Mat > thresholds , float avgCharHeight , TextLine textLine ) ; <nl> <nl> std : : vector < cv : : Rect > getHistogramBoxes ( VerticalHistogram histogram , float avgCharWidth , float avgCharHeight , float * score ) ; <nl> std : : vector < cv : : Rect > getBestCharBoxes ( cv : : Mat img , std : : vector < cv : : Rect > charBoxes , float avgCharWidth ) ; <nl>
Re - added despeckle filter
openalpr/openalpr
b47251e9af0b07a0b73e01db804f2709e7cc6de5
2014-10-21T03:48:32Z
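The re-added `removeSmallContours` masks each threshold image to the text-line polygon, finds contours, and erases any whose bounding box is shorter than 30% of the average character height. A minimal sketch of the erase step with the OpenCV 4 Python bindings (the synthetic image stands in for a plate crop; requires opencv-python and numpy):

```python
import cv2
import numpy as np

threshold = np.zeros((60, 200), np.uint8)
cv2.rectangle(threshold, (10, 10), (25, 50), 255, -1)  # tall blob: a character
cv2.rectangle(threshold, (40, 28), (48, 33), 255, -1)  # short blob: a speck

avg_char_height = 40.0
MIN_CONTOUR_HEIGHT = 0.3 * avg_char_height

contours, _ = cv2.findContours(threshold.copy(), cv2.RETR_TREE,
                               cv2.CHAIN_APPROX_SIMPLE)
for c in range(len(contours)):
    _, _, _, h = cv2.boundingRect(contours[c])
    if h < MIN_CONTOUR_HEIGHT:
        cv2.drawContours(threshold, contours, c, 0, -1)  # erase the speck

assert threshold[30, 44] == 0    # speck removed
assert threshold[30, 15] == 255  # character kept
```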
mmm a / include / swift / AST / GenericSignatureBuilder . h <nl> ppp b / include / swift / AST / GenericSignatureBuilder . h <nl> class GenericSignatureBuilder { <nl> <nl> public : <nl> / / / Construct a new generic signature builder . <nl> - / / / <nl> - / / / \ param lookupConformance Conformance - lookup routine that will be used <nl> - / / / to satisfy conformance requirements for concrete types . <nl> - explicit GenericSignatureBuilder ( ASTContext & ctx , <nl> - std : : function < GenericFunction > lookupConformance ) ; <nl> - <nl> + explicit GenericSignatureBuilder ( ASTContext & ctx ) ; <nl> GenericSignatureBuilder ( GenericSignatureBuilder & & ) ; <nl> ~ GenericSignatureBuilder ( ) ; <nl> <nl> / / / Retrieve the AST context . <nl> ASTContext & getASTContext ( ) const { return Context ; } <nl> <nl> - / / / Retrieve the conformance - lookup function used by this generic signature builder . <nl> - std : : function < GenericFunction > getLookupConformanceFn ( ) const ; <nl> + / / / Functor class suitable for use as a \ c LookupConformanceFn to look up a <nl> + / / / conformance in a generic signature builder . <nl> + class LookUpConformanceInBuilder { <nl> + GenericSignatureBuilder * builder ; <nl> + public : <nl> + explicit LookUpConformanceInBuilder ( GenericSignatureBuilder * builder ) <nl> + : builder ( builder ) { } <nl> + <nl> + Optional < ProtocolConformanceRef > <nl> + operator ( ) ( CanType dependentType , <nl> + Type conformingReplacementType , <nl> + ProtocolType * conformedProtocol ) const { <nl> + return builder - > lookupConformance ( dependentType , <nl> + conformingReplacementType , <nl> + conformedProtocol ) ; <nl> + } <nl> + } ; <nl> + <nl> + / / / Retrieve a function that can perform conformance lookup for this <nl> + / / / builder . <nl> + LookUpConformanceInBuilder getLookupConformanceFn ( ) ; <nl> + <nl> + / / / Lookup a protocol conformance in a module - agnostic manner . <nl> + Optional < ProtocolConformanceRef > <nl> + lookupConformance ( CanType dependentType , Type conformingReplacementType , <nl> + ProtocolType * conformedProtocol ) ; <nl> + <nl> <nl> / / / Retrieve the lazy resolver , if there is one . <nl> LazyResolver * getLazyResolver ( ) const ; <nl> mmm a / lib / AST / ASTContext . cpp <nl> ppp b / lib / AST / ASTContext . cpp <nl> GenericSignatureBuilder * ASTContext : : getOrCreateGenericSignatureBuilder ( <nl> return known - > second . get ( ) ; <nl> <nl> / / Create a new generic signature builder with the given signature . <nl> - auto builder = <nl> - new GenericSignatureBuilder ( * this , LookUpConformanceInModule ( mod ) ) ; <nl> + auto builder = new GenericSignatureBuilder ( * this ) ; <nl> <nl> / / Store this generic signature builder ( no generic environment yet ) . <nl> Impl . GenericSignatureBuilders [ { sig , mod } ] = <nl> CanGenericSignature ASTContext : : getExistentialSignature ( CanType existential , <nl> <nl> assert ( existential . isExistentialType ( ) ) ; <nl> <nl> - GenericSignatureBuilder builder ( * this , LookUpConformanceInModule ( mod ) ) ; <nl> + GenericSignatureBuilder builder ( * this ) ; <nl> <nl> auto genericParam = GenericTypeParamType : : get ( 0 , 0 , * this ) ; <nl> builder . addGenericParameter ( genericParam ) ; <nl> mmm a / lib / AST / Builtins . cpp <nl> ppp b / lib / AST / Builtins . cpp <nl> namespace { <nl> TheGenericParamList = getGenericParams ( ctx , numGenericParams , <nl> GenericTypeParams ) ; <nl> <nl> - GenericSignatureBuilder Builder ( ctx , <nl> - LookUpConformanceInModule ( ctx . 
TheBuiltinModule ) ) ; <nl> + GenericSignatureBuilder Builder ( ctx ) ; <nl> for ( auto gp : GenericTypeParams ) { <nl> Builder . addGenericParameter ( gp ) ; <nl> } <nl> mmm a / lib / AST / GenericSignatureBuilder . cpp <nl> ppp b / lib / AST / GenericSignatureBuilder . cpp <nl> struct GenericSignatureBuilder : : Implementation { <nl> / / / Allocator . <nl> llvm : : BumpPtrAllocator Allocator ; <nl> <nl> - / / / Function used to look up conformances . <nl> - std : : function < GenericFunction > LookupConformance ; <nl> - <nl> / / / The generic parameters that this generic signature builder is working <nl> / / / with . <nl> SmallVector < GenericTypeParamType * , 4 > GenericParams ; <nl> GenericSignatureBuilder : : resolveConcreteConformance ( PotentialArchetype * pa , <nl> <nl> / / Lookup the conformance of the concrete type to this protocol . <nl> auto conformance = <nl> - getLookupConformanceFn ( ) ( pa - > getDependentType ( { } ) - > getCanonicalType ( ) , <nl> - concrete , <nl> - proto - > getDeclaredInterfaceType ( ) <nl> - - > castTo < ProtocolType > ( ) ) ; <nl> + lookupConformance ( pa - > getDependentType ( { } ) - > getCanonicalType ( ) , <nl> + concrete , <nl> + proto - > getDeclaredInterfaceType ( ) <nl> + - > castTo < ProtocolType > ( ) ) ; <nl> if ( ! conformance ) { <nl> if ( ! concrete - > hasError ( ) & & concreteSource - > getLoc ( ) . isValid ( ) ) { <nl> Impl - > HadAnyError = true ; <nl> const RequirementSource * GenericSignatureBuilder : : resolveSuperConformance ( <nl> <nl> / / Lookup the conformance of the superclass to this protocol . <nl> auto conformance = <nl> - getLookupConformanceFn ( ) ( pa - > getDependentType ( { } ) - > getCanonicalType ( ) , <nl> - superclass , <nl> - proto - > getDeclaredInterfaceType ( ) <nl> - - > castTo < ProtocolType > ( ) ) ; <nl> + lookupConformance ( pa - > getDependentType ( { } ) - > getCanonicalType ( ) , <nl> + superclass , <nl> + proto - > getDeclaredInterfaceType ( ) <nl> + - > castTo < ProtocolType > ( ) ) ; <nl> if ( ! conformance ) return nullptr ; <nl> <nl> / / Conformance to this protocol is redundant ; update the requirement source <nl> void EquivalenceClass : : modified ( GenericSignatureBuilder & builder ) { <nl> } <nl> <nl> GenericSignatureBuilder : : GenericSignatureBuilder ( <nl> - ASTContext & ctx , <nl> - std : : function < GenericFunction > lookupConformance ) <nl> + ASTContext & ctx ) <nl> : Context ( ctx ) , Diags ( Context . Diags ) , Impl ( new Implementation ) { <nl> - Impl - > LookupConformance = std : : move ( lookupConformance ) ; <nl> if ( Context . Stats ) <nl> Context . Stats - > getFrontendCounters ( ) . 
NumGenericSignatureBuilders + + ; <nl> } <nl> GenericSignatureBuilder : : GenericSignatureBuilder ( <nl> <nl> GenericSignatureBuilder : : ~ GenericSignatureBuilder ( ) = default ; <nl> <nl> - std : : function < GenericFunction > <nl> - GenericSignatureBuilder : : getLookupConformanceFn ( ) const { <nl> - return Impl - > LookupConformance ; <nl> + auto <nl> + GenericSignatureBuilder : : getLookupConformanceFn ( ) <nl> + - > LookUpConformanceInBuilder { <nl> + return LookUpConformanceInBuilder ( this ) ; <nl> + } <nl> + <nl> + Optional < ProtocolConformanceRef > <nl> + GenericSignatureBuilder : : lookupConformance ( CanType dependentType , <nl> + Type conformingReplacementType , <nl> + ProtocolType * conformedProtocol ) { <nl> + if ( conformingReplacementType - > isTypeParameter ( ) ) <nl> + return ProtocolConformanceRef ( conformedProtocol - > getDecl ( ) ) ; <nl> + <nl> + / / Figure out which module to look into . <nl> + / / FIXME : When lookupConformance ( ) starts respecting modules , we ' ll need <nl> + / / to do some filtering here . <nl> + ModuleDecl * searchModule = conformedProtocol - > getDecl ( ) - > getParentModule ( ) ; <nl> + return searchModule - > lookupConformance ( conformingReplacementType , <nl> + conformedProtocol - > getDecl ( ) ) ; <nl> } <nl> <nl> LazyResolver * GenericSignatureBuilder : : getLazyResolver ( ) const { <nl> GenericSignature * GenericSignatureBuilder : : computeGenericSignature ( <nl> / / over - minimizing . <nl> if ( allowBuilderToMove & & ! Impl - > HadAnyError & & <nl> ! Impl - > HadAnyRedundantConstraints ) { <nl> - / / Set the conformance lookup function to something that works canonically . <nl> - Impl - > LookupConformance = LookUpConformanceInModule ( & module ) ; <nl> - <nl> / / Register this generic signature builder as the canonical builder for the <nl> / / given signature . <nl> Context . registerGenericSignatureBuilder ( sig , module , std : : move ( * this ) ) ; <nl> GenericSignature * GenericSignatureBuilder : : computeGenericSignature ( <nl> GenericSignature * GenericSignatureBuilder : : computeRequirementSignature ( <nl> ProtocolDecl * proto ) { <nl> auto module = proto - > getParentModule ( ) ; <nl> - GenericSignatureBuilder builder ( proto - > getASTContext ( ) , <nl> - LookUpConformanceInModule ( module ) ) ; <nl> + GenericSignatureBuilder builder ( proto - > getASTContext ( ) ) ; <nl> <nl> / / Add the ' self ' parameter . <nl> auto selfType = <nl> mmm a / lib / ClangImporter / ImportDecl . cpp <nl> ppp b / lib / ClangImporter / ImportDecl . cpp <nl> DeclContext * ClangImporter : : Implementation : : importDeclContextImpl ( <nl> <nl> GenericSignature * ClangImporter : : Implementation : : buildGenericSignature ( <nl> GenericParamList * genericParams , DeclContext * dc ) { <nl> - GenericSignatureBuilder builder ( SwiftContext , <nl> - LookUpConformanceInModule ( dc - > getParentModule ( ) ) ) ; <nl> + GenericSignatureBuilder builder ( SwiftContext ) ; <nl> SmallVector < GenericTypeParamType * , 4 > allGenericParams ; <nl> for ( auto param : * genericParams ) { <nl> builder . addGenericParameter ( param ) ; <nl> mmm a / lib / SILGen / SILGenPoly . cpp <nl> ppp b / lib / SILGen / SILGenPoly . cpp <nl> buildThunkSignature ( SILGenFunction & SGF , <nl> return genericSig ; <nl> } <nl> <nl> - GenericSignatureBuilder builder ( ctx , LookUpConformanceInModule ( mod ) ) ; <nl> + GenericSignatureBuilder builder ( ctx ) ; <nl> <nl> / / Add the existing generic signature . <nl> int depth = 0 ; <nl> mmm a / lib / SILOptimizer / Utils / Generics . 
cpp <nl> ppp b / lib / SILOptimizer / Utils / Generics . cpp <nl> getGenericEnvironmentAndSignatureWithRequirements ( <nl> GenericSignature * OrigGenSig , GenericEnvironment * OrigGenericEnv , <nl> ArrayRef < Requirement > Requirements , SILModule & M ) { <nl> / / Form a new generic signature based on the old one . <nl> - GenericSignatureBuilder Builder ( M . getASTContext ( ) , <nl> - LookUpConformanceInModule ( M . getSwiftModule ( ) ) ) ; <nl> + GenericSignatureBuilder Builder ( M . getASTContext ( ) ) ; <nl> <nl> / / First , add the old generic signature . <nl> Builder . addGenericSignature ( OrigGenSig ) ; <nl> class FunctionSignaturePartialSpecializer { <nl> : CallerGenericSig ( CallerGenericSig ) , CallerGenericEnv ( CallerGenericEnv ) , <nl> CalleeGenericSig ( CalleeGenericSig ) , CalleeGenericEnv ( CalleeGenericEnv ) , <nl> M ( M ) , SM ( M . getSwiftModule ( ) ) , Ctx ( M . getASTContext ( ) ) , <nl> - Builder ( Ctx , LookUpConformanceInModule ( SM ) ) { <nl> + Builder ( Ctx ) { <nl> SpecializedGenericSig = nullptr ; <nl> SpecializedGenericEnv = nullptr ; <nl> CalleeInterfaceToCallerArchetypeMap = <nl> class FunctionSignaturePartialSpecializer { <nl> : CallerGenericSig ( CalleeGenericSig ) , CallerGenericEnv ( CalleeGenericEnv ) , <nl> CalleeGenericSig ( CalleeGenericSig ) , CalleeGenericEnv ( CalleeGenericEnv ) , <nl> M ( M ) , SM ( M . getSwiftModule ( ) ) , Ctx ( M . getASTContext ( ) ) , <nl> - Builder ( Ctx , LookUpConformanceInModule ( SM ) ) { <nl> + Builder ( Ctx ) { <nl> <nl> / / Create the new generic signature using provided requirements . <nl> std : : tie ( SpecializedGenericEnv , SpecializedGenericSig ) = <nl> mmm a / lib / Sema / TypeCheckAttr . cpp <nl> ppp b / lib / Sema / TypeCheckAttr . cpp <nl> void AttributeChecker : : visitSpecializeAttr ( SpecializeAttr * attr ) { <nl> } <nl> <nl> / / Form a new generic signature based on the old one . <nl> - GenericSignatureBuilder Builder ( D - > getASTContext ( ) , <nl> - LookUpConformanceInModule ( DC - > getParentModule ( ) ) ) ; <nl> + GenericSignatureBuilder Builder ( D - > getASTContext ( ) ) ; <nl> <nl> / / First , add the old generic signature . <nl> Builder . addGenericSignature ( genericSig ) ; <nl> mmm a / lib / Sema / TypeCheckGeneric . cpp <nl> ppp b / lib / Sema / TypeCheckGeneric . cpp <nl> TypeChecker : : validateGenericFuncSignature ( AbstractFunctionDecl * func ) { <nl> prepareGenericParamList ( gp , func ) ; <nl> <nl> / / Create the generic signature builder . <nl> - GenericSignatureBuilder builder ( Context , <nl> - LookUpConformanceInModule ( func - > getParentModule ( ) ) ) ; <nl> + GenericSignatureBuilder builder ( Context ) ; <nl> <nl> / / Type check the function declaration , treating all generic type <nl> / / parameters as dependent , unresolved . <nl> TypeChecker : : validateGenericSubscriptSignature ( SubscriptDecl * subscript ) { <nl> prepareGenericParamList ( gp , subscript ) ; <nl> <nl> / / Create the generic signature builder . <nl> - GenericSignatureBuilder builder ( Context , <nl> - LookUpConformanceInModule ( subscript - > getParentModule ( ) ) ) ; <nl> + GenericSignatureBuilder builder ( Context ) ; <nl> <nl> / / Type check the function declaration , treating all generic type <nl> / / parameters as dependent , unresolved . <nl> GenericEnvironment * TypeChecker : : checkGenericEnvironment ( <nl> } <nl> <nl> / / Create the generic signature builder . 
<nl> - GenericSignatureBuilder builder ( Context , <nl> - LookUpConformanceInModule ( dc - > getParentModule ( ) ) ) ; <nl> + GenericSignatureBuilder builder ( Context ) ; <nl> <nl> / / Type check the generic parameters , treating all generic type <nl> / / parameters as dependent , unresolved . <nl> mmm a / lib / Sema / TypeCheckProtocol . cpp <nl> ppp b / lib / Sema / TypeCheckProtocol . cpp <nl> RequirementEnvironment : : RequirementEnvironment ( <nl> / / Construct a generic signature builder by collecting the constraints <nl> / / from the requirement and the context of the conformance together , <nl> / / because both define the capabilities of the requirement . <nl> - GenericSignatureBuilder builder ( <nl> - ctx , <nl> - LookUpConformanceInModule ( conformanceDC - > getParentModule ( ) ) ) ; <nl> + GenericSignatureBuilder builder ( ctx ) ; <nl> <nl> auto source = <nl> GenericSignatureBuilder : : FloatingRequirementSource : : forAbstract ( ) ; <nl>
[ GSB ] Eliminate the stored LookupConformanceFn from the GSB .
apple/swift
ef542ffd8a0b29d7074cf7c53769e33d3f1fff1b
2017-10-10T16:41:23Z
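The commit above removes the builder's stored conformance-lookup std::function and instead hands out a small callable ( LookUpConformanceInBuilder ) that keeps a back-pointer to the builder and forwards to its lookupConformance() member, so the lookup always reflects the builder's current state, even after the builder is registered and moved. Below is a minimal C++ sketch of that pattern; the Registry and LookUpInRegistry names are illustrative stand-ins, not the Swift compiler's API.

#include <optional>
#include <string>

// Sketch: instead of caching a std::function on the object, hand out a small
// callable that keeps a back-pointer and forwards to a member function.
struct Registry;

struct LookUpInRegistry {
  Registry *owner;  // back-pointer; valid as long as the owner is
  explicit LookUpInRegistry(Registry *r) : owner(r) {}
  std::optional<std::string> operator()(const std::string &key) const;
};

struct Registry {
  std::optional<std::string> lookup(const std::string &key) {
    if (key == "known") return "value";  // stand-in for the real lookup logic
    return std::nullopt;
  }
  // Built on demand; nothing is stored on the object itself.
  LookUpInRegistry getLookupFn() { return LookUpInRegistry(this); }
};

std::optional<std::string> LookUpInRegistry::operator()(const std::string &key) const {
  return owner->lookup(key);
}

int main() {
  Registry r;
  auto lookup = r.getLookupFn();
  return lookup("known").has_value() ? 0 : 1;
}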
mmm a / editor / editor_file_dialog . cpp <nl> ppp b / editor / editor_file_dialog . cpp <nl> void EditorFileDialog : : _notification ( int p_what ) { <nl> fav_down - > set_icon ( get_icon ( " MoveDown " , " EditorIcons " ) ) ; <nl> fav_rm - > set_icon ( get_icon ( " RemoveSmall " , " EditorIcons " ) ) ; <nl> <nl> - Theme : : get_default ( ) - > clear_icon ( " ResizedFile " , " EditorIcons " ) ; <nl> - Theme : : get_default ( ) - > clear_icon ( " ResizedFolder " , " EditorIcons " ) ; <nl> update_file_list ( ) ; <nl> } <nl> } <nl>
Merge pull request from volzhs / fix - icon_map - error
godotengine/godot
27cd90f2b017be2ecccf7be0ebf7fb935798de7e
2017-09-16T17:19:51Z
mmm a / include / grpc / impl / codegen / grpc_types . h <nl> ppp b / include / grpc / impl / codegen / grpc_types . h <nl> typedef struct { <nl> " grpc . max_channel_trace_event_memory_per_node " <nl> / * * If non - zero , gRPC library will track stats and information at at per channel <nl> * level . Disabling channelz naturally disables channel tracing . The default <nl> - * is for channelz to be disabled . * / <nl> + * is for channelz to be enabled . * / <nl> # define GRPC_ARG_ENABLE_CHANNELZ " grpc . enable_channelz " <nl> / * * If non - zero , Cronet transport will coalesce packets to fewer frames <nl> * when possible . * / <nl>
Merge pull request from grpc / ncteisen - patch - 1
grpc/grpc
e91e311a736a94e2f9d7ffdb7de4a5249f2b162f
2018-11-29T21:19:23Z
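The one-word doc fix above matters because it flips the documented default: channelz is on unless a channel argument turns it off. A hedged sketch of opting out from C++ client code, assuming the usual grpc::ChannelArguments API:

#include <grpcpp/grpcpp.h>

int main() {
  grpc::ChannelArguments args;
  // GRPC_ARG_ENABLE_CHANNELZ expands to "grpc.enable_channelz". Since the
  // default is now documented as enabled, pass 0 to disable it explicitly.
  args.SetInt(GRPC_ARG_ENABLE_CHANNELZ, 0);
  auto channel = grpc::CreateCustomChannel(
      "localhost:50051", grpc::InsecureChannelCredentials(), args);
  (void)channel;  // use the channel as usual; channelz stats are now off
  return 0;
}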
mmm a / main / main . cpp <nl> ppp b / main / main . cpp <nl> bool Main : : iteration ( ) { <nl> } <nl> <nl> if ( OS : : get_singleton ( ) - > is_in_low_processor_usage_mode ( ) | | ! OS : : get_singleton ( ) - > can_draw ( ) ) <nl> - OS : : get_singleton ( ) - > delay_usec ( 25000 ) ; / / apply some delay to force idle time <nl> + OS : : get_singleton ( ) - > delay_usec ( 16600 ) ; / / apply some delay to force idle time ( results in about 60 FPS max ) <nl> else { <nl> uint32_t frame_delay = OS : : get_singleton ( ) - > get_frame_delay ( ) ; <nl> if ( frame_delay ) <nl>
Change low processor usage mode to cap at 60 FPS rather than 40 FPS
godotengine/godot
cc5a020afe3e02a421b3fcfdfac3b6ea9cf6699d
2016-05-22T21:51:38Z
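The new constant is plain frame-time arithmetic: one frame at 60 FPS lasts 1,000,000 / 60 ≈ 16,667 µs, and the commit rounds slightly below that to 16,600 so the forced sleep alone still allows roughly 60 FPS, whereas the old 25,000 µs delay capped the loop at 1,000,000 / 25,000 = 40 FPS. A tiny sketch of the computation:

#include <cstdio>

int main() {
  const int target_fps = 60;
  const long usec_per_frame = 1000000L / target_fps;  // 16666 usec per frame
  std::printf("delay for %d FPS: %ld usec\n", target_fps, usec_per_frame);
  std::printf("old 25000 usec delay => %ld FPS cap\n", 1000000L / 25000L);
  return 0;
}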
mmm a / . travis . yml <nl> ppp b / . travis . yml <nl> language : julia <nl> os : <nl> - linux <nl> - osx <nl> + osx_image : xcode8 <nl> julia : <nl> - 0 . 5 <nl> # - nightly 0 . 6 supports depends on # 170 <nl>
XCode8 supports thread_local in C + +
apache/incubator-mxnet
d0253f1ba0d4d681c3e755cc1dd4a9b63e1f79ac
2017-04-14T04:00:54Z
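The CI pin to xcode8 works because, as the commit message says, Xcode 8 is the first Apple toolchain whose compiler and runtime accept C++11 thread_local. A minimal illustration of the keyword the build depends on:

#include <cstdio>
#include <thread>

// Each thread gets its own copy of this counter; requires C++11 thread_local,
// which on macOS needs Xcode 8 or later (the reason for the CI pin above).
thread_local int counter = 0;

void bump(const char *who) {
  ++counter;  // touches only the calling thread's copy
  std::printf("%s sees counter = %d\n", who, counter);
}

int main() {
  std::thread a([] { bump("thread a"); });
  std::thread b([] { bump("thread b"); });
  a.join();
  b.join();
  bump("main");  // main's copy is still independent, so this also prints 1
  return 0;
}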
new file mode 100644 <nl> index 000000000000 . . 2224ee206b02 <nl> mmm / dev / null <nl> ppp b / validation - test / Evolution / Inputs / class_fixed_layout_add_virtual_method . swift <nl> <nl> + <nl> + public func getVersion ( ) - > Int { <nl> + # if BEFORE <nl> + return 0 <nl> + # else <nl> + return 1 <nl> + # endif <nl> + } <nl> + <nl> + # if BEFORE <nl> + <nl> + @ _fixed_layout <nl> + public class AddVirtualMethod { <nl> + public init ( ) { } <nl> + <nl> + public func firstMethod ( ) - > Int { <nl> + return 1 <nl> + } <nl> + <nl> + public func secondMethod ( ) - > Int { <nl> + return 2 <nl> + } <nl> + } <nl> + <nl> + # else <nl> + <nl> + @ _fixed_layout <nl> + public class AddVirtualMethod { <nl> + / / Note : methods were re - ordered , new method added in the middle <nl> + public func secondMethod ( ) - > Int { <nl> + return 2 <nl> + } <nl> + <nl> + public func thirdMethod ( ) - > Int { <nl> + return 3 <nl> + } <nl> + <nl> + public func firstMethod ( ) - > Int { <nl> + return 1 <nl> + } <nl> + <nl> + public init ( ) { } <nl> + } <nl> + <nl> + # endif <nl> new file mode 100644 <nl> index 000000000000 . . 8a08cd8fef6a <nl> mmm / dev / null <nl> ppp b / validation - test / Evolution / Inputs / class_fixed_layout_add_virtual_method_subclass . swift <nl> <nl> + <nl> + # if BEFORE <nl> + <nl> + @ _fixed_layout <nl> + open class AddVirtualMethod { <nl> + public init ( ) { } <nl> + <nl> + open func f1 ( ) - > Int { <nl> + return 1 <nl> + } <nl> + } <nl> + <nl> + # else <nl> + <nl> + @ _fixed_layout <nl> + open class AddVirtualMethod { <nl> + public init ( ) { } <nl> + <nl> + open func f1 ( ) - > Int { <nl> + return f2 ( ) + 1 <nl> + } <nl> + <nl> + open func f2 ( ) - > Int { <nl> + return 0 <nl> + } <nl> + } <nl> + <nl> + # endif <nl> new file mode 100644 <nl> index 000000000000 . . e9bb737d7ffa <nl> mmm / dev / null <nl> ppp b / validation - test / Evolution / Inputs / class_fixed_layout_superclass_reorder_methods . swift <nl> <nl> + public func getVersion ( ) - > Int { <nl> + # if BEFORE <nl> + return 0 <nl> + # else <nl> + return 1 <nl> + # endif <nl> + } <nl> + <nl> + # if BEFORE <nl> + @ _fixed_layout <nl> + open class Base { <nl> + public init ( ) { } <nl> + open func firstMethod ( ) - > Int { <nl> + return 1 <nl> + } <nl> + open func secondMethod ( ) - > Int { <nl> + return 2 <nl> + } <nl> + open func callOverriddenMethods ( ) - > Int { <nl> + return firstMethod ( ) * 10 + secondMethod ( ) <nl> + } <nl> + } <nl> + # else <nl> + @ _fixed_layout <nl> + open class Base { <nl> + public init ( ) { } <nl> + open func secondMethod ( ) - > Int { <nl> + return 2 <nl> + } <nl> + open func firstMethod ( ) - > Int { <nl> + return 1 <nl> + } <nl> + open func callOverriddenMethods ( ) - > Int { <nl> + return firstMethod ( ) * 10 + secondMethod ( ) <nl> + } <nl> + } <nl> + # endif <nl> + <nl> + @ _fixed_layout <nl> + public class Derived : Base { <nl> + public override func firstMethod ( ) - > Int { <nl> + return 10 <nl> + } <nl> + <nl> + public override func secondMethod ( ) - > Int { <nl> + return 20 <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . d4d7a269ee6c <nl> mmm / dev / null <nl> ppp b / validation - test / Evolution / test_class_fixed_layout_add_virtual_method . 
swift <nl> <nl> + / / RUN : % target - resilience - test <nl> + / / REQUIRES : executable_test <nl> + <nl> + import StdlibUnittest <nl> + import class_fixed_layout_add_virtual_method <nl> + <nl> + <nl> + var ClassAddVirtualMethodTest = TestSuite ( " ClassAddVirtualMethod " ) <nl> + <nl> + ClassAddVirtualMethodTest . test ( " ClassAddVirtualMethod " ) { <nl> + let c = AddVirtualMethod ( ) <nl> + <nl> + do { <nl> + expectEqual ( 1 , c . firstMethod ( ) ) <nl> + expectEqual ( 2 , c . secondMethod ( ) ) <nl> + } <nl> + } <nl> + <nl> + runAllTests ( ) <nl> new file mode 100644 <nl> index 000000000000 . . 193148d549d2 <nl> mmm / dev / null <nl> ppp b / validation - test / Evolution / test_class_fixed_layout_add_virtual_method_subclass . swift <nl> <nl> + / / RUN : % target - resilience - test <nl> + / / REQUIRES : executable_test <nl> + <nl> + import StdlibUnittest <nl> + import class_fixed_layout_add_virtual_method_subclass <nl> + <nl> + <nl> + var ClassAddVirtualMethodSubclassTest = TestSuite ( " ClassAddVirtualMethodSubclass " ) <nl> + <nl> + class AddVirtualMethodSubclass : AddVirtualMethod { <nl> + func f3 ( ) - > Int { <nl> + return f1 ( ) + 1 <nl> + } <nl> + } <nl> + <nl> + ClassAddVirtualMethodSubclassTest . test ( " AddVirtualMethod " ) { <nl> + let t = AddVirtualMethodSubclass ( ) <nl> + <nl> + expectEqual ( 1 , t . f1 ( ) ) <nl> + expectEqual ( 2 , t . f3 ( ) ) <nl> + } <nl> + <nl> + class AddVirtualMethodGenericSubclass < T > : AddVirtualMethod { <nl> + func f3 ( _ t : T ) - > [ Int : T ] { <nl> + return [ f1 ( ) : t ] <nl> + } <nl> + } <nl> + <nl> + ClassAddVirtualMethodSubclassTest . test ( " AddVirtualMethodGeneric " ) { <nl> + let t = AddVirtualMethodGenericSubclass < String > ( ) <nl> + <nl> + expectEqual ( 1 , t . f1 ( ) ) <nl> + expectEqual ( [ 1 : " hi " ] , t . f3 ( " hi " ) ) <nl> + } <nl> + <nl> + runAllTests ( ) <nl> new file mode 100644 <nl> index 000000000000 . . 478955e10ce3 <nl> mmm / dev / null <nl> ppp b / validation - test / Evolution / test_class_fixed_layout_superclass_reorder_methods . swift <nl> <nl> + / / RUN : % target - resilience - test <nl> + / / REQUIRES : executable_test <nl> + <nl> + import StdlibUnittest <nl> + import class_fixed_layout_superclass_reorder_methods <nl> + <nl> + <nl> + var SuperclassReorderMethodsTest = TestSuite ( " SuperclassReorderMethods " ) <nl> + <nl> + SuperclassReorderMethodsTest . test ( " TestOverrides " ) { <nl> + class MyDerived : Base { <nl> + override func firstMethod ( ) - > Int { <nl> + return 3 <nl> + } <nl> + override func secondMethod ( ) - > Int { <nl> + return 4 <nl> + } <nl> + } <nl> + <nl> + expectEqual ( MyDerived ( ) . callOverriddenMethods ( ) , 34 ) <nl> + } <nl> + <nl> + SuperclassReorderMethodsTest . test ( " TestSuper " ) { <nl> + class MyDerived : Base { <nl> + override func firstMethod ( ) - > Int { <nl> + return super . firstMethod ( ) + 3 <nl> + } <nl> + override func secondMethod ( ) - > Int { <nl> + return super . secondMethod ( ) + 3 <nl> + } <nl> + } <nl> + <nl> + expectEqual ( MyDerived ( ) . callOverriddenMethods ( ) , 45 ) <nl> + } <nl> + <nl> + extension Derived { <nl> + public func firstMethodExt ( ) - > Int { <nl> + return firstMethod ( ) + super . firstMethod ( ) <nl> + } <nl> + <nl> + public func secondMethodExt ( ) - > Int { <nl> + return secondMethod ( ) + super . secondMethod ( ) <nl> + } <nl> + } <nl> + <nl> + SuperclassReorderMethodsTest . test ( " TestSuperExtension " ) { <nl> + let obj = Derived ( ) <nl> + expectEqual ( obj . firstMethodExt ( ) , 11 ) <nl> + expectEqual ( obj . 
secondMethodExt ( ) , 22 ) <nl> + } <nl> + <nl> + runAllTests ( ) <nl> + <nl>
Evolution : Add some tests for @ _fixed_layout classes
apple/swift
a5abb9d7edf128adf0800bd6d055e33da4894663
2018-11-30T04:20:02Z
mmm a / rabit - learn / linear / README . md <nl> ppp b / rabit - learn / linear / README . md <nl> Linear and Logistic Regression <nl> * input format : LibSVM <nl> * Local Example : [ run - linear . sh ] ( run - linear . sh ) <nl> * Running on Hadoop : [ run - hadoop . sh ] ( run - hadoop . sh ) <nl> - <nl> + - Set input data to stdin , and model_out = stdout <nl> + <nl> Parameters <nl> = = = <nl> All the parameters can be set by param = value <nl>
add hadoop linear example
dmlc/xgboost
e4ce8efab5b7db30ae8a2e823fe4ef42930a82b2
2015-03-02T04:36:48Z
mmm a / dlib / image_transforms / integral_image . h <nl> ppp b / dlib / image_transforms / integral_image . h <nl> namespace dlib <nl> const image_type & img <nl> ) <nl> { <nl> - COMPILE_TIME_ASSERT ( pixel_traits < typename image_type : : type > : : is_unsigned = = true ) ; <nl> - <nl> - unsigned long pixel ; <nl> + long pixel ; <nl> int_img . set_size ( img . nr ( ) , img . nc ( ) ) ; <nl> <nl> / / compute the first row of the integral image <nl> - unsigned long temp = 0 ; <nl> + long temp = 0 ; <nl> for ( long c = 0 ; c < img . nc ( ) ; + + c ) <nl> { <nl> assign_pixel ( pixel , img [ 0 ] [ c ] ) ; <nl> namespace dlib <nl> const rectangle & rect <nl> ) const <nl> { <nl> - DLIB_ASSERT ( get_rect ( * this ) . contains ( rect ) = = true , <nl> + DLIB_ASSERT ( get_rect ( * this ) . contains ( rect ) = = true & & rect . is_empty ( ) = = false , <nl> " \ tlong get_sum_of_area ( rect ) " <nl> < < " \ n \ tYou have given a rectangle that goes outside the image " <nl> < < " \ n \ tthis : " < < this <nl> + < < " \ n \ trect . is_empty ( ) : " < < rect . is_empty ( ) <nl> < < " \ n \ trect : " < < rect <nl> < < " \ n \ tget_rect ( * this ) : " < < get_rect ( * this ) <nl> ) ; <nl> <nl> - unsigned long top_left = 0 , top_right = 0 , bottom_left = 0 , bottom_right = 0 ; <nl> + long top_left = 0 , top_right = 0 , bottom_left = 0 , bottom_right = 0 ; <nl> <nl> bottom_right = int_img [ rect . bottom ( ) ] [ rect . right ( ) ] ; <nl> if ( rect . left ( ) - 1 > = 0 & & rect . top ( ) - 1 > = 0 ) <nl> namespace dlib <nl> <nl> private : <nl> <nl> - array2d < unsigned long > : : kernel_1a int_img ; <nl> + array2d < long > : : kernel_1a_c int_img ; <nl> <nl> <nl> } ; <nl> mmm a / dlib / image_transforms / integral_image_abstract . h <nl> ppp b / dlib / image_transforms / integral_image_abstract . h <nl> namespace dlib <nl> ) const ; <nl> / * ! <nl> requires <nl> + - rect . is_empty ( ) = = false <nl> - get_rect ( * this ) . contains ( rect ) = = true <nl> ( i . e . rect must not be outside the integral image ) <nl> ensures <nl>
Added a missing requires clause to the get_sum_of_area ( ) function
davisking/dlib
4af255e286251783100b5aa67b3c5805f2574dde
2011-04-08T01:11:31Z
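The dlib patch above changes the integral-image accumulators from unsigned long to long so the four-corner expression in get_sum_of_area() cannot wrap around zero, and it adds a precondition rejecting empty rectangles. The identity it relies on: with I[r][c] holding the sum of all pixels above and to the left of (r, c) inclusive, a rectangle's sum is I[bottom][right] - I[top-1][right] - I[bottom][left-1] + I[top-1][left-1]. A self-contained sketch of that identity (an illustration, not dlib's actual interface):

#include <cstdio>
#include <vector>

// Minimal integral image with a zero-padded border so the four-corner sum
// needs no edge checks. Signed 64-bit accumulators avoid the unsigned
// wraparound the commit fixes.
struct IntegralImage {
  int rows, cols;
  std::vector<long long> s;  // (rows+1) x (cols+1), row 0 and column 0 are zero
  IntegralImage(const std::vector<std::vector<int>> &img)
      : rows((int)img.size()), cols((int)img[0].size()),
        s((rows + 1) * (cols + 1), 0) {
    for (int r = 0; r < rows; ++r)
      for (int c = 0; c < cols; ++c)
        at(r + 1, c + 1) = img[r][c] + at(r, c + 1) + at(r + 1, c) - at(r, c);
  }
  long long &at(int r, int c) { return s[r * (cols + 1) + c]; }
  // Inclusive rectangle sum; callers must pass a non-empty rectangle,
  // mirroring the new is_empty() precondition in the commit.
  long long sum(int top, int left, int bottom, int right) {
    return at(bottom + 1, right + 1) - at(top, right + 1)
         - at(bottom + 1, left) + at(top, left);
  }
};

int main() {
  std::vector<std::vector<int>> img = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}};
  IntegralImage ii(img);
  std::printf("%lld\n", ii.sum(0, 0, 2, 2));  // 45: the whole image
  std::printf("%lld\n", ii.sum(1, 1, 2, 2));  // 5 + 6 + 8 + 9 = 28
  return 0;
}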
mmm a / cmds / subscribe . cpp <nl> ppp b / cmds / subscribe . cpp <nl> void watchman_client_subscription : : updateSubscriptionTicks ( w_query_res * res ) { <nl> json_ref watchman_client_subscription : : buildSubscriptionResults ( <nl> const std : : shared_ptr < w_root_t > & root , <nl> ClockSpec & position , <nl> - bool ignoreStateTransitions ) { <nl> + OnStateTransition onStateTransition ) { <nl> auto since_spec = query - > since_spec . get ( ) ; <nl> <nl> if ( since_spec & & since_spec - > tag = = w_cs_clock ) { <nl> json_ref watchman_client_subscription : : buildSubscriptionResults ( <nl> / / update the clock in order to allow changes to be reported the next time <nl> / / the query is run . <nl> bool scmAwareQuery = since_spec & & since_spec - > hasScmParams ( ) ; <nl> - if ( ! ignoreStateTransitions & & scmAwareQuery ) { <nl> + if ( onStateTransition = = OnStateTransition : : DontAdvance & & scmAwareQuery ) { <nl> if ( root - > stateTransCount . load ( ) ! = res . stateTransCountAtStartOfQuery ) { <nl> watchman : : log ( <nl> watchman : : DBG , <nl> ClockSpec watchman_client_subscription : : runSubscriptionRules ( <nl> const std : : shared_ptr < w_root_t > & root ) { <nl> ClockSpec position ; <nl> <nl> - auto response = buildSubscriptionResults ( root , position ) ; <nl> + auto response = <nl> + buildSubscriptionResults ( root , position , OnStateTransition : : DontAdvance ) ; <nl> <nl> if ( response ) { <nl> add_root_warnings_to_response ( response , root ) ; <nl> static void cmd_flush_subscriptions ( <nl> " ( flush - subscriptions ) executing subscription " , <nl> sub - > name , <nl> " \ n " ) ; <nl> - auto sub_result = sub - > buildSubscriptionResults ( root , out_position , true ) ; <nl> + auto sub_result = sub - > buildSubscriptionResults ( <nl> + root , out_position , OnStateTransition : : QueryAnyway ) ; <nl> if ( sub_result ) { <nl> send_and_dispose_response ( client , std : : move ( sub_result ) ) ; <nl> json_array_append ( synced , w_string_to_json ( sub_name_str ) ) ; <nl> static void cmd_subscribe ( <nl> <nl> add_root_warnings_to_response ( resp , root ) ; <nl> ClockSpec position ; <nl> - initial_subscription_results = sub - > buildSubscriptionResults ( root , position ) ; <nl> + initial_subscription_results = sub - > buildSubscriptionResults ( <nl> + root , position , OnStateTransition : : DontAdvance ) ; <nl> resp . set ( " clock " , position . toJson ( ) ) ; <nl> <nl> send_and_dispose_response ( client , std : : move ( resp ) ) ; <nl> mmm a / watchman_client . h <nl> ppp b / watchman_client . h <nl> struct watchman_client : public std : : enable_shared_from_this < watchman_client > { <nl> <nl> struct watchman_user_client ; <nl> <nl> + enum class OnStateTransition { QueryAnyway , DontAdvance } ; <nl> + <nl> struct watchman_client_subscription <nl> : public std : : enable_shared_from_this < watchman_client_subscription > { <nl> std : : shared_ptr < w_root_t > root ; <nl> struct watchman_client_subscription <nl> json_ref buildSubscriptionResults ( <nl> const std : : shared_ptr < w_root_t > & root , <nl> ClockSpec & position , <nl> - bool ignoreStateTransitions = false ) ; <nl> + OnStateTransition onStateTransition ) ; <nl> <nl> private : <nl> ClockSpec runSubscriptionRules ( <nl>
Code clarity tidy - up
facebook/watchman
c1ebfeb46b4461a0871997f54ce25585f5eabb87
2017-11-08T07:05:37Z
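The watchman refactor above is the classic bool-parameter-to-enum-class cleanup: a call that used to read buildSubscriptionResults(root, position, true) now names its intent with OnStateTransition::QueryAnyway, and the defaulted flag disappears. A small sketch of the pattern, with illustrative names rather than watchman's real API:

#include <cstdio>

// A scoped enum makes the call site self-describing where a bare true/false
// would not be, and removes the easy-to-miss defaulted argument.
enum class OnStateTransition { QueryAnyway, DontAdvance };

void runQuery(const char *name, OnStateTransition onStateTransition) {
  if (onStateTransition == OnStateTransition::DontAdvance) {
    std::printf("%s: bail out if a state transition raced the query\n", name);
  } else {
    std::printf("%s: query regardless of state transitions\n", name);
  }
}

int main() {
  runQuery("subscribe", OnStateTransition::DontAdvance);           // was runQuery("subscribe")
  runQuery("flush-subscriptions", OnStateTransition::QueryAnyway); // was runQuery(..., true)
  return 0;
}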
mmm a / Rakefile <nl> ppp b / Rakefile <nl> task ' gem : native ' do <nl> <nl> gem update - - system - - no - document & & \ <nl> bundle & & \ <nl> - rake native : # { plat } pkg / # { spec . full_name } - # { plat } . gem \ <nl> + rake native : # { plat } pkg / # { spec . full_name } - # { plat } . gem pkg / # { spec . full_name } . gem \ <nl> RUBY_CC_VERSION = 2 . 7 . 0 : 2 . 6 . 0 : 2 . 5 . 0 : 2 . 4 . 0 : 2 . 3 . 0 \ <nl> V = # { verbose } \ <nl> GRPC_CONFIG = # { grpc_config } <nl>
Merge pull request from apolcyn / upmerged_source_only_to_master
grpc/grpc
1471dcdf9e9ae085a9c25c986f8a79651fe47f9a
2020-06-26T19:03:26Z
mmm a / folly / futures / Future - inl . h <nl> ppp b / folly / futures / Future - inl . h <nl> template < class E > <nl> Future < T > Future < T > : : within ( Duration dur , E e , Timekeeper * tk ) { <nl> <nl> struct Context { <nl> - Context ( E ex , Future < Unit > & & f ) <nl> - : exception ( std : : move ( ex ) ) , afterFuture ( std : : move ( f ) ) , promise ( ) { } <nl> + Context ( E ex ) : exception ( std : : move ( ex ) ) , promise ( ) { } <nl> E exception ; <nl> - Future < Unit > afterFuture ; <nl> - Future < Unit > thisFuture ; <nl> Promise < T > promise ; <nl> std : : atomic < bool > token { false } ; <nl> } ; <nl> + auto ctx = std : : make_shared < Context > ( std : : move ( e ) ) ; <nl> <nl> if ( ! tk ) { <nl> tk = folly : : detail : : getTimekeeperSingleton ( ) ; <nl> } <nl> <nl> - auto ctx = std : : make_shared < Context > ( std : : move ( e ) , tk - > after ( dur ) ) ; <nl> + tk - > after ( dur ) <nl> + . then ( [ ctx ] ( Try < Unit > const & t ) { <nl> + if ( ctx - > token . exchange ( true ) = = false ) { <nl> + if ( t . hasException ( ) ) { <nl> + ctx - > promise . setException ( std : : move ( t . exception ( ) ) ) ; <nl> + } else { <nl> + ctx - > promise . setException ( std : : move ( ctx - > exception ) ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> <nl> - ctx - > thisFuture = this - > then ( [ ctx ] ( Try < T > & & t ) mutable { <nl> - / / " this " completed first , cancel " after " <nl> - ctx - > afterFuture . raise ( CancelTimer ( ) ) ; <nl> + this - > then ( [ ctx ] ( Try < T > & & t ) { <nl> if ( ctx - > token . exchange ( true ) = = false ) { <nl> ctx - > promise . setTry ( std : : move ( t ) ) ; <nl> } <nl> } ) ; <nl> <nl> - ctx - > afterFuture . then ( [ ctx ] ( Try < Unit > const & t ) mutable { <nl> - / / " after " completed first , cancel " this " <nl> - ctx - > thisFuture . raise ( TimedOut ( ) ) ; <nl> - if ( ctx - > token . exchange ( true ) = = false ) { <nl> - if ( t . hasException ( ) ) { <nl> - ctx - > promise . setException ( std : : move ( t . exception ( ) ) ) ; <nl> - } else { <nl> - ctx - > promise . setException ( std : : move ( ctx - > exception ) ) ; <nl> - } <nl> - } <nl> - } ) ; <nl> - <nl> return ctx - > promise . getFuture ( ) . via ( getExecutor ( ) ) ; <nl> } <nl> <nl> mmm a / folly / futures / FutureException . h <nl> ppp b / folly / futures / FutureException . h <nl> class TimedOut : public FutureException { <nl> TimedOut ( ) : FutureException ( " Timed out " ) { } <nl> } ; <nl> <nl> - class CancelTimer : public FutureException { <nl> - public : <nl> - CancelTimer ( ) : FutureException ( " Timer should be cancelled " ) { } <nl> - } ; <nl> - <nl> class PredicateDoesNotObtain : public FutureException { <nl> public : <nl> PredicateDoesNotObtain ( ) : FutureException ( " Predicate does not obtain " ) { } <nl> mmm a / folly / futures / test / InterruptTest . cpp <nl> ppp b / folly / futures / test / InterruptTest . cpp <nl> <nl> <nl> # include < folly / futures / Future . h > <nl> # include < folly / futures / Promise . h > <nl> - # include < folly / Baton . h > <nl> <nl> using namespace folly ; <nl> <nl> TEST ( Interrupt , secondInterruptNoop ) { <nl> f . cancel ( ) ; <nl> EXPECT_EQ ( 1 , count ) ; <nl> } <nl> - <nl> - TEST ( Interrupt , withinTimedOut ) { <nl> - Promise < int > p ; <nl> - Baton < > done ; <nl> - p . setInterruptHandler ( [ & ] ( const exception_wrapper & e ) { done . post ( ) ; } ) ; <nl> - p . getFuture ( ) . 
within ( std : : chrono : : milliseconds ( 1 ) ) ; <nl> - / / Give it 100ms to time out and call the interrupt handler <nl> - auto t = std : : chrono : : steady_clock : : now ( ) + std : : chrono : : milliseconds ( 100 ) ; <nl> - EXPECT_TRUE ( done . timed_wait ( t ) ) ; <nl> - } <nl> - <nl> - class DummyTimeKeeper : public Timekeeper { <nl> - public : <nl> - explicit DummyTimeKeeper ( ) : interrupted ( ) { } <nl> - <nl> - Future < Unit > after ( Duration ) override { <nl> - promise . setInterruptHandler ( <nl> - [ this ] ( const exception_wrapper & e ) { <nl> - EXPECT_THROW ( e . throwException ( ) , CancelTimer ) ; <nl> - interrupted . post ( ) ; <nl> - } <nl> - ) ; <nl> - return promise . getFuture ( ) ; <nl> - } <nl> - <nl> - Baton < > interrupted ; <nl> - <nl> - private : <nl> - Promise < Unit > promise ; <nl> - } ; <nl> - <nl> - TEST ( Interrupt , withinCancelTimer ) { <nl> - DummyTimeKeeper tk ; <nl> - Promise < int > p ; <nl> - Baton < > done ; <nl> - p . getFuture ( ) . within ( std : : chrono : : milliseconds ( 10 ) , TimedOut ( ) , & tk ) ; <nl> - p . setValue ( 1 ) ; / / this should cancel the timer <nl> - / / Give it 100ms to interrupt the timer Future <nl> - auto t = std : : chrono : : steady_clock : : now ( ) + std : : chrono : : milliseconds ( 100 ) ; <nl> - EXPECT_TRUE ( tk . interrupted . timed_wait ( t ) ) ; <nl> - } <nl>
Revert : ( Wangle ) within should raise TimedOut ( )
facebook/folly
8a8767014e221e18c4023c2864918db4e84435cd
2015-07-20T19:26:31Z
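Both the old and the reverted within() implementations race a timekeeper future against the wrapped future and let whichever finishes first claim a shared promise; the revert drops the cross-cancellation raises but keeps the claim itself, which hinges on std::atomic<bool>::exchange returning the previous value, so exactly one racer observes false. A standalone sketch of that token, with std::thread standing in for folly futures (this is not folly's implementation):

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

// "First completion wins": token.exchange(true) returns false for exactly
// one of the two racers, so the shared result is fulfilled exactly once.
struct Context {
  std::atomic<bool> token{false};
  void fulfill(const char *who) {
    if (token.exchange(true) == false)  // only the first racer gets here
      std::printf("result set by: %s\n", who);
  }
};

int main() {
  Context ctx;
  std::thread timer([&] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    ctx.fulfill("timeout path");
  });
  std::thread work([&] {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    ctx.fulfill("value path");  // finishes first, claims the token
  });
  timer.join();
  work.join();
  return 0;
}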
mmm a / . travis . yml <nl> ppp b / . travis . yml <nl> before_script : <nl> - if [ ! - d " $ { HOME } / . cache " ] ; then mkdir $ { HOME } / . cache ; fi <nl> - cp scripts / AGREEMENT . txt $ { HOME } / . cache / . apollo_agreement . txt <nl> script : <nl> - - . / docker / scripts / dev_start . sh dev - x86_64 - 20171120_1254 - ci <nl> + - . / docker / scripts / dev_start . sh dev - x86_64 - 20171122_1537 - ci <nl> - . / apollo_docker . sh $ { JOB } <nl> - rm - rf " $ { HOME } / . cache / bazel / _bazel_ $ { USER } / install " <nl> notifications : <nl> mmm a / apollo_docker . sh <nl> ppp b / apollo_docker . sh <nl> function start_build_docker ( ) { <nl> } <nl> <nl> function gen_docker ( ) { <nl> - IMG = " apolloauto / apollo : run - $ { MACHINE_ARCH } - 20171120_1254 " <nl> + IMG = " apolloauto / apollo : run - $ { MACHINE_ARCH } - 20171122_1537 " <nl> RELEASE_DIR = $ { HOME } / . cache / release <nl> RELEASE_NAME = " $ { DOCKER_REPO } : release - $ { MACHINE_ARCH } - $ { TIME } " <nl> DEFAULT_NAME = " $ { DOCKER_REPO } : release - $ { MACHINE_ARCH } - latest " <nl> mmm a / docker / scripts / dev_start . sh <nl> ppp b / docker / scripts / dev_start . sh <nl> <nl> <nl> VERSION = " " <nl> ARCH = $ ( uname - m ) <nl> - VERSION_X86_64 = " dev - x86_64 - 20171120_1254 " <nl> + VERSION_X86_64 = " dev - x86_64 - 20171122_1537 " <nl> VERSION_AARCH64 = " dev - aarch64 - 20170927_1111 " <nl> if [ [ $ # = = 1 ] ] ; then <nl> VERSION = $ 1 <nl> mmm a / scripts / docker_adduser . sh <nl> ppp b / scripts / docker_adduser . sh <nl> if [ - e / dev / video0 ] ; then <nl> fi <nl> <nl> MACHINE_ARCH = $ ( uname - m ) <nl> - ROS_TAR = " ros - indigo - apollo - 1 . 5 . 2 - $ { MACHINE_ARCH } . tar . gz " <nl> + ROS_TAR = " ros - indigo - apollo - 1 . 5 . 3 - $ { MACHINE_ARCH } . tar . gz " <nl> if [ " $ RELEASE_DOCKER " ! = " 1 " ] ; then <nl> # setup map data <nl> if [ - e / home / tmp / modules_data ] ; then <nl>
Update ros 1 . 5 . 3 , add libgeos - dev and shapely packages .
ApolloAuto/apollo
331d20a0ef9175d9985dab3622abdc2ff27baf3e
2017-11-23T00:12:28Z
mmm a / lib / TBDGen / TBDGen . cpp <nl> ppp b / lib / TBDGen / TBDGen . cpp <nl> void TBDGenVisitor : : visitAbstractFunctionDecl ( AbstractFunctionDecl * AFD ) { <nl> <nl> visitDefaultArguments ( AFD , AFD - > getParameters ( ) ) ; <nl> <nl> - if ( AFD - > isAsyncContext ( ) ) { <nl> + if ( AFD - > hasAsync ( ) ) { <nl> addSymbol ( LinkEntity : : forAsyncFunctionPointer ( AFD ) ) ; <nl> } <nl> } <nl> new file mode 100644 <nl> index 000000000000 . . 42f76a068ab3 <nl> mmm / dev / null <nl> ppp b / test / TBD / async - function - pointer . swift <nl> <nl> + / / REQUIRES : VENDOR = apple <nl> + / / RUN : % target - swift - frontend - emit - ir % s - enable - experimental - concurrency - validate - tbd - against - ir = all - module - name test | % FileCheck % s <nl> + <nl> + / / CHECK : @ " $ s4test6testityyYFAD " = hidden global % swift . async_func_pointer <nl> + <nl> + @ asyncHandler <nl> + public func testit ( ) { } <nl>
Merge pull request from nate - chandler / concurrency / irgen / rdar72329062
apple/swift
d89ffe789a3a623912a161ac14be70701a069f0f
2020-12-15T21:26:42Z
mmm a / src / backends / base . cpp <nl> ppp b / src / backends / base . cpp <nl> <nl> # include " base . h " <nl> + # include < xxhash . h > <nl> # include < sstream > <nl> <nl> TLANG_NAMESPACE_BEGIN <nl> std : : string CodeGenBase : : get_source_name ( ) { <nl> return fmt : : format ( " tmp { : 04d } . { } " , id , suffix ) ; <nl> } <nl> <nl> + void CodeGenBase : : generate_binary ( ) { <nl> + write_source ( ) ; <nl> + auto compiler_config = get_current_program ( ) . config . compiler_config ( ) ; <nl> + auto pp_fn = get_source_path ( ) + " . i " ; <nl> + auto preprocess_cmd = <nl> + get_current_program ( ) . config . preprocess_cmd ( get_source_path ( ) , pp_fn ) ; <nl> + std : : system ( preprocess_cmd . c_str ( ) ) ; <nl> + std : : ifstream ifs ( pp_fn ) ; <nl> + TC_ASSERT ( ifs ) ; <nl> + auto hash_input = <nl> + compiler_config + std : : string ( std : : istreambuf_iterator < char > ( ifs ) , <nl> + std : : istreambuf_iterator < char > ( ) ) ; <nl> + auto hash = XXH64 ( hash_input . data ( ) , hash_input . size ( ) , 0 ) ; <nl> + <nl> + std : : string cached_binary_fn = db_folder ( ) + fmt : : format ( " / { } . so " , hash ) ; <nl> + std : : ifstream key_file ( cached_binary_fn ) ; <nl> + if ( key_file ) { <nl> + std : : system ( <nl> + fmt : : format ( " cp { } { } " , cached_binary_fn , get_library_path ( ) ) . c_str ( ) ) ; <nl> + } else { <nl> + auto cmd = get_current_program ( ) . config . compile_cmd ( get_source_path ( ) , <nl> + get_library_path ( ) ) ; <nl> + auto compile_ret = std : : system ( cmd . c_str ( ) ) ; <nl> + if ( compile_ret ! = 0 ) { <nl> + auto cmd = get_current_program ( ) . config . compile_cmd ( <nl> + get_source_path ( ) , get_library_path ( ) , true ) ; <nl> + trash ( std : : system ( cmd . c_str ( ) ) ) ; <nl> + TC_ERROR ( " Source { } compilation failed . " , get_source_path ( ) ) ; <nl> + } else { <nl> + std : : system ( fmt : : format ( " cp { } { } " , get_library_path ( ) , cached_binary_fn ) <nl> + . c_str ( ) ) ; <nl> + } <nl> + } <nl> + / / disassemble ( ) ; <nl> + } <nl> + <nl> TLANG_NAMESPACE_END <nl> mmm a / src / backends / base . h <nl> ppp b / src / backends / base . h <nl> <nl> # include " . . / util . h " <nl> # include " . . / snode . h " <nl> # include " . . / ir . h " <nl> + # include " . . / program . h " <nl> # include < dlfcn . h > <nl> <nl> TLANG_NAMESPACE_BEGIN <nl> class CodeGenBase { <nl> <nl> FunctionType load_function ( ) ; <nl> <nl> + void generate_binary ( ) ; <nl> + <nl> void disassemble ( ) ; <nl> } ; <nl> <nl> mmm a / src / backends / kernel . cpp <nl> ppp b / src / backends / kernel . cpp <nl> <nl> # include " kernel . h " <nl> # include < taichi / system / timer . h > <nl> - # include < xxhash . h > <nl> <nl> TLANG_NAMESPACE_BEGIN <nl> <nl> FunctionType KernelCodeGen : : compile ( taichi : : Tlang : : Program & prog , <nl> taichi : : Tlang : : Kernel & kernel ) { <nl> + auto t = Time : : get_time ( ) ; <nl> this - > prog = & kernel . program ; <nl> this - > kernel = & kernel ; <nl> lower ( ) ; <nl> codegen ( ) ; <nl> - write_source ( ) ; <nl> - auto t = Time : : get_time ( ) ; <nl> - auto compiler_config = get_current_program ( ) . config . compiler_config ( ) ; <nl> - auto pp_fn = get_source_path ( ) + " . i " ; <nl> - auto preprocess_cmd = <nl> - get_current_program ( ) . config . preprocess_cmd ( get_source_path ( ) , pp_fn ) ; <nl> - std : : system ( preprocess_cmd . 
c_str ( ) ) ; <nl> - std : : ifstream ifs ( pp_fn ) ; <nl> - TC_ASSERT ( ifs ) ; <nl> - auto hash_input = <nl> - compiler_config + std : : string ( std : : istreambuf_iterator < char > ( ifs ) , <nl> - std : : istreambuf_iterator < char > ( ) ) ; <nl> - auto hash = XXH64 ( hash_input . data ( ) , hash_input . size ( ) , 0 ) ; <nl> - <nl> - std : : string cached_binary_fn = db_folder ( ) + fmt : : format ( " / { } . so " , hash ) ; <nl> - std : : ifstream key_file ( cached_binary_fn ) ; <nl> - if ( key_file ) { <nl> - std : : system ( <nl> - fmt : : format ( " cp { } { } " , cached_binary_fn , get_library_path ( ) ) . c_str ( ) ) ; <nl> - } else { <nl> - auto cmd = get_current_program ( ) . config . compile_cmd ( get_source_path ( ) , <nl> - get_library_path ( ) ) ; <nl> - auto compile_ret = std : : system ( cmd . c_str ( ) ) ; <nl> - if ( compile_ret ! = 0 ) { <nl> - auto cmd = get_current_program ( ) . config . compile_cmd ( <nl> - get_source_path ( ) , get_library_path ( ) , true ) ; <nl> - trash ( std : : system ( cmd . c_str ( ) ) ) ; <nl> - TC_ERROR ( " Source { } compilation failed . " , get_source_path ( ) ) ; <nl> - } else { <nl> - std : : system ( fmt : : format ( " cp { } { } " , get_library_path ( ) , cached_binary_fn ) <nl> - . c_str ( ) ) ; <nl> - } <nl> - } <nl> - disassemble ( ) ; <nl> + generate_binary ( ) ; <nl> + TC_P ( Time : : get_time ( ) - t ) ; <nl> return load_function ( ) ; <nl> } <nl> <nl> mmm a / src / backends / struct . cpp <nl> ppp b / src / backends / struct . cpp <nl> void StructCompiler : : run ( SNode & node ) { <nl> emit ( " # endif " ) ; <nl> write_source ( ) ; <nl> <nl> - auto cmd = get_current_program ( ) . config . compile_cmd ( get_source_path ( ) , <nl> - get_library_path ( ) ) ; <nl> - auto compile_ret = std : : system ( cmd . c_str ( ) ) ; <nl> - <nl> - if ( compile_ret ! = 0 ) { <nl> - auto cmd = get_current_program ( ) . config . compile_cmd ( <nl> - get_source_path ( ) , get_library_path ( ) , true ) ; <nl> - trash ( std : : system ( cmd . c_str ( ) ) ) ; <nl> - TC_ERROR ( " Compilation failed " ) ; <nl> - } <nl> - disassemble ( ) ; <nl> + generate_binary ( ) ; <nl> load_dll ( ) ; <nl> creator = load_function < void * ( * ) ( ) > ( " create_data_structure " ) ; <nl> load_accessors ( node ) ; <nl>
cache struct file binaries : 47 . 56s
taichi-dev/taichi
824c0921e6f21dc6815c14e30e06902f8f60fa1d
2019-04-15T00:31:14Z
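The generate_binary() routine above keys its cache on XXH64 of the compiler configuration concatenated with the preprocessed source, so a change to either produces a new digest and a recompile, while identical inputs hit the cached .so. A hedged sketch of just the hashing step, assuming xxhash's one-shot XXH64(input, length, seed) API; the config string and path are illustrative:

#include <xxhash.h>
#include <cstdio>
#include <string>

int main() {
  // Hash the compiler configuration together with the preprocessed source,
  // then name the cached shared object after the digest, as the commit does.
  std::string compiler_config = "-O2 -march=native";
  std::string preprocessed_source = "int add(int a, int b) { return a + b; }";
  std::string hash_input = compiler_config + preprocessed_source;

  unsigned long long hash = XXH64(hash_input.data(), hash_input.size(), 0);

  // Identical config + source => identical key => cache hit, no recompile.
  std::printf("cached binary would be: db/%llu.so\n", hash);
  return 0;
}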
mmm a / contracts / eosio . system / delegate_bandwidth . cpp <nl> ppp b / contracts / eosio . system / delegate_bandwidth . cpp <nl> namespace eosiosystem { <nl> require_auth ( payer ) ; <nl> eosio_assert ( quant . amount > 0 , " must purchase a positive amount " ) ; <nl> <nl> + auto fee = quant ; <nl> + fee . amount / = 200 ; / / / . 5 % fee <nl> + auto quant_after_fee = quant ; <nl> + quant_after_fee . amount - = fee . amount ; <nl> + <nl> if ( payer ! = N ( eosio ) ) { <nl> INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { payer , N ( active ) } , <nl> - { payer , N ( eosio ) , quant , std : : string ( " buy ram " ) } ) ; <nl> + { payer , N ( eosio . ram ) , quant_after_fee , std : : string ( " buy ram " ) } ) ; <nl> } <nl> <nl> + if ( fee . amount > 0 ) { <nl> + INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { payer , N ( active ) } , <nl> + { payer , N ( eosio . ramfee ) , fee , std : : string ( " ram fee " ) } ) ; <nl> + } <nl> <nl> int64_t bytes_out ; <nl> <nl> auto itr = _rammarket . find ( S ( 4 , RAMCORE ) ) ; <nl> _rammarket . modify ( itr , 0 , [ & ] ( auto & es ) { <nl> - bytes_out = es . convert ( quant , S ( 0 , RAM ) ) . amount ; <nl> + bytes_out = es . convert ( quant_after_fee , S ( 0 , RAM ) ) . amount ; <nl> } ) ; <nl> <nl> - <nl> eosio_assert ( bytes_out > 0 , " must reserve a positive amount " ) ; <nl> <nl> _gstate . total_ram_bytes_reserved + = uint64_t ( bytes_out ) ; <nl> - _gstate . total_ram_stake + = quant . amount ; <nl> + _gstate . total_ram_stake + = quant_after_fee . amount ; <nl> <nl> user_resources_table userres ( _self , receiver ) ; <nl> auto res_itr = userres . find ( receiver ) ; <nl> namespace eosiosystem { <nl> set_resource_limits ( res_itr - > owner , res_itr - > ram_bytes , res_itr - > net_weight . amount , res_itr - > cpu_weight . amount ) ; <nl> <nl> if ( N ( eosio ) ! = account ) { <nl> - INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { N ( eosio ) , N ( active ) } , <nl> - { N ( eosio ) , account , asset ( tokens_out ) , std : : string ( " sell ram " ) } ) ; <nl> + INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { N ( eosio . ram ) , N ( active ) } , <nl> + { N ( eosio . ram ) , account , asset ( tokens_out ) , std : : string ( " sell ram " ) } ) ; <nl> + auto fee = tokens_out . amount / 200 ; <nl> + if ( fee > 0 ) { <nl> + INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { account , N ( active ) } , <nl> + { account , N ( eosio . ramfee ) , asset ( fee ) , std : : string ( " sell ram fee " ) } ) ; <nl> + } <nl> } <nl> } <nl> <nl> namespace eosiosystem { <nl> } / / tot_itr can be invalid , should go out of scope <nl> <nl> / / create refund or update from existing refund <nl> - if ( N ( eosio ) ! = source_stake_from ) { / / for eosio both transfer and refund make no sense <nl> + if ( N ( eosio . stake ) ! = source_stake_from ) { / / for eosio both transfer and refund make no sense <nl> refunds_table refunds_tbl ( _self , from ) ; <nl> auto req = refunds_tbl . find ( from ) ; <nl> <nl> namespace eosiosystem { <nl> auto transfer_amount = net_balance + cpu_balance ; <nl> if ( asset ( 0 ) < transfer_amount ) { <nl> INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { source_stake_from , N ( active ) } , <nl> - { source_stake_from , N ( eosio ) , asset ( transfer_amount ) , std : : string ( " stake bandwidth " ) } ) ; <nl> + { source_stake_from , N ( eosio . 
stake ) , asset ( transfer_amount ) , std : : string ( " stake bandwidth " ) } ) ; <nl> } <nl> } <nl> <nl> namespace eosiosystem { <nl> / / allow people to get their tokens earlier than the 3 day delay if the unstake happened immediately after many <nl> / / consecutive missed blocks . <nl> <nl> - INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { N ( eosio ) , N ( active ) } , <nl> - { N ( eosio ) , req - > owner , req - > net_amount + req - > cpu_amount , std : : string ( " unstake " ) } ) ; <nl> + INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { N ( eosio . stake ) , N ( active ) } , <nl> + { N ( eosio . stake ) , req - > owner , req - > net_amount + req - > cpu_amount , std : : string ( " unstake " ) } ) ; <nl> <nl> refunds_tbl . erase ( req ) ; <nl> } <nl> mmm a / contracts / eosio . system / producer_pay . cpp <nl> ppp b / contracts / eosio . system / producer_pay . cpp <nl> namespace eosiosystem { <nl> INLINE_ACTION_SENDER ( eosio : : token , issue ) ( N ( eosio . token ) , { { N ( eosio ) , N ( active ) } } , <nl> { N ( eosio ) , asset ( new_tokens ) , std : : string ( " issue tokens for producer pay and savings " ) } ) ; <nl> <nl> + INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { N ( eosio ) , N ( active ) } , <nl> + { N ( eosio ) , N ( eosio . saving ) , asset ( to_savings ) , " unallocated inflation " } ) ; <nl> + <nl> + INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { N ( eosio ) , N ( active ) } , <nl> + { N ( eosio ) , N ( eosio . bpay ) , asset ( to_per_block_pay ) , " fund per - block bucket " } ) ; <nl> + <nl> + INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { N ( eosio ) , N ( active ) } , <nl> + { N ( eosio ) , N ( eosio . vpay ) , asset ( to_per_vote_pay ) , " fund per - vote bucket " } ) ; <nl> + <nl> _gstate . pervote_bucket + = to_per_vote_pay ; <nl> _gstate . perblock_bucket + = to_per_block_pay ; <nl> _gstate . savings + = to_savings ; <nl> namespace eosiosystem { <nl> if ( producer_per_vote_pay < min_pervote_daily_pay ) { <nl> producer_per_vote_pay = 0 ; <nl> } <nl> - int64_t total_pay = producer_per_block_pay + producer_per_vote_pay ; <nl> - <nl> _gstate . pervote_bucket - = producer_per_vote_pay ; <nl> _gstate . perblock_bucket - = producer_per_block_pay ; <nl> _gstate . total_unpaid_blocks - = prod . unpaid_blocks ; <nl> namespace eosiosystem { <nl> p . unpaid_blocks = 0 ; <nl> } ) ; <nl> <nl> - if ( total_pay > 0 ) { <nl> - INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { N ( eosio ) , N ( active ) } , <nl> - { N ( eosio ) , owner , asset ( total_pay ) , std : : string ( " producer pay " ) } ) ; <nl> + if ( producer_per_block_pay > 0 ) { <nl> + INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { N ( eosio . bpay ) , N ( active ) } , <nl> + { N ( eosio . bpay ) , owner , asset ( producer_per_block_pay ) , std : : string ( " producer block pay " ) } ) ; <nl> + } <nl> + if ( producer_per_vote_pay > 0 ) { <nl> + INLINE_ACTION_SENDER ( eosio : : token , transfer ) ( N ( eosio . token ) , { N ( eosio . vpay ) , N ( active ) } , <nl> + { N ( eosio . 
vpay ) , owner , asset ( producer_per_vote_pay ) , std : : string ( " producer vote pay " ) } ) ; <nl> } <nl> } <nl> <nl> mmm a / externals / binaryen <nl> ppp b / externals / binaryen <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit b4b5dc9dee60489c4206af99227524b13f2eb3aa <nl> + Subproject commit 579f3a099c286a45f58ea1ffc7bf671c415be0a6 <nl> mmm a / libraries / appbase <nl> ppp b / libraries / appbase <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 70e23f7ebbdccb64f9ac11ade9fa41ba78b31b5e <nl> + Subproject commit 50dc015b2f0e25c0cd01cf520245da23c0ed446b <nl> mmm a / tests / testUtils . py <nl> ppp b / tests / testUtils . py <nl> def bootstrap ( totalNodes , prodCount , biosHost , biosPort , dontKill = False , onlyBio <nl> if trans is None : <nl> Utils . Print ( " ERROR : Failed to create account % s " % ( eosioTokenAccount . name ) ) <nl> return False <nl> + <nl> + eosioRamAccount = copy . deepcopy ( eosioAccount ) <nl> + eosioRamAccount . name = " eosio . ram " <nl> + trans = biosNode . createAccount ( eosioRamAccount , eosioAccount , 0 ) <nl> + if trans is None : <nl> + Utils . Print ( " ERROR : Failed to create account % s " % ( eosioRamAccount . name ) ) <nl> + return False <nl> + <nl> + eosioRamfeeAccount = copy . deepcopy ( eosioAccount ) <nl> + eosioRamfeeAccount . name = " eosio . ramfee " <nl> + trans = biosNode . createAccount ( eosioRamfeeAccount , eosioAccount , 0 ) <nl> + if trans is None : <nl> + Utils . Print ( " ERROR : Failed to create account % s " % ( eosioRamfeeAccount . name ) ) <nl> + return False <nl> + <nl> + eosioStakeAccount = copy . deepcopy ( eosioAccount ) <nl> + eosioStakeAccount . name = " eosio . stake " <nl> + trans = biosNode . createAccount ( eosioStakeAccount , eosioAccount , 0 ) <nl> + if trans is None : <nl> + Utils . Print ( " ERROR : Failed to create account % s " % ( eosioStakeAccount . name ) ) <nl> + return False <nl> <nl> Node . validateTransaction ( trans ) <nl> transId = Node . getTransId ( trans ) <nl> mmm a / unittests / CMakeLists . txt <nl> ppp b / unittests / CMakeLists . txt <nl> target_link_libraries ( unit_test eosio_chain chainbase eosio_testing eos_utiliti <nl> target_include_directories ( unit_test PUBLIC $ { CMAKE_BINARY_DIR } / contracts $ { CMAKE_CURRENT_BINARY_DIR } / tests / contracts ) <nl> target_include_directories ( unit_test PUBLIC $ { CMAKE_CURRENT_SOURCE_DIR } / wasm_tests ) <nl> target_include_directories ( unit_test PUBLIC $ { CMAKE_CURRENT_BINARY_DIR } / include ) <nl> - add_dependencies ( unit_test asserter test_api test_api_mem test_api_db test_api_multi_index exchange eosio . token proxy identity identity_test stltest infinite eosio . system eosio . token eosio . bios test . inline multi_index_test noop dice eosio . msig payloadless tic_tac_toe ) <nl> + add_dependencies ( unit_test asserter test_api test_api_mem test_api_db test_ram_limit test_api_multi_index exchange eosio . token proxy identity identity_test stltest infinite eosio . system eosio . token eosio . bios test . inline multi_index_test noop dice eosio . msig payloadless tic_tac_toe ) <nl> <nl> # Manually run unit_test for all supported runtimes <nl> # To run unit_test with all log from blockchain displayed , put - - verbose after - - , i . e . unit_test - - - - verbose <nl> mmm a / unittests / bootseq_tests . cpp <nl> ppp b / unittests / bootseq_tests . 
cpp <nl> class bootseq_tester : public TESTER { <nl> <nl> auto delegate_bandwidth ( name from , name receiver , asset net , asset cpu , uint8_t transfer = 1 ) { <nl> auto r = base_tester : : push_action ( N ( eosio ) , N ( delegatebw ) , from , mvo ( ) <nl> - ( " from " , " eosio " ) <nl> + ( " from " , from ) <nl> ( " receiver " , receiver ) <nl> ( " stake_net_quantity " , net ) <nl> ( " stake_cpu_quantity " , cpu ) <nl> BOOST_FIXTURE_TEST_CASE ( bootseq_test , bootseq_tester ) { <nl> try { <nl> <nl> / / Create eosio . msig and eosio . token <nl> - create_accounts ( { N ( eosio . msig ) , N ( eosio . token ) } ) ; <nl> + create_accounts ( { N ( eosio . msig ) , N ( eosio . token ) , N ( eosio . ram ) , N ( eosio . ramfee ) , N ( eosio . stake ) , N ( eosio . vpay ) , N ( eosio . bpay ) , N ( eosio . saving ) } ) ; <nl> <nl> / / Set code for the following accounts : <nl> / / - eosio ( code : eosio . bios ) ( already set by tester constructor ) <nl> BOOST_FIXTURE_TEST_CASE ( bootseq_test , bootseq_tester ) { <nl> auto r = buyram ( N ( eosio ) , a . aname , asset ( ram ) ) ; <nl> BOOST_REQUIRE ( ! r - > except_ptr ) ; <nl> <nl> - r = delegate_bandwidth ( N ( eosio ) , a . aname , asset ( net ) , asset ( cpu ) ) ; <nl> + r = delegate_bandwidth ( N ( eosio . stake ) , a . aname , asset ( net ) , asset ( cpu ) ) ; <nl> BOOST_REQUIRE ( ! r - > except_ptr ) ; <nl> } <nl> <nl> mmm a / unittests / eosio . system_tests . cpp <nl> ppp b / unittests / eosio . system_tests . cpp <nl> BOOST_AUTO_TEST_SUITE ( eosio_system_tests ) <nl> <nl> BOOST_FIXTURE_TEST_CASE ( buysell , eosio_system_tester ) try { <nl> <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) + get_balance ( " eosio . ramfee " ) + get_balance ( " eosio . stake " ) ) ; <nl> BOOST_REQUIRE_EQUAL ( core_from_string ( " 0 . 0000 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> transfer ( " eosio " , " alice1111111 " , core_from_string ( " 1000 . 0000 " ) , " eosio " ) ; <nl> BOOST_FIXTURE_TEST_CASE ( buysell , eosio_system_tester ) try { <nl> BOOST_REQUIRE_EQUAL ( true , 0 < bought_bytes ) ; <nl> <nl> BOOST_REQUIRE_EQUAL ( success ( ) , sellram ( " alice1111111 " , bought_bytes ) ) ; <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 999 . 9999 " ) , get_balance ( " alice1111111 " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 998 . 0050 " ) , get_balance ( " alice1111111 " ) ) ; <nl> total = get_total_stake ( " alice1111111 " ) ; <nl> BOOST_REQUIRE_EQUAL ( true , total [ " ram_bytes " ] . as_uint64 ( ) = = init_bytes ) ; <nl> <nl> transfer ( " eosio " , " alice1111111 " , core_from_string ( " 100000000 . 0000 " ) , " eosio " ) ; <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 100000999 . 9999 " ) , get_balance ( " alice1111111 " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 100000998 . 0050 " ) , get_balance ( " alice1111111 " ) ) ; <nl> + / / alice buys ram for 10000000 . 0000 , 0 . 5 % = 50000 . 0000 got to ramfee <nl> + / / after fee 9950000 . 0000 got to bought bytes <nl> + / / when selling back bought bytes , pay 0 . 5 % fee and get back 99 . 5 % of 9950000 . 0000 = 9900250 . 0000 <nl> + / / expected account after that is 90000998 . 0050 + 9900250 . 0000 = 99901248 . 0050 with a difference <nl> + / / of order 0 . 0001 due to rounding errors <nl> BOOST_REQUIRE_EQUAL ( success ( ) , buyram ( " alice1111111 " , " alice1111111 " , core_from_string ( " 10000000 . 
0000 " ) ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 90000998 . 0050 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> total = get_total_stake ( " alice1111111 " ) ; <nl> bytes = total [ " ram_bytes " ] . as_uint64 ( ) ; <nl> BOOST_FIXTURE_TEST_CASE ( buysell , eosio_system_tester ) try { <nl> wdump ( ( init_bytes ) ( bought_bytes ) ( bytes ) ) ; <nl> <nl> BOOST_REQUIRE_EQUAL ( true , total [ " ram_bytes " ] . as_uint64 ( ) = = init_bytes ) ; <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 100000999 . 9993 " ) , get_balance ( " alice1111111 " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 99901248 . 0044 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> <nl> BOOST_REQUIRE_EQUAL ( success ( ) , buyram ( " alice1111111 " , " alice1111111 " , core_from_string ( " 100 . 0000 " ) ) ) ; <nl> BOOST_FIXTURE_TEST_CASE ( buysell , eosio_system_tester ) try { <nl> BOOST_REQUIRE_EQUAL ( success ( ) , buyram ( " alice1111111 " , " alice1111111 " , core_from_string ( " 10 . 0000 " ) ) ) ; <nl> BOOST_REQUIRE_EQUAL ( success ( ) , buyram ( " alice1111111 " , " alice1111111 " , core_from_string ( " 10 . 0000 " ) ) ) ; <nl> BOOST_REQUIRE_EQUAL ( success ( ) , buyram ( " alice1111111 " , " alice1111111 " , core_from_string ( " 30 . 0000 " ) ) ) ; <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 100000439 . 9993 " ) , get_balance ( " alice1111111 " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 99900688 . 0044 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> auto newtotal = get_total_stake ( " alice1111111 " ) ; <nl> <nl> BOOST_FIXTURE_TEST_CASE ( buysell , eosio_system_tester ) try { <nl> wdump ( ( newbytes ) ( bytes ) ( bought_bytes ) ) ; <nl> <nl> BOOST_REQUIRE_EQUAL ( success ( ) , sellram ( " alice1111111 " , bought_bytes ) ) ; <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 100000999 . 9991 " ) , get_balance ( " alice1111111 " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 99901242 . 4183 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> <nl> newtotal = get_total_stake ( " alice1111111 " ) ; <nl> BOOST_FIXTURE_TEST_CASE ( buysell , eosio_system_tester ) try { <nl> BOOST_REQUIRE_EQUAL ( success ( ) , buyram ( " alice1111111 " , " alice1111111 " , core_from_string ( " 100000 . 0000 " ) ) ) ; <nl> BOOST_REQUIRE_EQUAL ( success ( ) , buyram ( " alice1111111 " , " alice1111111 " , core_from_string ( " 100000 . 0000 " ) ) ) ; <nl> BOOST_REQUIRE_EQUAL ( success ( ) , buyram ( " alice1111111 " , " alice1111111 " , core_from_string ( " 300000 . 0000 " ) ) ) ; <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 49400999 . 9991 " ) , get_balance ( " alice1111111 " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 49301242 . 4183 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> auto finaltotal = get_total_stake ( " alice1111111 " ) ; <nl> auto endbytes = finaltotal [ " ram_bytes " ] . as_uint64 ( ) ; <nl> BOOST_FIXTURE_TEST_CASE ( buysell , eosio_system_tester ) try { <nl> <nl> BOOST_REQUIRE_EQUAL ( success ( ) , sellram ( " alice1111111 " , bought_bytes ) ) ; <nl> <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 100000999 . 9943 " ) , get_balance ( " alice1111111 " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 99396507 . 4147 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> } FC_LOG_AND_RETHROW ( ) <nl> <nl> BOOST_FIXTURE_TEST_CASE ( stake_unstake , eosio_system_tester ) try { <nl> - / / issue ( " eosio " , core_from_string ( " 1000 . 
0000 " ) , config : : system_account_name ) ; <nl> <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) + get_balance ( " eosio . ramfee " ) + get_balance ( " eosio . stake " ) ) ; <nl> BOOST_REQUIRE_EQUAL ( core_from_string ( " 0 . 0000 " ) , get_balance ( " alice1111111 " ) ) ; <nl> transfer ( " eosio " , " alice1111111 " , core_from_string ( " 1000 . 0000 " ) , " eosio " ) ; <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 999999000 . 0000 " ) , get_balance ( " eosio " ) ) ; <nl> + <nl> BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000 . 0000 " ) , get_balance ( " alice1111111 " ) ) ; <nl> BOOST_REQUIRE_EQUAL ( success ( ) , stake ( " eosio " , " alice1111111 " , core_from_string ( " 200 . 0000 " ) , core_from_string ( " 100 . 0000 " ) ) ) ; <nl> <nl> BOOST_FIXTURE_TEST_CASE ( stake_unstake , eosio_system_tester ) try { <nl> } FC_LOG_AND_RETHROW ( ) <nl> <nl> BOOST_FIXTURE_TEST_CASE ( stake_unstake_with_transfer , eosio_system_tester ) try { <nl> - / / issue ( " eosio " , core_from_string ( " 1000 . 0000 " ) , config : : system_account_name ) ; <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) ) ; <nl> + issue ( " eosio " , core_from_string ( " 1000 . 0000 " ) , config : : system_account_name ) ; <nl> + issue ( " eosio . stake " , core_from_string ( " 1000 . 0000 " ) , config : : system_account_name ) ; <nl> BOOST_REQUIRE_EQUAL ( core_from_string ( " 0 . 0000 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> / / eosio stakes for alice with transfer flag <nl> + <nl> transfer ( " eosio " , " bob111111111 " , core_from_string ( " 1000 . 0000 " ) , " eosio " ) ; <nl> BOOST_REQUIRE_EQUAL ( success ( ) , stake_with_transfer ( " bob111111111 " , " alice1111111 " , core_from_string ( " 200 . 0000 " ) , core_from_string ( " 100 . 0000 " ) ) ) ; <nl> <nl> BOOST_FIXTURE_TEST_CASE ( stake_unstake_with_transfer , eosio_system_tester ) try <nl> BOOST_REQUIRE_EQUAL ( core_from_string ( " 110 . 0000 " ) , total [ " cpu_weight " ] . as < asset > ( ) ) ; <nl> REQUIRE_MATCHING_OBJECT ( voter ( " alice1111111 " , core_from_string ( " 300 . 0000 " ) ) , get_voter_info ( " alice1111111 " ) ) ; <nl> <nl> - / / BOOST_REQUIRE_EQUAL ( core_from_string ( " 999999700 . 0000 " ) , get_balance ( " eosio " ) ) ; <nl> BOOST_REQUIRE_EQUAL ( core_from_string ( " 0 . 0000 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> / / alice stakes for herself <nl> BOOST_FIXTURE_TEST_CASE ( stake_unstake_with_transfer , eosio_system_tester ) try <nl> BOOST_REQUIRE_EQUAL ( success ( ) , unstake ( " alice1111111 " , " alice1111111 " , core_from_string ( " 400 . 0000 " ) , core_from_string ( " 200 . 0000 " ) ) ) ; <nl> BOOST_REQUIRE_EQUAL ( core_from_string ( " 700 . 0000 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> + edump ( ( get_balance ( " eosio . stake " ) ) ) ; <nl> + <nl> produce_block ( fc : : hours ( 3 * 24 - 1 ) ) ; <nl> produce_blocks ( 1 ) ; <nl> BOOST_REQUIRE_EQUAL ( core_from_string ( " 700 . 0000 " ) , get_balance ( " alice1111111 " ) ) ; <nl> / / after 3 days funds should be released <nl> + <nl> produce_block ( fc : : hours ( 1 ) ) ; <nl> produce_blocks ( 1 ) ; <nl> + <nl> BOOST_REQUIRE_EQUAL ( core_from_string ( " 1300 . 
0000 " ) , get_balance ( " alice1111111 " ) ) ; <nl> <nl> / / stake should be equal to what was staked in constructor , votring power should be 0 <nl> mmm a / unittests / eosio_system_tester . hpp <nl> ppp b / unittests / eosio_system_tester . hpp <nl> class eosio_system_tester : public TESTER { <nl> <nl> produce_blocks ( 2 ) ; <nl> <nl> - create_accounts ( { N ( eosio . token ) } ) ; <nl> + create_accounts ( { N ( eosio . token ) , N ( eosio . ram ) , N ( eosio . ramfee ) , N ( eosio . stake ) , <nl> + N ( eosio . bpay ) , N ( eosio . vpay ) , N ( eosio . saving ) } ) ; <nl> <nl> produce_blocks ( 100 ) ; <nl> <nl> class eosio_system_tester : public TESTER { <nl> create_account_with_resources ( N ( carol1111111 ) , config : : system_account_name , core_from_string ( " 1 . 0000 " ) , false ) ; <nl> <nl> <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) + get_balance ( " eosio . ramfee " ) + get_balance ( " eosio . stake " ) ) ; <nl> } <nl> <nl> <nl> class eosio_system_tester : public TESTER { <nl> } <nl> <nl> asset get_balance ( const account_name & act ) { <nl> - <nl> vector < char > data = get_row_by_account ( N ( eosio . token ) , act , N ( accounts ) , symbol ( CORE_SYMBOL ) . to_symbol_code ( ) . value ) ; <nl> return data . empty ( ) ? asset ( 0 , symbol ( CORE_SYMBOL ) ) : token_abi_ser . binary_to_variant ( " account " , data ) [ " balance " ] . as < asset > ( ) ; <nl> } <nl> mmm a / unittests / multisig_tests . cpp <nl> ppp b / unittests / multisig_tests . cpp <nl> class eosio_msig_tester : public tester { <nl> public : <nl> <nl> eosio_msig_tester ( ) { <nl> - create_accounts ( { N ( eosio . msig ) , N ( alice ) , N ( bob ) , N ( carol ) } ) ; <nl> + create_accounts ( { N ( eosio . msig ) , N ( eosio . stake ) , N ( eosio . ram ) , N ( eosio . ramfee ) , N ( alice ) , N ( bob ) , N ( carol ) } ) ; <nl> produce_block ( ) ; <nl> <nl> auto trace = base_tester : : push_action ( config : : system_account_name , N ( setpriv ) , <nl> BOOST_FIXTURE_TEST_CASE ( update_system_contract_all_approve , eosio_msig_tester ) <nl> <nl> create_currency ( N ( eosio . token ) , config : : system_account_name , core_from_string ( " 10000000000 . 0000 " ) ) ; <nl> issue ( config : : system_account_name , core_from_string ( " 1000000000 . 0000 " ) ) ; <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) + get_balance ( " eosio . ramfee " ) + get_balance ( " eosio . stake " ) ) ; <nl> <nl> set_code ( config : : system_account_name , eosio_system_wast ) ; <nl> set_abi ( config : : system_account_name , eosio_system_abi ) ; <nl> BOOST_FIXTURE_TEST_CASE ( update_system_contract_all_approve , eosio_msig_tester ) <nl> create_account_with_resources ( N ( bob111111111 ) , N ( eosio ) , core_from_string ( " 0 . 4500 " ) , false ) ; <nl> create_account_with_resources ( N ( carol1111111 ) , N ( eosio ) , core_from_string ( " 1 . 0000 " ) , false ) ; <nl> <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) + get_balance ( " eosio . ramfee " ) + get_balance ( " eosio . 
stake " ) ) ; <nl> <nl> vector < permission_level > perm = { { N ( alice ) , config : : active_name } , { N ( bob ) , config : : active_name } , <nl> { N ( carol ) , config : : active_name } } ; <nl> BOOST_FIXTURE_TEST_CASE ( update_system_contract_major_approve , eosio_msig_tester <nl> create_account_with_resources ( N ( bob111111111 ) , N ( eosio ) , core_from_string ( " 0 . 4500 " ) , false ) ; <nl> create_account_with_resources ( N ( carol1111111 ) , N ( eosio ) , core_from_string ( " 1 . 0000 " ) , false ) ; <nl> <nl> - BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( core_from_string ( " 1000000000 . 0000 " ) , get_balance ( " eosio " ) + get_balance ( " eosio . ramfee " ) + get_balance ( " eosio . stake " ) ) ; <nl> <nl> vector < permission_level > perm = { { N ( alice ) , config : : active_name } , { N ( bob ) , config : : active_name } , <nl> { N ( carol ) , config : : active_name } , { N ( apple ) , config : : active_name } } ; <nl> mmm a / unittests / ram_tests . cpp <nl> ppp b / unittests / ram_tests . cpp <nl> BOOST_FIXTURE_TEST_CASE ( ram_tests , eosio_system : : eosio_system_tester ) { try { <nl> create_account_with_resources ( N ( testram11111 ) , N ( eosio ) , init_request_bytes ) ; <nl> create_account_with_resources ( N ( testram22222 ) , N ( eosio ) , init_request_bytes ) ; <nl> produce_blocks ( 10 ) ; <nl> - BOOST_REQUIRE_EQUAL ( success ( ) , stake ( " eosio " , " testram11111 " , core_from_string ( " 10 . 0000 " ) , core_from_string ( " 5 . 0000 " ) ) ) ; <nl> + BOOST_REQUIRE_EQUAL ( success ( ) , stake ( " eosio . stake " , " testram11111 " , core_from_string ( " 10 . 0000 " ) , core_from_string ( " 5 . 0000 " ) ) ) ; <nl> produce_blocks ( 10 ) ; <nl> <nl> for ( auto i = 0 ; i < 10 ; + + i ) { <nl> BOOST_FIXTURE_TEST_CASE ( ram_tests , eosio_system : : eosio_system_tester ) { try { <nl> ( " payer " , " testram11111 " ) <nl> ( " from " , 1 ) <nl> ( " to " , 10 ) <nl> - ( " size " , 1910 ) ) ; <nl> + ( " size " , 1780 / * 1910 * / ) ) ; <nl> produce_blocks ( 1 ) ; <nl> auto ram_usage = rlm . get_account_ram_usage ( N ( testram11111 ) ) ; <nl> <nl> BOOST_FIXTURE_TEST_CASE ( ram_tests , eosio_system : : eosio_system_tester ) { try { <nl> ( " payer " , " testram11111 " ) <nl> ( " from " , 1 ) <nl> ( " to " , 10 ) <nl> - ( " size " , 1920 ) ) , <nl> + ( " size " , 1790 / * 1920 * / ) ) , <nl> ram_usage_exceeded , <nl> fc_exception_message_starts_with ( " account testram11111 has insufficient ram " ) ) ; <nl> wlog ( " ram_tests 2 % % % % % % " ) ; <nl> BOOST_FIXTURE_TEST_CASE ( ram_tests , eosio_system : : eosio_system_tester ) { try { <nl> ( " payer " , " testram11111 " ) <nl> ( " from " , 1 ) <nl> ( " to " , 10 ) <nl> - ( " size " , 1810 ) ) ; <nl> + ( " size " , 1680 / * 1810 * / ) ) ; <nl> produce_blocks ( 1 ) ; <nl> BOOST_REQUIRE_EQUAL ( ram_usage - 1000 , rlm . 
get_account_ram_usage ( N ( testram11111 ) ) ) ; <nl> <nl> BOOST_FIXTURE_TEST_CASE ( ram_tests , eosio_system : : eosio_system_tester ) { try { <nl> ( " payer " , " testram11111 " ) <nl> ( " from " , 1 ) <nl> ( " to " , 11 ) <nl> - ( " size " , 1810 ) ) , <nl> + ( " size " , 1680 / * 1810 * / ) ) , <nl> ram_usage_exceeded , <nl> fc_exception_message_starts_with ( " account testram11111 has insufficient ram " ) ) ; <nl> produce_blocks ( 1 ) ; <nl> BOOST_FIXTURE_TEST_CASE ( ram_tests , eosio_system : : eosio_system_tester ) { try { <nl> ( " payer " , " testram11111 " ) <nl> ( " from " , 1 ) <nl> ( " to " , 11 ) <nl> - ( " size " , 1720 ) ) ; <nl> + ( " size " , 1600 / * 1720 * / ) ) ; <nl> produce_blocks ( 1 ) ; <nl> <nl> tester - > push_action ( N ( testram11111 ) , N ( rmentry ) , N ( testram11111 ) , mvo ( ) <nl> ( " from " , 3 ) <nl> ( " to " , 3 ) ) ; <nl> produce_blocks ( 1 ) ; <nl> - <nl> + <nl> / / verify that the new entry will exceed the allocation bytes limit <nl> BOOST_REQUIRE_EXCEPTION ( <nl> tester - > push_action ( N ( testram11111 ) , N ( setentry ) , N ( testram11111 ) , mvo ( ) <nl> ( " payer " , " testram11111 " ) <nl> ( " from " , 12 ) <nl> ( " to " , 12 ) <nl> - ( " size " , 1900 ) ) , <nl> + ( " size " , 1780 ) ) , <nl> ram_usage_exceeded , <nl> fc_exception_message_starts_with ( " account testram11111 has insufficient ram " ) ) ; <nl> produce_blocks ( 1 ) ; <nl> BOOST_FIXTURE_TEST_CASE ( ram_tests , eosio_system : : eosio_system_tester ) { try { <nl> ( " payer " , " testram11111 " ) <nl> ( " from " , 12 ) <nl> ( " to " , 12 ) <nl> - ( " size " , 1720 ) ) ; <nl> + ( " size " , 1620 / * 1720 * / ) ) ; <nl> produce_blocks ( 1 ) ; <nl> <nl> / / verify that anoth new entry will exceed the allocation bytes limit , to setup testing of new payer <nl> BOOST_FIXTURE_TEST_CASE ( ram_tests , eosio_system : : eosio_system_tester ) { try { <nl> ( " payer " , " testram11111 " ) <nl> ( " from " , 13 ) <nl> ( " to " , 13 ) <nl> - ( " size " , 1720 ) ) , <nl> + ( " size " , 1660 ) ) , <nl> ram_usage_exceeded , <nl> fc_exception_message_starts_with ( " account testram11111 has insufficient ram " ) ) ; <nl> produce_blocks ( 1 ) ; <nl> <nl> + # if 0 <nl> / / verify that the new entry is under the allocation bytes limit <nl> tester - > push_action ( N ( testram11111 ) , N ( setentry ) , { N ( testram11111 ) , N ( testram22222 ) } , mvo ( ) <nl> ( " payer " , " testram22222 " ) <nl> BOOST_FIXTURE_TEST_CASE ( ram_tests , eosio_system : : eosio_system_tester ) { try { <nl> ( " to " , 22 ) <nl> ( " size " , 1910 ) ) ; <nl> produce_blocks ( 1 ) ; <nl> + # endif <nl> } FC_LOG_AND_RETHROW ( ) } <nl> <nl> BOOST_AUTO_TEST_SUITE_END ( ) <nl>
Merge pull request from EOSIO / issue3291
EOSIO/eos
adfbbe2bd2ee7389f02fac3a3ae6f890f2c6275e
2018-05-24T00:23:28Z
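A minimal sketch of the invariant the updated tests assert (plain C++, not the EOSIO test harness; the account names mirror the diff, but the ledger and the transfer amounts are hypothetical). Once RAM purchases and staking route funds into eosio.ram, eosio.ramfee and eosio.stake, the issuer's balance alone no longer equals the issuance; conservation only holds over the sum of the accounts involved, which is why the `BOOST_REQUIRE_EQUAL` checks now add the system-account balances together.

```cpp
#include <cassert>
#include <cstdint>
#include <map>
#include <string>

int main() {
    std::map<std::string, int64_t> ledger;     // balances in 0.0001-token ticks
    const int64_t issued = 10000000000000LL;   // "1000000000.0000" from the test
    ledger["eosio"] = issued;

    auto transfer = [&](const std::string& from, const std::string& to, int64_t amt) {
        ledger[from] -= amt;
        ledger[to]   += amt;
    };

    // Account creation buys RAM (payment plus fee) and delegates stake.
    transfer("eosio", "eosio.ram",    4500);    // hypothetical RAM payment
    transfer("eosio", "eosio.ramfee", 23);      // hypothetical fee cut
    transfer("eosio", "eosio.stake",  200000);  // hypothetical CPU/NET stake

    // The single-account check the old tests used would now fail:
    assert(ledger["eosio"] != issued);

    // The conservation check the diff switches to: sum across the accounts.
    int64_t sum = 0;
    for (const auto& entry : ledger) sum += entry.second;
    assert(sum == issued);
    return 0;
}
```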
mmm a / src / webui / api / appcontroller . cpp <nl> ppp b / src / webui / api / appcontroller . cpp <nl> void AppController : : preferencesAction ( ) <nl> { <nl> const Preferences * const pref = Preferences : : instance ( ) ; <nl> const auto * session = BitTorrent : : Session : : instance ( ) ; <nl> - QVariantMap data ; <nl> + QVariantHash data ; <nl> <nl> / / Downloads <nl> / / When adding a torrent <nl> void AppController : : preferencesAction ( ) <nl> data [ " export_dir_fin " ] = Utils : : Fs : : toNativePath ( session - > finishedTorrentExportDirectory ( ) ) ; <nl> / / Automatically add torrents from <nl> const QVariantHash dirs = pref - > getScanDirs ( ) ; <nl> - QVariantMap nativeDirs ; <nl> + QVariantHash nativeDirs ; <nl> for ( auto i = dirs . cbegin ( ) ; i ! = dirs . cend ( ) ; + + i ) { <nl> if ( i . value ( ) . type ( ) = = QVariant : : Int ) <nl> nativeDirs . insert ( Utils : : Fs : : toNativePath ( i . key ( ) ) , i . value ( ) . toInt ( ) ) ; <nl> void AppController : : preferencesAction ( ) <nl> data [ " announce_to_all_tiers " ] = session - > announceToAllTiers ( ) ; <nl> data [ " announce_ip " ] = session - > announceIP ( ) ; <nl> <nl> - setResult ( QJsonObject : : fromVariantMap ( data ) ) ; <nl> + setResult ( QJsonObject : : fromVariantHash ( data ) ) ; <nl> } <nl> <nl> void AppController : : setPreferencesAction ( ) <nl> void AppController : : setPreferencesAction ( ) <nl> <nl> Preferences * const pref = Preferences : : instance ( ) ; <nl> auto session = BitTorrent : : Session : : instance ( ) ; <nl> - const QVariantMap m = QJsonDocument : : fromJson ( params ( ) [ " json " ] . toUtf8 ( ) ) . toVariant ( ) . toMap ( ) ; <nl> + const QVariantHash m = QJsonDocument : : fromJson ( params ( ) [ " json " ] . toUtf8 ( ) ) . toVariant ( ) . toHash ( ) ; <nl> <nl> - QVariantMap : : ConstIterator it ; <nl> + QVariantHash : : ConstIterator it ; <nl> const auto hasKey = [ & it , & m ] ( const char * key ) - > bool <nl> { <nl> it = m . find ( QLatin1String ( key ) ) ; <nl> void AppController : : setPreferencesAction ( ) <nl> session - > setFinishedTorrentExportDirectory ( it . value ( ) . toString ( ) ) ; <nl> / / Automatically add torrents from <nl> if ( hasKey ( " scan_dirs " ) ) { <nl> - const QVariantMap nativeDirs = it . value ( ) . toMap ( ) ; <nl> + const QVariantHash nativeDirs = it . value ( ) . toHash ( ) ; <nl> QVariantHash oldScanDirs = pref - > getScanDirs ( ) ; <nl> QVariantHash scanDirs ; <nl> ScanFoldersModel * model = ScanFoldersModel : : instance ( ) ; <nl> void AppController : : setPreferencesAction ( ) <nl> return ( ! iface . addressEntries ( ) . isEmpty ( ) ) & & ( iface . name ( ) = = ifaceValue ) ; <nl> } ) ; <nl> const QString ifaceName = ( ifacesIter ! = ifaces . cend ( ) ) ? ifacesIter - > humanReadableName ( ) : QString { } ; <nl> - <nl> + <nl> session - > setNetworkInterface ( ifaceValue ) ; <nl> session - > setNetworkInterfaceName ( ifaceName ) ; <nl> } <nl> void AppController : : setPreferencesAction ( ) <nl> / / Resolve peer countries <nl> if ( hasKey ( " resolve_peer_countries " ) ) <nl> pref - > resolvePeerCountries ( it . value ( ) . toBool ( ) ) ; <nl> - <nl> + <nl> / / libtorrent preferences <nl> / / Async IO threads <nl> if ( hasKey ( " async_io_threads " ) ) <nl> void AppController : : networkInterfaceListAction ( ) <nl> QVariantList ifaceList ; <nl> for ( const QNetworkInterface & iface : asConst ( QNetworkInterface : : allInterfaces ( ) ) ) { <nl> if ( ! iface . addressEntries ( ) . isEmpty ( ) ) { <nl> - ifaceList . 
append ( QVariantMap { <nl> + ifaceList . append ( QVariantHash { <nl> { " name " , iface . humanReadableName ( ) } , <nl> { " value " , iface . name ( ) } <nl> } ) ; <nl> mmm a / src / webui / api / logcontroller . cpp <nl> ppp b / src / webui / api / logcontroller . cpp <nl> void LogController : : mainAction ( ) <nl> | | ( msg . type = = Log : : CRITICAL & & isCritical ) ) ) <nl> continue ; <nl> <nl> - msgList . append ( QVariantMap { <nl> + msgList . append ( QVariantHash { <nl> { KEY_LOG_ID , msg . id } , <nl> { KEY_LOG_TIMESTAMP , msg . timestamp } , <nl> { KEY_LOG_MSG_TYPE , msg . type } , <nl> void LogController : : peersAction ( ) <nl> QVariantList peerList ; <nl> <nl> for ( const Log : : Peer & peer : asConst ( logger - > getPeers ( lastKnownId ) ) ) { <nl> - peerList . append ( QVariantMap { <nl> + peerList . append ( QVariantHash { <nl> { KEY_LOG_ID , peer . id } , <nl> { KEY_LOG_TIMESTAMP , peer . timestamp } , <nl> { KEY_LOG_PEER_IP , peer . ip } , <nl> mmm a / src / webui / api / torrentscontroller . cpp <nl> ppp b / src / webui / api / torrentscontroller . cpp <nl> namespace <nl> const QString privateMsg { QCoreApplication : : translate ( " TrackerListWidget " , " This torrent is private " ) } ; <nl> const bool isTorrentPrivate = torrent - > isPrivate ( ) ; <nl> <nl> - const QVariantMap dht { <nl> + const QVariantHash dht { <nl> { KEY_TRACKER_URL , " * * [ DHT ] * * " } , <nl> { KEY_TRACKER_TIER , " " } , <nl> { KEY_TRACKER_MSG , ( isTorrentPrivate ? privateMsg : " " ) } , <nl> namespace <nl> { KEY_TRACKER_LEECHES_COUNT , leechesDHT } <nl> } ; <nl> <nl> - const QVariantMap pex { <nl> + const QVariantHash pex { <nl> { KEY_TRACKER_URL , " * * [ PeX ] * * " } , <nl> { KEY_TRACKER_TIER , " " } , <nl> { KEY_TRACKER_MSG , ( isTorrentPrivate ? privateMsg : " " ) } , <nl> namespace <nl> { KEY_TRACKER_LEECHES_COUNT , leechesPeX } <nl> } ; <nl> <nl> - const QVariantMap lsd { <nl> + const QVariantHash lsd { <nl> { KEY_TRACKER_URL , " * * [ LSD ] * * " } , <nl> { KEY_TRACKER_TIER , " " } , <nl> { KEY_TRACKER_MSG , ( isTorrentPrivate ? privateMsg : " " ) } , <nl> void TorrentsController : : propertiesAction ( ) <nl> checkParams ( { " hash " } ) ; <nl> <nl> const QString hash { params ( ) [ " hash " ] } ; <nl> - QVariantMap dataDict ; <nl> + QVariantHash dataDict ; <nl> BitTorrent : : TorrentHandle * const torrent = BitTorrent : : Session : : instance ( ) - > findTorrent ( hash ) ; <nl> if ( ! torrent ) <nl> throw APIError ( APIErrorType : : NotFound ) ; <nl> void TorrentsController : : propertiesAction ( ) <nl> dataDict [ KEY_PROP_SAVE_PATH ] = Utils : : Fs : : toNativePath ( torrent - > savePath ( ) ) ; <nl> dataDict [ KEY_PROP_COMMENT ] = torrent - > comment ( ) ; <nl> <nl> - setResult ( QJsonObject : : fromVariantMap ( dataDict ) ) ; <nl> + setResult ( QJsonObject : : fromVariantHash ( dataDict ) ) ; <nl> } <nl> <nl> / / Returns the trackers for a torrent in JSON format . <nl> void TorrentsController : : trackersAction ( ) <nl> for ( const BitTorrent : : TrackerEntry & tracker : asConst ( torrent - > trackers ( ) ) ) { <nl> const BitTorrent : : TrackerInfo data = trackersData . value ( tracker . url ( ) ) ; <nl> <nl> - trackerList < < QVariantMap { <nl> + trackerList < < QVariantHash { <nl> { KEY_TRACKER_URL , tracker . url ( ) } , <nl> { KEY_TRACKER_TIER , tracker . tier ( ) } , <nl> { KEY_TRACKER_STATUS , static_cast < int > ( tracker . 
status ( ) ) } , <nl> void TorrentsController : : webseedsAction ( ) <nl> throw APIError ( APIErrorType : : NotFound ) ; <nl> <nl> for ( const QUrl & webseed : asConst ( torrent - > urlSeeds ( ) ) ) { <nl> - webSeedList . append ( QVariantMap { <nl> + webSeedList . append ( QVariantHash { <nl> { KEY_WEBSEED_URL , webseed . toString ( ) } <nl> } ) ; <nl> } <nl> void TorrentsController : : filesAction ( ) <nl> const QVector < qreal > fileAvailability = torrent - > availableFileFractions ( ) ; <nl> const BitTorrent : : TorrentInfo info = torrent - > info ( ) ; <nl> for ( int i = 0 ; i < torrent - > filesCount ( ) ; + + i ) { <nl> - QVariantMap fileDict = { <nl> + QVariantHash fileDict = { <nl> { KEY_FILE_PROGRESS , fp [ i ] } , <nl> { KEY_FILE_PRIORITY , static_cast < int > ( priorities [ i ] ) } , <nl> { KEY_FILE_SIZE , torrent - > fileSize ( i ) } , <nl> void TorrentsController : : uploadLimitAction ( ) <nl> checkParams ( { " hashes " } ) ; <nl> <nl> const QStringList hashes { params ( ) [ " hashes " ] . split ( ' | ' ) } ; <nl> - QVariantMap map ; <nl> + QVariantHash map ; <nl> for ( const QString & hash : hashes ) { <nl> int limit = - 1 ; <nl> const BitTorrent : : TorrentHandle * const torrent = BitTorrent : : Session : : instance ( ) - > findTorrent ( hash ) ; <nl> void TorrentsController : : uploadLimitAction ( ) <nl> map [ hash ] = limit ; <nl> } <nl> <nl> - setResult ( QJsonObject : : fromVariantMap ( map ) ) ; <nl> + setResult ( QJsonObject : : fromVariantHash ( map ) ) ; <nl> } <nl> <nl> void TorrentsController : : downloadLimitAction ( ) <nl> void TorrentsController : : downloadLimitAction ( ) <nl> checkParams ( { " hashes " } ) ; <nl> <nl> const QStringList hashes { params ( ) [ " hashes " ] . split ( ' | ' ) } ; <nl> - QVariantMap map ; <nl> + QVariantHash map ; <nl> for ( const QString & hash : hashes ) { <nl> int limit = - 1 ; <nl> const BitTorrent : : TorrentHandle * const torrent = BitTorrent : : Session : : instance ( ) - > findTorrent ( hash ) ; <nl> void TorrentsController : : downloadLimitAction ( ) <nl> map [ hash ] = limit ; <nl> } <nl> <nl> - setResult ( QJsonObject : : fromVariantMap ( map ) ) ; <nl> + setResult ( QJsonObject : : fromVariantHash ( map ) ) ; <nl> } <nl> <nl> void TorrentsController : : setUploadLimitAction ( ) <nl>
Replace QVariantMap with QVariantHash
qbittorrent/qBittorrent
f6ee96ed83d65ebec344ca16f84f978bd5d109e0
2019-07-31T15:48:41Z
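A small sketch of the container swap this commit makes (requires Qt Core; a standalone example, not qBittorrent code). QVariantMap is an ordered, tree-based map keyed by QString; QVariantHash is an unordered hash table with cheaper average lookup and insert. JSON objects are unordered anyway, so nothing observable is lost when building WebUI API payloads from a hash.

```cpp
#include <QDebug>
#include <QJsonDocument>
#include <QJsonObject>
#include <QVariantHash>

int main() {
    QVariantHash data;                  // was QVariantMap before this change
    data["save_path"] = "/downloads";
    data["max_connec"] = 500;

    // QJsonObject has a hash overload mirroring fromVariantMap().
    const QJsonObject obj = QJsonObject::fromVariantHash(data);
    qDebug().noquote() << QJsonDocument(obj).toJson(QJsonDocument::Compact);

    // Round trip, as setPreferencesAction() does with its "json" parameter.
    const QVariantHash parsed =
        QJsonDocument::fromJson(R"({"queueing_enabled":true})").toVariant().toHash();
    qDebug() << parsed.value("queueing_enabled").toBool();
    return 0;
}
```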
mmm a / src / builtins / x64 / builtins - x64 . cc <nl> ppp b / src / builtins / x64 / builtins - x64 . cc <nl> void Builtins : : Generate_GenericJSToWasmWrapper ( MacroAssembler * masm ) { <nl> __ j ( equal , & convert_param ) ; <nl> / / Change the param from Smi to int32 . <nl> __ SmiUntag ( param ) ; <nl> + / / Zero extend . <nl> + __ movl ( param , param ) ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> / / Param conversion done . <nl> void Builtins : : Generate_GenericJSToWasmWrapper ( MacroAssembler * masm ) { <nl> __ bind ( & kWasmI32_not_smi ) ; <nl> __ Call ( BUILTIN_CODE ( masm - > isolate ( ) , WasmTaggedNonSmiToInt32 ) , <nl> RelocInfo : : CODE_TARGET ) ; <nl> + / / Param is the result of the builtin . <nl> + __ AssertZeroExtended ( param ) ; <nl> __ jmp ( & restore_after_buitlin_call ) ; <nl> <nl> __ bind ( & kWasmI64 ) ; <nl>
[ wasm ] Zero extend register containing i32 parameter in generic wrapper
v8/v8
64610bda593a339f21bf463dd562bcb23521d5de
2020-09-15T14:01:37Z
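A sketch of why the wrapper needs the explicit `movl(param, param)` (plain C++, not V8 internals). On x86-64, a 32-bit register write implicitly zeroes bits 63..32 of the destination, so a 32-bit self-move is the canonical cheap zero-extension; after untagging, the upper half of the register may still hold sign or garbage bits, and a callee that reads the full 64-bit register would see the wrong value.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    int32_t param = -1;                          // i32 parameter, all bits set

    uint64_t sign_extended = (uint64_t)(int64_t)param;  // 0xFFFFFFFFFFFFFFFF
    uint64_t zero_extended = (uint64_t)(uint32_t)param; // 0x00000000FFFFFFFF, the movl effect

    std::printf("sign-extended: %016llx\n", (unsigned long long)sign_extended);
    std::printf("zero-extended: %016llx\n", (unsigned long long)zero_extended);

    // Code that later treats the register as a full 64-bit value sees two
    // very different numbers; the wrapper asserts the zero-extended form.
    return 0;
}
```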
mmm a / include / v8 - platform . h <nl> ppp b / include / v8 - platform . h <nl> class JobHandle { <nl> virtual void CancelAndDetach ( ) { Cancel ( ) ; } <nl> <nl> / * * <nl> - * Returns true if there ' s no work pending and no worker running . <nl> + * Returns true if there ' s currently no work pending and no worker running . <nl> * / <nl> virtual bool IsCompleted ( ) = 0 ; <nl> <nl> / * * <nl> * Returns true if associated with a Job and other methods may be called . <nl> - * Returns false after Join ( ) or Cancel ( ) was called . <nl> + * Returns false after Join ( ) or Cancel ( ) was called . This may return true <nl> + * even if no workers are running and IsCompleted ( ) returns true <nl> + * TODO ( etiennep ) : Deprecate IsRunning in favor of IsValid once implemented by <nl> + * all embedders . <nl> * / <nl> virtual bool IsRunning ( ) = 0 ; <nl> + virtual bool IsValid ( ) { return IsRunning ( ) ; } <nl> <nl> / * * <nl> * Returns true if job priority can be changed . <nl> mmm a / src / heap / concurrent - marking . cc <nl> ppp b / src / heap / concurrent - marking . cc <nl> size_t ConcurrentMarking : : GetMaxConcurrency ( size_t worker_count ) { <nl> void ConcurrentMarking : : ScheduleTasks ( ) { <nl> DCHECK ( FLAG_parallel_marking | | FLAG_concurrent_marking ) ; <nl> DCHECK ( ! heap_ - > IsTearingDown ( ) ) ; <nl> - DCHECK ( ! job_handle_ | | ! job_handle_ - > IsRunning ( ) ) ; <nl> + DCHECK ( ! job_handle_ | | ! job_handle_ - > IsValid ( ) ) ; <nl> <nl> job_handle_ = V8 : : GetCurrentPlatform ( ) - > PostJob ( <nl> TaskPriority : : kUserVisible , <nl> std : : make_unique < JobTask > ( this , heap_ - > mark_compact_collector ( ) - > epoch ( ) , <nl> heap_ - > is_current_gc_forced ( ) ) ) ; <nl> - DCHECK ( job_handle_ - > IsRunning ( ) ) ; <nl> + DCHECK ( job_handle_ - > IsValid ( ) ) ; <nl> } <nl> <nl> void ConcurrentMarking : : RescheduleTasksIfNeeded ( ) { <nl> void ConcurrentMarking : : RescheduleTasksIfNeeded ( ) { <nl> weak_objects_ - > discovered_ephemerons . IsGlobalPoolEmpty ( ) ) { <nl> return ; <nl> } <nl> - if ( ! job_handle_ | | ! job_handle_ - > IsRunning ( ) ) <nl> + if ( ! job_handle_ | | ! job_handle_ - > IsValid ( ) ) <nl> ScheduleTasks ( ) ; <nl> else <nl> job_handle_ - > NotifyConcurrencyIncrease ( ) ; <nl> void ConcurrentMarking : : RescheduleTasksIfNeeded ( ) { <nl> <nl> bool ConcurrentMarking : : Stop ( StopRequest stop_request ) { <nl> DCHECK ( FLAG_parallel_marking | | FLAG_concurrent_marking ) ; <nl> - if ( ! job_handle_ | | ! job_handle_ - > IsRunning ( ) ) return false ; <nl> + if ( ! job_handle_ | | ! job_handle_ - > IsValid ( ) ) return false ; <nl> <nl> if ( stop_request = = StopRequest : : PREEMPT_TASKS ) { <nl> job_handle_ - > Cancel ( ) ; <nl> bool ConcurrentMarking : : Stop ( StopRequest stop_request ) { <nl> bool ConcurrentMarking : : IsStopped ( ) { <nl> if ( ! FLAG_concurrent_marking ) return true ; <nl> <nl> - return ! job_handle_ | | ! job_handle_ - > IsRunning ( ) ; <nl> + return ! job_handle_ | | ! job_handle_ - > IsValid ( ) ; <nl> } <nl> <nl> void ConcurrentMarking : : FlushNativeContexts ( NativeContextStats * main_stats ) { <nl> - DCHECK ( ! job_handle_ | | ! job_handle_ - > IsRunning ( ) ) ; <nl> + DCHECK ( ! job_handle_ | | ! job_handle_ - > IsValid ( ) ) ; <nl> for ( int i = 1 ; i < = kMaxTasks ; i + + ) { <nl> main_stats - > Merge ( task_state_ [ i ] . native_context_stats ) ; <nl> task_state_ [ i ] . native_context_stats . 
Clear ( ) ; <nl> void ConcurrentMarking : : FlushNativeContexts ( NativeContextStats * main_stats ) { <nl> <nl> void ConcurrentMarking : : FlushMemoryChunkData ( <nl> MajorNonAtomicMarkingState * marking_state ) { <nl> - DCHECK ( ! job_handle_ | | ! job_handle_ - > IsRunning ( ) ) ; <nl> + DCHECK ( ! job_handle_ | | ! job_handle_ - > IsValid ( ) ) ; <nl> for ( int i = 1 ; i < = kMaxTasks ; i + + ) { <nl> MemoryChunkDataMap & memory_chunk_data = task_state_ [ i ] . memory_chunk_data ; <nl> for ( auto & pair : memory_chunk_data ) { <nl> void ConcurrentMarking : : FlushMemoryChunkData ( <nl> } <nl> <nl> void ConcurrentMarking : : ClearMemoryChunkData ( MemoryChunk * chunk ) { <nl> - DCHECK ( ! job_handle_ | | ! job_handle_ - > IsRunning ( ) ) ; <nl> + DCHECK ( ! job_handle_ | | ! job_handle_ - > IsValid ( ) ) ; <nl> for ( int i = 1 ; i < = kMaxTasks ; i + + ) { <nl> auto it = task_state_ [ i ] . memory_chunk_data . find ( chunk ) ; <nl> if ( it ! = task_state_ [ i ] . memory_chunk_data . end ( ) ) { <nl> mmm a / src / heap / cppgc / concurrent - marker . cc <nl> ppp b / src / heap / cppgc / concurrent - marker . cc <nl> void ConcurrentMarkerBase : : Start ( ) { <nl> } <nl> <nl> void ConcurrentMarkerBase : : Cancel ( ) { <nl> - if ( concurrent_marking_handle_ & & concurrent_marking_handle_ - > IsRunning ( ) ) <nl> + if ( concurrent_marking_handle_ & & concurrent_marking_handle_ - > IsValid ( ) ) <nl> concurrent_marking_handle_ - > Cancel ( ) ; <nl> } <nl> <nl> void ConcurrentMarkerBase : : JoinForTesting ( ) { <nl> - if ( concurrent_marking_handle_ & & concurrent_marking_handle_ - > IsRunning ( ) ) <nl> + if ( concurrent_marking_handle_ & & concurrent_marking_handle_ - > IsValid ( ) ) <nl> concurrent_marking_handle_ - > Join ( ) ; <nl> } <nl> <nl> ConcurrentMarkerBase : : ~ ConcurrentMarkerBase ( ) { <nl> CHECK_IMPLIES ( concurrent_marking_handle_ , <nl> - ! concurrent_marking_handle_ - > IsRunning ( ) ) ; <nl> + ! concurrent_marking_handle_ - > IsValid ( ) ) ; <nl> } <nl> <nl> bool ConcurrentMarkerBase : : NotifyIncrementalMutatorStepCompleted ( ) { <nl> mmm a / src / heap / cppgc / sweeper . cc <nl> ppp b / src / heap / cppgc / sweeper . cc <nl> class Sweeper : : SweeperImpl final { <nl> void FinishIfRunning ( ) { <nl> if ( ! is_in_progress_ ) return ; <nl> <nl> - if ( concurrent_sweeper_handle_ & & concurrent_sweeper_handle_ - > IsRunning ( ) & & <nl> + if ( concurrent_sweeper_handle_ & & concurrent_sweeper_handle_ - > IsValid ( ) & & <nl> concurrent_sweeper_handle_ - > UpdatePriorityEnabled ( ) ) { <nl> concurrent_sweeper_handle_ - > UpdatePriority ( <nl> cppgc : : TaskPriority : : kUserBlocking ) ; <nl> class Sweeper : : SweeperImpl final { <nl> <nl> void CancelSweepers ( ) { <nl> if ( incremental_sweeper_handle_ ) incremental_sweeper_handle_ . Cancel ( ) ; <nl> - if ( concurrent_sweeper_handle_ & & concurrent_sweeper_handle_ - > IsRunning ( ) ) <nl> + if ( concurrent_sweeper_handle_ & & concurrent_sweeper_handle_ - > IsValid ( ) ) <nl> concurrent_sweeper_handle_ - > Cancel ( ) ; <nl> } <nl> <nl> mmm a / src / libplatform / default - job . h <nl> ppp b / src / libplatform / default - job . h <nl> class V8_PLATFORM_EXPORT DefaultJobHandle : public JobHandle { <nl> void Cancel ( ) override ; <nl> void CancelAndDetach ( ) override ; <nl> bool IsCompleted ( ) override ; <nl> - bool IsRunning ( ) override { return state_ ! = nullptr ; } <nl> + bool IsRunning ( ) override { return IsValid ( ) ; } <nl> + bool IsValid ( ) override { return state_ ! 
= nullptr ; } <nl> <nl> bool UpdatePriorityEnabled ( ) const override { return true ; } <nl> <nl> mmm a / src / wasm / module - compiler . cc <nl> ppp b / src / wasm / module - compiler . cc <nl> void CompilationStateImpl : : SchedulePublishCompilationResults ( <nl> } <nl> <nl> void CompilationStateImpl : : ScheduleCompileJobForNewUnits ( ) { <nl> - if ( current_compile_job_ & & current_compile_job_ - > IsRunning ( ) ) { <nl> + if ( current_compile_job_ & & current_compile_job_ - > IsValid ( ) ) { <nl> current_compile_job_ - > NotifyConcurrencyIncrease ( ) ; <nl> return ; <nl> } <nl> mmm a / src / wasm / wasm - engine . cc <nl> ppp b / src / wasm / wasm - engine . cc <nl> WasmEngine : : ~ WasmEngine ( ) { <nl> compile_job_handles = compile_job_handles_ ; <nl> } <nl> for ( auto & job_handle : compile_job_handles ) { <nl> - if ( job_handle - > IsRunning ( ) ) job_handle - > Cancel ( ) ; <nl> + if ( job_handle - > IsValid ( ) ) job_handle - > Cancel ( ) ; <nl> } <nl> <nl> / / All AsyncCompileJobs have been canceled . <nl> mmm a / test / cctest / wasm / test - streaming - compilation . cc <nl> ppp b / test / cctest / wasm / test - streaming - compilation . cc <nl> class MockPlatform final : public TestPlatform { <nl> <nl> void ExecuteTasks ( ) { <nl> for ( auto * job_handle : job_handles_ ) { <nl> - if ( job_handle - > IsRunning ( ) ) job_handle - > Join ( ) ; <nl> + if ( job_handle - > IsValid ( ) ) job_handle - > Join ( ) ; <nl> } <nl> task_runner_ - > ExecuteTasks ( ) ; <nl> } <nl> class MockPlatform final : public TestPlatform { <nl> void CancelAndDetach ( ) override { orig_handle_ - > CancelAndDetach ( ) ; } <nl> bool IsCompleted ( ) override { return orig_handle_ - > IsCompleted ( ) ; } <nl> bool IsRunning ( ) override { return orig_handle_ - > IsRunning ( ) ; } <nl> + bool IsValid ( ) override { return orig_handle_ - > IsValid ( ) ; } <nl> <nl> private : <nl> std : : unique_ptr < JobHandle > orig_handle_ ; <nl> mmm a / test / cctest / wasm / test - wasm - metrics . cc <nl> ppp b / test / cctest / wasm / test - wasm - metrics . cc <nl> class MockPlatform final : public TestPlatform { <nl> <nl> void ExecuteTasks ( ) { <nl> for ( auto * job_handle : job_handles_ ) { <nl> - if ( job_handle - > IsRunning ( ) ) job_handle - > Join ( ) ; <nl> + if ( job_handle - > IsValid ( ) ) job_handle - > Join ( ) ; <nl> } <nl> task_runner_ - > ExecuteTasks ( ) ; <nl> } <nl> class MockPlatform final : public TestPlatform { <nl> void Cancel ( ) override { orig_handle_ - > Cancel ( ) ; } <nl> void CancelAndDetach ( ) override { orig_handle_ - > CancelAndDetach ( ) ; } <nl> bool IsRunning ( ) override { return orig_handle_ - > IsRunning ( ) ; } <nl> + bool IsValid ( ) override { return orig_handle_ - > IsValid ( ) ; } <nl> bool IsCompleted ( ) override { return orig_handle_ - > IsCompleted ( ) ; } <nl> <nl> private : <nl>
[ Jobs API ] Rename IsRunning - > IsValid
v8/v8
10b847c76589c6fd3d80c2cded20fb218b9e55e3
2020-10-19T17:01:48Z
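A minimal sketch of the two-phase rename pattern this commit uses (simplified, not the real v8-platform.h). The new virtual gets a default body that forwards to the old name, so embedder subclasses that only override IsRunning() still behave correctly when callers migrate to IsValid(); once all embedders implement IsValid(), the old name can be deprecated, as the TODO in the diff says.

```cpp
#include <iostream>

class JobHandle {
 public:
  virtual ~JobHandle() = default;
  virtual bool IsRunning() = 0;                    // old name, to be deprecated
  virtual bool IsValid() { return IsRunning(); }   // new name, default forwards
};

// A "legacy" embedder that predates the rename and only knows IsRunning().
class LegacyHandle final : public JobHandle {
 public:
  bool IsRunning() override { return !joined_; }
  void Join() { joined_ = true; }
 private:
  bool joined_ = false;
};

int main() {
  LegacyHandle handle;
  std::cout << handle.IsValid() << '\n';  // 1: forwarded to IsRunning()
  handle.Join();
  std::cout << handle.IsValid() << '\n';  // 0
  return 0;
}
```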
mmm a / docs / en / query_language / functions / array_functions . md <nl> ppp b / docs / en / query_language / functions / array_functions . md <nl> Result : <nl> ` ` ` <nl> # # arrayAUC ( arr_scores , arr_labels ) <nl> <nl> - Returns AUC ( Area Under the Curve , which is a concept in machine learning , see more details : https : / / developers . google . com / machine - learning / crash - course / classification / roc - and - auc ) ; <nl> + Returns AUC ( Area Under the Curve , which is a concept in machine learning , see more details : https : / / developers . google . com / machine - learning / crash - course / classification / roc - and - auc ) . <nl> <nl> ` arr_scores ` represents scores prediction model gives , while ` arr_labels ` represents labels of samples , usually 1 for positive sample and 0 for negative sample . <nl> <nl>
Update array_functions . md
ClickHouse/ClickHouse
b470ff7aaedd10c24752d68fa7899d52f258980a
2020-02-03T03:11:12Z
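An illustrative AUC computation matching the documented semantics (standalone C++, not ClickHouse's implementation). AUC equals the probability that a randomly chosen positive sample is scored above a randomly chosen negative one; counting pairwise wins, with ties worth half, computes exactly that. The scores and labels below are hypothetical.

```cpp
#include <cstdio>
#include <vector>

double array_auc(const std::vector<double>& scores, const std::vector<int>& labels) {
    double wins = 0.0;
    long pairs = 0;
    for (size_t i = 0; i < scores.size(); ++i) {
        if (labels[i] != 1) continue;                 // positive samples
        for (size_t j = 0; j < scores.size(); ++j) {
            if (labels[j] != 0) continue;             // negative samples
            ++pairs;
            if (scores[i] > scores[j]) wins += 1.0;
            else if (scores[i] == scores[j]) wins += 0.5;
        }
    }
    return pairs ? wins / pairs : 0.0;
}

int main() {
    // 3 of 4 positive/negative pairs are ranked correctly -> AUC = 0.750
    std::printf("AUC = %.3f\n", array_auc({0.1, 0.4, 0.35, 0.8}, {0, 0, 1, 1}));
    return 0;
}
```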
mmm a / tensorflow / python / eager / benchmarks / resnet50 / hvp_test . py <nl> ppp b / tensorflow / python / eager / benchmarks / resnet50 / hvp_test . py <nl> def _forward_over_back_hvp ( model , images , labels , vector ) : <nl> model . trainable_variables , vector ) as acc : <nl> with tf . GradientTape ( ) as grad_tape : <nl> logits = model ( images , training = True ) <nl> - loss = tf . losses . softmax_cross_entropy ( <nl> + loss = tf . compat . v1 . losses . softmax_cross_entropy ( <nl> logits = logits , onehot_labels = labels ) <nl> grads = grad_tape . gradient ( loss , model . trainable_variables ) <nl> return acc . jvp ( grads ) <nl> def _back_over_forward_hvp ( model , images , labels , vector ) : <nl> with forwardprop . ForwardAccumulator ( <nl> model . trainable_variables , vector ) as acc : <nl> logits = model ( images , training = True ) <nl> - loss = tf . losses . softmax_cross_entropy ( <nl> + loss = tf . compat . v1 . losses . softmax_cross_entropy ( <nl> logits = logits , onehot_labels = labels ) <nl> return grad_tape . gradient ( acc . jvp ( loss ) , model . trainable_variables ) <nl> <nl> def _back_over_forward_hvp ( model , images , labels , vector ) : <nl> def _tf_gradients_forward_over_back_hvp ( model , images , labels , vector ) : <nl> with tf . GradientTape ( ) as grad_tape : <nl> logits = model ( images , training = True ) <nl> - loss = tf . losses . softmax_cross_entropy ( <nl> + loss = tf . compat . v1 . losses . softmax_cross_entropy ( <nl> logits = logits , onehot_labels = labels ) <nl> variables = model . trainable_variables <nl> grads = grad_tape . gradient ( loss , variables ) <nl> def _back_over_back_hvp ( model , images , labels , vector ) : <nl> with tf . GradientTape ( ) as outer_tape : <nl> with tf . GradientTape ( ) as inner_tape : <nl> logits = model ( images , training = True ) <nl> - loss = tf . losses . softmax_cross_entropy ( <nl> + loss = tf . compat . v1 . losses . softmax_cross_entropy ( <nl> logits = logits , onehot_labels = labels ) <nl> grads = inner_tape . gradient ( loss , model . trainable_variables ) <nl> return outer_tape . gradient ( <nl> mmm a / tensorflow / python / eager / benchmarks / resnet50 / resnet50_graph_test . py <nl> ppp b / tensorflow / python / eager / benchmarks / resnet50 / resnet50_graph_test . py <nl> def benchmark_graph_train ( self ) : <nl> <nl> model = resnet50 . ResNet50 ( data_format ( ) ) <nl> logits = model ( images , training = True ) <nl> - loss = tf . losses . softmax_cross_entropy ( <nl> + loss = tf . compat . v1 . losses . softmax_cross_entropy ( <nl> logits = logits , onehot_labels = labels ) <nl> optimizer = tf . train . GradientDescentOptimizer ( learning_rate = 1 . 0 ) <nl> train_op = optimizer . minimize ( loss ) <nl> mmm a / tensorflow / python / eager / benchmarks / resnet50 / resnet50_test . py <nl> ppp b / tensorflow / python / eager / benchmarks / resnet50 / resnet50_test . py <nl> <nl> def compute_gradients ( model , images , labels , num_replicas = 1 ) : <nl> with tf . GradientTape ( ) as grad_tape : <nl> logits = model ( images , training = True ) <nl> - loss = tf . losses . softmax_cross_entropy ( <nl> + loss = tf . compat . v1 . losses . softmax_cross_entropy ( <nl> logits = logits , onehot_labels = labels ) <nl> tf . compat . v2 . summary . write ( ' loss ' , loss ) <nl> if num_replicas ! = 1 : <nl> mmm a / tensorflow / python / eager / benchmarks / resnet50 / resnet50_test_util . py <nl> ppp b / tensorflow / python / eager / benchmarks / resnet50 / resnet50_test_util . 
py <nl> def random_batch ( batch_size , data_format ) : <nl> shape = ( batch_size , ) + shape <nl> <nl> num_classes = 1000 <nl> - images = tf . random_uniform ( shape ) <nl> - labels = tf . random_uniform ( <nl> - [ batch_size ] , minval = 0 , maxval = num_classes , dtype = tf . int32 ) <nl> + images = tf . random . uniform ( shape ) <nl> + labels = tf . random . uniform ( [ batch_size ] , <nl> + minval = 0 , <nl> + maxval = num_classes , <nl> + dtype = tf . int32 ) <nl> one_hot = tf . one_hot ( labels , num_classes ) <nl> <nl> return images , one_hot <nl>
Merge pull request from ROCmSoftwarePlatform : google_upstream_resnet50
tensorflow/tensorflow
82273f00d547f691743156bba7cbbf089a9f682a
2020-01-22T20:50:38Z
mmm a / contrib / Python / cntk / ops / tests / non_linear_test . py <nl> ppp b / contrib / Python / cntk / ops / tests / non_linear_test . py <nl> <nl> CLIP_TUPLES = [ <nl> ( [ 1 . 5 ] , [ 1 . 0 ] , [ 2 . 0 ] ) , # value shouldn ' t be clipped ; gradient is [ 1 . 0 ] <nl> ( [ 0 . 5 ] , [ 1 . 0 ] , [ 2 . 0 ] ) , # value should be clipped to 1 . 0 ; gradient is [ 0 . 0 ] <nl> - ( [ 2 . 5 ] , [ 1 . 0 ] , [ 2 . 0 ] ) , # value should be clipped to 2 . 0 <nl> - ( [ [ 1 . 5 , 2 . 1 , 0 . 9 ] ] , [ 1 . 0 ] , [ 2 . 0 ] ) , # should clip to [ 1 . 5 , 2 . 0 , 1 . 0 ] <nl> - # should clip to [ [ 1 . 0 , 2 . 0 ] , [ 1 . 0 , 2 . 0 ] , [ 1 . 5 , 2 . 0 ] ] <nl> + ( [ 2 . 5 ] , [ 1 . 0 ] , [ 2 . 0 ] ) , # value should be clipped to 2 . 0 ; gradient is [ 0 . 0 ] <nl> + <nl> + # should clip to [ 1 . 5 , 2 . 0 , 1 . 0 ] ; gradient is [ [ 1 . 0 , 0 . 0 , 0 . 0 ] ] <nl> + ( [ [ 1 . 5 , 2 . 1 , 0 . 9 ] ] , [ 1 . 0 ] , [ 2 . 0 ] ) , <nl> + <nl> + # should clip to [ [ 1 . 0 , 2 . 0 ] , [ 1 . 0 , 2 . 0 ] , [ 1 . 5 , 2 . 0 ] ] ; <nl> + # gradient is [ [ 0 . 0 , 0 . 0 ] , [ 1 . 0 , 1 . 0 ] , [ 1 . 0 , 0 . 0 ] ] <nl> ( [ [ 0 . 0 , 3 . 0 ] , [ 1 . 0 , 2 . 0 ] , [ 1 . 5 , 2 . 5 ] ] , [ 1 . 0 ] , [ 2 . 0 ] ) , <nl> + <nl> + # test what happens if a user puts a higher " min " value than their " max " value <nl> + # should clip to [ [ 5 . 0 , 5 . 0 , 5 . 0 , 5 . 0 , 5 . 0 ] ] because min is evaluated first <nl> + # gradient should be all zeros : [ [ 0 . 0 , 0 . 0 , 0 . 0 , 0 . 0 , 0 . 0 ] ] <nl> + ( [ [ 1 . 5 , 2 . 1 , 0 . 9 , 1 . 0 , 2 . 0 ] ] , [ 5 . 0 ] , [ 0 . 5 ] ) , <nl> ] <nl> <nl> # - - clip_by_value operation tests - - <nl> def test_op_clip_by_value ( x , min_value , max_value , device_id , precision ) : <nl> # has not been clipped , and 0 if it has been clipped <nl> # We only test for the case where the input_node is a - - backpropping into <nl> # the others doesn ' t make sense ( they are constants ) <nl> - expected = [ [ np . array ( np . logical_not ( np . logical_xor ( np . greater ( x , max_value ) , np . less ( x , min_value ) ) ) , dtype = float ) ] ] <nl> + expected = [ [ np . array ( np . logical_not ( np . logical_or ( np . greater ( x , max_value ) , np . less ( x , min_value ) ) ) , dtype = float ) ] ] <nl> unittest_helper ( result , None , expected , device_id = device_id , <nl> precision = precision , clean_up = False , backward_pass = True , input_node = a ) <nl> \ No newline at end of file <nl>
added additional tests , fixed corner case for gradient
microsoft/CNTK
44088da71cc5c817a27d547ec2fdf34054747452
2016-04-25T10:07:18Z
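A sketch of the clip semantics the fixed test encodes (plain C++, independent of CNTK). Forward: clip(x) = max(min(x, hi), lo), with the min against the upper bound applied first, which is why the inverted range lo = 5.0, hi = 0.5 collapses every value to 5.0. Backward: the gradient is 1 only where the value passed through unclipped, i.e. NOT(x > hi OR x < lo). With a sane range the two predicates are mutually exclusive, so XOR and OR agree; in the inverted range both can hold at once, and the old XOR-based expectation wrongly reported a gradient of 1 there.

```cpp
#include <algorithm>
#include <cstdio>
#include <initializer_list>

double clip(double x, double lo, double hi) { return std::max(std::min(x, hi), lo); }

double clip_grad(double x, double lo, double hi) {
    return (x > hi || x < lo) ? 0.0 : 1.0;   // logical_or, not logical_xor
}

int main() {
    std::printf("normal range [1, 2]:\n");
    for (double x : {1.5, 2.1, 0.9})
        std::printf("  x=%.1f clip=%.1f grad=%.1f\n",
                    x, clip(x, 1.0, 2.0), clip_grad(x, 1.0, 2.0));

    std::printf("inverted range lo=5.0, hi=0.5:\n");
    for (double x : {1.5, 2.1, 0.9, 1.0, 2.0})   // all clip to 5.0, gradient 0.0
        std::printf("  x=%.1f clip=%.1f grad=%.1f\n",
                    x, clip(x, 5.0, 0.5), clip_grad(x, 5.0, 0.5));
    return 0;
}
```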
mmm a / buildroot / bin / use_example_configs <nl> ppp b / buildroot / bin / use_example_configs <nl> <nl> <nl> restore_configs <nl> <nl> - eval " cp Marlin / src / config / examples / $ { 1 } / Configuration * Marlin / " <nl> + cp Marlin / src / config / examples / " $ @ " / Configuration * Marlin / <nl> <nl> - if [ - f " Marlin / src / config / examples / $ { 1 } / _Bootscreen . h " ] ; then <nl> - cp " Marlin / src / config / examples / $ { 1 } / _Bootscreen . h " Marlin / src / config / <nl> + if [ - f " Marlin / src / config / examples / $ @ / _Bootscreen . h " ] ; then <nl> + cp " Marlin / src / config / examples / $ @ / _Bootscreen . h " Marlin / src / config / <nl> fi <nl>
Fix use_example_configs to handle spaces in path
MarlinFirmware/Marlin
2690ce7a96aff4fdd1e37899761ab37ea93b397e
2017-12-16T23:29:35Z
mmm a / src / SConscript . client <nl> ppp b / src / SConscript . client <nl> clientSource = [ <nl> ' mongo / base / make_string_vector . cpp ' , <nl> ' mongo / base / parse_number . cpp ' , <nl> ' mongo / base / status . cpp ' , <nl> + ' mongo / base / string_data . cpp ' , <nl> ' mongo / bson / oid . cpp ' , <nl> ' mongo / buildinfo . cpp ' , <nl> " mongo / client / authentication_table_common . cpp " , <nl>
SERVER - 7886 add string_data . cpp to client
mongodb/mongo
d7d055b768462d8e4b8277ae45fd3ad08f90be91
2012-12-10T20:16:11Z
mmm a / lib / IRGen / GenReflection . cpp <nl> ppp b / lib / IRGen / GenReflection . cpp <nl> void IRGenModule : : emitFieldMetadataRecord ( const NominalTypeDecl * Decl ) { <nl> if ( var ) <nl> addUsedGlobal ( var ) ; <nl> } <nl> + <nl> + void IRGenModule : : emitReflectionMetadataVersion ( ) { <nl> + auto Init = <nl> + llvm : : ConstantInt : : get ( Int16Ty , SWIFT_REFLECTION_METADATA_VERSION ) ; <nl> + auto Version = new llvm : : GlobalVariable ( Module , Int16Ty , / * constant * / true , <nl> + llvm : : GlobalValue : : LinkOnceODRLinkage , <nl> + Init , <nl> + " __swift_reflection_version " ) ; <nl> + addUsedGlobal ( Version ) ; <nl> + } <nl> mmm a / lib / IRGen / IRGen . cpp <nl> ppp b / lib / IRGen / IRGen . cpp <nl> static std : : unique_ptr < llvm : : Module > performIRGeneration ( IRGenOptions & Opts , <nl> IGM . emitProtocolConformances ( ) ; <nl> IGM . emitTypeMetadataRecords ( ) ; <nl> IGM . emitBuiltinReflectionMetadata ( ) ; <nl> + IGM . emitReflectionMetadataVersion ( ) ; <nl> } <nl> <nl> / / Okay , emit any definitions that we suddenly need . <nl> mmm a / lib / IRGen / IRGenModule . h <nl> ppp b / lib / IRGen / IRGenModule . h <nl> class IRGenModule { <nl> void emitAssociatedTypeMetadataRecord ( const ProtocolConformance * Conformance ) ; <nl> void emitFieldMetadataRecord ( const NominalTypeDecl * Decl ) ; <nl> void emitBuiltinReflectionMetadata ( ) ; <nl> + void emitReflectionMetadataVersion ( ) ; <nl> std : : string getBuiltinTypeMetadataSectionName ( ) ; <nl> std : : string getFieldTypeMetadataSectionName ( ) ; <nl> std : : string getAssociatedTypeMetadataSectionName ( ) ; <nl> mmm a / test / IRGen / unused . sil <nl> ppp b / test / IRGen / unused . sil <nl> bb0 : <nl> return % 1 : $ ( ) <nl> } <nl> <nl> - / / CHECK - macho : @ llvm . used = appending global [ 1 x i8 * ] [ i8 * bitcast ( void ( ) * @ frieda to i8 * ) ] , section " llvm . metadata " , align 8 <nl> - / / CHECK - elf : @ llvm . used = appending global [ 2 x i8 * ] [ i8 * bitcast ( void ( ) * @ frieda to i8 * ) , i8 * getelementptr inbounds ( [ 0 x i8 ] , [ 0 x i8 ] * @ _swift1_autolink_entries , i32 0 , i32 0 ) ] , section " llvm . metadata " , align 8 <nl> + / / CHECK - macho : @ llvm . used = appending global [ 2 x i8 * ] [ i8 * bitcast ( void ( ) * @ frieda to i8 * ) , i8 * bitcast ( i16 * @ __swift_reflection_version to i8 * ) ] , section " llvm . metadata " <nl> + / / CHECK - elf : @ llvm . used = appending global [ 3 x i8 * ] [ i8 * bitcast ( void ( ) * @ frieda to i8 * ) , i8 * bitcast ( i16 * @ __swift_reflection_version to i8 * ) , i8 * getelementptr inbounds ( [ 0 x i8 ] , [ 0 x i8 ] * @ _swift1_autolink_entries , i32 0 , i32 0 ) ] , section " llvm . metadata " <nl> <nl> / / CHECK : define linkonce_odr hidden void @ qux ( ) <nl> / / CHECK : define hidden void @ fred ( ) <nl>
IRGen : Emit reflection metadata version into Swift binaries
apple/swift
fd46a6078532b088ce75289cc9628b3cd3a243c8
2016-07-09T00:21:25Z
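A sketch of the emission pattern using the LLVM C++ API directly (standalone, assuming an LLVM development setup; not Swift's IRGenModule). A constant i16 global with linkonce_odr linkage dedupes to a single copy across translation units, and recording it in @llvm.used keeps it alive even though no code references it; `appendToUsed` from ModuleUtils plays the role of the `addUsedGlobal()` call in the diff, and the version number here is a stand-in.

```cpp
#include <llvm/IR/Constants.h>
#include <llvm/IR/GlobalVariable.h>
#include <llvm/IR/LLVMContext.h>
#include <llvm/IR/Module.h>
#include <llvm/Support/raw_ostream.h>
#include <llvm/Transforms/Utils/ModuleUtils.h>

int main() {
  llvm::LLVMContext ctx;
  llvm::Module module("reflection_demo", ctx);

  const uint16_t kReflectionVersion = 3;  // hypothetical version constant
  auto* i16 = llvm::Type::getInt16Ty(ctx);
  auto* version = new llvm::GlobalVariable(
      module, i16, /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage,
      llvm::ConstantInt::get(i16, kReflectionVersion),
      "__swift_reflection_version");

  // Equivalent of addUsedGlobal(): record the symbol in @llvm.used so the
  // optimizer and linker do not strip the otherwise-unreferenced global.
  llvm::appendToUsed(module, {version});

  module.print(llvm::outs(), nullptr);
  return 0;
}
```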
mmm a / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxBXA . cpp <nl> ppp b / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxBXA . cpp <nl> CDVDDemuxBXA : : CDVDDemuxBXA ( ) : CDVDDemux ( ) <nl> { <nl> m_pInput = NULL ; <nl> m_stream = NULL ; <nl> + m_bytes = 0 ; <nl> memset ( & m_header , 0x0 , sizeof ( Demux_BXA_FmtHeader ) ) ; <nl> } <nl> <nl> void CDVDDemuxBXA : : Dispose ( ) <nl> m_stream = NULL ; <nl> <nl> m_pInput = NULL ; <nl> - m_pts = 0 ; <nl> + m_bytes = 0 ; <nl> <nl> memset ( & m_header , 0x0 , sizeof ( Demux_BXA_FmtHeader ) ) ; <nl> } <nl> DemuxPacket * CDVDDemuxBXA : : Read ( ) <nl> int n = ( m_header . channels * m_header . bitsPerSample * m_header . sampleRate ) > > 3 ; <nl> if ( n > 0 ) <nl> { <nl> - m_pts + = ( ( double ) pPacket - > iSize * DVD_TIME_BASE ) / n ; <nl> - pPacket - > dts = m_pts ; <nl> - pPacket - > pts = m_pts ; <nl> + m_bytes + = pPacket - > iSize ; <nl> + pPacket - > dts = ( double ) m_bytes * DVD_TIME_BASE / n ; <nl> + pPacket - > pts = pPacket - > dts ; <nl> } <nl> else <nl> { <nl> mmm a / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxBXA . h <nl> ppp b / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxBXA . h <nl> class CDVDDemuxBXA : public CDVDDemux <nl> protected : <nl> friend class CDemuxStreamAudioBXA ; <nl> CDVDInputStream * m_pInput ; <nl> - double m_pts ; <nl> + int64_t m_bytes ; <nl> <nl> CDemuxStreamAudioBXA * m_stream ; <nl> <nl>
Fix AirTunes pts accuracy
xbmc/xbmc
a4e2d5bf67dead28204e1d63283fe5e560469b3d
2013-04-17T09:43:29Z
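A sketch of the accuracy fix (plain C++, not the DVDPlayer demuxer; packet size and stream parameters are made up). Deriving the pts from a running byte count takes one rounding step per packet; the old scheme added a floating-point delta per packet, letting rounding error compound over a long stream.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const double DVD_TIME_BASE = 1000000.0;              // microseconds
    const int bytes_per_second = (2 * 16 * 44100) >> 3;  // channels*bits*rate/8

    double drifting_pts = 0.0;   // old scheme: accumulate a delta per packet
    int64_t bytes = 0;           // new scheme: accumulate bytes, derive pts

    const int packet_size = 4096;
    for (int i = 0; i < 1000000; ++i) {
        drifting_pts += (double)packet_size * DVD_TIME_BASE / bytes_per_second;
        bytes += packet_size;
    }

    const double exact_pts = (double)bytes * DVD_TIME_BASE / bytes_per_second;
    std::printf("accumulated: %.6f\nderived:     %.6f\ndrift:       %.6f us\n",
                drifting_pts, exact_pts, drifting_pts - exact_pts);
    return 0;
}
```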
mmm a / xbmc / cores / omxplayer / OMXImage . cpp <nl> ppp b / xbmc / cores / omxplayer / OMXImage . cpp <nl> OMX_IMAGE_CODINGTYPE COMXImageFile : : GetCodingType ( unsigned int & width , unsigned <nl> unsigned short block_size = 0 ; <nl> bool nMarker = false ; <nl> <nl> - while ( p < q ) <nl> + while ( p < q & & ! progressive ) <nl> { <nl> switch ( marker ) <nl> { <nl>
OMXImage : Skip out earlier when image is progressive
xbmc/xbmc
f36e7c36b99173e062ec8a64443c2b6cdd9db3e3
2018-02-26T15:40:12Z
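A rough sketch of the early-exit idea behind the one-line change above (heavily simplified; not the OMXImage parser, and real parsers skip each segment by its encoded length). A JPEG stream is a sequence of 0xFF-prefixed markers, and SOF2 (0xFFC2) indicates a progressive scan; once that is known there is nothing left to learn, so the scan loop also tests the flag.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    // Hypothetical marker stream: SOI, APP0, SOF2, SOS ...
    const std::vector<uint8_t> jpeg = {0xFF, 0xD8, 0xFF, 0xE0, 0x00, 0x02,
                                       0xFF, 0xC2, 0x00, 0x02, 0xFF, 0xDA};
    bool progressive = false;
    size_t p = 0;
    while (p + 1 < jpeg.size() && !progressive) {   // the added exit condition
        if (jpeg[p] != 0xFF) { ++p; continue; }
        const uint8_t marker = jpeg[p + 1];
        if (marker == 0xC2)                         // SOF2: progressive DCT
            progressive = true;
        p += 2;                                     // simplified: step past marker only
    }
    std::printf("progressive: %s\n", progressive ? "yes" : "no");
    return 0;
}
```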
new file mode 100644 <nl> index 000000000000 . . 1dc789a95357 <nl> mmm / dev / null <nl> ppp b / jstests / aggregation / bugs / server6121 . js <nl> <nl> + / * <nl> + * SERVER - 6121 : aggregation framework converts Timestamp to long long in result set <nl> + * <nl> + * This test validates the SERVER - 6121 ticket . Add support for timestamps to Aggregation and <nl> + * ensure they can do everything dates can . Previously timestamps were awkwardly used as dates <nl> + * and long longs . <nl> + * / <nl> + <nl> + / * <nl> + * 1 ) Clear and create testing db <nl> + * 2 ) Run an aggregation with all date expressions on a timestamp and a date <nl> + * 3 ) Run an aggregation that will show timestamp and date can not be compared <nl> + * 4 ) Run an aggregation comparing two timestamps to show inc matters <nl> + * / <nl> + <nl> + / / Clear db <nl> + db . s6121 . drop ( ) ; <nl> + / / Populate db <nl> + db . s6121 . save ( { date : new Timestamp ( 1341337661000 , 1 ) } ) ; <nl> + db . s6121 . save ( { date : new Date ( 1341337661000 ) } ) ; <nl> + / / Aggregate checking various combinations of the constant and the field <nl> + var s6121 = db . runCommand ( <nl> + { aggregate : " s6121 " , pipeline : [ <nl> + { $ project : { <nl> + _id : 0 , <nl> + dayOfMonth : { $ dayOfMonth : ' $ date ' } , <nl> + dayOfWeek : { $ dayOfWeek : ' $ date ' } , <nl> + dayOfYear : { $ dayOfYear : ' $ date ' } , <nl> + hour : { $ hour : ' $ date ' } , <nl> + minute : { $ minute : ' $ date ' } , <nl> + month : { $ month : ' $ date ' } , <nl> + second : { $ second : ' $ date ' } , <nl> + week : { $ week : ' $ date ' } , <nl> + year : { $ year : ' $ date ' } <nl> + } } <nl> + ] } ) ; <nl> + / / Assert the two entries are equal <nl> + assert . eq ( s6121 . result [ 0 ] , s6121 . result [ 1 ] , ' s6121 failed ' ) ; <nl> + <nl> + <nl> + / / Clear db for timestamp to date compare test <nl> + db . s6121 . drop ( ) ; <nl> + db . s6121 . save ( { time : new Timestamp ( 1341337661000 , 1 ) , date : new Date ( 1341337661000 ) } ) ; <nl> + var s6121 = db . runCommand ( <nl> + { aggregate : " s6121 " , pipeline : [ <nl> + { $ project : { <nl> + _id : 0 , <nl> + dates_arent_times : { $ eq : [ ' $ time ' , ' $ date ' ] } <nl> + } } <nl> + ] } ) ; <nl> + / / Assert we get the error we want <nl> + assert . eq ( s6121 . result , [ ] , ' s6121 failed confirming that date and timestamp cant be compared ' ) ; <nl> + assert . eq ( s6121 . ok , 0 , ' s6121 failed confirming that date and timestamp cant be compared ' ) ; <nl> + assert . eq ( s6121 . code , 15994 , ' s6121 failed confirming that date and timestamp cant be compared ' ) ; <nl> + <nl> + <nl> + / / Clear db for timestamp comparison tests <nl> + db . s6121 . drop ( ) ; <nl> + db . s6121 . save ( { time : new Timestamp ( 1341337661000 , 1 ) , time2 : new Timestamp ( 1341337661000 , 2 ) } ) ; <nl> + var s6121 = db . 
runCommand ( <nl> + { aggregate : " s6121 " , pipeline : [ <nl> + { $ project : { <nl> + _id : 0 , <nl> + cmp : { $ cmp : [ ' $ time ' , ' $ time2 ' ] } , <nl> + eq : { $ eq : [ ' $ time ' , ' $ time2 ' ] } , <nl> + gt : { $ gt : [ ' $ time ' , ' $ time2 ' ] } , <nl> + gte : { $ gte : [ ' $ time ' , ' $ time2 ' ] } , <nl> + lt : { $ lt : [ ' $ time ' , ' $ time2 ' ] } , <nl> + lte : { $ lte : [ ' $ time ' , ' $ time2 ' ] } , <nl> + ne : { $ ne : [ ' $ time ' , ' $ time2 ' ] } <nl> + } } <nl> + ] } ) ; <nl> + var s6121result = [ { <nl> + cmp : - 1 , <nl> + eq : false , <nl> + gt : false , <nl> + gte : false , <nl> + lt : true , <nl> + lte : true , <nl> + ne : true <nl> + } ] ; <nl> + / / Assert the results are as expected <nl> + assert . eq ( s6121 . result , s6121result , ' s6121 failed comparing two timestamps ' ) ; <nl> mmm a / src / mongo / bson / bsonobjbuilder . h <nl> ppp b / src / mongo / bson / bsonobjbuilder . h <nl> namespace mongo { <nl> return * this ; <nl> } <nl> <nl> + BSONArrayBuilder & appendTimestamp ( unsigned int sec , unsigned int inc ) { <nl> + _b . appendTimestamp ( num ( ) , sec , inc ) ; <nl> + return * this ; <nl> + } <nl> + <nl> bool isArray ( ) const { <nl> return true ; <nl> } <nl> mmm a / src / mongo / db / pipeline / builder . cpp <nl> ppp b / src / mongo / db / pipeline / builder . cpp <nl> namespace mongo { <nl> pBuilder - > append ( fieldName , pDone - > arr ( ) ) ; <nl> } <nl> <nl> + void BuilderObj : : append ( const OpTime & ot ) { <nl> + pBuilder - > appendTimestamp ( fieldName , ot . getSecs ( ) , ot . getInc ( ) ) ; <nl> + } <nl> + <nl> BuilderObj : : BuilderObj ( <nl> BSONObjBuilder * pObjBuilder , string theFieldName ) : <nl> pBuilder ( pObjBuilder ) , <nl> namespace mongo { <nl> pBuilder - > append ( pDone - > arr ( ) ) ; <nl> } <nl> <nl> + void BuilderArray : : append ( const OpTime & ot ) { <nl> + pBuilder - > appendTimestamp ( ot . getSecs ( ) , ot . getInc ( ) ) ; <nl> + } <nl> + <nl> BuilderArray : : BuilderArray ( <nl> BSONArrayBuilder * pArrayBuilder ) : <nl> pBuilder ( pArrayBuilder ) { <nl> mmm a / src / mongo / db / pipeline / builder . h <nl> ppp b / src / mongo / db / pipeline / builder . h <nl> namespace mongo { <nl> virtual void append ( string s ) = 0 ; <nl> virtual void append ( const OID & o ) = 0 ; <nl> virtual void append ( const Date_t & d ) = 0 ; <nl> + virtual void append ( const OpTime & ot ) = 0 ; <nl> virtual void append ( BSONObjBuilder * pDone ) = 0 ; <nl> virtual void append ( BSONArrayBuilder * pDone ) = 0 ; <nl> } ; <nl> namespace mongo { <nl> virtual void append ( string s ) ; <nl> virtual void append ( const OID & o ) ; <nl> virtual void append ( const Date_t & d ) ; <nl> + virtual void append ( const OpTime & ot ) ; <nl> virtual void append ( BSONObjBuilder * pDone ) ; <nl> virtual void append ( BSONArrayBuilder * pDone ) ; <nl> <nl> namespace mongo { <nl> virtual void append ( string s ) ; <nl> virtual void append ( const OID & o ) ; <nl> virtual void append ( const Date_t & d ) ; <nl> + virtual void append ( const OpTime & ot ) ; <nl> virtual void append ( BSONObjBuilder * pDone ) ; <nl> virtual void append ( BSONArrayBuilder * pDone ) ; <nl> <nl> mmm a / src / mongo / db / pipeline / expression . cpp <nl> ppp b / src / mongo / db / pipeline / expression . 
cpp <nl> namespace mongo { <nl> cmp = signum ( Value : : compare ( pLeft , pRight ) ) ; <nl> break ; <nl> <nl> + case Timestamp : <nl> + cmp = signum ( Value : : compare ( pLeft , pRight ) ) ; <nl> + break ; <nl> + <nl> default : <nl> uassert ( 15995 , str : : stream ( ) < < <nl> " can ' t compare values of type " < < typeName ( leftType ) , false ) ; <nl> mmm a / src / mongo / db / pipeline / value . cpp <nl> ppp b / src / mongo / db / pipeline / value . cpp <nl> namespace mongo { <nl> break ; <nl> <nl> case Timestamp : <nl> - simple . timestampValue = 0 ; <nl> + timestampValue = OpTime ( ) ; <nl> break ; <nl> <nl> case NumberLong : <nl> namespace mongo { <nl> break ; <nl> <nl> case Timestamp : <nl> - dateValue = pBsonElement - > timestampTime ( ) ; <nl> + timestampValue = pBsonElement - > _opTime ( ) ; <nl> break ; <nl> <nl> case NumberLong : <nl> namespace mongo { <nl> return pValue ; <nl> } <nl> <nl> + Value : : Value ( const OpTime & value ) : <nl> + type ( Timestamp ) , <nl> + pDocumentValue ( ) , <nl> + vpValue ( ) { <nl> + timestampValue = value ; <nl> + } <nl> + <nl> + intrusive_ptr < const Value > Value : : createTimestamp ( const OpTime & value ) { <nl> + intrusive_ptr < const Value > pValue ( new Value ( value ) ) ; <nl> + return pValue ; <nl> + } <nl> + <nl> Value : : Value ( const string & value ) : <nl> type ( String ) , <nl> pDocumentValue ( ) , <nl> namespace mongo { <nl> return dateValue ; <nl> } <nl> <nl> + OpTime Value : : getTimestamp ( ) const { <nl> + verify ( getType ( ) = = Timestamp ) ; <nl> + return timestampValue ; <nl> + } <nl> + <nl> string Value : : getRegex ( ) const { <nl> verify ( getType ( ) = = RegEx ) ; <nl> return stringValue ; <nl> namespace mongo { <nl> return simple . intValue ; <nl> } <nl> <nl> - unsigned long long Value : : getTimestamp ( ) const { <nl> - verify ( getType ( ) = = Timestamp ) ; <nl> - return dateValue ; <nl> - } <nl> - <nl> long long Value : : getLong ( ) const { <nl> BSONType type = getType ( ) ; <nl> if ( type = = NumberInt ) <nl> namespace mongo { <nl> break ; <nl> <nl> case Timestamp : <nl> - pBuilder - > append ( ( long long ) getTimestamp ( ) ) ; <nl> + pBuilder - > append ( getTimestamp ( ) ) ; <nl> break ; <nl> <nl> case NumberLong : <nl> namespace mongo { <nl> case Date : <nl> return dateValue ; <nl> <nl> + case Timestamp : <nl> + return Date_t ( timestampValue . getSecs ( ) * 1000ULL ) ; <nl> + <nl> default : <nl> uassert ( 16006 , str : : stream ( ) < < <nl> " can ' t convert from BSON type " < < typeName ( type ) < < " to Date " , <nl> namespace mongo { <nl> case String : <nl> return stringValue ; <nl> <nl> + case Timestamp : <nl> + ss < < timestampValue . toStringPretty ( ) ; <nl> + return ss . str ( ) ; <nl> + <nl> case Date : <nl> return dateValue . toString ( ) ; <nl> <nl> namespace mongo { <nl> return " " ; <nl> } <nl> <nl> + OpTime Value : : coerceToTimestamp ( ) const { <nl> + switch ( type ) { <nl> + <nl> + case Timestamp : <nl> + return timestampValue ; <nl> + <nl> + default : <nl> + uassert ( 16373 , str : : stream ( ) < < <nl> + " can ' t convert from BSON type " < < typeName ( type ) < < <nl> + " to timestamp " , <nl> + false ) ; <nl> + } / / switch ( type ) <nl> + } <nl> + <nl> int Value : : compare ( const intrusive_ptr < const Value > & rL , <nl> const intrusive_ptr < const Value > & rR ) { <nl> BSONType lType = rL - > getType ( ) ; <nl> namespace mongo { <nl> return rL - > stringValue . 
compare ( rR - > stringValue ) ; <nl> <nl> case Timestamp : <nl> - if ( rL - > dateValue < rR - > dateValue ) <nl> + if ( rL - > timestampValue < rR - > timestampValue ) <nl> return - 1 ; <nl> - if ( rL - > dateValue > rR - > dateValue ) <nl> + if ( rL - > timestampValue > rR - > timestampValue ) <nl> return 1 ; <nl> return 0 ; <nl> <nl> namespace mongo { <nl> break ; <nl> <nl> case Timestamp : <nl> - boost : : hash_combine ( seed , ( unsigned long long ) dateValue ) ; <nl> + boost : : hash_combine ( seed , timestampValue . asLL ( ) ) ; <nl> break ; <nl> <nl> case Undefined : <nl> mmm a / src / mongo / db / pipeline / value . h <nl> ppp b / src / mongo / db / pipeline / value . h <nl> <nl> # include " bson / bsontypes . h " <nl> # include " bson / oid . h " <nl> # include " util / intrusive_counter . h " <nl> + # include " util / optime . h " <nl> <nl> namespace mongo { <nl> class BSONElement ; <nl> namespace mongo { <nl> * / <nl> static intrusive_ptr < const Value > createDate ( const Date_t & value ) ; <nl> <nl> + static intrusive_ptr < const Value > createTimestamp ( const OpTime & value ) ; <nl> + <nl> / * <nl> Construct a document - valued Value . <nl> <nl> namespace mongo { <nl> OID getOid ( ) const ; <nl> bool getBool ( ) const ; <nl> Date_t getDate ( ) const ; <nl> + OpTime getTimestamp ( ) const ; <nl> string getRegex ( ) const ; <nl> string getSymbol ( ) const ; <nl> int getInt ( ) const ; <nl> - unsigned long long getTimestamp ( ) const ; <nl> long long getLong ( ) const ; <nl> <nl> / * <nl> namespace mongo { <nl> * / <nl> Date_t coerceToDate ( ) const ; <nl> <nl> + OpTime coerceToTimestamp ( ) const ; <nl> + <nl> / * <nl> Coerce ( cast ) a value to a string , using JSON rules . <nl> <nl> namespace mongo { <nl> Value ( long long longValue ) ; <nl> Value ( double doubleValue ) ; <nl> Value ( const Date_t & dateValue ) ; <nl> + Value ( const OpTime & timestampValue ) ; <nl> Value ( const string & stringValue ) ; <nl> Value ( const intrusive_ptr < Document > & pDocument ) ; <nl> Value ( const vector < intrusive_ptr < const Value > > & vpValue ) ; <nl> namespace mongo { <nl> double doubleValue ; <nl> bool boolValue ; <nl> int intValue ; <nl> - unsigned long long timestampValue ; <nl> long long longValue ; <nl> - <nl> } simple ; / / values that don ' t need a ctor / dtor <nl> OID oidValue ; <nl> Date_t dateValue ; <nl> string stringValue ; / / String , Regex , Symbol <nl> + OpTime timestampValue ; <nl> intrusive_ptr < Document > pDocumentValue ; <nl> vector < intrusive_ptr < const Value > > vpValue ; / / for arrays <nl> <nl>
SERVER - 6121 make timestamps exist in aggregation
mongodb/mongo
38d752f9c7c98ccd3b67965bc1a8b85bba8c2922
2012-07-03T22:52:37Z
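A sketch of the ordering this commit restores (plain C++, not mongo's OpTime class; the values are illustrative). A BSON Timestamp is a (seconds, increment) pair; collapsing it to a date loses the increment, so two timestamps from the same second compare equal. Lexicographic comparison on the pair keeps them distinct, which is exactly what the new $cmp/$lt test cases check.

```cpp
#include <cstdint>
#include <cstdio>
#include <tuple>

struct OpTime {
    uint32_t secs;
    uint32_t inc;
};

int compare(const OpTime& a, const OpTime& b) {
    const auto l = std::make_tuple(a.secs, a.inc);
    const auto r = std::make_tuple(b.secs, b.inc);
    return l < r ? -1 : (r < l ? 1 : 0);
}

int main() {
    const OpTime t1{1341337661, 1}, t2{1341337661, 2};  // same second, different inc

    std::printf("cmp(t1, t2) = %d\n", compare(t1, t2));    // -1, as the test expects
    std::printf("same second?  %d\n", t1.secs == t2.secs); // 1: a date view cannot tell
    return 0;
}
```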
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> if ( BUILD_TESTS ) <nl> <nl> DIRECTORY lang / test / <nl> TEST bits_test SOURCES BitsTest . cpp <nl> + TEST cold_class_test SOURCES ColdClassTest . cpp <nl> <nl> DIRECTORY memory / test / <nl> TEST arena_test SOURCES ArenaTest . cpp <nl> mmm a / folly / test / Makefile . am <nl> ppp b / folly / test / Makefile . am <nl> timeout_queue_test_SOURCES = TimeoutQueueTest . cpp <nl> timeout_queue_test_LDADD = libfollytestmain . la <nl> TESTS + = timeout_queue_test <nl> <nl> + cold_class_test_SOURCES = . . / lang / test / ColdClassTest . cpp <nl> + cold_class_test_LDADD = libfollytestmain . la $ ( top_builddir ) / libfollybenchmark . la <nl> + TESTS + = cold_class_test <nl> + <nl> conv_test_SOURCES = ConvTest . cpp <nl> conv_test_LDADD = libfollytestmain . la $ ( top_builddir ) / libfollybenchmark . la <nl> TESTS + = conv_test <nl>
Add ColdClassTest . cpp to oss builds
facebook/folly
228c05ff964352538798bee101d6bf32d7940897
2018-01-02T01:04:40Z
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> if ( " $ { CMAKE_CXX_COMPILER_ID } " STREQUAL " GNU " OR " $ { CMAKE_CXX_COMPILER_ID } " MATCH <nl> add_compile_options ( " - Wfatal - errors " ) <nl> endif ( ) <nl> <nl> - set ( HEADER_BASE " $ { CMAKE_CURRENT_SOURCE_DIR } / include / spdlog " ) <nl> - <nl> # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> # address sanitizers check <nl> # mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> option ( SPDLOG_BUILD_TESTS " Build tests " $ { SPDLOG_MASTER_PROJECT } ) <nl> option ( SPDLOG_FMT_EXTERNAL " Use external fmt library instead of bundled " OFF ) <nl> option ( SPDLOG_INSTALL " Generate the install target . " $ { SPDLOG_MASTER_PROJECT } ) <nl> <nl> + set ( HEADER_BASE " $ { CMAKE_CURRENT_SOURCE_DIR } / include / spdlog " ) <nl> + <nl> if ( SPDLOG_STATIC_LIB ) <nl> add_definitions ( - DSPDLOG_STATIC_LIB ) <nl> file ( GLOB SRC_FILES $ { HEADER_BASE } / impl / * . cpp ) <nl>
wip cmake
gabime/spdlog
338125b93a99253cfe80dcb77503eeba0b733702
2019-04-27T16:40:24Z
mmm a / modules / highgui / src / window . cpp <nl> ppp b / modules / highgui / src / window . cpp <nl> int waitKey ( int delay ) <nl> return cvWaitKey ( delay ) ; <nl> } <nl> <nl> - void saveWindowParameters ( const string & windowName ) <nl> - { <nl> - cvSaveWindowParameters ( windowName . c_str ( ) ) ; <nl> - } <nl> - <nl> - void loadWindowParameters ( const string & windowName ) <nl> - { <nl> - cvLoadWindowParameters ( windowName . c_str ( ) ) ; <nl> - } <nl> - <nl> int createTrackbar ( const string & trackbarName , const string & winName , <nl> int * value , int count , TrackbarCallback callback , <nl> void * userdata ) <nl> void stopLoop ( ) <nl> { <nl> cvStopLoop ( ) ; <nl> } <nl> + <nl> + void saveWindowParameters ( const string & windowName ) <nl> + { <nl> + cvSaveWindowParameters ( windowName . c_str ( ) ) ; <nl> + } <nl> + <nl> + void loadWindowParameters ( const string & windowName ) <nl> + { <nl> + cvLoadWindowParameters ( windowName . c_str ( ) ) ; <nl> + } <nl> + <nl> # endif <nl> <nl> } <nl>
New functions with QT GUI :
opencv/opencv
89d9345cae875a5e9739b3f3e736aae7f8e83b69
2010-06-30T14:50:29Z
mmm a / src / compiler / bytecode - analysis . cc <nl> ppp b / src / compiler / bytecode - analysis . cc <nl> void UpdateInLiveness ( Bytecode bytecode , BytecodeLivenessState & in_liveness , <nl> void UpdateOutLiveness ( Bytecode bytecode , BytecodeLivenessState & out_liveness , <nl> BytecodeLivenessState * next_bytecode_in_liveness , <nl> const interpreter : : BytecodeArrayAccessor & accessor , <nl> + Handle < BytecodeArray > bytecode_array , <nl> const BytecodeLivenessMap & liveness_map ) { <nl> int current_offset = accessor . current_offset ( ) ; <nl> - const Handle < BytecodeArray > & bytecode_array = accessor . bytecode_array ( ) ; <nl> <nl> / / Special case Suspend and Resume to just pass through liveness . <nl> if ( bytecode = = Bytecode : : kSuspendGenerator | | <nl> void UpdateOutLiveness ( Bytecode bytecode , BytecodeLivenessState & out_liveness , <nl> void UpdateLiveness ( Bytecode bytecode , BytecodeLiveness & liveness , <nl> BytecodeLivenessState * * next_bytecode_in_liveness , <nl> const interpreter : : BytecodeArrayAccessor & accessor , <nl> + Handle < BytecodeArray > bytecode_array , <nl> const BytecodeLivenessMap & liveness_map ) { <nl> UpdateOutLiveness ( bytecode , * liveness . out , * next_bytecode_in_liveness , <nl> - accessor , liveness_map ) ; <nl> + accessor , bytecode_array , liveness_map ) ; <nl> liveness . in - > CopyFrom ( * liveness . out ) ; <nl> UpdateInLiveness ( bytecode , * liveness . in , accessor ) ; <nl> <nl> void BytecodeAnalysis : : Analyze ( BailoutId osr_bailout_id ) { <nl> BytecodeLiveness & liveness = liveness_map_ . InitializeLiveness ( <nl> current_offset , bytecode_array ( ) - > register_count ( ) , zone ( ) ) ; <nl> UpdateLiveness ( bytecode , liveness , & next_bytecode_in_liveness , iterator , <nl> - liveness_map_ ) ; <nl> + bytecode_array ( ) , liveness_map_ ) ; <nl> } <nl> } <nl> <nl> void BytecodeAnalysis : : Analyze ( BailoutId osr_bailout_id ) { <nl> BytecodeLiveness & liveness = liveness_map_ . GetLiveness ( current_offset ) ; <nl> <nl> UpdateLiveness ( bytecode , liveness , & next_bytecode_in_liveness , iterator , <nl> - liveness_map_ ) ; <nl> + bytecode_array ( ) , liveness_map_ ) ; <nl> } <nl> / / Now we are at the loop header . Since the in - liveness of the header <nl> / / can ' t change , we need only to update the out - liveness . <nl> UpdateOutLiveness ( iterator . current_bytecode ( ) , * header_liveness . out , <nl> - next_bytecode_in_liveness , iterator , liveness_map_ ) ; <nl> + next_bytecode_in_liveness , iterator , bytecode_array ( ) , <nl> + liveness_map_ ) ; <nl> } <nl> <nl> / / Process the generator switch statement separately , once the loops are done . <nl> void BytecodeAnalysis : : Analyze ( BailoutId osr_bailout_id ) { <nl> DCHECK_NE ( bytecode , Bytecode : : kJumpLoop ) ; <nl> <nl> UpdateLiveness ( bytecode , liveness , & next_bytecode_in_liveness , iterator , <nl> - liveness_map_ ) ; <nl> + bytecode_array ( ) , liveness_map_ ) ; <nl> } <nl> } <nl> } <nl> bool BytecodeAnalysis : : ResumeJumpTargetLeavesResolveSuspendIds ( <nl> valid = false ; <nl> } else { <nl> / / Make sure we ' re resuming to a Resume bytecode <nl> - interpreter : : BytecodeArrayAccessor assessor ( bytecode_array ( ) , <nl> + interpreter : : BytecodeArrayAccessor accessor ( bytecode_array ( ) , <nl> target . target_offset ( ) ) ; <nl> - if ( assessor . current_bytecode ( ) ! = Bytecode : : kResumeGenerator ) { <nl> + if ( accessor . current_bytecode ( ) ! 
= Bytecode : : kResumeGenerator ) { <nl> PrintF ( stderr , <nl> " Expected resume target for id % d , offset % d , to be " <nl> " ResumeGenerator , but found % s \ n " , <nl> target . suspend_id ( ) , target . target_offset ( ) , <nl> - Bytecodes : : ToString ( assessor . current_bytecode ( ) ) ) ; <nl> + Bytecodes : : ToString ( accessor . current_bytecode ( ) ) ) ; <nl> <nl> valid = false ; <nl> } <nl> bool BytecodeAnalysis : : LivenessIsValid ( ) { <nl> previous_liveness . CopyFrom ( * liveness . out ) ; <nl> <nl> UpdateOutLiveness ( bytecode , * liveness . out , next_bytecode_in_liveness , <nl> - iterator , liveness_map_ ) ; <nl> + iterator , bytecode_array ( ) , liveness_map_ ) ; <nl> / / UpdateOutLiveness skips kJumpLoop , so we update it manually . <nl> if ( bytecode = = Bytecode : : kJumpLoop ) { <nl> int target_offset = iterator . GetJumpTargetOffset ( ) ; <nl> mmm a / src / compiler / serializer - for - background - compilation . cc <nl> ppp b / src / compiler / serializer - for - background - compilation . cc <nl> Hints SerializerForBackgroundCompilation : : Run ( ) { <nl> class ExceptionHandlerMatcher { <nl> public : <nl> explicit ExceptionHandlerMatcher ( <nl> - BytecodeArrayIterator const & bytecode_iterator ) <nl> + BytecodeArrayIterator const & bytecode_iterator , <nl> + Handle < BytecodeArray > bytecode_array ) <nl> : bytecode_iterator_ ( bytecode_iterator ) { <nl> - HandlerTable table ( * bytecode_iterator_ . bytecode_array ( ) ) ; <nl> + HandlerTable table ( * bytecode_array ) ; <nl> for ( int i = 0 , n = table . NumberOfRangeEntries ( ) ; i < n ; + + i ) { <nl> handlers_ . insert ( table . GetRangeHandler ( i ) ) ; <nl> } <nl> void SerializerForBackgroundCompilation : : TraverseBytecode ( ) { <nl> broker ( ) , handle ( environment ( ) - > function ( ) . shared - > GetBytecodeArray ( ) , <nl> broker ( ) - > isolate ( ) ) ) ; <nl> BytecodeArrayIterator iterator ( bytecode_array . object ( ) ) ; <nl> - ExceptionHandlerMatcher handler_matcher ( iterator ) ; <nl> + ExceptionHandlerMatcher handler_matcher ( iterator , bytecode_array . object ( ) ) ; <nl> <nl> for ( ; ! iterator . done ( ) ; iterator . Advance ( ) ) { <nl> IncorporateJumpTargetEnvironment ( iterator . current_offset ( ) ) ; <nl> mmm a / src / interpreter / bytecode - array - accessor . cc <nl> ppp b / src / interpreter / bytecode - array - accessor . cc <nl> namespace v8 { <nl> namespace internal { <nl> namespace interpreter { <nl> <nl> + namespace { <nl> + <nl> + class OnHeapBytecodeArray final : public AbstractBytecodeArray { <nl> + public : <nl> + explicit OnHeapBytecodeArray ( Handle < BytecodeArray > bytecode_array ) <nl> + : array_ ( bytecode_array ) { } <nl> + <nl> + int length ( ) const override { return array_ - > length ( ) ; } <nl> + <nl> + int parameter_count ( ) const override { return array_ - > parameter_count ( ) ; } <nl> + <nl> + uint8_t get ( int index ) const override { return array_ - > get ( index ) ; } <nl> + <nl> + void set ( int index , uint8_t value ) override { <nl> + return array_ - > set ( index , value ) ; <nl> + } <nl> + <nl> + Address GetFirstBytecodeAddress ( ) const override { <nl> + return array_ - > GetFirstBytecodeAddress ( ) ; <nl> + } <nl> + <nl> + Handle < Object > GetConstantAtIndex ( int index , <nl> + Isolate * isolate ) const override { <nl> + return handle ( array_ - > constant_pool ( ) . get ( index ) , isolate ) ; <nl> + } <nl> + <nl> + bool IsConstantAtIndexSmi ( int index ) const override { <nl> + return array_ - > constant_pool ( ) . get ( index ) . 
IsSmi ( ) ; <nl> + } <nl> + <nl> + Smi GetConstantAtIndexAsSmi ( int index ) const override { <nl> + return Smi : : cast ( array_ - > constant_pool ( ) . get ( index ) ) ; <nl> + } <nl> + <nl> + private : <nl> + Handle < BytecodeArray > array_ ; <nl> + } ; <nl> + <nl> + } / / namespace <nl> + <nl> BytecodeArrayAccessor : : BytecodeArrayAccessor ( <nl> - Handle < BytecodeArray > bytecode_array , int initial_offset ) <nl> + AbstractBytecodeArray * bytecode_array , int initial_offset ) <nl> : bytecode_array_ ( bytecode_array ) , <nl> bytecode_offset_ ( initial_offset ) , <nl> operand_scale_ ( OperandScale : : kSingle ) , <nl> BytecodeArrayAccessor : : BytecodeArrayAccessor ( <nl> UpdateOperandScale ( ) ; <nl> } <nl> <nl> + BytecodeArrayAccessor : : BytecodeArrayAccessor ( <nl> + Handle < BytecodeArray > bytecode_array , int initial_offset ) <nl> + : BytecodeArrayAccessor ( new OnHeapBytecodeArray ( bytecode_array ) , <nl> + initial_offset ) { } <nl> + <nl> void BytecodeArrayAccessor : : SetOffset ( int offset ) { <nl> bytecode_offset_ = offset ; <nl> UpdateOperandScale ( ) ; <nl> void BytecodeArrayAccessor : : ApplyDebugBreak ( ) { <nl> / / scaling prefix , which we can patch with the matching debug - break <nl> / / variant . <nl> interpreter : : Bytecode bytecode = <nl> - interpreter : : Bytecodes : : FromByte ( bytecode_array_ - > get ( bytecode_offset_ ) ) ; <nl> + interpreter : : Bytecodes : : FromByte ( bytecode_array ( ) - > get ( bytecode_offset_ ) ) ; <nl> if ( interpreter : : Bytecodes : : IsDebugBreak ( bytecode ) ) return ; <nl> interpreter : : Bytecode debugbreak = <nl> interpreter : : Bytecodes : : GetDebugBreak ( bytecode ) ; <nl> - bytecode_array_ - > set ( bytecode_offset_ , <nl> - interpreter : : Bytecodes : : ToByte ( debugbreak ) ) ; <nl> + bytecode_array ( ) - > set ( bytecode_offset_ , <nl> + interpreter : : Bytecodes : : ToByte ( debugbreak ) ) ; <nl> } <nl> <nl> void BytecodeArrayAccessor : : UpdateOperandScale ( ) { <nl> Runtime : : FunctionId BytecodeArrayAccessor : : GetIntrinsicIdOperand ( <nl> <nl> Handle < Object > BytecodeArrayAccessor : : GetConstantAtIndex ( <nl> int index , Isolate * isolate ) const { <nl> - return handle ( bytecode_array ( ) - > constant_pool ( ) . get ( index ) , isolate ) ; <nl> + return bytecode_array ( ) - > GetConstantAtIndex ( index , isolate ) ; <nl> } <nl> <nl> bool BytecodeArrayAccessor : : IsConstantAtIndexSmi ( int index ) const { <nl> - return bytecode_array ( ) - > constant_pool ( ) . get ( index ) . IsSmi ( ) ; <nl> + return bytecode_array ( ) - > IsConstantAtIndexSmi ( index ) ; <nl> } <nl> <nl> Smi BytecodeArrayAccessor : : GetConstantAtIndexAsSmi ( int index ) const { <nl> - return Smi : : cast ( bytecode_array ( ) - > constant_pool ( ) . get ( index ) ) ; <nl> + return bytecode_array ( ) - > GetConstantAtIndexAsSmi ( index ) ; <nl> } <nl> <nl> Handle < Object > BytecodeArrayAccessor : : GetConstantForIndexOperand ( <nl> mmm a / src / interpreter / bytecode - array - accessor . h <nl> ppp b / src / interpreter / bytecode - array - accessor . 
h <nl> class V8_EXPORT_PRIVATE JumpTableTargetOffsets final { <nl> int case_value_base_ ; <nl> } ; <nl> <nl> + class V8_EXPORT_PRIVATE AbstractBytecodeArray { <nl> + public : <nl> + virtual int length ( ) const = 0 ; <nl> + virtual int parameter_count ( ) const = 0 ; <nl> + virtual uint8_t get ( int index ) const = 0 ; <nl> + virtual void set ( int index , uint8_t value ) = 0 ; <nl> + virtual Address GetFirstBytecodeAddress ( ) const = 0 ; <nl> + <nl> + virtual Handle < Object > GetConstantAtIndex ( int index , <nl> + Isolate * isolate ) const = 0 ; <nl> + virtual bool IsConstantAtIndexSmi ( int index ) const = 0 ; <nl> + virtual Smi GetConstantAtIndexAsSmi ( int index ) const = 0 ; <nl> + <nl> + virtual ~ AbstractBytecodeArray ( ) = default ; <nl> + } ; <nl> + <nl> class V8_EXPORT_PRIVATE BytecodeArrayAccessor { <nl> public : <nl> + BytecodeArrayAccessor ( AbstractBytecodeArray * bytecode_array , <nl> + int initial_offset ) ; <nl> + <nl> BytecodeArrayAccessor ( Handle < BytecodeArray > bytecode_array , <nl> int initial_offset ) ; <nl> <nl> class V8_EXPORT_PRIVATE BytecodeArrayAccessor { <nl> int current_offset ( ) const { return bytecode_offset_ ; } <nl> OperandScale current_operand_scale ( ) const { return operand_scale_ ; } <nl> int current_prefix_offset ( ) const { return prefix_offset_ ; } <nl> - const Handle < BytecodeArray > & bytecode_array ( ) const { <nl> - return bytecode_array_ ; <nl> + AbstractBytecodeArray * bytecode_array ( ) const { <nl> + return bytecode_array_ . get ( ) ; <nl> } <nl> <nl> uint32_t GetFlagOperand ( int operand_index ) const ; <nl> class V8_EXPORT_PRIVATE BytecodeArrayAccessor { <nl> <nl> void UpdateOperandScale ( ) ; <nl> <nl> - Handle < BytecodeArray > bytecode_array_ ; <nl> + std : : unique_ptr < AbstractBytecodeArray > bytecode_array_ ; <nl> int bytecode_offset_ ; <nl> OperandScale operand_scale_ ; <nl> int prefix_offset_ ; <nl> mmm a / src / interpreter / bytecode - array - iterator . cc <nl> ppp b / src / interpreter / bytecode - array - iterator . cc <nl> namespace v8 { <nl> namespace internal { <nl> namespace interpreter { <nl> <nl> + BytecodeArrayIterator : : BytecodeArrayIterator ( <nl> + AbstractBytecodeArray * bytecode_array ) <nl> + : BytecodeArrayAccessor ( bytecode_array , 0 ) { } <nl> + <nl> BytecodeArrayIterator : : BytecodeArrayIterator ( <nl> Handle < BytecodeArray > bytecode_array ) <nl> : BytecodeArrayAccessor ( bytecode_array , 0 ) { } <nl> mmm a / src / interpreter / bytecode - array - iterator . h <nl> ppp b / src / interpreter / bytecode - array - iterator . h <nl> namespace interpreter { <nl> class V8_EXPORT_PRIVATE BytecodeArrayIterator final <nl> : public BytecodeArrayAccessor { <nl> public : <nl> - explicit BytecodeArrayIterator ( Handle < BytecodeArray > bytecode_array ) ; <nl> + explicit BytecodeArrayIterator ( AbstractBytecodeArray * array ) ; <nl> + explicit BytecodeArrayIterator ( Handle < BytecodeArray > array ) ; <nl> <nl> void Advance ( ) ; <nl> bool done ( ) const ; <nl> mmm a / src / interpreter / bytecode - array - random - iterator . cc <nl> ppp b / src / interpreter / bytecode - array - random - iterator . 
cc <nl> namespace v8 { <nl> namespace internal { <nl> namespace interpreter { <nl> <nl> + BytecodeArrayRandomIterator : : BytecodeArrayRandomIterator ( <nl> + AbstractBytecodeArray * bytecode_array , Zone * zone ) <nl> + : BytecodeArrayAccessor ( bytecode_array , 0 ) , offsets_ ( zone ) { <nl> + Initialize ( ) ; <nl> + } <nl> + <nl> BytecodeArrayRandomIterator : : BytecodeArrayRandomIterator ( <nl> Handle < BytecodeArray > bytecode_array , Zone * zone ) <nl> : BytecodeArrayAccessor ( bytecode_array , 0 ) , offsets_ ( zone ) { <nl> + Initialize ( ) ; <nl> + } <nl> + <nl> + void BytecodeArrayRandomIterator : : Initialize ( ) { <nl> / / Run forwards through the bytecode array to determine the offset of each <nl> / / bytecode . <nl> - while ( current_offset ( ) < bytecode_array - > length ( ) ) { <nl> + while ( current_offset ( ) < bytecode_array ( ) - > length ( ) ) { <nl> offsets_ . push_back ( current_offset ( ) ) ; <nl> SetOffset ( current_offset ( ) + current_bytecode_size ( ) ) ; <nl> } <nl> mmm a / src / interpreter / bytecode - array - random - iterator . h <nl> ppp b / src / interpreter / bytecode - array - random - iterator . h <nl> namespace interpreter { <nl> class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final <nl> : public BytecodeArrayAccessor { <nl> public : <nl> + explicit BytecodeArrayRandomIterator ( AbstractBytecodeArray * bytecode_array , <nl> + Zone * zone ) ; <nl> explicit BytecodeArrayRandomIterator ( Handle < BytecodeArray > bytecode_array , <nl> Zone * zone ) ; <nl> <nl> class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final <nl> ZoneVector < int > offsets_ ; <nl> int current_index_ ; <nl> <nl> + void Initialize ( ) ; <nl> void UpdateOffsetFromIndex ( ) ; <nl> <nl> DISALLOW_COPY_AND_ASSIGN ( BytecodeArrayRandomIterator ) ; <nl>
Introduce bytecode array abstraction.
v8/v8
bb7bb8b78b2257152386e6b03c33f54e7bde916b
2019-06-19T06:15:19Z
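The commit above decouples BytecodeArrayAccessor from the on-heap Handle<BytecodeArray> behind a pure-virtual AbstractBytecodeArray, with OnHeapBytecodeArray as the default adapter and the accessor owning its array through a std::unique_ptr. A minimal sketch of the same pattern, using hypothetical stand-in names (ByteSource, VectorBytes, Accessor) rather than the real V8 types:

    #include <cstdint>
    #include <memory>
    #include <utility>
    #include <vector>

    // Stand-in for AbstractBytecodeArray: the accessor only needs
    // length and byte access, so that is all the interface exposes.
    class ByteSource {
     public:
      virtual ~ByteSource() = default;
      virtual int length() const = 0;
      virtual uint8_t get(int index) const = 0;
    };

    // Stand-in for OnHeapBytecodeArray: adapts one concrete storage
    // type (here a plain vector) to the interface.
    class VectorBytes final : public ByteSource {
     public:
      explicit VectorBytes(std::vector<uint8_t> bytes)
          : bytes_(std::move(bytes)) {}
      int length() const override { return static_cast<int>(bytes_.size()); }
      uint8_t get(int index) const override { return bytes_[index]; }
     private:
      std::vector<uint8_t> bytes_;
    };

    // Stand-in for BytecodeArrayAccessor: depends only on the
    // interface, so any ByteSource implementation can be walked.
    class Accessor {
     public:
      explicit Accessor(std::unique_ptr<ByteSource> source)
          : source_(std::move(source)) {}
      bool done(int offset) const { return offset >= source_->length(); }
      uint8_t at(int offset) const { return source_->get(offset); }
     private:
      std::unique_ptr<ByteSource> source_;
    };

The payoff, visible in the diff, is that BytecodeArrayIterator and BytecodeArrayRandomIterator gain constructor overloads over the interface, so they can be built over any implementation and not just a heap-allocated BytecodeArray.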
mmm a / cmake / OpenCVDownload . cmake <nl> ppp b / cmake / OpenCVDownload . cmake <nl> file ( WRITE " $ { OPENCV_DOWNLOAD_LOG } " " use_cache \ " $ { OPENCV_DOWNLOAD_PATH } \ " \ n " ) <nl> function ( ocv_download ) <nl> cmake_parse_arguments ( DL " UNPACK ; RELATIVE_URL " " FILENAME ; HASH ; DESTINATION_DIR ; ID ; STATUS " " URL " $ { ARGN } ) <nl> <nl> - macro ( ocv_download_log ) <nl> + function ( ocv_download_log ) <nl> file ( APPEND " $ { OPENCV_DOWNLOAD_LOG } " " $ { ARGN } \ n " ) <nl> - endmacro ( ) <nl> + endfunction ( ) <nl> <nl> ocv_assert ( DL_FILENAME ) <nl> ocv_assert ( DL_HASH ) <nl>
Merge pull request from jchazalon:patch-1
opencv/opencv
687394fa0746ef29984f92e6ba363b76cb953d18
2018-03-08T17:54:14Z
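The one-line substance here is turning ocv_download_log from a CMake macro into a function. A macro body is pasted textually at each call site, so its arguments are not real variables and get re-expanded in the caller's context, which can mangle logged strings; a function evaluates its arguments once into a proper scope. Presumably that is the motivation. The same textual-substitution hazard is easy to show with the C preprocessor (hypothetical example, not from the patch):

    #include <iostream>

    int calls = 0;
    int next() { ++calls; return calls; }

    #define TWICE_MACRO(x) ((x) + (x))           // textual: expands x twice
    int twice_function(int x) { return x + x; }  // evaluates x exactly once

    int main() {
      calls = 0;
      int a = TWICE_MACRO(next());     // next() runs twice -> a == 1 + 2 == 3
      calls = 0;
      int b = twice_function(next());  // next() runs once  -> b == 2
      std::cout << a << ' ' << b << '\n';  // prints "3 2"
    }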
mmm a / src / core / ext / client_channel / client_channel . c <nl> ppp b / src / core / ext / client_channel / client_channel . c <nl> static const grpc_mdstr_hash_table_vtable method_parameters_vtable = { <nl> <nl> static void * method_parameters_create_from_json ( const grpc_json * json ) { <nl> wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET ; <nl> - gpr_timespec timeout = { 0 , 0 , GPR_TIMESPAN } ; <nl> - for ( grpc_json * field = json - > child ; field ! = NULL ; field = field - > next ) { <nl> + gpr_timespec timeout = { 0 , 0 , GPR_TIMESPAN } ; <nl> + for ( grpc_json * field = json - > child ; field ! = NULL ; field = field - > next ) { <nl> if ( field - > key = = NULL ) continue ; <nl> if ( strcmp ( field - > key , " wait_for_ready " ) = = 0 ) { <nl> if ( wait_for_ready ! = WAIT_FOR_READY_UNSET ) return NULL ; / / Duplicate . <nl> if ( field - > type ! = GRPC_JSON_TRUE & & field - > type ! = GRPC_JSON_FALSE ) { <nl> return NULL ; <nl> } <nl> - wait_for_ready = field - > type = = GRPC_JSON_TRUE <nl> - ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE ; <nl> + wait_for_ready = field - > type = = GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE <nl> + : WAIT_FOR_READY_FALSE ; <nl> } else if ( strcmp ( field - > key , " timeout " ) = = 0 ) { <nl> if ( timeout . tv_sec > 0 | | timeout . tv_nsec > 0 ) return NULL ; / / Duplicate . <nl> if ( field - > type ! = GRPC_JSON_OBJECT ) return NULL ; <nl> if ( field - > child = = NULL ) return NULL ; <nl> - for ( grpc_json * subfield = field - > child ; subfield ! = NULL ; <nl> + for ( grpc_json * subfield = field - > child ; subfield ! = NULL ; <nl> subfield = subfield - > next ) { <nl> if ( subfield - > key = = NULL ) return NULL ; <nl> if ( strcmp ( subfield - > key , " seconds " ) = = 0 ) { <nl> static void on_resolver_result_changed ( grpc_exec_ctx * exec_ctx , void * arg , <nl> grpc_channel_args_find ( lb_policy_args . args , GRPC_ARG_SERVICE_CONFIG ) ; <nl> if ( channel_arg ! = NULL ) { <nl> GPR_ASSERT ( channel_arg - > type = = GRPC_ARG_POINTER ) ; <nl> - grpc_json_tree * json_tree = channel_arg - > value . pointer . p ; <nl> + grpc_json_tree * json_tree = channel_arg - > value . pointer . p ; <nl> method_params_table = grpc_method_config_table_create_from_json ( <nl> json_tree - > root , method_parameters_create_from_json , <nl> & method_parameters_vtable ) ; <nl> mmm a / src / core / lib / json / json . c <nl> ppp b / src / core / lib / json / json . c <nl> <nl> <nl> # include " src / core / lib / json / json . h " <nl> <nl> - grpc_json * grpc_json_create ( grpc_json_type type ) { <nl> - grpc_json * json = gpr_malloc ( sizeof ( * json ) ) ; <nl> + grpc_json * grpc_json_create ( grpc_json_type type ) { <nl> + grpc_json * json = gpr_malloc ( sizeof ( * json ) ) ; <nl> memset ( json , 0 , sizeof ( * json ) ) ; <nl> json - > type = type ; <nl> <nl> return json ; <nl> } <nl> <nl> - void grpc_json_destroy ( grpc_json * json ) { <nl> + void grpc_json_destroy ( grpc_json * json ) { <nl> while ( json - > child ) { <nl> grpc_json_destroy ( json - > child ) ; <nl> } <nl> mmm a / src / core / lib / json / json . h <nl> ppp b / src / core / lib / json / json . h <nl> <nl> * are not owned by it . 
<nl> * / <nl> typedef struct grpc_json { <nl> - struct grpc_json * next ; <nl> - struct grpc_json * prev ; <nl> - struct grpc_json * child ; <nl> - struct grpc_json * parent ; <nl> + struct grpc_json * next ; <nl> + struct grpc_json * prev ; <nl> + struct grpc_json * child ; <nl> + struct grpc_json * parent ; <nl> <nl> grpc_json_type type ; <nl> - const char * key ; <nl> - const char * value ; <nl> + const char * key ; <nl> + const char * value ; <nl> } grpc_json ; <nl> <nl> / * The next two functions are going to parse the input string , and <nl> typedef struct grpc_json { <nl> * <nl> * Delete the allocated tree afterward using grpc_json_destroy ( ) . <nl> * / <nl> - grpc_json * grpc_json_parse_string_with_len ( char * input , size_t size ) ; <nl> - grpc_json * grpc_json_parse_string ( char * input ) ; <nl> + grpc_json * grpc_json_parse_string_with_len ( char * input , size_t size ) ; <nl> + grpc_json * grpc_json_parse_string ( char * input ) ; <nl> <nl> / * This function will create a new string using gpr_realloc , and will <nl> * deserialize the grpc_json tree into it . It ' ll be zero - terminated , <nl> grpc_json * grpc_json_parse_string ( char * input ) ; <nl> * If indent is 0 , then newlines will be suppressed as well , and the <nl> * output will be condensed at its maximum . <nl> * / <nl> - char * grpc_json_dump_to_string ( grpc_json * json , int indent ) ; <nl> + char * grpc_json_dump_to_string ( grpc_json * json , int indent ) ; <nl> <nl> / * Use these to create or delete a grpc_json object . <nl> * Deletion is recursive . We will not attempt to free any of the strings <nl> * in any of the objects of that tree . <nl> * / <nl> - grpc_json * grpc_json_create ( grpc_json_type type ) ; <nl> - void grpc_json_destroy ( grpc_json * json ) ; <nl> + grpc_json * grpc_json_create ( grpc_json_type type ) ; <nl> + void grpc_json_destroy ( grpc_json * json ) ; <nl> <nl> / * Compares two JSON trees . * / <nl> int grpc_json_cmp ( const grpc_json * json1 , const grpc_json * json2 ) ; <nl> mmm a / src / core / lib / support / string . c <nl> ppp b / src / core / lib / support / string . c <nl> int int64_ttoa ( int64_t value , char * string ) { <nl> return i ; <nl> } <nl> <nl> - int gpr_parse_nonnegative_number ( const char * value ) { <nl> - char * end ; <nl> + int gpr_parse_nonnegative_number ( const char * value ) { <nl> + char * end ; <nl> long result = strtol ( value , & end , 0 ) ; <nl> if ( * end ! = ' \ 0 ' | | result < 0 | | result > INT_MAX ) return - 1 ; <nl> return ( int ) result ; <nl> mmm a / src / core / lib / support / string . h <nl> ppp b / src / core / lib / support / string . h <nl> where long is 32bit is size . * / <nl> int int64_ttoa ( int64_t value , char * output ) ; <nl> <nl> / / Parses a non - negative number from a value string . Returns - 1 on error . <nl> - int gpr_parse_nonnegative_number ( const char * value ) ; <nl> + int gpr_parse_nonnegative_number ( const char * value ) ; <nl> <nl> / * Reverse a run of bytes * / <nl> void gpr_reverse_bytes ( char * str , int len ) ; <nl> mmm a / src / core / lib / transport / method_config . c <nl> ppp b / src / core / lib / transport / method_config . c <nl> static char * parse_json_method_name ( grpc_json * json ) { <nl> / / Parses the method config from \ a json . Adds an entry to \ a entries for <nl> / / each name found , incrementing \ a idx for each entry added . 
<nl> static bool parse_json_method_config ( <nl> - grpc_json * json , <nl> - void * ( * create_value ) ( const grpc_json * method_config_json ) , <nl> + grpc_json * json , void * ( * create_value ) ( const grpc_json * method_config_json ) , <nl> const grpc_mdstr_hash_table_vtable * vtable , <nl> - grpc_mdstr_hash_table_entry * entries , size_t * idx ) { <nl> + grpc_mdstr_hash_table_entry * entries , size_t * idx ) { <nl> / / Construct value . <nl> void * method_config = create_value ( json ) ; <nl> if ( method_config = = NULL ) return NULL ; <nl> grpc_mdstr_hash_table * grpc_method_config_table_create_from_json ( <nl> num_entries + = count_names_in_method_config_json ( method ) ; <nl> } <nl> / / Populate method config table entries . <nl> - entries = <nl> - gpr_malloc ( num_entries * sizeof ( grpc_mdstr_hash_table_entry ) ) ; <nl> + entries = gpr_malloc ( num_entries * sizeof ( grpc_mdstr_hash_table_entry ) ) ; <nl> size_t idx = 0 ; <nl> for ( grpc_json * method = field - > child ; method ! = NULL ; <nl> method = method - > next ) { <nl> mmm a / test / core / end2end / connection_refused_test . c <nl> ppp b / test / core / end2end / connection_refused_test . c <nl> static void run_test ( bool wait_for_ready , bool use_service_config ) { <nl> grpc_channel_args * args = NULL ; <nl> if ( use_service_config ) { <nl> GPR_ASSERT ( wait_for_ready ) ; <nl> - grpc_json_tree * service_config_json = grpc_json_tree_create ( <nl> + grpc_json_tree * service_config_json = grpc_json_tree_create ( <nl> " { \ n " <nl> " \ " method_config \ " : [ { \ n " <nl> " \ " name \ " : [ \ n " <nl> mmm a / test / core / end2end / tests / cancel_after_accept . c <nl> ppp b / test / core / end2end / tests / cancel_after_accept . c <nl> static void test_cancel_after_accept ( grpc_end2end_test_config config , <nl> <nl> grpc_channel_args * args = NULL ; <nl> if ( use_service_config ) { <nl> - grpc_json_tree * service_config_json = grpc_json_tree_create ( <nl> + grpc_json_tree * service_config_json = grpc_json_tree_create ( <nl> " { \ n " <nl> " \ " method_config \ " : [ { \ n " <nl> " \ " name \ " : [ \ n " <nl> mmm a / test / core / end2end / tests / max_message_length . c <nl> ppp b / test / core / end2end / tests / max_message_length . c <nl> static void test_max_message_length_on_request ( grpc_end2end_test_config config , <nl> if ( use_service_config ) { <nl> / / We don ' t currently support service configs on the server side . <nl> GPR_ASSERT ( send_limit ) ; <nl> - grpc_json_tree * service_config_json = grpc_json_tree_create ( <nl> + grpc_json_tree * service_config_json = grpc_json_tree_create ( <nl> " { \ n " <nl> " \ " method_config \ " : [ { \ n " <nl> " \ " name \ " : [ \ n " <nl> static void test_max_message_length_on_response ( grpc_end2end_test_config config , <nl> if ( use_service_config ) { <nl> / / We don ' t currently support service configs on the server side . <nl> GPR_ASSERT ( ! send_limit ) ; <nl> - grpc_json_tree * service_config_json = grpc_json_tree_create ( <nl> + grpc_json_tree * service_config_json = grpc_json_tree_create ( <nl> " { \ n " <nl> " \ " method_config \ " : [ { \ n " <nl> " \ " name \ " : [ \ n " <nl>
clang-format
grpc/grpc
47f1084ce835d4288ddfa9f280e07d0a8d5719c5
2016-11-03T15:45:27Z
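Everything in this grpc sweep is whitespace: pointer-declarator spacing and continuation wrapping per the repository's .clang-format, with no semantic change (the tokenized dump above hides the exact spacing, so treat the rendering below as illustrative). The one clearly visible rewrap is the wait_for_ready ternary, whose false branch clang-format aligns under the true branch:

    // Illustrative only: simplified types standing in for the grpc enums.
    enum wait_for_ready_value { WAIT_FOR_READY_FALSE, WAIT_FOR_READY_TRUE };
    enum json_type { GRPC_JSON_FALSE, GRPC_JSON_TRUE };

    wait_for_ready_value classify(json_type type) {
      // clang-format pulled the '?' branch onto the condition line and
      // aligned ':' beneath it; the expression itself is unchanged.
      return type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
                                    : WAIT_FOR_READY_FALSE;
    }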
mmm a / modules / core / src / drawing . cpp <nl> ppp b / modules / core / src / drawing . cpp <nl> FillEdgeCollection ( Mat & img , vector < PolyEdge > & edges , const void * color ) <nl> { <nl> PolyEdge & e1 = edges [ i ] ; <nl> assert ( e1 . y0 < e1 . y1 ) ; <nl> + / / Determine x - coordinate of the end of the edge . <nl> + / / ( This is not necessary x - coordinate of any vertex in the array . ) <nl> + int x1 = e1 . x + ( e1 . y1 - e1 . y0 ) * e1 . dx ; <nl> y_min = std : : min ( y_min , e1 . y0 ) ; <nl> y_max = std : : max ( y_max , e1 . y1 ) ; <nl> x_min = std : : min ( x_min , e1 . x ) ; <nl> x_max = std : : max ( x_max , e1 . x ) ; <nl> + x_min = std : : min ( x_min , x1 ) ; <nl> + x_max = std : : max ( x_max , x1 ) ; <nl> } <nl> <nl> if ( y_max < 0 | | y_min > = size . height | | x_max < 0 | | x_min > = ( size . width < < XY_SHIFT ) ) <nl>
Fix determining bounding box of the edge collection in FillEdgeCollection().
opencv/opencv
7512b5fdde3b48845b414d94c6e0cb09c653f3c6
2014-11-25T17:54:47Z
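The bug: FillEdgeCollection built its bounding box only from each edge's stored x, i.e. its start point, but an edge also extends to x1 = x + (y1 - y0) * dx, which after clipping need not coincide with any vertex in the input array. The patch folds that derived endpoint into x_min/x_max. A self-contained sketch of the corrected extent computation (OpenCV's fixed-point XY_SHIFT scaling omitted for clarity):

    #include <algorithm>
    #include <climits>
    #include <vector>

    // Simplified stand-in for cv::PolyEdge: an edge starts at (x, y0),
    // ends at scanline y1, and advances x by dx per scanline.
    struct Edge { int y0, y1, x, dx; };

    struct Box { int x_min, x_max, y_min, y_max; };

    // Mirrors the patched loop: both the start x and the derived end x
    // of every edge contribute to the horizontal extent.
    Box boundingBox(const std::vector<Edge>& edges) {
      Box b{INT_MAX, INT_MIN, INT_MAX, INT_MIN};
      for (const Edge& e : edges) {
        int x1 = e.x + (e.y1 - e.y0) * e.dx;  // end-of-edge x, the missing term
        b.y_min = std::min(b.y_min, e.y0);
        b.y_max = std::max(b.y_max, e.y1);
        b.x_min = std::min({b.x_min, e.x, x1});
        b.x_max = std::max({b.x_max, e.x, x1});
      }
      return b;
    }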
mmm a / tests / integration / tables / deb_packages . cpp <nl> ppp b / tests / integration / tables / deb_packages . cpp <nl> TEST_F ( DebPackages , test_sanity ) { <nl> for ( const auto & row : rows ) { <nl> auto pckg_name = row . at ( " name " ) ; <nl> all_packages . insert ( pckg_name ) ; <nl> + if ( pckg_name = = " dpkg " ) <nl> + break ; <nl> } <nl> <nl> ASSERT_EQ ( all_packages . count ( " dpkg " ) , 1u ) ; <nl> - ASSERT_EQ ( all_packages . count ( " linux - base " ) , 1u ) ; <nl> - ASSERT_EQ ( all_packages . count ( " linux - firmware " ) , 1u ) ; <nl> - ASSERT_EQ ( all_packages . count ( " linux - generic " ) , 1u ) ; <nl> <nl> } else { <nl> LOG ( WARNING ) < < " Empty results of query from ' deb_packages ' , assume there " <nl>
Improve deb_packages test ()
osquery/osquery
7a9f099903af936a4a9a7d0b917871e926d55da0
2019-11-01T14:36:42Z
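The test now stops scanning rows as soon as dpkg appears and keeps only the dpkg assertion, since linux-base, linux-firmware, and linux-generic are not guaranteed to be installed on every Debian-family image. The shape of the change is a plain early-exit scan (simplified sketch, not the osquery test harness):

    #include <set>
    #include <string>
    #include <vector>

    // Collect names only until the one package every dpkg-based system
    // must have shows up; the rest of the rows never need to be walked.
    bool hasDpkg(const std::vector<std::string>& names) {
      std::set<std::string> seen;
      for (const auto& name : names) {
        seen.insert(name);
        if (name == "dpkg") break;  // early exit, as in the patched test
      }
      return seen.count("dpkg") == 1;
    }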
mmm a / db / db . vcproj <nl> ppp b / db / db . vcproj <nl> <nl> RelativePath = " . . \ util \ thread_pool . h " <nl> > <nl> < / File > <nl> + < File <nl> + RelativePath = " . . \ util \ text . cpp " <nl> + > <nl> + < / File > <nl> + < File <nl> + RelativePath = " . . \ util \ text . h " <nl> + > <nl> + < / File > <nl> < File <nl> RelativePath = " . . \ util \ unittest . h " <nl> > <nl> mmm a / s / balancer_policy . cpp <nl> ppp b / s / balancer_policy . cpp <nl> <nl> * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> * / <nl> <nl> - # include " . . / pch . h " <nl> + # include " pch . h " <nl> # include " . . / client / dbclient . h " <nl> # include " config . h " <nl> <nl> mmm a / s / dbgrid . vcproj <nl> ppp b / s / dbgrid . vcproj <nl> <nl> RelativePath = " . \ balance . cpp " <nl> > <nl> < / File > <nl> + < File <nl> + RelativePath = " . \ balancer_policy . cpp " <nl> + > <nl> + < / File > <nl> < File <nl> RelativePath = " . \ chunk . cpp " <nl> > <nl> <nl> UniqueIdentifier = " { 93995380 - 89BD - 4b04 - 88EB - 625FBE52EBFB } " <nl> > <nl> < File <nl> - RelativePath = " . \ gridconfig . h " <nl> - > <nl> - < / File > <nl> - < File <nl> - RelativePath = " . \ griddatabase . h " <nl> + RelativePath = " . \ balancer_policy . h " <nl> > <nl> < / File > <nl> < File <nl> <nl> RelativePath = " . . \ pch . h " <nl> > <nl> < / File > <nl> + < File <nl> + RelativePath = " . . \ util \ text . h " <nl> + > <nl> + < / File > <nl> < / Filter > <nl> < / Filter > <nl> < Filter <nl> <nl> RelativePath = " . . \ util \ thread_pool . cpp " <nl> > <nl> < / File > <nl> + < File <nl> + RelativePath = " . . \ util \ text . cpp " <nl> + > <nl> + < / File > <nl> < File <nl> RelativePath = " . . \ util \ util . cpp " <nl> > <nl> mmm a / util / mmap_win . cpp <nl> ppp b / util / mmap_win . cpp <nl> <nl> <nl> # include " pch . h " <nl> # include " mmap . h " <nl> + # include " text . h " <nl> # include < windows . h > <nl> <nl> namespace mongo { <nl> namespace mongo { <nl> CloseHandle ( fd ) ; <nl> fd = 0 ; <nl> } <nl> - <nl> - std : : wstring toWideString ( const char * s ) { <nl> - std : : basic_ostringstream < TCHAR > buf ; <nl> - buf < < s ; <nl> - return buf . str ( ) ; <nl> - } <nl> <nl> unsigned mapped = 0 ; <nl> <nl> mmm a / util / ntservice . cpp <nl> ppp b / util / ntservice . cpp <nl> <nl> <nl> # include " pch . h " <nl> # include " ntservice . h " <nl> + # include " text . h " <nl> # include < direct . h > <nl> <nl> # if defined ( _WIN32 ) <nl> namespace mongo { <nl> std : : string arg ( argv [ i ] ) ; <nl> <nl> / / replace install command to indicate process is being started as a service <nl> - if ( arg = = " - - install " ) <nl> + if ( arg = = " - - install " ) { <nl> arg = " - - service " ; <nl> + } <nl> <nl> commandLine < < arg < < " " ; <nl> } <nl> <nl> SC_HANDLE schSCManager = : : OpenSCManager ( NULL , NULL , SC_MANAGER_ALL_ACCESS ) ; <nl> - if ( schSCManager = = NULL ) <nl> + if ( schSCManager = = NULL ) { <nl> return false ; <nl> + } <nl> <nl> std : : basic_ostringstream < TCHAR > commandLineWide ; <nl> commandLineWide < < commandLine . str ( ) . c_str ( ) ; <nl> <nl> + log ( ) < < " Creating service " < < toUtf8String ( serviceName ) < < " . " < < endl ; <nl> + <nl> / / create new service <nl> SC_HANDLE schService = : : CreateService ( schSCManager , serviceName . c_str ( ) , displayName . c_str ( ) , <nl> SERVICE_ALL_ACCESS , SERVICE_WIN32_OWN_PROCESS , <nl> namespace mongo { <nl> commandLineWide . str ( ) . 
c_str ( ) , NULL , NULL , L " \ 0 \ 0 " , NULL , NULL ) ; <nl> <nl> if ( schService = = NULL ) { <nl> + log ( ) < < " Error creating service . " < < endl ; <nl> : : CloseServiceHandle ( schSCManager ) ; <nl> return false ; <nl> } <nl> namespace mongo { <nl> <nl> / / set service recovery options <nl> serviceInstalled = : : ChangeServiceConfig2 ( schService , SERVICE_CONFIG_FAILURE_ACTIONS , & serviceFailure ) ; <nl> + <nl> + log ( ) < < " Service creation successful . " < < endl ; <nl> } <nl> - <nl> + else { <nl> + log ( ) < < " Service creation seems to have partially failed . Check the event log for more details . " < < endl ; <nl> + } <nl> + <nl> : : CloseServiceHandle ( schService ) ; <nl> : : CloseServiceHandle ( schSCManager ) ; <nl> <nl> namespace mongo { <nl> <nl> bool ServiceController : : removeService ( const std : : wstring & serviceName ) { <nl> SC_HANDLE schSCManager = : : OpenSCManager ( NULL , NULL , SC_MANAGER_ALL_ACCESS ) ; <nl> - if ( schSCManager = = NULL ) <nl> + if ( schSCManager = = NULL ) { <nl> return false ; <nl> + } <nl> <nl> SC_HANDLE schService = : : OpenService ( schSCManager , serviceName . c_str ( ) , SERVICE_ALL_ACCESS ) ; <nl> - <nl> if ( schService = = NULL ) { <nl> + log ( ) < < " Could not get a service handle for " < < toUtf8String ( serviceName ) < < " . " < < endl ; <nl> : : CloseServiceHandle ( schSCManager ) ; <nl> return false ; <nl> } <nl> namespace mongo { <nl> <nl> / / stop service if its running <nl> if ( : : ControlService ( schService , SERVICE_CONTROL_STOP , & serviceStatus ) ) { <nl> + log ( ) < < " Service " < < toUtf8String ( serviceName ) < < " is currently running . Stopping service . " < < endl ; <nl> while ( : : QueryServiceStatus ( schService , & serviceStatus ) ) { <nl> if ( serviceStatus . dwCurrentState = = SERVICE_STOP_PENDING ) <nl> - { <nl> - Sleep ( 1000 ) ; <nl> - } <nl> - else { break ; } <nl> + { <nl> + Sleep ( 1000 ) ; <nl> + } <nl> + else { break ; } <nl> } <nl> + log ( ) < < " Service stopped . " < < endl ; <nl> } <nl> <nl> + log ( ) < < " Deleting service " < < toUtf8String ( serviceName ) < < " . " < < endl ; <nl> bool serviceRemoved = : : DeleteService ( schService ) ; <nl> <nl> : : CloseServiceHandle ( schService ) ; <nl> : : CloseServiceHandle ( schSCManager ) ; <nl> <nl> + if ( serviceRemoved ) { <nl> + log ( ) < < " Service deleted successfully . " < < endl ; <nl> + } <nl> + else { <nl> + log ( ) < < " Failed to delete service . " < < endl ; <nl> + } <nl> + <nl> return serviceRemoved ; <nl> } <nl> <nl> namespace mongo { <nl> <nl> } / / namespace mongo <nl> <nl> - # endif <nl> + # endif <nl> \ No newline at end of file <nl> mmm a / util / text . cpp <nl> ppp b / util / text . cpp <nl> namespace mongo { <nl> } <nl> if ( left ! = 0 ) return false ; / / string ended mid - codepoint <nl> return true ; <nl> + } <nl> + <nl> + # if defined ( _WIN32 ) <nl> + <nl> + std : : string toUtf8String ( const std : : wstring & wide ) <nl> + { <nl> + if ( wide . size ( ) > boost : : integer_traits < int > : : const_max ) <nl> + throw std : : length_error ( <nl> + " Wide string cannot be more than INT_MAX characters long . " ) ; <nl> + if ( wide . size ( ) = = 0 ) <nl> + return " " ; <nl> + <nl> + / / Calculate necessary buffer size <nl> + int len = : : WideCharToMultiByte ( <nl> + CP_UTF8 , 0 , wide . c_str ( ) , static_cast < int > ( wide . 
size ( ) ) , <nl> + NULL , 0 , NULL , NULL ) ; <nl> + <nl> + / / Perform actual conversion <nl> + if ( len > 0 ) <nl> + { <nl> + std : : vector < char > buffer ( len ) ; <nl> + len = : : WideCharToMultiByte ( <nl> + CP_UTF8 , 0 , wide . c_str ( ) , static_cast < int > ( wide . size ( ) ) , <nl> + & buffer [ 0 ] , static_cast < int > ( buffer . size ( ) ) , NULL , NULL ) ; <nl> + if ( len > 0 ) <nl> + { <nl> + assert ( len = = static_cast < int > ( buffer . size ( ) ) ) ; <nl> + return std : : string ( & buffer [ 0 ] , buffer . size ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + throw boost : : system : : system_error ( <nl> + : : GetLastError ( ) , boost : : system : : system_category ) ; <nl> + } <nl> + <nl> + std : : wstring toWideString ( const char * s ) { <nl> + std : : basic_ostringstream < TCHAR > buf ; <nl> + buf < < s ; <nl> + return buf . str ( ) ; <nl> } <nl> + <nl> + # endif <nl> } <nl> mmm a / util / text . h <nl> ppp b / util / text . h <nl> namespace mongo { <nl> * guarantee that the codepoints are valid . <nl> * / <nl> bool isValidUTF8 ( const char * s ) ; <nl> - inline bool isValidUTF8 ( string s ) { return isValidUTF8 ( s . c_str ( ) ) ; } <nl> + inline bool isValidUTF8 ( string s ) { return isValidUTF8 ( s . c_str ( ) ) ; } <nl> + <nl> + # if defined ( _WIN32 ) <nl> + <nl> + std : : string toUtf8String ( const std : : wstring & wide ) ; <nl> + <nl> + std : : wstring toWideString ( const char * s ) ; <nl> + <nl> + # endif <nl> <nl> } <nl>
Clean remerge of SERVER-1189 Improves logging in util/ntservice.cpp
mongodb/mongo
efde5cf575afd1cb8ffe9493d0452042287f3a99
2010-06-05T01:38:54Z
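Besides the extra service-management logging, the commit consolidates the Windows string helpers into util/text: toUtf8String() uses the standard two-pass WideCharToMultiByte idiom, first calling with a null output buffer to learn the required length, then converting into a buffer of exactly that size. A trimmed, Windows-only sketch of the same idiom (error handling simplified to a runtime_error rather than the boost::system machinery in the patch):

    #if defined(_WIN32)
    #include <windows.h>
    #include <stdexcept>
    #include <string>
    #include <vector>

    std::string wideToUtf8(const std::wstring& wide) {
      if (wide.empty()) return "";
      // Pass 1: null output buffer, so the call only reports the size.
      int len = ::WideCharToMultiByte(CP_UTF8, 0, wide.c_str(),
                                      static_cast<int>(wide.size()),
                                      nullptr, 0, nullptr, nullptr);
      if (len <= 0) throw std::runtime_error("WideCharToMultiByte failed");
      // Pass 2: convert into a buffer of exactly the reported size.
      std::vector<char> buffer(len);
      ::WideCharToMultiByte(CP_UTF8, 0, wide.c_str(),
                            static_cast<int>(wide.size()),
                            buffer.data(), len, nullptr, nullptr);
      return std::string(buffer.data(), buffer.size());
    }
    #endif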
mmm a / csharp / tests / Facebook . Yoga / YGMarginTest . cs <nl> ppp b / csharp / tests / Facebook . Yoga / YGMarginTest . cs <nl> public void Test_margin_auto_top_and_bottom_strech ( ) <nl> Assert . AreEqual ( 50f , root_child1 . LayoutHeight ) ; <nl> } <nl> <nl> + [ Test ] <nl> + public void Test_margin_should_not_be_part_of_max_height ( ) <nl> + { <nl> + YogaConfig config = new YogaConfig ( ) ; <nl> + <nl> + YogaNode root = new YogaNode ( config ) ; <nl> + root . Width = 250 ; <nl> + root . Height = 250 ; <nl> + <nl> + YogaNode root_child0 = new YogaNode ( config ) ; <nl> + root_child0 . MarginTop = 20 ; <nl> + root_child0 . Width = 100 ; <nl> + root_child0 . Height = 100 ; <nl> + root_child0 . MaxHeight = 100 ; <nl> + root . Insert ( 0 , root_child0 ) ; <nl> + root . StyleDirection = YogaDirection . LTR ; <nl> + root . CalculateLayout ( ) ; <nl> + <nl> + Assert . AreEqual ( 0f , root . LayoutX ) ; <nl> + Assert . AreEqual ( 0f , root . LayoutY ) ; <nl> + Assert . AreEqual ( 250f , root . LayoutWidth ) ; <nl> + Assert . AreEqual ( 250f , root . LayoutHeight ) ; <nl> + <nl> + Assert . AreEqual ( 0f , root_child0 . LayoutX ) ; <nl> + Assert . AreEqual ( 20f , root_child0 . LayoutY ) ; <nl> + Assert . AreEqual ( 100f , root_child0 . LayoutWidth ) ; <nl> + Assert . AreEqual ( 100f , root_child0 . LayoutHeight ) ; <nl> + <nl> + root . StyleDirection = YogaDirection . RTL ; <nl> + root . CalculateLayout ( ) ; <nl> + <nl> + Assert . AreEqual ( 0f , root . LayoutX ) ; <nl> + Assert . AreEqual ( 0f , root . LayoutY ) ; <nl> + Assert . AreEqual ( 250f , root . LayoutWidth ) ; <nl> + Assert . AreEqual ( 250f , root . LayoutHeight ) ; <nl> + <nl> + Assert . AreEqual ( 150f , root_child0 . LayoutX ) ; <nl> + Assert . AreEqual ( 20f , root_child0 . LayoutY ) ; <nl> + Assert . AreEqual ( 100f , root_child0 . LayoutWidth ) ; <nl> + Assert . AreEqual ( 100f , root_child0 . LayoutHeight ) ; <nl> + } <nl> + <nl> + [ Test ] <nl> + public void Test_margin_should_not_be_part_of_max_width ( ) <nl> + { <nl> + YogaConfig config = new YogaConfig ( ) ; <nl> + <nl> + YogaNode root = new YogaNode ( config ) ; <nl> + root . Width = 250 ; <nl> + root . Height = 250 ; <nl> + <nl> + YogaNode root_child0 = new YogaNode ( config ) ; <nl> + root_child0 . MarginLeft = 20 ; <nl> + root_child0 . Width = 100 ; <nl> + root_child0 . MaxWidth = 100 ; <nl> + root_child0 . Height = 100 ; <nl> + root . Insert ( 0 , root_child0 ) ; <nl> + root . StyleDirection = YogaDirection . LTR ; <nl> + root . CalculateLayout ( ) ; <nl> + <nl> + Assert . AreEqual ( 0f , root . LayoutX ) ; <nl> + Assert . AreEqual ( 0f , root . LayoutY ) ; <nl> + Assert . AreEqual ( 250f , root . LayoutWidth ) ; <nl> + Assert . AreEqual ( 250f , root . LayoutHeight ) ; <nl> + <nl> + Assert . AreEqual ( 20f , root_child0 . LayoutX ) ; <nl> + Assert . AreEqual ( 0f , root_child0 . LayoutY ) ; <nl> + Assert . AreEqual ( 100f , root_child0 . LayoutWidth ) ; <nl> + Assert . AreEqual ( 100f , root_child0 . LayoutHeight ) ; <nl> + <nl> + root . StyleDirection = YogaDirection . RTL ; <nl> + root . CalculateLayout ( ) ; <nl> + <nl> + Assert . AreEqual ( 0f , root . LayoutX ) ; <nl> + Assert . AreEqual ( 0f , root . LayoutY ) ; <nl> + Assert . AreEqual ( 250f , root . LayoutWidth ) ; <nl> + Assert . AreEqual ( 250f , root . LayoutHeight ) ; <nl> + <nl> + Assert . AreEqual ( 150f , root_child0 . LayoutX ) ; <nl> + Assert . AreEqual ( 0f , root_child0 . LayoutY ) ; <nl> + Assert . AreEqual ( 100f , root_child0 . LayoutWidth ) ; <nl> + Assert . 
AreEqual ( 100f , root_child0 . LayoutHeight ) ; <nl> + } <nl> + <nl> } <nl> } <nl> mmm a / gentest / fixtures / YGMarginTest . html <nl> ppp b / gentest / fixtures / YGMarginTest . html <nl> <nl> < div style = " width : 50px ; height : 50px ; margin - top : auto ; margin - bottom : auto ; " > < / div > <nl> < div style = " width : 50px ; height : 50px ; " > < / div > <nl> < / div > <nl> + <nl> + < div id = " margin_should_not_be_part_of_max_height " style = " width : 250px ; height : 250px ; " > <nl> + < div style = " width : 100px ; height : 100px ; max - height : 100px ; margin - top : 20px ; " > < / div > <nl> + < / div > <nl> + <nl> + < div id = " margin_should_not_be_part_of_max_width " style = " width : 250px ; height : 250px ; " > <nl> + < div style = " width : 100px ; height : 100px ; max - width : 100px ; margin - left : 20px ; " > < / div > <nl> + < / div > <nl> mmm a / java / tests / com / facebook / yoga / YGMarginTest . java <nl> ppp b / java / tests / com / facebook / yoga / YGMarginTest . java <nl> public void test_margin_auto_top_and_bottom_strech ( ) { <nl> assertEquals ( 50f , root_child1 . getLayoutHeight ( ) , 0 . 0f ) ; <nl> } <nl> <nl> + @ Test <nl> + public void test_margin_should_not_be_part_of_max_height ( ) { <nl> + YogaConfig config = new YogaConfig ( ) ; <nl> + <nl> + final YogaNode root = new YogaNode ( config ) ; <nl> + root . setWidth ( 250f ) ; <nl> + root . setHeight ( 250f ) ; <nl> + <nl> + final YogaNode root_child0 = new YogaNode ( config ) ; <nl> + root_child0 . setMargin ( YogaEdge . TOP , 20f ) ; <nl> + root_child0 . setWidth ( 100f ) ; <nl> + root_child0 . setHeight ( 100f ) ; <nl> + root_child0 . setMaxHeight ( 100f ) ; <nl> + root . addChildAt ( root_child0 , 0 ) ; <nl> + root . setDirection ( YogaDirection . LTR ) ; <nl> + root . calculateLayout ( YogaConstants . UNDEFINED , YogaConstants . UNDEFINED ) ; <nl> + <nl> + assertEquals ( 0f , root . getLayoutX ( ) , 0 . 0f ) ; <nl> + assertEquals ( 0f , root . getLayoutY ( ) , 0 . 0f ) ; <nl> + assertEquals ( 250f , root . getLayoutWidth ( ) , 0 . 0f ) ; <nl> + assertEquals ( 250f , root . getLayoutHeight ( ) , 0 . 0f ) ; <nl> + <nl> + assertEquals ( 0f , root_child0 . getLayoutX ( ) , 0 . 0f ) ; <nl> + assertEquals ( 20f , root_child0 . getLayoutY ( ) , 0 . 0f ) ; <nl> + assertEquals ( 100f , root_child0 . getLayoutWidth ( ) , 0 . 0f ) ; <nl> + assertEquals ( 100f , root_child0 . getLayoutHeight ( ) , 0 . 0f ) ; <nl> + <nl> + root . setDirection ( YogaDirection . RTL ) ; <nl> + root . calculateLayout ( YogaConstants . UNDEFINED , YogaConstants . UNDEFINED ) ; <nl> + <nl> + assertEquals ( 0f , root . getLayoutX ( ) , 0 . 0f ) ; <nl> + assertEquals ( 0f , root . getLayoutY ( ) , 0 . 0f ) ; <nl> + assertEquals ( 250f , root . getLayoutWidth ( ) , 0 . 0f ) ; <nl> + assertEquals ( 250f , root . getLayoutHeight ( ) , 0 . 0f ) ; <nl> + <nl> + assertEquals ( 150f , root_child0 . getLayoutX ( ) , 0 . 0f ) ; <nl> + assertEquals ( 20f , root_child0 . getLayoutY ( ) , 0 . 0f ) ; <nl> + assertEquals ( 100f , root_child0 . getLayoutWidth ( ) , 0 . 0f ) ; <nl> + assertEquals ( 100f , root_child0 . getLayoutHeight ( ) , 0 . 0f ) ; <nl> + } <nl> + <nl> + @ Test <nl> + public void test_margin_should_not_be_part_of_max_width ( ) { <nl> + YogaConfig config = new YogaConfig ( ) ; <nl> + <nl> + final YogaNode root = new YogaNode ( config ) ; <nl> + root . setWidth ( 250f ) ; <nl> + root . setHeight ( 250f ) ; <nl> + <nl> + final YogaNode root_child0 = new YogaNode ( config ) ; <nl> + root_child0 . setMargin ( YogaEdge . 
LEFT , 20f ) ; <nl> + root_child0 . setWidth ( 100f ) ; <nl> + root_child0 . setMaxWidth ( 100f ) ; <nl> + root_child0 . setHeight ( 100f ) ; <nl> + root . addChildAt ( root_child0 , 0 ) ; <nl> + root . setDirection ( YogaDirection . LTR ) ; <nl> + root . calculateLayout ( YogaConstants . UNDEFINED , YogaConstants . UNDEFINED ) ; <nl> + <nl> + assertEquals ( 0f , root . getLayoutX ( ) , 0 . 0f ) ; <nl> + assertEquals ( 0f , root . getLayoutY ( ) , 0 . 0f ) ; <nl> + assertEquals ( 250f , root . getLayoutWidth ( ) , 0 . 0f ) ; <nl> + assertEquals ( 250f , root . getLayoutHeight ( ) , 0 . 0f ) ; <nl> + <nl> + assertEquals ( 20f , root_child0 . getLayoutX ( ) , 0 . 0f ) ; <nl> + assertEquals ( 0f , root_child0 . getLayoutY ( ) , 0 . 0f ) ; <nl> + assertEquals ( 100f , root_child0 . getLayoutWidth ( ) , 0 . 0f ) ; <nl> + assertEquals ( 100f , root_child0 . getLayoutHeight ( ) , 0 . 0f ) ; <nl> + <nl> + root . setDirection ( YogaDirection . RTL ) ; <nl> + root . calculateLayout ( YogaConstants . UNDEFINED , YogaConstants . UNDEFINED ) ; <nl> + <nl> + assertEquals ( 0f , root . getLayoutX ( ) , 0 . 0f ) ; <nl> + assertEquals ( 0f , root . getLayoutY ( ) , 0 . 0f ) ; <nl> + assertEquals ( 250f , root . getLayoutWidth ( ) , 0 . 0f ) ; <nl> + assertEquals ( 250f , root . getLayoutHeight ( ) , 0 . 0f ) ; <nl> + <nl> + assertEquals ( 150f , root_child0 . getLayoutX ( ) , 0 . 0f ) ; <nl> + assertEquals ( 0f , root_child0 . getLayoutY ( ) , 0 . 0f ) ; <nl> + assertEquals ( 100f , root_child0 . getLayoutWidth ( ) , 0 . 0f ) ; <nl> + assertEquals ( 100f , root_child0 . getLayoutHeight ( ) , 0 . 0f ) ; <nl> + } <nl> + <nl> } <nl> mmm a / javascript / tests / Facebook . Yoga / YGMarginTest . js <nl> ppp b / javascript / tests / Facebook . Yoga / YGMarginTest . js <nl> it ( " margin_auto_top_and_bottom_strech " , function ( ) { <nl> } <nl> } <nl> } ) ; <nl> + it ( " margin_should_not_be_part_of_max_height " , function ( ) { <nl> + try { <nl> + var root = Yoga . Node . create ( config ) ; <nl> + root . setWidth ( 250 ) ; <nl> + root . setHeight ( 250 ) ; <nl> + <nl> + var root_child0 = Yoga . Node . create ( config ) ; <nl> + root_child0 . setMargin ( Yoga . EDGE_TOP , 20 ) ; <nl> + root_child0 . setWidth ( 100 ) ; <nl> + root_child0 . setHeight ( 100 ) ; <nl> + root_child0 . setMaxHeight ( 100 ) ; <nl> + root . insertChild ( root_child0 , 0 ) ; <nl> + root . calculateLayout ( Yoga . UNDEFINED , Yoga . UNDEFINED , Yoga . DIRECTION_LTR ) ; <nl> + <nl> + console . assert ( 0 = = = root . getComputedLeft ( ) , " 0 = = = root . getComputedLeft ( ) ( " + root . getComputedLeft ( ) + " ) " ) ; <nl> + console . assert ( 0 = = = root . getComputedTop ( ) , " 0 = = = root . getComputedTop ( ) ( " + root . getComputedTop ( ) + " ) " ) ; <nl> + console . assert ( 250 = = = root . getComputedWidth ( ) , " 250 = = = root . getComputedWidth ( ) ( " + root . getComputedWidth ( ) + " ) " ) ; <nl> + console . assert ( 250 = = = root . getComputedHeight ( ) , " 250 = = = root . getComputedHeight ( ) ( " + root . getComputedHeight ( ) + " ) " ) ; <nl> + <nl> + console . assert ( 0 = = = root_child0 . getComputedLeft ( ) , " 0 = = = root_child0 . getComputedLeft ( ) ( " + root_child0 . getComputedLeft ( ) + " ) " ) ; <nl> + console . assert ( 20 = = = root_child0 . getComputedTop ( ) , " 20 = = = root_child0 . getComputedTop ( ) ( " + root_child0 . getComputedTop ( ) + " ) " ) ; <nl> + console . assert ( 100 = = = root_child0 . getComputedWidth ( ) , " 100 = = = root_child0 . getComputedWidth ( ) ( " + root_child0 . 
getComputedWidth ( ) + " ) " ) ; <nl> + console . assert ( 100 = = = root_child0 . getComputedHeight ( ) , " 100 = = = root_child0 . getComputedHeight ( ) ( " + root_child0 . getComputedHeight ( ) + " ) " ) ; <nl> + <nl> + root . calculateLayout ( Yoga . UNDEFINED , Yoga . UNDEFINED , Yoga . DIRECTION_RTL ) ; <nl> + <nl> + console . assert ( 0 = = = root . getComputedLeft ( ) , " 0 = = = root . getComputedLeft ( ) ( " + root . getComputedLeft ( ) + " ) " ) ; <nl> + console . assert ( 0 = = = root . getComputedTop ( ) , " 0 = = = root . getComputedTop ( ) ( " + root . getComputedTop ( ) + " ) " ) ; <nl> + console . assert ( 250 = = = root . getComputedWidth ( ) , " 250 = = = root . getComputedWidth ( ) ( " + root . getComputedWidth ( ) + " ) " ) ; <nl> + console . assert ( 250 = = = root . getComputedHeight ( ) , " 250 = = = root . getComputedHeight ( ) ( " + root . getComputedHeight ( ) + " ) " ) ; <nl> + <nl> + console . assert ( 150 = = = root_child0 . getComputedLeft ( ) , " 150 = = = root_child0 . getComputedLeft ( ) ( " + root_child0 . getComputedLeft ( ) + " ) " ) ; <nl> + console . assert ( 20 = = = root_child0 . getComputedTop ( ) , " 20 = = = root_child0 . getComputedTop ( ) ( " + root_child0 . getComputedTop ( ) + " ) " ) ; <nl> + console . assert ( 100 = = = root_child0 . getComputedWidth ( ) , " 100 = = = root_child0 . getComputedWidth ( ) ( " + root_child0 . getComputedWidth ( ) + " ) " ) ; <nl> + console . assert ( 100 = = = root_child0 . getComputedHeight ( ) , " 100 = = = root_child0 . getComputedHeight ( ) ( " + root_child0 . getComputedHeight ( ) + " ) " ) ; <nl> + } finally { <nl> + if ( typeof root ! = = " undefined " ) { <nl> + root . freeRecursive ( ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> + it ( " margin_should_not_be_part_of_max_width " , function ( ) { <nl> + try { <nl> + var root = Yoga . Node . create ( config ) ; <nl> + root . setWidth ( 250 ) ; <nl> + root . setHeight ( 250 ) ; <nl> + <nl> + var root_child0 = Yoga . Node . create ( config ) ; <nl> + root_child0 . setMargin ( Yoga . EDGE_LEFT , 20 ) ; <nl> + root_child0 . setWidth ( 100 ) ; <nl> + root_child0 . setMaxWidth ( 100 ) ; <nl> + root_child0 . setHeight ( 100 ) ; <nl> + root . insertChild ( root_child0 , 0 ) ; <nl> + root . calculateLayout ( Yoga . UNDEFINED , Yoga . UNDEFINED , Yoga . DIRECTION_LTR ) ; <nl> + <nl> + console . assert ( 0 = = = root . getComputedLeft ( ) , " 0 = = = root . getComputedLeft ( ) ( " + root . getComputedLeft ( ) + " ) " ) ; <nl> + console . assert ( 0 = = = root . getComputedTop ( ) , " 0 = = = root . getComputedTop ( ) ( " + root . getComputedTop ( ) + " ) " ) ; <nl> + console . assert ( 250 = = = root . getComputedWidth ( ) , " 250 = = = root . getComputedWidth ( ) ( " + root . getComputedWidth ( ) + " ) " ) ; <nl> + console . assert ( 250 = = = root . getComputedHeight ( ) , " 250 = = = root . getComputedHeight ( ) ( " + root . getComputedHeight ( ) + " ) " ) ; <nl> + <nl> + console . assert ( 20 = = = root_child0 . getComputedLeft ( ) , " 20 = = = root_child0 . getComputedLeft ( ) ( " + root_child0 . getComputedLeft ( ) + " ) " ) ; <nl> + console . assert ( 0 = = = root_child0 . getComputedTop ( ) , " 0 = = = root_child0 . getComputedTop ( ) ( " + root_child0 . getComputedTop ( ) + " ) " ) ; <nl> + console . assert ( 100 = = = root_child0 . getComputedWidth ( ) , " 100 = = = root_child0 . getComputedWidth ( ) ( " + root_child0 . getComputedWidth ( ) + " ) " ) ; <nl> + console . assert ( 100 = = = root_child0 . getComputedHeight ( ) , " 100 = = = root_child0 . 
getComputedHeight ( ) ( " + root_child0 . getComputedHeight ( ) + " ) " ) ; <nl> + <nl> + root . calculateLayout ( Yoga . UNDEFINED , Yoga . UNDEFINED , Yoga . DIRECTION_RTL ) ; <nl> + <nl> + console . assert ( 0 = = = root . getComputedLeft ( ) , " 0 = = = root . getComputedLeft ( ) ( " + root . getComputedLeft ( ) + " ) " ) ; <nl> + console . assert ( 0 = = = root . getComputedTop ( ) , " 0 = = = root . getComputedTop ( ) ( " + root . getComputedTop ( ) + " ) " ) ; <nl> + console . assert ( 250 = = = root . getComputedWidth ( ) , " 250 = = = root . getComputedWidth ( ) ( " + root . getComputedWidth ( ) + " ) " ) ; <nl> + console . assert ( 250 = = = root . getComputedHeight ( ) , " 250 = = = root . getComputedHeight ( ) ( " + root . getComputedHeight ( ) + " ) " ) ; <nl> + <nl> + console . assert ( 150 = = = root_child0 . getComputedLeft ( ) , " 150 = = = root_child0 . getComputedLeft ( ) ( " + root_child0 . getComputedLeft ( ) + " ) " ) ; <nl> + console . assert ( 0 = = = root_child0 . getComputedTop ( ) , " 0 = = = root_child0 . getComputedTop ( ) ( " + root_child0 . getComputedTop ( ) + " ) " ) ; <nl> + console . assert ( 100 = = = root_child0 . getComputedWidth ( ) , " 100 = = = root_child0 . getComputedWidth ( ) ( " + root_child0 . getComputedWidth ( ) + " ) " ) ; <nl> + console . assert ( 100 = = = root_child0 . getComputedHeight ( ) , " 100 = = = root_child0 . getComputedHeight ( ) ( " + root_child0 . getComputedHeight ( ) + " ) " ) ; <nl> + } finally { <nl> + if ( typeof root ! = = " undefined " ) { <nl> + root . freeRecursive ( ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> mmm a / tests / YGMarginTest . cpp <nl> ppp b / tests / YGMarginTest . cpp <nl> TEST ( YogaTest , margin_auto_top_and_bottom_strech ) { <nl> <nl> YGConfigFree ( config ) ; <nl> } <nl> + <nl> + TEST ( YogaTest , margin_should_not_be_part_of_max_height ) { <nl> + const YGConfigRef config = YGConfigNew ( ) ; <nl> + <nl> + const YGNodeRef root = YGNodeNewWithConfig ( config ) ; <nl> + YGNodeStyleSetWidth ( root , 250 ) ; <nl> + YGNodeStyleSetHeight ( root , 250 ) ; <nl> + <nl> + const YGNodeRef root_child0 = YGNodeNewWithConfig ( config ) ; <nl> + YGNodeStyleSetMargin ( root_child0 , YGEdgeTop , 20 ) ; <nl> + YGNodeStyleSetWidth ( root_child0 , 100 ) ; <nl> + YGNodeStyleSetHeight ( root_child0 , 100 ) ; <nl> + YGNodeStyleSetMaxHeight ( root_child0 , 100 ) ; <nl> + YGNodeInsertChild ( root , root_child0 , 0 ) ; <nl> + YGNodeCalculateLayout ( root , YGUndefined , YGUndefined , YGDirectionLTR ) ; <nl> + <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetLeft ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetTop ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 250 , YGNodeLayoutGetWidth ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 250 , YGNodeLayoutGetHeight ( root ) ) ; <nl> + <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetLeft ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 20 , YGNodeLayoutGetTop ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 100 , YGNodeLayoutGetWidth ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 100 , YGNodeLayoutGetHeight ( root_child0 ) ) ; <nl> + <nl> + YGNodeCalculateLayout ( root , YGUndefined , YGUndefined , YGDirectionRTL ) ; <nl> + <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetLeft ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetTop ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 250 , YGNodeLayoutGetWidth ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 250 , YGNodeLayoutGetHeight ( root ) ) ; <nl> + <nl> + ASSERT_FLOAT_EQ ( 150 , YGNodeLayoutGetLeft ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 20 , YGNodeLayoutGetTop ( root_child0 ) 
) ; <nl> + ASSERT_FLOAT_EQ ( 100 , YGNodeLayoutGetWidth ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 100 , YGNodeLayoutGetHeight ( root_child0 ) ) ; <nl> + <nl> + YGNodeFreeRecursive ( root ) ; <nl> + <nl> + YGConfigFree ( config ) ; <nl> + } <nl> + <nl> + TEST ( YogaTest , margin_should_not_be_part_of_max_width ) { <nl> + const YGConfigRef config = YGConfigNew ( ) ; <nl> + <nl> + const YGNodeRef root = YGNodeNewWithConfig ( config ) ; <nl> + YGNodeStyleSetWidth ( root , 250 ) ; <nl> + YGNodeStyleSetHeight ( root , 250 ) ; <nl> + <nl> + const YGNodeRef root_child0 = YGNodeNewWithConfig ( config ) ; <nl> + YGNodeStyleSetMargin ( root_child0 , YGEdgeLeft , 20 ) ; <nl> + YGNodeStyleSetWidth ( root_child0 , 100 ) ; <nl> + YGNodeStyleSetMaxWidth ( root_child0 , 100 ) ; <nl> + YGNodeStyleSetHeight ( root_child0 , 100 ) ; <nl> + YGNodeInsertChild ( root , root_child0 , 0 ) ; <nl> + YGNodeCalculateLayout ( root , YGUndefined , YGUndefined , YGDirectionLTR ) ; <nl> + <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetLeft ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetTop ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 250 , YGNodeLayoutGetWidth ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 250 , YGNodeLayoutGetHeight ( root ) ) ; <nl> + <nl> + ASSERT_FLOAT_EQ ( 20 , YGNodeLayoutGetLeft ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetTop ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 100 , YGNodeLayoutGetWidth ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 100 , YGNodeLayoutGetHeight ( root_child0 ) ) ; <nl> + <nl> + YGNodeCalculateLayout ( root , YGUndefined , YGUndefined , YGDirectionRTL ) ; <nl> + <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetLeft ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetTop ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 250 , YGNodeLayoutGetWidth ( root ) ) ; <nl> + ASSERT_FLOAT_EQ ( 250 , YGNodeLayoutGetHeight ( root ) ) ; <nl> + <nl> + ASSERT_FLOAT_EQ ( 150 , YGNodeLayoutGetLeft ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 0 , YGNodeLayoutGetTop ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 100 , YGNodeLayoutGetWidth ( root_child0 ) ) ; <nl> + ASSERT_FLOAT_EQ ( 100 , YGNodeLayoutGetHeight ( root_child0 ) ) ; <nl> + <nl> + YGNodeFreeRecursive ( root ) ; <nl> + <nl> + YGConfigFree ( config ) ; <nl> + } <nl> mmm a / yoga / Yoga . c <nl> ppp b / yoga / Yoga . c <nl> static float YGNodeRelativePosition ( const YGNodeRef node , <nl> : - YGNodeTrailingPosition ( node , axis , axisSize ) ; <nl> } <nl> <nl> - static void YGConstrainMaxSizeForMode ( const float maxSize , YGMeasureMode * mode , float * size ) { <nl> + static void YGConstrainMaxSizeForMode ( const YGNodeRef node , <nl> + const enum YGFlexDirection axis , <nl> + const float parentAxisSize , <nl> + const float parentWidth , <nl> + YGMeasureMode * mode , <nl> + float * size ) { <nl> + const float maxSize = YGValueResolve ( & node - > style . maxDimensions [ dim [ axis ] ] , parentAxisSize ) + <nl> + YGNodeMarginForAxis ( node , axis , parentWidth ) ; <nl> switch ( * mode ) { <nl> case YGMeasureModeExactly : <nl> case YGMeasureModeAtMost : <nl> static void YGNodeComputeFlexBasisForChild ( const YGNodeRef node , <nl> } <nl> } <nl> <nl> - YGConstrainMaxSizeForMode ( YGValueResolve ( & child - > style . maxDimensions [ YGDimensionWidth ] , <nl> - parentWidth ) , <nl> - & childWidthMeasureMode , <nl> - & childWidth ) ; <nl> - YGConstrainMaxSizeForMode ( YGValueResolve ( & child - > style . 
maxDimensions [ YGDimensionHeight ] , <nl> - parentHeight ) , <nl> - & childHeightMeasureMode , <nl> - & childHeight ) ; <nl> + YGConstrainMaxSizeForMode ( <nl> + child , YGFlexDirectionRow , parentWidth , parentWidth , & childWidthMeasureMode , & childWidth ) ; <nl> + YGConstrainMaxSizeForMode ( <nl> + child , YGFlexDirectionColumn , parentHeight , parentWidth , & childHeightMeasureMode , & childHeight ) ; <nl> <nl> / / Measure the child <nl> YGLayoutNodeInternal ( child , <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> childCrossSize + = marginCross ; <nl> } <nl> <nl> - YGConstrainMaxSizeForMode ( <nl> - YGValueResolve ( & currentRelativeChild - > style . maxDimensions [ dim [ mainAxis ] ] , <nl> - availableInnerWidth ) , <nl> - & childMainMeasureMode , <nl> - & childMainSize ) ; <nl> - YGConstrainMaxSizeForMode ( <nl> - YGValueResolve ( & currentRelativeChild - > style . maxDimensions [ dim [ crossAxis ] ] , <nl> - availableInnerHeight ) , <nl> - & childCrossMeasureMode , <nl> - & childCrossSize ) ; <nl> + YGConstrainMaxSizeForMode ( currentRelativeChild , <nl> + mainAxis , <nl> + availableInnerMainDim , <nl> + availableInnerWidth , <nl> + & childMainMeasureMode , <nl> + & childMainSize ) ; <nl> + YGConstrainMaxSizeForMode ( currentRelativeChild , <nl> + crossAxis , <nl> + availableInnerCrossDim , <nl> + availableInnerWidth , <nl> + & childCrossMeasureMode , <nl> + & childCrossSize ) ; <nl> <nl> const bool requiresStretchLayout = <nl> ! YGNodeIsStyleDimDefined ( currentRelativeChild , crossAxis , availableInnerCrossDim ) & & <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> <nl> YGMeasureMode childMainMeasureMode = YGMeasureModeExactly ; <nl> YGMeasureMode childCrossMeasureMode = YGMeasureModeExactly ; <nl> - YGConstrainMaxSizeForMode ( YGValueResolve ( & child - > style . maxDimensions [ dim [ mainAxis ] ] , <nl> - availableInnerMainDim ) , <nl> + YGConstrainMaxSizeForMode ( child , <nl> + mainAxis , <nl> + availableInnerMainDim , <nl> + availableInnerWidth , <nl> & childMainMeasureMode , <nl> & childMainSize ) ; <nl> - YGConstrainMaxSizeForMode ( YGValueResolve ( & child - > style . maxDimensions [ dim [ crossAxis ] ] , <nl> - availableInnerCrossDim ) , <nl> + YGConstrainMaxSizeForMode ( child , <nl> + crossAxis , <nl> + availableInnerCrossDim , <nl> + availableInnerWidth , <nl> & childCrossMeasureMode , <nl> & childCrossSize ) ; <nl> <nl>
Take margin into account on max dimension
facebook/yoga
09f0c2d8ce375fe95322055fcdb2361bbb5c11e1
2017-03-09T11:56:00Z
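The layout bug: the old YGConstrainMaxSizeForMode clamped a child size that already includes margin against the raw max-width/max-height, so a max-height:100 child with margin-top:20 was squeezed to 80 points of content. The new overload resolves the max against the parent axis and adds YGNodeMarginForAxis back in before clamping, which is exactly what the generated tests pin down. The arithmetic in miniature (hypothetical helper, not Yoga's API):

    #include <algorithm>
    #include <cassert>

    // `sizeWithMargin` is the child's measured size including its margin,
    // so the cap must include the margin too, or it eats into the content box.
    float constrainToMax(float sizeWithMargin, float maxContentSize,
                         float margin) {
      return std::min(sizeWithMargin, maxContentSize + margin);
    }

    int main() {
      // max-height:100 child with margin-top:20 measures 120 overall.
      float before = std::min(120.0f, 100.0f);               // old clamp: 100 overall -> 80 content
      float after  = constrainToMax(120.0f, 100.0f, 20.0f);  // new clamp: 120 overall -> 100 content
      assert(before == 100.0f && after == 120.0f);
    }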
mmm a / examples / python / helloworld / greeter_client_with_options . py <nl> ppp b / examples / python / helloworld / greeter_client_with_options . py <nl> def run ( ) : <nl> ( ' grpc . enable_retries ' , 0 ) , <nl> ( ' grpc . keepalive_timeout_ms ' , 10000 ) ] ) as channel : <nl> stub = helloworld_pb2_grpc . GreeterStub ( channel ) <nl> - # timeout in second <nl> - response = stub . SayHello ( helloworld_pb2 . HelloRequest ( name = ' you ' ) , timeout = 1 ) <nl> + # Timeout in seconds . <nl> + response = stub . SayHello ( helloworld_pb2 . HelloRequest ( name = ' you ' ) , timeout = 10 ) <nl> print ( " Greeter client received : " + response . message ) <nl> <nl> <nl>
Update timeout: 1 -> 10
grpc/grpc
0f9733eb85403417b5c749557d8c17a42d773962
2018-10-23T04:45:25Z
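The Python example's timeout is a per-call deadline in seconds and is unrelated to the grpc.keepalive_timeout_ms channel option; the edit raises it from 1 to 10 and fixes the comment's grammar. For comparison, the equivalent per-call deadline in the C++ API is set on the ClientContext — a sketch assuming the stock helloworld protos generated by the gRPC examples:

    #include <chrono>
    #include <memory>
    #include <string>
    #include <grpcpp/grpcpp.h>
    #include "helloworld.grpc.pb.h"  // assumed: generated example stubs

    std::string sayHello(std::shared_ptr<grpc::Channel> channel) {
      auto stub = helloworld::Greeter::NewStub(channel);
      grpc::ClientContext context;
      // Per-call deadline, equivalent to `timeout=10` in the Python example.
      context.set_deadline(std::chrono::system_clock::now() +
                           std::chrono::seconds(10));
      helloworld::HelloRequest request;
      request.set_name("you");
      helloworld::HelloReply reply;
      grpc::Status status = stub->SayHello(&context, request, &reply);
      return status.ok() ? reply.message() : status.error_message();
    }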
mmm a / hphp / runtime / vm / translator / hopt / hhbctranslator . cpp <nl> ppp b / hphp / runtime / vm / translator / hopt / hhbctranslator . cpp <nl> void HhbcTranslator : : emitPackCont ( int64_t labelId ) { <nl> gen ( UnlinkContVarEnv , m_tb - > getFp ( ) ) ; <nl> gen ( AssertLoc , Type : : Obj , LocalId ( 0 ) , m_tb - > getFp ( ) ) ; <nl> auto const cont = m_tb - > genLdLoc ( 0 ) ; <nl> - m_tb - > genSetPropCell ( cont , CONTOFF ( m_value ) , popC ( ) ) ; <nl> + auto const newVal = popC ( ) ; <nl> + auto const oldValue = gen ( LdProp , Type : : Cell , cont , cns ( CONTOFF ( m_value ) ) ) ; <nl> + gen ( StProp , cont , cns ( CONTOFF ( m_value ) ) , newVal ) ; <nl> + gen ( DecRef , oldValue ) ; <nl> gen ( <nl> StRaw , cont , cns ( RawMemSlot : : ContLabel ) , cns ( labelId ) <nl> ) ; <nl> void HhbcTranslator : : emitContRetC ( ) { <nl> gen ( <nl> StRaw , cont , cns ( RawMemSlot : : ContDone ) , cns ( true ) <nl> ) ; <nl> - m_tb - > genSetPropCell ( cont , CONTOFF ( m_value ) , popC ( ) ) ; <nl> + auto const newVal = popC ( ) ; <nl> + auto const oldVal = gen ( LdProp , Type : : Cell , cont , cns ( CONTOFF ( m_value ) ) ) ; <nl> + gen ( StProp , cont , cns ( CONTOFF ( m_value ) ) , newVal ) ; <nl> + gen ( DecRef , oldVal ) ; <nl> <nl> / / transfer control <nl> emitContExitImpl ( ) ; <nl> void HhbcTranslator : : emitContNext ( ) { <nl> assert ( getCurClass ( ) ) ; <nl> SSATmp * cont = gen ( LdThis , m_tb - > getFp ( ) ) ; <nl> gen ( ContPreNext , getExitSlowTrace ( ) , cont ) ; <nl> - m_tb - > genSetPropCell ( cont , CONTOFF ( m_received ) , m_tb - > genDefInitNull ( ) ) ; <nl> + <nl> + auto const oldVal = gen ( LdProp , Type : : Cell , cont , cns ( CONTOFF ( m_received ) ) ) ; <nl> + gen ( StProp , cont , cns ( CONTOFF ( m_received ) ) , m_tb - > genDefInitNull ( ) ) ; <nl> + gen ( DecRef , oldVal ) ; <nl> } <nl> <nl> void HhbcTranslator : : emitContSendImpl ( bool raise ) { <nl> void HhbcTranslator : : emitContSendImpl ( bool raise ) { <nl> gen ( ContPreNext , getExitSlowTrace ( ) , cont ) ; <nl> <nl> gen ( AssertLoc , Type : : Cell , LocalId ( 0 ) , m_tb - > getFp ( ) ) ; <nl> - auto const value = gen ( IncRef , m_tb - > genLdLoc ( 0 ) ) ; <nl> - m_tb - > genSetPropCell ( cont , CONTOFF ( m_received ) , value ) ; <nl> + auto const newVal = gen ( IncRef , m_tb - > genLdLoc ( 0 ) ) ; <nl> + auto const oldVal = gen ( LdProp , Type : : Cell , cont , cns ( CONTOFF ( m_received ) ) ) ; <nl> + gen ( StProp , cont , cns ( CONTOFF ( m_received ) ) , newVal ) ; <nl> + gen ( DecRef , oldVal ) ; <nl> if ( raise ) { <nl> gen ( <nl> StRaw , cont , cns ( RawMemSlot : : ContShouldThrow ) , cns ( true ) <nl> void HhbcTranslator : : emitCmp ( Opcode opc ) { <nl> / / src2 opc src1 <nl> SSATmp * src1 = popC ( ) ; <nl> SSATmp * src2 = popC ( ) ; <nl> - push ( m_tb - > genCmp ( opc , src2 , src1 ) ) ; <nl> + push ( gen ( opc , src2 , src1 ) ) ; <nl> gen ( DecRef , src2 ) ; <nl> gen ( DecRef , src1 ) ; <nl> } <nl> void HhbcTranslator : : emitFCall ( uint32_t numParams , <nl> } <nl> <nl> void HhbcTranslator : : emitFCallBuiltin ( uint32_t numArgs , <nl> - uint32_t numNonDefault , int32_t funcId ) { <nl> + uint32_t numNonDefault , <nl> + int32_t funcId ) { <nl> const NamedEntityPair & nep = lookupNamedEntityPairId ( funcId ) ; <nl> const StringData * name = nep . first ; <nl> const Func * callee = Unit : : lookupFunc ( nep . second , name ) ; <nl> void HhbcTranslator : : emitFCallBuiltin ( uint32_t numArgs , <nl> <nl> / / spill args to stack . We need to spill these for two resons : <nl> / / 1 . 
some of the arguments may be passed by reference , for which <nl> - / / case we will generate LdStackAddr ( ) ( see below ) . <nl> + / / case we will pass a stack address . <nl> / / 2 . type conversions of the arguments ( using tvCast * helpers ) <nl> / / may throw an exception , so we need to have the VM stack <nl> / / in a clean state at that point . <nl> exceptionBarrier ( ) ; <nl> - / / Convert types if needed <nl> + <nl> + / / Convert types if needed . <nl> for ( int i = 0 ; i < numNonDefault ; i + + ) { <nl> const Func : : ParamInfo & pi = callee - > params ( ) [ i ] ; <nl> switch ( pi . builtinType ( ) ) { <nl> void HhbcTranslator : : emitFCallBuiltin ( uint32_t numArgs , <nl> } <nl> } <nl> <nl> - / / pass arguments for call <nl> + / / Pass arguments for CallBuiltin . <nl> SSATmp * args [ numArgs + 1 ] ; <nl> - <nl> + args [ 0 ] = cns ( callee ) ; <nl> for ( int i = numArgs - 1 ; i > = 0 ; i - - ) { <nl> const Func : : ParamInfo & pi = callee - > params ( ) [ i ] ; <nl> switch ( pi . builtinType ( ) ) { <nl> case KindOfBoolean : <nl> case KindOfInt64 : <nl> - args [ i ] = top ( Type : : fromDataType ( pi . builtinType ( ) , KindOfInvalid ) , <nl> - numArgs - i - 1 ) ; <nl> + args [ i + 1 ] = top ( Type : : fromDataType ( pi . builtinType ( ) , KindOfInvalid ) , <nl> + numArgs - i - 1 ) ; <nl> break ; <nl> case KindOfDouble : assert ( false ) ; <nl> default : <nl> - args [ i ] = loadStackAddr ( numArgs - i - 1 ) ; <nl> + args [ i + 1 ] = loadStackAddr ( numArgs - i - 1 ) ; <nl> break ; <nl> } <nl> } <nl> - / / generate call and set return type <nl> - SSATmp * func = cns ( callee ) ; <nl> - Type type = Type : : fromDataTypeWithRef ( callee - > returnType ( ) , <nl> - ( callee - > attrs ( ) & ClassInfo : : IsReference ) ) ; <nl> - SSATmp * ret = m_tb - > genCallBuiltin ( func , type , numArgs , args ) ; <nl> <nl> - / / decref and free args <nl> + / / Generate call and set return type <nl> + SSATmp * * decayedPtr = args ; <nl> + auto const ret = gen ( <nl> + CallBuiltin , <nl> + Type : : fromDataTypeWithRef ( callee - > returnType ( ) , <nl> + ( callee - > attrs ( ) & ClassInfo : : IsReference ) ) , <nl> + std : : make_pair ( numArgs + 1 , decayedPtr ) <nl> + ) ; <nl> + <nl> + / / Decref and free args <nl> for ( int i = 0 ; i < numArgs ; i + + ) { <nl> - SSATmp * arg = popR ( ) ; <nl> + auto const arg = popR ( ) ; <nl> if ( i > = numArgs - numNonDefault ) { <nl> gen ( DecRef , arg ) ; <nl> } <nl> } <nl> <nl> - / / push return value <nl> push ( ret ) ; <nl> } <nl> <nl> void HhbcTranslator : : emitRetFromInlined ( Type type ) { <nl> emitMarker ( ) ; <nl> } <nl> <nl> - / * <nl> - * In case retVal comes from a local , the logic below tweaks the code <nl> - * so that retVal is DecRef ' d and the corresponding local ' s SSATmp is <nl> - * returned . This enables the ref - count optimization to eliminate the <nl> - * IncRef / DecRef pair in the main trace . <nl> - * / <nl> SSATmp * HhbcTranslator : : emitDecRefLocalsInline ( SSATmp * retVal ) { <nl> SSATmp * retValSrcLoc = nullptr ; <nl> Opcode retValSrcOpc = Nop ; / / Nop flags the ref - count opt is impossible <nl> IRInstruction * retValSrcInstr = retVal - > inst ( ) ; <nl> + <nl> + / * <nl> + * In case retVal comes from a local , the logic below tweaks the code <nl> + * so that retVal is DecRef ' d and the corresponding local ' s SSATmp is <nl> + * returned . This enables the ref - count optimization to eliminate the <nl> + * IncRef / DecRef pair in the main trace . 
<nl> + * / <nl> if ( retValSrcInstr - > op ( ) = = IncRef ) { <nl> retValSrcLoc = retValSrcInstr - > getSrc ( 0 ) ; <nl> retValSrcOpc = retValSrcLoc - > inst ( ) - > op ( ) ; <nl> SSATmp * HhbcTranslator : : emitDecRefLocalsInline ( SSATmp * retVal ) { <nl> <nl> if ( mayHaveThis ( getCurFunc ( ) ) ) { <nl> if ( retValSrcLoc & & retValSrcOpc = = LdThis ) { <nl> + / / Note that this doesn ' t need to be DecRefThis or <nl> + / / DecRefKillThis because we ' re carefully setting things up to <nl> + / / get turned to DecRefNZ . This means even if a <nl> + / / debug_backtrace ( ) occurs it can ' t see a stale $ this on the <nl> + / / ActRec . <nl> gen ( DecRef , retVal ) ; <nl> } else { <nl> - m_tb - > genDecRefThis ( ) ; <nl> + gen ( DecRefThis , m_tb - > getFp ( ) ) ; <nl> } <nl> } <nl> <nl> void HhbcTranslator : : emitRet ( Type type , bool freeInline ) { <nl> sp = gen ( RetAdjustStack , m_tb - > getFp ( ) ) ; <nl> } else { <nl> if ( mayHaveThis ( curFunc ) ) { <nl> - m_tb - > genDecRefThis ( ) ; <nl> + gen ( DecRefThis , m_tb - > getFp ( ) ) ; <nl> } <nl> sp = gen ( <nl> GenericRetDecRefs , m_tb - > getFp ( ) , retVal , cns ( curFunc - > numLocals ( ) ) <nl> mmm a / hphp / runtime / vm / translator / hopt / hhbctranslator . h <nl> ppp b / hphp / runtime / vm / translator / hopt / hhbctranslator . h <nl> struct HhbcTranslator { <nl> <nl> Kind getKind ( ) const { return m_kind ; } <nl> uint32_t getIndex ( ) const { return m_index ; } <nl> - Type getType ( ) const { return m_type ; } <nl> <nl> private : <nl> Kind m_kind ; <nl> mmm a / hphp / runtime / vm / translator / hopt / simplifier . cpp <nl> ppp b / hphp / runtime / vm / translator / hopt / simplifier . cpp <nl> SSATmp * Simplifier : : simplifyNot ( SSATmp * src ) { <nl> / / <nl> / / TODO ( # 2058865 ) : This would make more sense with a real Not <nl> / / instruction and allowing boolean output types for query ops . <nl> - return m_tb - > genCmp ( negateQueryOp ( op ) , <nl> - inst - > getSrc ( 0 ) , <nl> - inst - > getSrc ( 1 ) ) ; <nl> + return gen ( negateQueryOp ( op ) , inst - > getSrc ( 0 ) , inst - > getSrc ( 1 ) ) ; <nl> case InstanceOf : <nl> case NInstanceOf : <nl> case InstanceOfBitmask : <nl> SSATmp * Simplifier : : simplifyCmp ( Opcode opName , SSATmp * src1 , SSATmp * src2 ) { <nl> } <nl> / / Type is neither a string nor an object - simplify to OpEq / OpNeq <nl> if ( opName = = OpSame ) { <nl> - return m_tb - > genCmp ( OpEq , src1 , src2 ) ; <nl> + return gen ( OpEq , src1 , src2 ) ; <nl> } <nl> - return m_tb - > genCmp ( OpNeq , src1 , src2 ) ; <nl> + return gen ( OpNeq , src1 , src2 ) ; <nl> } <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> SSATmp * Simplifier : : simplifyCmp ( Opcode opName , SSATmp * src1 , SSATmp * src2 ) { <nl> / / E . g . ` some - int > false ` is equivalent to ` some - int = = true ` <nl> if ( opName ! = OpEq ) { <nl> if ( cmpOp ( opName , false , b ) ) { <nl> - return m_tb - > genCmp ( OpEq , src1 , cns ( false ) ) ; <nl> + return gen ( OpEq , src1 , cns ( false ) ) ; <nl> } else { <nl> - return m_tb - > genCmp ( OpEq , src1 , cns ( true ) ) ; <nl> + return gen ( OpEq , src1 , cns ( true ) ) ; <nl> } <nl> } <nl> } <nl> SSATmp * Simplifier : : simplifyCmp ( Opcode opName , SSATmp * src1 , SSATmp * src2 ) { <nl> if ( src1 - > type ( ) = = src2 - > type ( ) | | <nl> ( src1 - > type ( ) . isString ( ) & & src2 - > type ( ) . isString ( ) ) ) { <nl> if ( src1 - > isConst ( ) & & ! 
src2 - > isConst ( ) ) { <nl> - return m_tb - > genCmp ( commuteQueryOp ( opName ) , src2 , src1 ) ; <nl> + return gen ( commuteQueryOp ( opName ) , src2 , src1 ) ; <nl> } <nl> return nullptr ; <nl> } <nl> SSATmp * Simplifier : : simplifyCmp ( Opcode opName , SSATmp * src1 , SSATmp * src2 ) { <nl> <nl> / / nulls get canonicalized to the right <nl> if ( src1 - > type ( ) . isNull ( ) ) { <nl> - return m_tb - > genCmp ( commuteQueryOp ( opName ) , src2 , src1 ) ; <nl> + return gen ( commuteQueryOp ( opName ) , src2 , src1 ) ; <nl> } <nl> <nl> / / case 1 : null cmp string . Convert null to " " <nl> if ( src1 - > type ( ) . isString ( ) & & src2 - > type ( ) . isNull ( ) ) { <nl> - return m_tb - > genCmp ( opName , src1 , <nl> - cns ( StringData : : GetStaticString ( " " ) ) ) ; <nl> + return gen ( opName , src1 , cns ( StringData : : GetStaticString ( " " ) ) ) ; <nl> } <nl> <nl> / / case 2a : null cmp anything . Convert null to false <nl> if ( src2 - > type ( ) . isNull ( ) ) { <nl> - return m_tb - > genCmp ( opName , src1 , cns ( false ) ) ; <nl> + return gen ( opName , src1 , cns ( false ) ) ; <nl> } <nl> <nl> / / bools get canonicalized to the right <nl> if ( src1 - > type ( ) = = Type : : Bool ) { <nl> - return m_tb - > genCmp ( commuteQueryOp ( opName ) , src2 , src1 ) ; <nl> + return gen ( commuteQueryOp ( opName ) , src2 , src1 ) ; <nl> } <nl> <nl> / / case 2b : bool cmp anything . Convert anything to bool <nl> if ( src2 - > type ( ) = = Type : : Bool ) { <nl> if ( src1 - > isConst ( ) ) { <nl> if ( src1 - > type ( ) = = Type : : Int ) { <nl> - return m_tb - > genCmp ( opName , cns ( bool ( src1 - > getValInt ( ) ) ) , src2 ) ; <nl> + return gen ( opName , cns ( bool ( src1 - > getValInt ( ) ) ) , src2 ) ; <nl> } else if ( src1 - > type ( ) . isString ( ) ) { <nl> auto str = src1 - > getValStr ( ) ; <nl> - return m_tb - > genCmp ( opName , cns ( str - > toBoolean ( ) ) , src2 ) ; <nl> + return gen ( opName , cns ( str - > toBoolean ( ) ) , src2 ) ; <nl> } <nl> } <nl> <nl> SSATmp * Simplifier : : simplifyCmp ( Opcode opName , SSATmp * src1 , SSATmp * src2 ) { <nl> always_assert ( opName = = OpEq ) ; <nl> <nl> if ( src2 - > getValBool ( ) ) { <nl> - return m_tb - > genCmp ( OpNeq , src1 , cns ( 0 ) ) ; <nl> + return gen ( OpNeq , src1 , cns ( 0 ) ) ; <nl> } else { <nl> - return m_tb - > genCmp ( OpEq , src1 , cns ( 0 ) ) ; <nl> + return gen ( OpEq , src1 , cns ( 0 ) ) ; <nl> } <nl> } <nl> <nl> / / Nothing fancy to do - perform juggling as normal . <nl> - return m_tb - > genCmp ( opName , m_tb - > genConvToBool ( src1 ) , src2 ) ; <nl> + return gen ( opName , m_tb - > genConvToBool ( src1 ) , src2 ) ; <nl> } <nl> <nl> / / From here on , we must be careful of how Type : : Obj gets dealt with , <nl> SSATmp * Simplifier : : simplifyCmp ( Opcode opName , SSATmp * src1 , SSATmp * src2 ) { <nl> <nl> / / strings get canonicalized to the left <nl> if ( src2 - > type ( ) . isString ( ) ) { <nl> - return m_tb - > genCmp ( commuteQueryOp ( opName ) , src2 , src1 ) ; <nl> + return gen ( commuteQueryOp ( opName ) , src2 , src1 ) ; <nl> } <nl> <nl> / / ints get canonicalized to the right <nl> if ( src1 - > type ( ) = = Type : : Int ) { <nl> - return m_tb - > genCmp ( commuteQueryOp ( opName ) , src2 , src1 ) ; <nl> + return gen ( commuteQueryOp ( opName ) , src2 , src1 ) ; <nl> } <nl> <nl> / / case 4 : number / string / resource cmp . 
Convert to number ( int OR double ) <nl> SSATmp * Simplifier : : simplifyCmp ( Opcode opName , SSATmp * src1 , SSATmp * src2 ) { <nl> int64_t si ; double sd ; <nl> auto st = str - > isNumericWithVal ( si , sd , true / * allow errors * / ) ; <nl> if ( st = = KindOfDouble ) { <nl> - return m_tb - > genCmp ( opName , cns ( sd ) , src2 ) ; <nl> + return gen ( opName , cns ( sd ) , src2 ) ; <nl> } <nl> if ( st = = KindOfNull ) { <nl> si = 0 ; <nl> } <nl> - return m_tb - > genCmp ( opName , cns ( si ) , src2 ) ; <nl> + return gen ( opName , cns ( si ) , src2 ) ; <nl> } <nl> <nl> / / case 5 : array cmp array . No juggling to do <nl> mmm a / hphp / runtime / vm / translator / hopt / tracebuilder . cpp <nl> ppp b / hphp / runtime / vm / translator / hopt / tracebuilder . cpp <nl> TraceBuilder : : ~ TraceBuilder ( ) { <nl> for ( State * state : m_snapshots ) delete state ; <nl> } <nl> <nl> - void TraceBuilder : : genSetPropCell ( SSATmp * base , int64_t offset , SSATmp * value ) { <nl> - SSATmp * oldVal = gen ( LdProp , Type : : Cell , base , cns ( offset ) ) ; <nl> - gen ( StProp , base , cns ( offset ) , value ) ; <nl> - gen ( DecRef , oldVal ) ; <nl> - } <nl> - <nl> / * * <nl> * Checks if the given SSATmp , or any of its aliases , is available in <nl> * any VM location , including locals and the This pointer . <nl> SSATmp * TraceBuilder : : genConvToBool ( SSATmp * src ) { <nl> } <nl> } <nl> <nl> - SSATmp * TraceBuilder : : genCmp ( Opcode opc , SSATmp * src1 , SSATmp * src2 ) { <nl> - return gen ( opc , src1 , src2 ) ; <nl> - } <nl> - <nl> SSATmp * TraceBuilder : : genBoxLoc ( uint32_t id ) { <nl> SSATmp * prevValue = genLdLoc ( id ) ; <nl> Type prevType = prevValue - > type ( ) ; <nl> SSATmp * TraceBuilder : : genLdStackAddr ( SSATmp * sp , int64_t index ) { <nl> return gen ( LdStackAddr , type . ptr ( ) , sp , cns ( index ) ) ; <nl> } <nl> <nl> - SSATmp * TraceBuilder : : genCallBuiltin ( SSATmp * func , <nl> - Type type , <nl> - uint32_t numArgs , <nl> - SSATmp * * args ) { <nl> - SSATmp * srcs [ numArgs + 1 ] ; <nl> - srcs [ 0 ] = func ; <nl> - std : : copy ( args , args + numArgs , srcs + 1 ) ; <nl> - SSATmp * * decayedPtr = srcs ; <nl> - return gen ( CallBuiltin , type , std : : make_pair ( numArgs + 1 , decayedPtr ) ) ; <nl> - } <nl> - <nl> void TraceBuilder : : genDecRefStack ( Type type , uint32_t stackOff ) { <nl> bool spansCall = false ; <nl> Type knownType = Type : : None ; <nl> void TraceBuilder : : genDecRefStack ( Type type , uint32_t stackOff ) { <nl> } <nl> } <nl> <nl> - void TraceBuilder : : genDecRefThis ( ) { <nl> - if ( isThisAvailable ( ) ) { <nl> - auto const thiss = gen ( LdThis , m_fpValue ) ; <nl> - auto const thisInst = thiss - > inst ( ) ; <nl> - <nl> - if ( thisInst - > op ( ) = = IncRef & & <nl> - callerLocalHasValue ( thisInst - > getSrc ( 0 ) ) ) { <nl> - gen ( DecRefNZ , thiss ) ; <nl> - return ; <nl> - } <nl> - <nl> - / / It ' s a shame to keep a reference to the frame just to kill the <nl> - / / this pointer . This is handled in optimizeActRecs . 
<nl> - gen ( DecRefKillThis , thiss , m_fpValue ) ; <nl> - return ; <nl> - } <nl> - <nl> - gen ( DecRefThis , m_fpValue ) ; <nl> - } <nl> - <nl> SSATmp * TraceBuilder : : genSpillStack ( uint32_t stackAdjustment , <nl> uint32_t numOpnds , <nl> SSATmp * * spillOpnds ) { <nl> SSATmp * TraceBuilder : : preOptimizeDecRef ( IRInstruction * inst ) { <nl> return nullptr ; <nl> } <nl> <nl> + SSATmp * TraceBuilder : : preOptimizeDecRefThis ( IRInstruction * inst ) { <nl> + / * <nl> + * If $ this is available , convert to an instruction sequence that <nl> + * doesn ' t need to test if it ' s already live . <nl> + * / <nl> + if ( isThisAvailable ( ) ) { <nl> + auto const thiss = gen ( LdThis , m_fpValue ) ; <nl> + auto const thisInst = thiss - > inst ( ) ; <nl> + <nl> + / * <nl> + * DecRef optimization for $ this in an inlined frame : if a caller <nl> + * local contains the $ this , we know it can ' t go to zero and can <nl> + * switch DecRef to DecRefNZ . <nl> + * <nl> + * It ' s ok not to do DecRefThis ( which normally nulls out the ActRec <nl> + * $ this ) , because there is still a reference to it in the caller <nl> + * frame , so debug_backtrace ( ) can ' t see a non - live pointer value . <nl> + * / <nl> + if ( thisInst - > op ( ) = = IncRef & & <nl> + callerLocalHasValue ( thisInst - > getSrc ( 0 ) ) ) { <nl> + gen ( DecRefNZ , thiss ) ; <nl> + inst - > convertToNop ( ) ; <nl> + return nullptr ; <nl> + } <nl> + <nl> + / / If we ' re in an inlined callee , it ' s a shame to keep a reference <nl> + / / to the frame just to kill the $ this pointer . But this is <nl> + / / handled in optimizeActRecs . <nl> + assert ( inst - > getSrc ( 0 ) = = m_fpValue ) ; <nl> + gen ( DecRefKillThis , thiss , m_fpValue ) ; <nl> + inst - > convertToNop ( ) ; <nl> + return nullptr ; <nl> + } <nl> + <nl> + return nullptr ; <nl> + } <nl> + <nl> SSATmp * TraceBuilder : : preOptimize ( IRInstruction * inst ) { <nl> # define X ( op ) case op : return preOptimize # # op ( inst ) <nl> switch ( inst - > op ( ) ) { <nl> SSATmp * TraceBuilder : : preOptimize ( IRInstruction * inst ) { <nl> X ( LdThis ) ; <nl> X ( LdCtx ) ; <nl> X ( DecRef ) ; <nl> + X ( DecRefThis ) ; <nl> default : <nl> break ; <nl> } <nl> mmm a / hphp / runtime / vm / translator / hopt / tracebuilder . h <nl> ppp b / hphp / runtime / vm / translator / hopt / tracebuilder . 
h <nl> struct TraceBuilder { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / dubious <nl> <nl> - void genSetPropCell ( SSATmp * base , int64_t offset , SSATmp * value ) ; <nl> - <nl> / / TODO ( # 2058865 ) : we should have a real not opcode <nl> SSATmp * genNot ( SSATmp * src ) ; <nl> <nl> - SSATmp * genCmp ( Opcode opc , SSATmp * src1 , SSATmp * src2 ) ; <nl> SSATmp * genCastStk ( uint32_t id , Type type ) ; <nl> SSATmp * genConvToBool ( SSATmp * src ) ; <nl> - SSATmp * genCallBuiltin ( SSATmp * func , Type type , <nl> - uint32_t numArgs , SSATmp * * args ) ; <nl> - void genDecRefThis ( ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / control flow <nl> struct TraceBuilder { <nl> SSATmp * preOptimizeLdThis ( IRInstruction * ) ; <nl> SSATmp * preOptimizeLdCtx ( IRInstruction * ) ; <nl> SSATmp * preOptimizeDecRef ( IRInstruction * ) ; <nl> + SSATmp * preOptimizeDecRefThis ( IRInstruction * ) ; <nl> <nl> SSATmp * preOptimize ( IRInstruction * inst ) ; <nl> SSATmp * optimizeWork ( IRInstruction * inst ) ; <nl>
Remove some easy remaining TraceBuilder::genFoo functions
facebook/hhvm
c3619ecd7bbb47a45eefaa99733f912028871c5c
2013-05-02T04:00:46Z
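The hhvm commit above inlines TraceBuilder::genSetPropCell into an explicit LdProp / StProp / DecRef sequence at each call site. The ordering matters: the old property value is loaded and the new one stored before the old one is DecRef'd, so the old value stays alive until the new value is in place. A minimal standalone C++ sketch of that store-then-decref pattern (illustrative names and a toy refcount, not HHVM's actual runtime):

#include <cassert>

// Toy refcounted value; a real runtime would free the object at zero.
struct RefCounted {
    int refcount = 1;
    void incRef() { ++refcount; }
    void decRef() { assert(refcount > 0); --refcount; }
};

// Object with one refcounted property slot, standing in for CONTOFF(m_value).
struct Object {
    RefCounted* m_value = nullptr;
};

// Equivalent of the inlined sequence: LdProp oldVal; StProp newVal; DecRef oldVal.
// DecRef comes last so the pattern is safe even when newVal aliases oldVal.
void setPropCell(Object& obj, RefCounted* newVal) {
    RefCounted* oldVal = obj.m_value;  // LdProp
    obj.m_value = newVal;              // StProp
    if (oldVal) oldVal->decRef();      // DecRef
}

int main() {
    Object obj;
    RefCounted a;
    a.incRef();            // simulate popC() handing us a reference
    setPropCell(obj, &a);  // first store: no old value to release
    RefCounted b;
    b.incRef();
    setPropCell(obj, &b);  // the old value 'a' is DecRef'd here
    return 0;
}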
new file mode 100644 <nl> index 00000000000 . . 15df92e6416 <nl> mmm / dev / null <nl> ppp b / samples / cpp / text_skewness_correction . cpp <nl> <nl> + / * <nl> + This tutorial demonstrates how to correct the skewness in a text . <nl> + The program takes as input a skewed source image and shows non skewed text . <nl> + <nl> + * / <nl> + <nl> + # include < opencv2 / core . hpp > <nl> + # include < opencv2 / imgcodecs . hpp > <nl> + # include < opencv2 / highgui . hpp > <nl> + # include < opencv2 / imgproc . hpp > <nl> + <nl> + # include < iostream > <nl> + # include < iomanip > <nl> + # include < string > <nl> + <nl> + using namespace cv ; <nl> + using namespace std ; <nl> + <nl> + <nl> + int main ( int argc , char * * argv ) <nl> + { <nl> + CommandLineParser parser ( argc , argv , " { @ input | imageTextR . png | input image } " ) ; <nl> + <nl> + / / Load image from the disk <nl> + Mat image = imread ( samples : : findFile ( parser . get < String > ( " @ input " ) ) , IMREAD_COLOR ) ; <nl> + if ( image . empty ( ) ) <nl> + { <nl> + cout < < " Cannot load the image " + parser . get < String > ( " @ input " ) < < endl ; <nl> + return - 1 ; <nl> + } <nl> + <nl> + Mat gray ; <nl> + cvtColor ( image , gray , COLOR_BGR2GRAY ) ; <nl> + <nl> + / / Threshold the image , setting all foreground pixels to 255 and all background pixels to 0 <nl> + Mat thresh ; <nl> + threshold ( gray , thresh , 0 , 255 , THRESH_BINARY_INV | THRESH_OTSU ) ; <nl> + <nl> + / / Applying erode filter to remove random noise <nl> + int erosion_size = 1 ; <nl> + Mat element = getStructuringElement ( MORPH_RECT , Size ( 2 * erosion_size + 1 , 2 * erosion_size + 1 ) , Point ( erosion_size , erosion_size ) ) ; <nl> + erode ( thresh , thresh , element ) ; <nl> + <nl> + cv : : Mat coords ; <nl> + findNonZero ( thresh , coords ) ; <nl> + <nl> + RotatedRect box = minAreaRect ( coords ) ; <nl> + float angle = box . angle ; <nl> + <nl> + / / The cv : : minAreaRect function returns values in the range [ - 90 , 0 ) <nl> + / / if the angle is less than - 45 we need to add 90 to it <nl> + if ( angle < - 45 . 0f ) <nl> + { <nl> + angle = ( 90 . 0f + angle ) ; <nl> + } <nl> + <nl> + / / Obtaining the rotation matrix <nl> + Point2f center ( ( image . cols ) / 2 . 0f , ( image . rows ) / 2 . 0f ) ; <nl> + Mat M = getRotationMatrix2D ( center , angle , 1 . 0f ) ; <nl> + Mat rotated ; <nl> + <nl> + / / Rotating the image by required angle <nl> + stringstream angle_to_str ; <nl> + angle_to_str < < fixed < < setprecision ( 2 ) < < angle ; <nl> + warpAffine ( image , rotated , M , image . size ( ) , INTER_CUBIC , BORDER_REPLICATE ) ; <nl> + putText ( rotated , " Angle " + angle_to_str . str ( ) + " degrees " , Point ( 10 , 30 ) , FONT_HERSHEY_SIMPLEX , 0 . 7 , Scalar ( 0 , 0 , 255 ) , 2 ) ; <nl> + cout < < " [ INFO ] angle : " < < angle_to_str . str ( ) < < endl ; <nl> + <nl> + / / Show the image <nl> + imshow ( " Input " , image ) ; <nl> + imshow ( " Rotated " , rotated ) ; <nl> + waitKey ( 0 ) ; <nl> + return 0 ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . c8ee33b39d6 <nl> mmm / dev / null <nl> ppp b / samples / python / text_skewness_correction . py <nl> <nl> + ' ' ' <nl> + Text skewness correction <nl> + This tutorial demonstrates how to correct the skewness in a text . <nl> + The program takes as input a skewed source image and shows non skewed text . <nl> + <nl> + Usage : <nl> + python text_skewness_correction . 
py - - image " Image path " <nl> + ' ' ' <nl> + <nl> + import numpy as np <nl> + import cv2 as cv <nl> + import sys <nl> + import argparse <nl> + <nl> + <nl> + def main ( ) : <nl> + parser = argparse . ArgumentParser ( ) <nl> + parser . add_argument ( " - i " , " - - image " , required = True , help = " path to input image file " ) <nl> + args = vars ( parser . parse_args ( ) ) <nl> + <nl> + # load the image from disk <nl> + image = cv . imread ( cv . samples . findFile ( args [ " image " ] ) ) <nl> + if image is None : <nl> + print ( " can ' t read image " + args [ " image " ] ) <nl> + sys . exit ( - 1 ) <nl> + gray = cv . cvtColor ( image , cv . COLOR_BGR2GRAY ) <nl> + <nl> + # threshold the image , setting all foreground pixels to <nl> + # 255 and all background pixels to 0 <nl> + thresh = cv . threshold ( gray , 0 , 255 , cv . THRESH_BINARY_INV | cv . THRESH_OTSU ) [ 1 ] <nl> + <nl> + # Applying erode filter to remove random noise <nl> + erosion_size = 1 <nl> + element = cv . getStructuringElement ( cv . MORPH_RECT , ( 2 * erosion_size + 1 , 2 * erosion_size + 1 ) , ( erosion_size , erosion_size ) ) <nl> + thresh = cv . erode ( thresh , element ) <nl> + <nl> + coords = cv . findNonZero ( thresh ) <nl> + angle = cv . minAreaRect ( coords ) [ - 1 ] <nl> + # the ` cv . minAreaRect ` function returns values in the <nl> + # range [ - 90 , 0 ) if the angle is less than - 45 we need to add 90 to it <nl> + if angle < - 45 : <nl> + angle = ( 90 + angle ) <nl> + <nl> + ( h , w ) = image . shape [ : 2 ] <nl> + center = ( w / / 2 , h / / 2 ) <nl> + M = cv . getRotationMatrix2D ( center , angle , 1 . 0 ) <nl> + rotated = cv . warpAffine ( image , M , ( w , h ) , flags = cv . INTER_CUBIC , borderMode = cv . BORDER_REPLICATE ) <nl> + cv . putText ( rotated , " Angle : { : . 2f } degrees " . format ( angle ) , ( 10 , 30 ) , cv . FONT_HERSHEY_SIMPLEX , 0 . 7 , ( 0 , 0 , 255 ) , 2 ) <nl> + <nl> + # show the output image <nl> + print ( " [ INFO ] angle : { : . 2f } " . format ( angle ) ) <nl> + cv . imshow ( " Input " , image ) <nl> + cv . imshow ( " Rotated " , rotated ) <nl> + cv . waitKey ( 0 ) <nl> + <nl> + <nl> + if __name__ = = " __main__ " : <nl> + main ( ) <nl>
Merge pull request from themechanicalcoder:tutorial_1
opencv/opencv
043b9fbb3148305a6366cad1fb7f61174afa1a42
2020-01-24T21:26:18Z
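The OpenCV samples above hinge on one convention: in the OpenCV version this tutorial targets, cv::minAreaRect reports angles in [-90, 0), so a text block tilted +30 degrees comes back as -60 and must be shifted by 90 before rotating. A tiny self-contained check of that normalization rule (the helper name is ours, not part of the sample):

#include <iostream>

// Maps minAreaRect's [-90, 0) angle to the rotation the deskew step needs.
static float normalizeDeskewAngle(float angle) {
    return (angle < -45.0f) ? angle + 90.0f : angle;
}

int main() {
    std::cout << normalizeDeskewAngle(-60.0f) << "\n";  // prints 30
    std::cout << normalizeDeskewAngle(-10.0f) << "\n";  // prints -10
    return 0;
}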
mmm a / include / spdlog / sinks / base_sink . h <nl> ppp b / include / spdlog / sinks / base_sink . h <nl> <nl> <nl> # pragma once <nl> / / <nl> - / / base sink templated over a mutex ( either dummy or realy ) <nl> - / / concrete implementation should only overrid the _sink_it method . <nl> + / / base sink templated over a mutex ( either dummy or real ) <nl> + / / concrete implementation should only override the _sink_it method . <nl> / / all locking is taken care of here so no locking needed by the implementers . . <nl> / / <nl> <nl>
Merge pull request from odeits/patch-5
gabime/spdlog
0c89beaa581888eaefc7b218521df1991991b153
2017-04-07T07:39:33Z
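The spdlog comment being corrected describes the sink design itself: base_sink is templated over a mutex type (a real std::mutex or a no-op dummy), takes the lock once in the public entry point, and leaves derived sinks to implement only the unlocked _sink_it hook. A simplified sketch of that pattern (not spdlog's actual class, which carries more machinery):

#include <iostream>
#include <mutex>
#include <string>

// "Dummy" mutex for single-threaded sinks: satisfies the locking calls at zero cost.
struct NullMutex {
    void lock() {}
    void unlock() {}
};

template <class Mutex>
class base_sink {
public:
    virtual ~base_sink() = default;
    void log(const std::string& msg) {
        std::lock_guard<Mutex> lock(mutex_);  // all locking is taken care of here
        _sink_it(msg);
    }
protected:
    virtual void _sink_it(const std::string& msg) = 0;  // the only override point
private:
    Mutex mutex_;
};

// The _mt variant locks a real mutex; the _st variant pays nothing for locking.
class stdout_sink_mt : public base_sink<std::mutex> {
protected:
    void _sink_it(const std::string& msg) override { std::cout << msg << "\n"; }
};

class stdout_sink_st : public base_sink<NullMutex> {
protected:
    void _sink_it(const std::string& msg) override { std::cout << msg << "\n"; }
};

int main() {
    stdout_sink_mt mt;
    stdout_sink_st st;
    mt.log("locked write");
    st.log("lock-free write");
    return 0;
}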
mmm a / src / addrman . h <nl> ppp b / src / addrman . h <nl> class CAddrInfo : public CAddress <nl> <nl> template < typename Stream , typename Operation > <nl> inline void SerializationOp ( Stream & s , Operation ser_action ) { <nl> - READWRITE ( * static_cast < CAddress * > ( this ) ) ; <nl> + READWRITEAS ( CAddress , * this ) ; <nl> READWRITE ( source ) ; <nl> READWRITE ( nLastSuccess ) ; <nl> READWRITE ( nAttempts ) ; <nl> mmm a / src / primitives / block . h <nl> ppp b / src / primitives / block . h <nl> class CBlock : public CBlockHeader <nl> <nl> template < typename Stream , typename Operation > <nl> inline void SerializationOp ( Stream & s , Operation ser_action ) { <nl> - READWRITE ( * static_cast < CBlockHeader * > ( this ) ) ; <nl> + READWRITEAS ( CBlockHeader , * this ) ; <nl> READWRITE ( vtx ) ; <nl> } <nl> <nl> mmm a / src / protocol . h <nl> ppp b / src / protocol . h <nl> class CAddress : public CService <nl> uint64_t nServicesInt = nServices ; <nl> READWRITE ( nServicesInt ) ; <nl> nServices = static_cast < ServiceFlags > ( nServicesInt ) ; <nl> - READWRITE ( * static_cast < CService * > ( this ) ) ; <nl> + READWRITEAS ( CService , * this ) ; <nl> } <nl> <nl> / / TODO : make private ( improves encapsulation ) <nl> mmm a / src / script / script . h <nl> ppp b / src / script / script . h <nl> class CScript : public CScriptBase <nl> <nl> template < typename Stream , typename Operation > <nl> inline void SerializationOp ( Stream & s , Operation ser_action ) { <nl> - READWRITE ( static_cast < CScriptBase & > ( * this ) ) ; <nl> + READWRITEAS ( CScriptBase , * this ) ; <nl> } <nl> <nl> CScript & operator + = ( const CScript & b ) <nl> mmm a / src / serialize . h <nl> ppp b / src / serialize . h <nl> enum <nl> SER_GETHASH = ( 1 < < 2 ) , <nl> } ; <nl> <nl> - # define READWRITE ( . . . ) ( : : SerReadWriteMany ( s , ser_action , __VA_ARGS__ ) ) <nl> + / / ! Convert the reference base type to X , without changing constness or reference type . <nl> + template < typename X > X & ReadWriteAsHelper ( X & x ) { return x ; } <nl> + template < typename X > const X & ReadWriteAsHelper ( const X & x ) { return x ; } <nl> + <nl> + # define READWRITE ( . . . ) ( : : SerReadWriteMany ( s , ser_action , __VA_ARGS__ ) ) <nl> + # define READWRITEAS ( type , obj ) ( : : SerReadWriteMany ( s , ser_action , ReadWriteAsHelper < type > ( obj ) ) ) <nl> <nl> / * * <nl> * Implement three methods for serializable objects . These are actually wrappers over <nl> mmm a / src / txdb . h <nl> ppp b / src / txdb . h <nl> struct CDiskTxPos : public CDiskBlockPos <nl> <nl> template < typename Stream , typename Operation > <nl> inline void SerializationOp ( Stream & s , Operation ser_action ) { <nl> - READWRITE ( * static_cast < CDiskBlockPos * > ( this ) ) ; <nl> + READWRITEAS ( CDiskBlockPos , * this ) ; <nl> READWRITE ( VARINT ( nTxOffset ) ) ; <nl> } <nl> <nl> mmm a / src / wallet / wallet . h <nl> ppp b / src / wallet / wallet . h <nl> class CWalletTx : public CMerkleTx <nl> mapValueCopy [ " timesmart " ] = strprintf ( " % u " , nTimeSmart ) ; <nl> } <nl> <nl> - s < < * static_cast < const CMerkleTx * > ( this ) ; <nl> + s < < static_cast < const CMerkleTx & > ( * this ) ; <nl> std : : vector < CMerkleTx > vUnused ; / / ! 
< Used to be vtxPrev <nl> s < < vUnused < < mapValueCopy < < vOrderForm < < fTimeReceivedIsTxTime < < nTimeReceived < < fFromMe < < fSpent ; <nl> } <nl> class CWalletTx : public CMerkleTx <nl> Init ( nullptr ) ; <nl> char fSpent ; <nl> <nl> - s > > * static_cast < CMerkleTx * > ( this ) ; <nl> + s > > static_cast < CMerkleTx & > ( * this ) ; <nl> std : : vector < CMerkleTx > vUnused ; / / ! < Used to be vtxPrev <nl> s > > vUnused > > mapValue > > vOrderForm > > fTimeReceivedIsTxTime > > nTimeReceived > > fFromMe > > fSpent ; <nl> <nl>
Merge: Support serialization as another type without casting
bitcoin/bitcoin
0a8054e7cd5c76d01e4ac7234e3883d05f6f5fdd
2018-04-10T18:54:33Z
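The bitcoin commit swaps READWRITE(*static_cast<Base*>(this)) for READWRITEAS(Base, *this), whose ReadWriteAsHelper picks a base-class reference with the caller's constness, so the same macro works in both the const Serialize path and the non-const Unserialize path. A standalone sketch of that overload trick outside the serialization framework (process() stands in for SerReadWriteMany):

#include <iostream>

// Convert the reference to base type X without changing constness (as in the commit).
template <typename X> X& ReadWriteAsHelper(X& x) { return x; }
template <typename X> const X& ReadWriteAsHelper(const X& x) { return x; }

struct Base { int n = 7; };
struct Derived : Base { int extra = 0; };

void process(Base& b) { b.n += 1; std::cout << "mutable base, n=" << b.n << "\n"; }
void process(const Base& b) { std::cout << "const base, n=" << b.n << "\n"; }

int main() {
    Derived d;
    const Derived cd;
    process(ReadWriteAsHelper<Base>(d));   // binds Base&: the "unserialize" case
    process(ReadWriteAsHelper<Base>(cd));  // binds const Base&: the "serialize" case
    return 0;
}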
mmm a / src / compiler / heap - refs . h <nl> ppp b / src / compiler / heap - refs . h <nl> enum class OddballType : uint8_t { <nl> V ( FeedbackVector ) \ <nl> V ( FixedArrayBase ) \ <nl> V ( FunctionTemplateInfo ) \ <nl> + V ( HeapNumber ) \ <nl> V ( JSReceiver ) \ <nl> V ( Map ) \ <nl> V ( Name ) \ <nl> enum class OddballType : uint8_t { <nl> / * Subtypes of Object * / \ <nl> V ( HeapObject ) <nl> <nl> - # define HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST ( V ) \ <nl> - V ( HeapNumber ) \ <nl> - V ( ScopeInfo ) <nl> + # define HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST ( V ) V ( ScopeInfo ) <nl> <nl> class CompilationDependencies ; <nl> struct FeedbackSource ; <nl> mmm a / src / compiler / js - heap - broker . cc <nl> ppp b / src / compiler / js - heap - broker . cc <nl> class JSRegExpData : public JSObjectData { <nl> ObjectData * last_index_ = nullptr ; <nl> } ; <nl> <nl> + class HeapNumberData : public HeapObjectData { <nl> + public : <nl> + HeapNumberData ( JSHeapBroker * broker , ObjectData * * storage , <nl> + Handle < HeapNumber > object ) <nl> + : HeapObjectData ( broker , storage , object ) , value_ ( object - > value ( ) ) { } <nl> + <nl> + double value ( ) const { return value_ ; } <nl> + <nl> + private : <nl> + double const value_ ; <nl> + } ; <nl> + <nl> class ContextData : public HeapObjectData { <nl> public : <nl> ContextData ( JSHeapBroker * broker , ObjectData * * storage , <nl> base : : Optional < ObjectRef > JSArrayRef : : GetOwnCowElement ( <nl> <nl> double HeapNumberRef : : value ( ) const { <nl> IF_ACCESS_FROM_HEAP_C ( HeapNumber , value ) ; <nl> - UNREACHABLE ( ) ; <nl> + return data ( ) - > AsHeapNumber ( ) - > value ( ) ; <nl> } <nl> <nl> uint64_t BigIntRef : : AsUint64 ( ) const { <nl>
Revert " [ compiler ] Replace HeapNumberData with direct reads "
v8/v8
f7a4c311723712d4bb7ec92050c4f067eaa6f9f0
2020-08-19T12:12:33Z
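The v8 revert moves HeapNumber back onto the serialized-object list: HeapNumberData copies the double out of the heap object at construction, on the main thread, so HeapNumberRef::value() can fall back to that snapshot instead of hitting UNREACHABLE() when a direct heap read is not available. An illustrative sketch of the snapshot-at-serialization pattern (toy types, not V8's broker):

#include <iostream>

// Stand-in for a heap object that a background thread must not read directly.
struct HeapNumber {
    double raw;
    double value() const { return raw; }
};

// Broker-side data: copies the value once, while the heap is known to be stable.
class HeapNumberData {
public:
    explicit HeapNumberData(const HeapNumber& object) : value_(object.value()) {}
    double value() const { return value_; }  // safe off-thread: plain immutable copy
private:
    double const value_;
};

int main() {
    HeapNumber heap{3.14};
    HeapNumberData data(heap);          // serialize: snapshot the value
    heap.raw = 2.71;                    // later heap mutation...
    std::cout << data.value() << "\n";  // ...doesn't change the snapshot: 3.14
    return 0;
}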
mmm a / dbms / src / Common / ErrorCodes . cpp <nl> ppp b / dbms / src / Common / ErrorCodes . cpp <nl> namespace ErrorCodes <nl> extern const int UNABLE_TO_SKIP_UNUSED_SHARDS = 507 ; <nl> extern const int UNKNOWN_ACCESS_TYPE = 508 ; <nl> extern const int INVALID_GRANT = 509 ; <nl> + extern const int CACHE_DICTIONARY_UPDATE_FAIL = 510 ; <nl> <nl> extern const int KEEPER_EXCEPTION = 999 ; <nl> extern const int POCO_EXCEPTION = 1000 ; <nl> mmm a / dbms / src / Dictionaries / CacheDictionary . cpp <nl> ppp b / dbms / src / Dictionaries / CacheDictionary . cpp <nl> <nl> # include < Common / typeid_cast . h > <nl> # include < ext / range . h > <nl> # include < ext / size . h > <nl> + # include < Common / setThreadName . h > <nl> # include " CacheDictionary . inc . h " <nl> # include " DictionaryBlockInputStream . h " <nl> # include " DictionaryFactory . h " <nl> CacheDictionary : : CacheDictionary ( <nl> const std : : string & name_ , <nl> const DictionaryStructure & dict_struct_ , <nl> DictionarySourcePtr source_ptr_ , <nl> - const DictionaryLifetime dict_lifetime_ , <nl> - const size_t size_ ) <nl> + DictionaryLifetime dict_lifetime_ , <nl> + size_t size_ , <nl> + bool allow_read_expired_keys_ , <nl> + size_t max_update_queue_size_ , <nl> + size_t update_queue_push_timeout_milliseconds_ , <nl> + size_t max_threads_for_updates_ ) <nl> : database ( database_ ) <nl> , name ( name_ ) <nl> , full_name { database_ . empty ( ) ? name_ : ( database_ + " . " + name_ ) } <nl> , dict_struct ( dict_struct_ ) <nl> , source_ptr { std : : move ( source_ptr_ ) } <nl> , dict_lifetime ( dict_lifetime_ ) <nl> + , allow_read_expired_keys ( allow_read_expired_keys_ ) <nl> + , max_update_queue_size ( max_update_queue_size_ ) <nl> + , update_queue_push_timeout_milliseconds ( update_queue_push_timeout_milliseconds_ ) <nl> + , max_threads_for_updates ( max_threads_for_updates_ ) <nl> , log ( & Logger : : get ( " ExternalDictionaries " ) ) <nl> , size { roundUpToPowerOfTwoOrZero ( std : : max ( size_ , size_t ( max_collision_length ) ) ) } <nl> , size_overlap_mask { this - > size - 1 } <nl> , cells { this - > size } <nl> , rnd_engine ( randomSeed ( ) ) <nl> + , update_queue ( max_update_queue_size_ ) <nl> + , update_pool ( max_threads_for_updates ) <nl> { <nl> if ( ! this - > source_ptr - > supportsSelectiveLoad ( ) ) <nl> throw Exception { full_name + " : source cannot be used with CacheDictionary " , ErrorCodes : : UNSUPPORTED_METHOD } ; <nl> <nl> createAttributes ( ) ; <nl> + for ( size_t i = 0 ; i < max_threads_for_updates ; + + i ) <nl> + update_pool . scheduleOrThrowOnError ( [ this ] { updateThreadFunction ( ) ; } ) ; <nl> + } <nl> + <nl> + CacheDictionary : : ~ CacheDictionary ( ) <nl> + { <nl> + finished = true ; <nl> + update_queue . clear ( ) ; <nl> + for ( size_t i = 0 ; i < max_threads_for_updates ; + + i ) <nl> + { <nl> + auto empty_finishing_ptr = std : : make_shared < UpdateUnit > ( std : : vector < Key > ( ) ) ; <nl> + update_queue . push ( empty_finishing_ptr ) ; <nl> + } <nl> + update_pool . wait ( ) ; <nl> } <nl> <nl> <nl> CacheDictionary : : FindResult CacheDictionary : : findCellIdx ( const Key & id , const C <nl> <nl> void CacheDictionary : : has ( const PaddedPODArray < Key > & ids , PaddedPODArray < UInt8 > & out ) const <nl> { <nl> + / / / There are three types of ids . <nl> + / / / - Valid ids . These ids are presented in local cache and their lifetime is not expired . <nl> + / / / - CacheExpired ids . Ids that are in local cache , but their values are rotted ( lifetime is expired ) . 
<nl> + / / / - CacheNotFound ids . We have to go to external storage to know its value . <nl> + <nl> / / / Mapping : < id > - > { all indices ` i ` of ` ids ` such that ` ids [ i ] ` = < id > } <nl> - std : : unordered_map < Key , std : : vector < size_t > > outdated_ids ; <nl> + std : : unordered_map < Key , std : : vector < size_t > > cache_expired_ids ; <nl> + std : : unordered_map < Key , std : : vector < size_t > > cache_not_found_ids ; <nl> <nl> - size_t cache_expired = 0 , cache_not_found = 0 , cache_hit = 0 ; <nl> + size_t cache_hit = 0 ; <nl> <nl> const auto rows = ext : : size ( ids ) ; <nl> { <nl> void CacheDictionary : : has ( const PaddedPODArray < Key > & ids , PaddedPODArray < UInt8 > <nl> const auto id = ids [ row ] ; <nl> const auto find_result = findCellIdx ( id , now ) ; <nl> const auto & cell_idx = find_result . cell_idx ; <nl> + <nl> + auto insert_to_answer_routine = [ & ] ( ) <nl> + { <nl> + out [ row ] = ! cells [ cell_idx ] . isDefault ( ) ; <nl> + } ; <nl> + <nl> if ( ! find_result . valid ) <nl> { <nl> - outdated_ids [ id ] . push_back ( row ) ; <nl> if ( find_result . outdated ) <nl> - + + cache_expired ; <nl> + { <nl> + cache_expired_ids [ id ] . push_back ( row ) ; <nl> + <nl> + if ( allow_read_expired_keys ) <nl> + insert_to_answer_routine ( ) ; <nl> + } <nl> else <nl> - + + cache_not_found ; <nl> + { <nl> + cache_not_found_ids [ id ] . push_back ( row ) ; <nl> + } <nl> } <nl> else <nl> { <nl> + + cache_hit ; <nl> - const auto & cell = cells [ cell_idx ] ; <nl> - out [ row ] = ! cell . isDefault ( ) ; <nl> + insert_to_answer_routine ( ) ; <nl> } <nl> } <nl> } <nl> <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysExpired , cache_expired ) ; <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysNotFound , cache_not_found ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysExpired , cache_expired_ids . size ( ) ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysNotFound , cache_not_found_ids . size ( ) ) ; <nl> ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysHit , cache_hit ) ; <nl> <nl> query_count . fetch_add ( rows , std : : memory_order_relaxed ) ; <nl> - hit_count . fetch_add ( rows - outdated_ids . size ( ) , std : : memory_order_release ) ; <nl> + hit_count . fetch_add ( rows - cache_expired_ids . size ( ) - cache_not_found_ids . size ( ) , std : : memory_order_release ) ; <nl> <nl> - if ( outdated_ids . empty ( ) ) <nl> - return ; <nl> - <nl> - std : : vector < Key > required_ids ( outdated_ids . size ( ) ) ; <nl> - std : : transform ( std : : begin ( outdated_ids ) , std : : end ( outdated_ids ) , std : : begin ( required_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> + if ( cache_not_found_ids . empty ( ) ) <nl> + { <nl> + / / / Nothing to update - return ; <nl> + if ( cache_expired_ids . empty ( ) ) <nl> + return ; <nl> <nl> - / / / request new values <nl> - update ( <nl> - required_ids , <nl> - [ & ] ( const auto id , const auto ) <nl> + if ( allow_read_expired_keys ) <nl> { <nl> - for ( const auto row : outdated_ids [ id ] ) <nl> - out [ row ] = true ; <nl> - } , <nl> - [ & ] ( const auto id , const auto ) <nl> - { <nl> - for ( const auto row : outdated_ids [ id ] ) <nl> - out [ row ] = false ; <nl> - } ) ; <nl> + std : : vector < Key > required_expired_ids ; <nl> + required_expired_ids . reserve ( cache_expired_ids . 
size ( ) ) ; <nl> + std : : transform ( <nl> + std : : begin ( cache_expired_ids ) , std : : end ( cache_expired_ids ) , <nl> + std : : back_inserter ( required_expired_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> + <nl> + / / / Callbacks are empty because we don ' t want to receive them after an unknown period of time . <nl> + auto update_unit_ptr = std : : make_shared < UpdateUnit > ( required_expired_ids ) ; <nl> + <nl> + tryPushToUpdateQueueOrThrow ( update_unit_ptr ) ; <nl> + / / / Update is async - no need to wait . <nl> + return ; <nl> + } <nl> + } <nl> + <nl> + / / / At this point we have two situations . <nl> + / / / There may be both types of keys : cache_expired_ids and cache_not_found_ids . <nl> + / / / We will update them all synchronously . <nl> + <nl> + std : : vector < Key > required_ids ; <nl> + required_ids . reserve ( cache_not_found_ids . size ( ) + cache_expired_ids . size ( ) ) ; <nl> + std : : transform ( <nl> + std : : begin ( cache_not_found_ids ) , std : : end ( cache_not_found_ids ) , <nl> + std : : back_inserter ( required_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> + std : : transform ( <nl> + std : : begin ( cache_expired_ids ) , std : : end ( cache_expired_ids ) , <nl> + std : : back_inserter ( required_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> + <nl> + auto on_cell_updated = [ & ] ( const Key id , const size_t ) <nl> + { <nl> + for ( const auto row : cache_not_found_ids [ id ] ) <nl> + out [ row ] = true ; <nl> + for ( const auto row : cache_expired_ids [ id ] ) <nl> + out [ row ] = true ; <nl> + } ; <nl> + <nl> + auto on_id_not_found = [ & ] ( const Key id , const size_t ) <nl> + { <nl> + for ( const auto row : cache_not_found_ids [ id ] ) <nl> + out [ row ] = false ; <nl> + for ( const auto row : cache_expired_ids [ id ] ) <nl> + out [ row ] = true ; <nl> + } ; <nl> + <nl> + auto update_unit_ptr = std : : make_shared < UpdateUnit > ( required_ids , on_cell_updated , on_id_not_found ) ; <nl> + <nl> + tryPushToUpdateQueueOrThrow ( update_unit_ptr ) ; <nl> + waitForCurrentUpdateFinish ( update_unit_ptr ) ; <nl> } <nl> <nl> <nl> void registerDictionaryCache ( DictionaryFactory & factory ) <nl> DictionarySourcePtr source_ptr ) - > DictionaryPtr <nl> { <nl> if ( dict_struct . key ) <nl> - throw Exception { " ' key ' is not supported for dictionary of layout ' cache ' " , ErrorCodes : : UNSUPPORTED_METHOD } ; <nl> + throw Exception { " ' key ' is not supported for dictionary of layout ' cache ' " , <nl> + ErrorCodes : : UNSUPPORTED_METHOD } ; <nl> <nl> if ( dict_struct . range_min | | dict_struct . range_max ) <nl> throw Exception { full_name <nl> void registerDictionaryCache ( DictionaryFactory & factory ) <nl> " for a dictionary of layout ' range_hashed ' " , <nl> ErrorCodes : : BAD_ARGUMENTS } ; <nl> const auto & layout_prefix = config_prefix + " . layout " ; <nl> - const auto size = config . getInt ( layout_prefix + " . cache . size_in_cells " ) ; <nl> + <nl> + const size_t size = config . getUInt64 ( layout_prefix + " . cache . size_in_cells " ) ; <nl> if ( size = = 0 ) <nl> - throw Exception { full_name + " : dictionary of layout ' cache ' cannot have 0 cells " , ErrorCodes : : TOO_SMALL_BUFFER_SIZE } ; <nl> + throw Exception { full_name + " : dictionary of layout ' cache ' cannot have 0 cells " , <nl> + ErrorCodes : : TOO_SMALL_BUFFER_SIZE } ; <nl> <nl> const bool require_nonempty = config . getBool ( config_prefix + " . 
require_nonempty " , false ) ; <nl> if ( require_nonempty ) <nl> void registerDictionaryCache ( DictionaryFactory & factory ) <nl> const String database = config . getString ( config_prefix + " . database " , " " ) ; <nl> const String name = config . getString ( config_prefix + " . name " ) ; <nl> const DictionaryLifetime dict_lifetime { config , config_prefix + " . lifetime " } ; <nl> - return std : : make_unique < CacheDictionary > ( database , name , dict_struct , std : : move ( source_ptr ) , dict_lifetime , size ) ; <nl> + <nl> + const size_t max_update_queue_size = <nl> + config . getUInt64 ( layout_prefix + " . cache . max_update_queue_size " , 100000 ) ; <nl> + if ( max_update_queue_size = = 0 ) <nl> + throw Exception { name + " : dictionary of layout ' cache ' cannot have empty update queue of size 0 " , <nl> + ErrorCodes : : TOO_SMALL_BUFFER_SIZE } ; <nl> + <nl> + const bool allow_read_expired_keys = <nl> + config . getBool ( layout_prefix + " . cache . allow_read_expired_keys " , false ) ; <nl> + <nl> + const size_t update_queue_push_timeout_milliseconds = <nl> + config . getUInt64 ( layout_prefix + " . cache . update_queue_push_timeout_milliseconds " , 10 ) ; <nl> + if ( update_queue_push_timeout_milliseconds < 10 ) <nl> + throw Exception { name + " : dictionary of layout ' cache ' have too little update_queue_push_timeout " , <nl> + ErrorCodes : : BAD_ARGUMENTS } ; <nl> + <nl> + const size_t max_threads_for_updates = <nl> + config . getUInt64 ( layout_prefix + " . max_threads_for_updates " , 4 ) ; <nl> + if ( max_threads_for_updates = = 0 ) <nl> + throw Exception { name + " : dictionary of layout ' cache ' cannot have zero threads for updates . " , <nl> + ErrorCodes : : BAD_ARGUMENTS } ; <nl> + <nl> + return std : : make_unique < CacheDictionary > ( <nl> + database , name , dict_struct , std : : move ( source_ptr ) , dict_lifetime , size , <nl> + allow_read_expired_keys , max_update_queue_size , update_queue_push_timeout_milliseconds , <nl> + max_threads_for_updates ) ; <nl> } ; <nl> factory . registerLayout ( " cache " , create_layout , false ) ; <nl> } <nl> <nl> + void CacheDictionary : : updateThreadFunction ( ) <nl> + { <nl> + setThreadName ( " AsyncUpdater " ) ; <nl> + while ( ! finished ) <nl> + { <nl> + UpdateUnitPtr first_popped ; <nl> + update_queue . pop ( first_popped ) ; <nl> + <nl> + if ( finished ) <nl> + break ; <nl> + <nl> + / / / Here we pop as many unit pointers from update queue as we can . <nl> + / / / We fix current size to avoid livelock ( or too long waiting ) , <nl> + / / / when this thread pops from the queue and other threads push to the queue . <nl> + const size_t current_queue_size = update_queue . size ( ) ; <nl> + <nl> + if ( current_queue_size > 0 ) <nl> + LOG_TRACE ( log , " Performing bunch of keys update in cache dictionary with " <nl> + < < current_queue_size + 1 < < " keys " ) ; <nl> + <nl> + std : : vector < UpdateUnitPtr > update_request ; <nl> + update_request . reserve ( current_queue_size + 1 ) ; <nl> + update_request . emplace_back ( first_popped ) ; <nl> + <nl> + UpdateUnitPtr current_unit_ptr ; <nl> + <nl> + while ( update_request . size ( ) & & update_queue . tryPop ( current_unit_ptr ) ) <nl> + update_request . emplace_back ( std : : move ( current_unit_ptr ) ) ; <nl> + <nl> + BunchUpdateUnit bunch_update_unit ( update_request ) ; <nl> + <nl> + try <nl> + { <nl> + / / / Update a bunch of ids . 
<nl> + update ( bunch_update_unit ) ; <nl> + <nl> + / / / Notify all threads about finished updating the bunch of ids <nl> + / / / where their own ids were included . <nl> + std : : unique_lock < std : : mutex > lock ( update_mutex ) ; <nl> + <nl> + for ( auto & unit_ptr : update_request ) <nl> + unit_ptr - > is_done = true ; <nl> + <nl> + is_update_finished . notify_all ( ) ; <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + std : : unique_lock < std : : mutex > lock ( update_mutex ) ; <nl> + / / / It is a big trouble , because one bad query can make other threads fail with not relative exception . <nl> + / / / So at this point all threads ( and queries ) will receive the same exception . <nl> + for ( auto & unit_ptr : update_request ) <nl> + unit_ptr - > current_exception = std : : current_exception ( ) ; <nl> + <nl> + is_update_finished . notify_all ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void CacheDictionary : : waitForCurrentUpdateFinish ( UpdateUnitPtr & update_unit_ptr ) const <nl> + { <nl> + std : : unique_lock < std : : mutex > lock ( update_mutex ) ; <nl> + <nl> + / * <nl> + * We wait here without any timeout to avoid SEGFAULT ' s . <nl> + * Consider timeout for wait had expired and main query ' s thread ended with exception <nl> + * or some other error . But the UpdateUnit with callbacks is left in the queue . <nl> + * It has these callback that capture god knows what from the current thread <nl> + * ( most of the variables lies on the stack of finished thread ) that <nl> + * intended to do a synchronous update . AsyncUpdate thread can touch deallocated memory and explode . <nl> + * * / <nl> + is_update_finished . wait ( <nl> + lock , <nl> + [ & ] { return update_unit_ptr - > is_done | | update_unit_ptr - > current_exception ; } ) ; <nl> + <nl> + if ( update_unit_ptr - > current_exception ) <nl> + std : : rethrow_exception ( update_unit_ptr - > current_exception ) ; <nl> + } <nl> + <nl> + void CacheDictionary : : tryPushToUpdateQueueOrThrow ( UpdateUnitPtr & update_unit_ptr ) const <nl> + { <nl> + if ( ! update_queue . tryPush ( update_unit_ptr , update_queue_push_timeout_milliseconds ) ) <nl> + throw DB : : Exception ( <nl> + " Cannot push to internal update queue in dictionary " + getFullName ( ) + " . Timelimit of " + <nl> + std : : to_string ( update_queue_push_timeout_milliseconds ) + " ms . exceeded . Current queue size is " + <nl> + std : : to_string ( update_queue . size ( ) ) , ErrorCodes : : CACHE_DICTIONARY_UPDATE_FAIL ) ; <nl> + } <nl> + <nl> + void CacheDictionary : : update ( BunchUpdateUnit & bunch_update_unit ) const <nl> + { <nl> + CurrentMetrics : : Increment metric_increment { CurrentMetrics : : DictCacheRequests } ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysRequested , bunch_update_unit . getRequestedIds ( ) . size ( ) ) ; <nl> + <nl> + std : : unordered_map < Key , UInt8 > remaining_ids { bunch_update_unit . getRequestedIds ( ) . size ( ) } ; <nl> + for ( const auto id : bunch_update_unit . getRequestedIds ( ) ) <nl> + remaining_ids . insert ( { id , 0 } ) ; <nl> + <nl> + const auto now = std : : chrono : : system_clock : : now ( ) ; <nl> + <nl> + if ( now > backoff_end_time . load ( ) ) <nl> + { <nl> + try <nl> + { <nl> + if ( error_count ) <nl> + { <nl> + / / / Recover after error : we have to clone the source here because <nl> + / / / it could keep connections which should be reset after error . 
<nl> + source_ptr = source_ptr - > clone ( ) ; <nl> + } <nl> + <nl> + Stopwatch watch ; <nl> + auto stream = source_ptr - > loadIds ( bunch_update_unit . getRequestedIds ( ) ) ; <nl> + <nl> + const ProfilingScopedWriteRWLock write_lock { rw_lock , ProfileEvents : : DictCacheLockWriteNs } ; <nl> + <nl> + stream - > readPrefix ( ) ; <nl> + while ( const auto block = stream - > read ( ) ) <nl> + { <nl> + const auto id_column = typeid_cast < const ColumnUInt64 * > ( block . safeGetByPosition ( 0 ) . column . get ( ) ) ; <nl> + if ( ! id_column ) <nl> + throw Exception { name + " : id column has type different from UInt64 . " , ErrorCodes : : TYPE_MISMATCH } ; <nl> + <nl> + const auto & ids = id_column - > getData ( ) ; <nl> + <nl> + / / / cache column pointers <nl> + const auto column_ptrs = ext : : map < std : : vector > ( <nl> + ext : : range ( 0 , attributes . size ( ) ) , [ & block ] ( size_t i ) { return block . safeGetByPosition ( i + 1 ) . column . get ( ) ; } ) ; <nl> + <nl> + for ( const auto i : ext : : range ( 0 , ids . size ( ) ) ) <nl> + { <nl> + const auto id = ids [ i ] ; <nl> + <nl> + const auto find_result = findCellIdx ( id , now ) ; <nl> + const auto & cell_idx = find_result . cell_idx ; <nl> + <nl> + auto & cell = cells [ cell_idx ] ; <nl> + <nl> + for ( const auto attribute_idx : ext : : range ( 0 , attributes . size ( ) ) ) <nl> + { <nl> + const auto & attribute_column = * column_ptrs [ attribute_idx ] ; <nl> + auto & attribute = attributes [ attribute_idx ] ; <nl> + <nl> + setAttributeValue ( attribute , cell_idx , attribute_column [ i ] ) ; <nl> + } <nl> + <nl> + / / / if cell id is zero and zero does not map to this cell , then the cell is unused <nl> + if ( cell . id = = 0 & & cell_idx ! = zero_cell_idx ) <nl> + element_count . fetch_add ( 1 , std : : memory_order_relaxed ) ; <nl> + <nl> + cell . id = id ; <nl> + if ( dict_lifetime . min_sec ! = 0 & & dict_lifetime . max_sec ! = 0 ) <nl> + { <nl> + std : : uniform_int_distribution < UInt64 > distribution { dict_lifetime . min_sec , dict_lifetime . max_sec } ; <nl> + cell . setExpiresAt ( now + std : : chrono : : seconds { distribution ( rnd_engine ) } ) ; <nl> + } <nl> + else <nl> + cell . setExpiresAt ( std : : chrono : : time_point < std : : chrono : : system_clock > : : max ( ) ) ; <nl> + <nl> + <nl> + bunch_update_unit . informCallersAboutPresentId ( id , cell_idx ) ; <nl> + / / / mark corresponding id as found <nl> + remaining_ids [ id ] = 1 ; <nl> + } <nl> + } <nl> + <nl> + stream - > readSuffix ( ) ; <nl> + <nl> + error_count = 0 ; <nl> + last_exception = std : : exception_ptr { } ; <nl> + backoff_end_time = std : : chrono : : system_clock : : time_point { } ; <nl> + <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheRequestTimeNs , watch . elapsed ( ) ) ; <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + + + error_count ; <nl> + last_exception = std : : current_exception ( ) ; <nl> + backoff_end_time = now + std : : chrono : : seconds ( calculateDurationWithBackoff ( rnd_engine , error_count ) ) ; <nl> + <nl> + tryLogException ( last_exception , log , " Could not update cache dictionary ' " + getFullName ( ) + <nl> + " ' , next update is scheduled at " + ext : : to_string ( backoff_end_time . 
load ( ) ) ) ; <nl> + } <nl> + } <nl> + <nl> + size_t not_found_num = 0 , found_num = 0 ; <nl> + <nl> + const ProfilingScopedWriteRWLock write_lock { rw_lock , ProfileEvents : : DictCacheLockWriteNs } ; <nl> + <nl> + / / / Check which ids have not been found and require setting null_value <nl> + for ( const auto & id_found_pair : remaining_ids ) <nl> + { <nl> + if ( id_found_pair . second ) <nl> + { <nl> + + + found_num ; <nl> + continue ; <nl> + } <nl> + + + not_found_num ; <nl> + <nl> + const auto id = id_found_pair . first ; <nl> + <nl> + const auto find_result = findCellIdx ( id , now ) ; <nl> + const auto & cell_idx = find_result . cell_idx ; <nl> + auto & cell = cells [ cell_idx ] ; <nl> + <nl> + if ( error_count ) <nl> + { <nl> + if ( find_result . outdated ) <nl> + { <nl> + / / / We have expired data for that ` id ` so we can continue using it . <nl> + bool was_default = cell . isDefault ( ) ; <nl> + cell . setExpiresAt ( backoff_end_time ) ; <nl> + if ( was_default ) <nl> + cell . setDefault ( ) ; <nl> + if ( was_default ) <nl> + bunch_update_unit . informCallersAboutAbsentId ( id , cell_idx ) ; <nl> + else <nl> + bunch_update_unit . informCallersAboutPresentId ( id , cell_idx ) ; <nl> + continue ; <nl> + } <nl> + / / / We don ' t have expired data for that ` id ` so all we can do is to rethrow ` last_exception ` . <nl> + std : : rethrow_exception ( last_exception ) ; <nl> + } <nl> + <nl> + / / / Check if cell had not been occupied before and increment element counter if it hadn ' t <nl> + if ( cell . id = = 0 & & cell_idx ! = zero_cell_idx ) <nl> + element_count . fetch_add ( 1 , std : : memory_order_relaxed ) ; <nl> + <nl> + cell . id = id ; <nl> + <nl> + if ( dict_lifetime . min_sec ! = 0 & & dict_lifetime . max_sec ! = 0 ) <nl> + { <nl> + std : : uniform_int_distribution < UInt64 > distribution { dict_lifetime . min_sec , dict_lifetime . max_sec } ; <nl> + cell . setExpiresAt ( now + std : : chrono : : seconds { distribution ( rnd_engine ) } ) ; <nl> + } <nl> + else <nl> + cell . setExpiresAt ( std : : chrono : : time_point < std : : chrono : : system_clock > : : max ( ) ) ; <nl> + <nl> + / / / Set null_value for each attribute <nl> + cell . setDefault ( ) ; <nl> + for ( auto & attribute : attributes ) <nl> + setDefaultAttributeValue ( attribute , cell_idx ) ; <nl> + <nl> + / / / inform caller that the cell has not been found <nl> + bunch_update_unit . informCallersAboutAbsentId ( id , cell_idx ) ; <nl> + } <nl> + <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysRequestedMiss , not_found_num ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysRequestedFound , found_num ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheRequests ) ; <nl> + } <nl> <nl> } <nl> mmm a / dbms / src / Dictionaries / CacheDictionary . h <nl> ppp b / dbms / src / Dictionaries / CacheDictionary . h <nl> <nl> # include < chrono > <nl> # include < cmath > <nl> # include < map > <nl> + # include < mutex > <nl> # include < shared_mutex > <nl> + # include < utility > <nl> # include < variant > <nl> # include < vector > <nl> # include < common / logger_useful . h > <nl> # include < Columns / ColumnDecimal . h > <nl> # include < Columns / ColumnString . h > <nl> + # include < Common / ThreadPool . h > <nl> + # include < Common / ConcurrentBoundedQueue . h > <nl> # include < pcg_random . hpp > <nl> # include < Common / ArenaWithFreeLists . h > <nl> # include < Common / CurrentMetrics . 
h > <nl> <nl> <nl> namespace DB <nl> { <nl> + <nl> + namespace ErrorCodes <nl> + { <nl> + extern const int CACHE_DICTIONARY_UPDATE_FAIL ; <nl> + } <nl> + <nl> + / * <nl> + * <nl> + * This dictionary is stored in a cache that has a fixed number of cells . <nl> + * These cells contain frequently used elements . <nl> + * When searching for a dictionary , the cache is searched first and special heuristic is used : <nl> + * while looking for the key , we take a look only at max_collision_length elements . <nl> + * So , our cache is not perfect . It has errors like " the key is in cache , but the cache says that it does not " . <nl> + * And in this case we simply ask external source for the key which is faster . <nl> + * You have to keep this logic in mind . <nl> + * * / <nl> class CacheDictionary final : public IDictionary <nl> { <nl> public : <nl> class CacheDictionary final : public IDictionary <nl> const std : : string & name_ , <nl> const DictionaryStructure & dict_struct_ , <nl> DictionarySourcePtr source_ptr_ , <nl> - const DictionaryLifetime dict_lifetime_ , <nl> - const size_t size_ ) ; <nl> + DictionaryLifetime dict_lifetime_ , <nl> + size_t size_ , <nl> + bool allow_read_expired_keys_ , <nl> + size_t max_update_queue_size_ , <nl> + size_t update_queue_push_timeout_milliseconds_ , <nl> + size_t max_threads_for_updates ) ; <nl> + <nl> + ~ CacheDictionary ( ) override ; <nl> <nl> const std : : string & getDatabase ( ) const override { return database ; } <nl> const std : : string & getName ( ) const override { return name ; } <nl> class CacheDictionary final : public IDictionary <nl> <nl> std : : shared_ptr < const IExternalLoadable > clone ( ) const override <nl> { <nl> - return std : : make_shared < CacheDictionary > ( database , name , dict_struct , source_ptr - > clone ( ) , dict_lifetime , size ) ; <nl> + return std : : make_shared < CacheDictionary > ( <nl> + database , name , dict_struct , source_ptr - > clone ( ) , dict_lifetime , size , <nl> + allow_read_expired_keys , max_update_queue_size , <nl> + update_queue_push_timeout_milliseconds , max_threads_for_updates ) ; <nl> } <nl> <nl> const IDictionarySource * getSource ( ) const override { return source_ptr . 
get ( ) ; } <nl> class CacheDictionary final : public IDictionary <nl> template < typename DefaultGetter > <nl> void getItemsString ( Attribute & attribute , const PaddedPODArray < Key > & ids , ColumnString * out , DefaultGetter & & get_default ) const ; <nl> <nl> - template < typename PresentIdHandler , typename AbsentIdHandler > <nl> - void update ( const std : : vector < Key > & requested_ids , PresentIdHandler & & on_cell_updated , AbsentIdHandler & & on_id_not_found ) const ; <nl> - <nl> PaddedPODArray < Key > getCachedIds ( ) const ; <nl> <nl> bool isEmptyCell ( const UInt64 idx ) const ; <nl> class CacheDictionary final : public IDictionary <nl> const DictionaryStructure dict_struct ; <nl> mutable DictionarySourcePtr source_ptr ; <nl> const DictionaryLifetime dict_lifetime ; <nl> + const bool allow_read_expired_keys ; <nl> + const size_t max_update_queue_size ; <nl> + const size_t update_queue_push_timeout_milliseconds ; <nl> + const size_t max_threads_for_updates ; <nl> + <nl> Logger * const log ; <nl> <nl> mutable std : : shared_mutex rw_lock ; <nl> class CacheDictionary final : public IDictionary <nl> std : : unique_ptr < ArenaWithFreeLists > string_arena ; <nl> <nl> mutable std : : exception_ptr last_exception ; <nl> - mutable size_t error_count = 0 ; <nl> - mutable std : : chrono : : system_clock : : time_point backoff_end_time ; <nl> + mutable std : : atomic < size_t > error_count = 0 ; <nl> + mutable std : : atomic < std : : chrono : : system_clock : : time_point > backoff_end_time { std : : chrono : : system_clock : : time_point { } } ; <nl> <nl> mutable pcg64 rnd_engine ; <nl> <nl> class CacheDictionary final : public IDictionary <nl> mutable std : : atomic < size_t > element_count { 0 } ; <nl> mutable std : : atomic < size_t > hit_count { 0 } ; <nl> mutable std : : atomic < size_t > query_count { 0 } ; <nl> - } ; <nl> <nl> + / / / Field and methods correlated with update expired and not found keys <nl> + <nl> + using PresentIdHandler = std : : function < void ( Key , size_t ) > ; <nl> + using AbsentIdHandler = std : : function < void ( Key , size_t ) > ; <nl> + <nl> + / * <nl> + * Disclaimer : this comment is written not for fun . <nl> + * <nl> + * How the update goes : we basically have a method like get ( keys ) - > values . Values are cached , so sometimes we <nl> + * can return them from the cache . For values not in cache , we query them from the dictionary , and add to the <nl> + * cache . The cache is lossy , so we can ' t expect it to store all the keys , and we store them separately . Normally , <nl> + * they would be passed as a return value of get ( ) , but for Unknown Reasons the dictionaries use a baroque <nl> + * interface where get ( ) accepts two callback , one that it calls for found values , and one for not found . <nl> + * <nl> + * Now we make it even uglier by doing this from multiple threads . The missing values are retreived from the <nl> + * dictionary in a background thread , and this thread calls the provided callback . So if you provide the callbacks , <nl> + * you MUST wait until the background update finishes , or god knows what happens . Unfortunately , we have no <nl> + * way to check that you did this right , so good luck . 
+ struct UpdateUnit <nl> + { <nl> + UpdateUnit ( std : : vector < Key > requested_ids_ , <nl> + PresentIdHandler present_id_handler_ , <nl> + AbsentIdHandler absent_id_handler_ ) : <nl> + requested_ids ( std : : move ( requested_ids_ ) ) , <nl> + present_id_handler ( present_id_handler_ ) , <nl> + absent_id_handler ( absent_id_handler_ ) { } <nl> + <nl> + explicit UpdateUnit ( std : : vector < Key > requested_ids_ ) : <nl> + requested_ids ( std : : move ( requested_ids_ ) ) , <nl> + present_id_handler ( [ ] ( Key , size_t ) { } ) , <nl> + absent_id_handler ( [ ] ( Key , size_t ) { } ) { } <nl> + <nl> + std : : vector < Key > requested_ids ; <nl> + PresentIdHandler present_id_handler ; <nl> + AbsentIdHandler absent_id_handler ; <nl> + <nl> + std : : atomic < bool > is_done { false } ; <nl> + std : : exception_ptr current_exception { nullptr } ; <nl> + } ; <nl> + <nl> + using UpdateUnitPtr = std : : shared_ptr < UpdateUnit > ; <nl> + using UpdateQueue = ConcurrentBoundedQueue < UpdateUnitPtr > ; <nl> + <nl> + <nl> + / * <nl> + * This class is used to concatenate requested_ids . <nl> + * <nl> + * Imagine that we have several UpdateUnits with different vectors of keys and callbacks for those keys . <nl> + * We concatenate them into a long vector of keys that looks like : <nl> + * <nl> + * a_1 . . . a_k1 b_1 . . . b_k2 c_1 . . . c_k3 , <nl> + * <nl> + * where a_1 . . . a_k1 are the requested_ids from the first UpdateUnit . <nl> + * In addition we have the same number ( three in this case ) of callbacks . <nl> + * This class helps us to find the callback ( or many callbacks ) for a specific key . <nl> + * * / <nl> + <nl> + class BunchUpdateUnit <nl> + { <nl> + public : <nl> + explicit BunchUpdateUnit ( std : : vector < UpdateUnitPtr > & update_request ) <nl> + { <nl> + / / / Here we precompute the total count of all requested ids <nl> + / / / so as not to do useless allocations later . <nl> + size_t total_requested_keys_count = 0 ; <nl> + <nl> + for ( auto & unit_ptr : update_request ) <nl> + { <nl> + total_requested_keys_count + = unit_ptr - > requested_ids . size ( ) ; <nl> + if ( helper . empty ( ) ) <nl> + helper . push_back ( unit_ptr - > requested_ids . size ( ) ) ; <nl> + else <nl> + helper . push_back ( unit_ptr - > requested_ids . size ( ) + helper . back ( ) ) ; <nl> + present_id_handlers . emplace_back ( unit_ptr - > present_id_handler ) ; <nl> + absent_id_handlers . emplace_back ( unit_ptr - > absent_id_handler ) ; <nl> + } <nl> + <nl> + concatenated_requested_ids . reserve ( total_requested_keys_count ) ; <nl> + for ( auto & unit_ptr : update_request ) <nl> + std : : for_each ( std : : begin ( unit_ptr - > requested_ids ) , std : : end ( unit_ptr - > requested_ids ) , <nl> + [ & ] ( const Key & key ) { concatenated_requested_ids . push_back ( key ) ; } ) ; <nl> + } <nl> + <nl> + const std : : vector < Key > & getRequestedIds ( ) <nl> + { <nl> + return concatenated_requested_ids ; <nl> + } <nl> + <nl> + void informCallersAboutPresentId ( Key id , size_t cell_idx ) <nl> + { <nl> + for ( size_t i = 0 ; i < concatenated_requested_ids . size ( ) ; + + i ) <nl> + { <nl> + auto & curr = concatenated_requested_ids [ i ] ; <nl> + if ( curr = = id ) <nl> + getPresentIdHandlerForPosition ( i ) ( id , cell_idx ) ; <nl> + } <nl> + } <nl> + <nl> + void informCallersAboutAbsentId ( Key id , size_t cell_idx ) <nl> + { <nl> + for ( size_t i = 0 ; i < concatenated_requested_ids . size ( ) ; + + i ) <nl> + if ( concatenated_requested_ids [ i ] = = id ) <nl> + getAbsentIdHandlerForPosition ( i ) ( id , cell_idx ) ; <nl> + } <nl> + <nl> + <nl> + private : <nl> + PresentIdHandler & getPresentIdHandlerForPosition ( size_t position ) <nl> + { <nl> + return present_id_handlers [ getUpdateUnitNumberForRequestedIdPosition ( position ) ] ; <nl> + } <nl> + <nl> + AbsentIdHandler & getAbsentIdHandlerForPosition ( size_t position ) <nl> + { <nl> + return absent_id_handlers [ getUpdateUnitNumberForRequestedIdPosition ( position ) ] ; <nl> + } <nl> + <nl> + size_t getUpdateUnitNumberForRequestedIdPosition ( size_t position ) <nl> + { <nl> + / / / upper_bound , not lower_bound : the position equal to helper [ 0 ] ( the first id of the <nl> + / / / second unit ) must map to unit 1 , not unit 0 . <nl> + return std : : upper_bound ( helper . begin ( ) , helper . end ( ) , position ) - helper . begin ( ) ; <nl> + } <nl> + <nl> + std : : vector < Key > concatenated_requested_ids ; <nl> + std : : vector < PresentIdHandler > present_id_handlers ; <nl> + std : : vector < AbsentIdHandler > absent_id_handlers ; <nl> + <nl> + std : : vector < size_t > helper ; <nl> + } ; <nl>
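The helper vector above is a running prefix sum of unit sizes, so mapping a position in the concatenated vector back to its originating unit is a single binary search. A standalone sketch of that mapping (unitNumberForPosition and the sample sizes are illustrative only), spelling out why upper_bound is the right call on the boundary positions:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// helper[i] is the total number of ids contributed by units 0..i, e.g. for
// unit sizes {3, 2, 4} helper is {3, 5, 9}. Position p belongs to the first
// unit whose cumulative count is strictly greater than p, hence upper_bound.
size_t unitNumberForPosition(const std::vector<size_t> & helper, size_t position)
{
    return std::upper_bound(helper.begin(), helper.end(), position) - helper.begin();
}

int main()
{
    const std::vector<size_t> helper{3, 5, 9}; // unit sizes 3, 2 and 4

    assert(unitNumberForPosition(helper, 0) == 0); // first id of unit 0
    assert(unitNumberForPosition(helper, 2) == 0); // last id of unit 0
    assert(unitNumberForPosition(helper, 3) == 1); // first id of unit 1: the boundary case
    assert(unitNumberForPosition(helper, 8) == 2); // last id of unit 2
    return 0;
}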
+ <nl> + mutable UpdateQueue update_queue ; <nl> + <nl> + ThreadPool update_pool ; <nl> + <nl> + / * <nl> + * Actually , we can divide all requested keys into two ' buckets ' . There are only four possible states , and they <nl> + * are described in the table . <nl> + * <nl> + * cache_not_found_ids | 0 | 0 | 1 | 1 | <nl> + * cache_expired_ids | 0 | 1 | 0 | 1 | <nl> + * <nl> + * 0 - the set is empty , 1 - otherwise <nl> + * <nl> + * Only if there are no cache_not_found_ids and there are some cache_expired_ids <nl> + * ( with the allow_read_expired_keys_from_cache_dictionary setting enabled ) can we perform an async update . <nl> + * Otherwise we have to concatenate the ids and update them synchronously . <nl> + * <nl> + * / <nl> + void updateThreadFunction ( ) ; <nl> + void update ( BunchUpdateUnit & bunch_update_unit ) const ; <nl> + <nl> + <nl> + void tryPushToUpdateQueueOrThrow ( UpdateUnitPtr & update_unit_ptr ) const ; <nl> + void waitForCurrentUpdateFinish ( UpdateUnitPtr & update_unit_ptr ) const ; <nl> + <nl> + mutable std : : mutex update_mutex ; <nl> + mutable std : : condition_variable is_update_finished ; <nl> + <nl> + std : : atomic < bool > finished { false } ; <nl> + } ; <nl> } <nl>
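updateThreadFunction and update() are only declared in this header; their definitions live in CacheDictionary.cpp, which is not part of this hunk. As a hedged outline, the consumer loop presumably drains the queue, coalesces whatever is already queued into one BunchUpdateUnit, and then wakes the waiters. The pop/tryPop calls are assumptions about the ConcurrentBoundedQueue interface, and the real definition may differ in batching limits, shutdown handling and metrics.

void CacheDictionary::updateThreadFunction()
{
    while (!finished)
    {
        UpdateUnitPtr first;
        update_queue.pop(first); // blocking pop; an assumption about the queue API
        if (finished)
            break;

        // Coalesce everything already queued into one batch so that a single
        // round-trip to the external source serves several waiting callers.
        std::vector<UpdateUnitPtr> batch{first};
        UpdateUnitPtr next;
        while (update_queue.tryPop(next)) // non-blocking pop; also an assumption
            batch.push_back(next);

        BunchUpdateUnit bunch_update_unit(batch);
        try
        {
            update(bunch_update_unit); // one query to the source for the whole batch
        }
        catch (...)
        {
            for (auto & unit : batch)
                unit->current_exception = std::current_exception();
        }

        {
            // Flag completion under the mutex so waiters cannot miss the signal.
            std::lock_guard lock(update_mutex);
            for (auto & unit : batch)
                unit->is_done = true;
        }
        is_update_finished.notify_all();
    }
}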
mmm a / dbms / src / Dictionaries / CacheDictionary . inc . h <nl> ppp b / dbms / src / Dictionaries / CacheDictionary . inc . h <nl> void CacheDictionary : : getItemsNumberImpl ( <nl> Attribute & attribute , const PaddedPODArray < Key > & ids , ResultArrayType < OutputType > & out , DefaultGetter & & get_default ) const <nl> { <nl> / / / Mapping : < id > - > { all indices ` i ` of ` ids ` such that ` ids [ i ] ` = < id > } <nl> - std : : unordered_map < Key , std : : vector < size_t > > outdated_ids ; <nl> + std : : unordered_map < Key , std : : vector < size_t > > cache_expired_ids ; <nl> + std : : unordered_map < Key , std : : vector < size_t > > cache_not_found_ids ; <nl> + <nl> auto & attribute_array = std : : get < ContainerPtrType < AttributeType > > ( attribute . arrays ) ; <nl> const auto rows = ext : : size ( ids ) ; <nl> <nl> - size_t cache_expired = 0 , cache_not_found = 0 , cache_hit = 0 ; <nl> + size_t cache_hit = 0 ; <nl> <nl> { <nl> const ProfilingScopedReadRWLock read_lock { rw_lock , ProfileEvents : : DictCacheLockReadNs } ; <nl> void CacheDictionary : : getItemsNumberImpl ( <nl> * 3 . explicit defaults were specified and cell was set default . * / <nl> <nl> const auto find_result = findCellIdx ( id , now ) ; <nl> + <nl> + auto update_routine = [ & ] ( ) <nl> + { <nl> + const auto & cell_idx = find_result . cell_idx ; <nl> + const auto & cell = cells [ cell_idx ] ; <nl> + out [ row ] = cell . isDefault ( ) ? get_default ( row ) : static_cast < OutputType > ( attribute_array [ cell_idx ] ) ; <nl> + } ; <nl> + <nl> if ( ! find_result . valid ) <nl> { <nl> - outdated_ids [ id ] . push_back ( row ) ; <nl> + <nl> if ( find_result . outdated ) <nl> - + + cache_expired ; <nl> + { <nl> + cache_expired_ids [ id ] . push_back ( row ) ; <nl> + if ( allow_read_expired_keys ) <nl> + update_routine ( ) ; <nl> + } <nl> else <nl> - + + cache_not_found ; <nl> + { <nl> + cache_not_found_ids [ id ] . push_back ( row ) ; <nl> + } <nl> } <nl> else <nl> { <nl> + + cache_hit ; <nl> - const auto & cell_idx = find_result . cell_idx ; <nl> - const auto & cell = cells [ cell_idx ] ; <nl> - out [ row ] = cell . isDefault ( ) ? get_default ( row ) : static_cast < OutputType > ( attribute_array [ cell_idx ] ) ; <nl> + update_routine ( ) ; <nl> } <nl> } <nl> } <nl> <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysExpired , cache_expired ) ; <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysNotFound , cache_not_found ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysExpired , cache_expired_ids . size ( ) ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysNotFound , cache_not_found_ids . size ( ) ) ; <nl> ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysHit , cache_hit ) ; <nl> <nl> query_count . fetch_add ( rows , std : : memory_order_relaxed ) ; <nl> - hit_count . fetch_add ( rows - outdated_ids . size ( ) , std : : memory_order_release ) ; <nl> - <nl> - if ( outdated_ids . empty ( ) ) <nl> - return ; <nl> + hit_count . fetch_add ( rows - cache_expired_ids . size ( ) - cache_not_found_ids . size ( ) , std : : memory_order_release ) ; <nl> <nl> - std : : vector < Key > required_ids ( outdated_ids . size ( ) ) ; <nl> - std : : transform ( std : : begin ( outdated_ids ) , std : : end ( outdated_ids ) , std : : begin ( required_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> + if ( cache_not_found_ids . empty ( ) ) <nl> + { <nl> + / / / Nothing to update - return <nl> + if ( cache_expired_ids . empty ( ) ) <nl> + return ; <nl> <nl> - / / / request new values <nl> - update ( <nl> - required_ids , <nl> - [ & ] ( const auto id , const auto cell_idx ) <nl> + / / / Update async only if allow_read_expired_keys is enabled . <nl> + if ( allow_read_expired_keys ) <nl> { <nl> - const auto attribute_value = attribute_array [ cell_idx ] ; <nl> + std : : vector < Key > required_expired_ids ; <nl> + required_expired_ids . reserve ( cache_expired_ids . size ( ) ) ; <nl> + std : : transform ( std : : begin ( cache_expired_ids ) , std : : end ( cache_expired_ids ) , std : : back_inserter ( required_expired_ids ) , <nl> + [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> <nl> - for ( const size_t row : outdated_ids [ id ] ) <nl> - out [ row ] = static_cast < OutputType > ( attribute_value ) ; <nl> - } , <nl> - [ & ] ( const auto id , const auto ) <nl> - { <nl> - for ( const size_t row : outdated_ids [ id ] ) <nl> - out [ row ] = get_default ( row ) ; <nl> - } ) ; <nl> + / / / request new values <nl> + auto update_unit_ptr = std : : make_shared < UpdateUnit > ( required_expired_ids ) ; <nl> + <nl> + tryPushToUpdateQueueOrThrow ( update_unit_ptr ) ; <nl> + <nl> + / / / Nothing to do - return <nl> + return ; <nl> + } <nl> + } <nl>
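tryPushToUpdateQueueOrThrow, called above, is only declared in the header. A plausible definition, consistent with the max_update_queue_size and update_queue_push_timeout_milliseconds members and the CACHE_DICTIONARY_UPDATE_FAIL error code introduced earlier (the tryPush signature and the exact message are assumptions; the real definition lives in CacheDictionary.cpp):

void CacheDictionary::tryPushToUpdateQueueOrThrow(UpdateUnitPtr & update_unit_ptr) const
{
    // tryPush(value, timeout_ms) is assumed to return false when the bounded
    // queue (capacity max_update_queue_size) stays full for the whole timeout.
    if (!update_queue.tryPush(update_unit_ptr, update_queue_push_timeout_milliseconds))
        throw Exception(
            "Cannot push to internal update queue of dictionary " + getFullName()
                + " within " + std::to_string(update_queue_push_timeout_milliseconds)
                + " ms. Current queue size is " + std::to_string(update_queue.size()),
            ErrorCodes::CACHE_DICTIONARY_UPDATE_FAIL);
}

Bounding the push keeps a slow or dead source from blocking queries indefinitely: overflow becomes an explicit error instead of an ever-growing backlog.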
+ <nl> + / / / From this point we have to update all the keys synchronously . <nl> + / / / Maybe allow_read_expired_keys_from_cache_dictionary is disabled <nl> + / / / and there are no cache_not_found_ids but there are some cache_expired_ids . <nl> + <nl> + std : : vector < Key > required_ids ; <nl> + required_ids . reserve ( cache_not_found_ids . size ( ) + cache_expired_ids . size ( ) ) ; <nl> + std : : transform ( <nl> + std : : begin ( cache_not_found_ids ) , std : : end ( cache_not_found_ids ) , <nl> + std : : back_inserter ( required_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> + std : : transform ( <nl> + std : : begin ( cache_expired_ids ) , std : : end ( cache_expired_ids ) , <nl> + std : : back_inserter ( required_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> + <nl> + auto on_cell_updated = [ & ] ( const auto id , const auto cell_idx ) <nl> + { <nl> + const auto attribute_value = attribute_array [ cell_idx ] ; <nl> + <nl> + for ( const size_t row : cache_not_found_ids [ id ] ) <nl> + out [ row ] = static_cast < OutputType > ( attribute_value ) ; <nl> + <nl> + for ( const size_t row : cache_expired_ids [ id ] ) <nl> + out [ row ] = static_cast < OutputType > ( attribute_value ) ; <nl> + } ; <nl> + <nl> + auto on_id_not_found = [ & ] ( const auto id , const auto ) <nl> + { <nl> + for ( const size_t row : cache_not_found_ids [ id ] ) <nl> + out [ row ] = get_default ( row ) ; <nl> + <nl> + for ( const size_t row : cache_expired_ids [ id ] ) <nl> + out [ row ] = get_default ( row ) ; <nl> + } ; <nl> + <nl> + / / / Request new values <nl> + auto update_unit_ptr = std : : make_shared < UpdateUnit > ( required_ids , on_cell_updated , on_id_not_found ) ; <nl> + <nl> + tryPushToUpdateQueueOrThrow ( update_unit_ptr ) ; <nl> + waitForCurrentUpdateFinish ( update_unit_ptr ) ; <nl> } <nl> <nl> template < typename DefaultGetter > <nl> void CacheDictionary : : getItemsString ( <nl> out - > getOffsets ( ) . resize_assume_reserved ( 0 ) ; <nl> <nl> / / / Mapping : < id > - > { all indices ` i ` of ` ids ` such that ` ids [ i ] ` = < id > } <nl> - std : : unordered_map < Key , std : : vector < size_t > > outdated_ids ; <nl> + std : : unordered_map < Key , std : : vector < size_t > > cache_expired_ids ; <nl> + std : : unordered_map < Key , std : : vector < size_t > > cache_not_found_ids ; <nl> / / / we are going to store every string separately <nl> std : : unordered_map < Key , String > map ; <nl> <nl> size_t total_length = 0 ; <nl> - size_t cache_expired = 0 , cache_not_found = 0 , cache_hit = 0 ; <nl> + size_t cache_hit = 0 ; <nl> { <nl> const ProfilingScopedReadRWLock read_lock { rw_lock , ProfileEvents : : DictCacheLockReadNs } ; <nl> <nl> void CacheDictionary : : getItemsString ( <nl> const auto id = ids [ row ] ; <nl> <nl> const auto find_result = findCellIdx ( id , now ) ; <nl> - if ( ! find_result . valid ) <nl> - { <nl> - outdated_ids [ id ] . push_back ( row ) ; <nl> - if ( find_result .
outdated ) <nl> - + + cache_expired ; <nl> - else <nl> - + + cache_not_found ; <nl> - } <nl> - else <nl> + <nl> + <nl> + auto insert_value_routine = [ & ] ( ) <nl> { <nl> - + + cache_hit ; <nl> const auto & cell_idx = find_result . cell_idx ; <nl> const auto & cell = cells [ cell_idx ] ; <nl> const auto string_ref = cell . isDefault ( ) ? get_default ( row ) : attribute_array [ cell_idx ] ; <nl> void CacheDictionary : : getItemsString ( <nl> map [ id ] = String { string_ref } ; <nl> <nl> total_length + = string_ref . size + 1 ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysExpired , cache_expired ) ; <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysNotFound , cache_not_found ) ; <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysHit , cache_hit ) ; <nl> + } ; <nl> <nl> - query_count . fetch_add ( rows , std : : memory_order_relaxed ) ; <nl> - hit_count . fetch_add ( rows - outdated_ids . size ( ) , std : : memory_order_release ) ; <nl> - <nl> - / / / request new values <nl> - if ( ! outdated_ids . empty ( ) ) <nl> - { <nl> - std : : vector < Key > required_ids ( outdated_ids . size ( ) ) ; <nl> - std : : transform ( std : : begin ( outdated_ids ) , std : : end ( outdated_ids ) , std : : begin ( required_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> - <nl> - update ( <nl> - required_ids , <nl> - [ & ] ( const auto id , const auto cell_idx ) <nl> + if ( ! find_result . valid ) <nl> { <nl> - const auto attribute_value = attribute_array [ cell_idx ] ; <nl> + if ( find_result . outdated ) <nl> + { <nl> + cache_expired_ids [ id ] . push_back ( row ) ; <nl> <nl> - map [ id ] = String { attribute_value } ; <nl> - total_length + = ( attribute_value . size + 1 ) * outdated_ids [ id ] . size ( ) ; <nl> - } , <nl> - [ & ] ( const auto id , const auto ) <nl> + if ( allow_read_expired_keys ) <nl> + insert_value_routine ( ) ; <nl> + } else <nl> + cache_not_found_ids [ id ] . push_back ( row ) ; <nl> + } else <nl> { <nl> - for ( const auto row : outdated_ids [ id ] ) <nl> - total_length + = get_default ( row ) . size + 1 ; <nl> - } ) ; <nl> - } <nl> - <nl> - out - > getChars ( ) . reserve ( total_length ) ; <nl> - <nl> - for ( const auto row : ext : : range ( 0 , ext : : size ( ids ) ) ) <nl> - { <nl> - const auto id = ids [ row ] ; <nl> - const auto it = map . find ( id ) ; <nl> - <nl> - const auto string_ref = it ! = std : : end ( map ) ? StringRef { it - > second } : get_default ( row ) ; <nl> - out - > insertData ( string_ref . data , string_ref . size ) ; <nl> + + + cache_hit ; <nl> + insert_value_routine ( ) ; <nl> + } <nl> + } <nl> } <nl> - } <nl> - <nl> - template < typename PresentIdHandler , typename AbsentIdHandler > <nl> - void CacheDictionary : : update ( <nl> - const std : : vector < Key > & requested_ids , PresentIdHandler & & on_cell_updated , AbsentIdHandler & & on_id_not_found ) const <nl> - { <nl> - CurrentMetrics : : Increment metric_increment { CurrentMetrics : : DictCacheRequests } ; <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysRequested , requested_ids . size ( ) ) ; <nl> <nl> - std : : unordered_map < Key , UInt8 > remaining_ids { requested_ids . size ( ) } ; <nl> - for ( const auto id : requested_ids ) <nl> - remaining_ids . insert ( { id , 0 } ) ; <nl> - <nl> - const auto now = std : : chrono : : system_clock : : now ( ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysExpired , cache_expired_ids . 
size ( ) ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysNotFound , cache_not_found_ids . size ( ) ) ; <nl> + ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysHit , cache_hit ) ; <nl> <nl> - const ProfilingScopedWriteRWLock write_lock { rw_lock , ProfileEvents : : DictCacheLockWriteNs } ; <nl> + query_count . fetch_add ( rows , std : : memory_order_relaxed ) ; <nl> + hit_count . fetch_add ( rows - cache_expired_ids . size ( ) - cache_not_found_ids . size ( ) , std : : memory_order_release ) ; <nl> <nl> - if ( now > backoff_end_time ) <nl> + / / / Async update of expired keys . <nl> + if ( cache_not_found_ids . empty ( ) ) <nl> { <nl> - try <nl> + if ( allow_read_expired_keys & & ! cache_expired_ids . empty ( ) ) <nl> { <nl> - if ( error_count ) <nl> - { <nl> - / / / Recover after error : we have to clone the source here because <nl> - / / / it could keep connections which should be reset after error . <nl> - source_ptr = source_ptr - > clone ( ) ; <nl> - } <nl> - <nl> - Stopwatch watch ; <nl> - auto stream = source_ptr - > loadIds ( requested_ids ) ; <nl> - stream - > readPrefix ( ) ; <nl> - <nl> - while ( const auto block = stream - > read ( ) ) <nl> - { <nl> - const auto id_column = typeid_cast < const ColumnUInt64 * > ( block . safeGetByPosition ( 0 ) . column . get ( ) ) ; <nl> - if ( ! id_column ) <nl> - throw Exception { name + " : id column has type different from UInt64 . " , ErrorCodes : : TYPE_MISMATCH } ; <nl> - <nl> - const auto & ids = id_column - > getData ( ) ; <nl> + std : : vector < Key > required_expired_ids ; <nl> + required_expired_ids . reserve ( cache_expired_ids . size ( ) ) ; <nl> + std : : transform ( std : : begin ( cache_expired_ids ) , std : : end ( cache_expired_ids ) , <nl> + std : : back_inserter ( required_expired_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> <nl> - / / / cache column pointers <nl> - const auto column_ptrs = ext : : map < std : : vector > ( <nl> - ext : : range ( 0 , attributes . size ( ) ) , [ & block ] ( size_t i ) { return block . safeGetByPosition ( i + 1 ) . column . get ( ) ; } ) ; <nl> + auto update_unit_ptr = std : : make_shared < UpdateUnit > ( required_expired_ids ) ; <nl> <nl> - for ( const auto i : ext : : range ( 0 , ids . size ( ) ) ) <nl> - { <nl> - const auto id = ids [ i ] ; <nl> - <nl> - const auto find_result = findCellIdx ( id , now ) ; <nl> - const auto & cell_idx = find_result . cell_idx ; <nl> - <nl> - auto & cell = cells [ cell_idx ] ; <nl> - <nl> - for ( const auto attribute_idx : ext : : range ( 0 , attributes . size ( ) ) ) <nl> - { <nl> - const auto & attribute_column = * column_ptrs [ attribute_idx ] ; <nl> - auto & attribute = attributes [ attribute_idx ] ; <nl> - <nl> - setAttributeValue ( attribute , cell_idx , attribute_column [ i ] ) ; <nl> - } <nl> - <nl> - / / / if cell id is zero and zero does not map to this cell , then the cell is unused <nl> - if ( cell . id = = 0 & & cell_idx ! = zero_cell_idx ) <nl> - element_count . fetch_add ( 1 , std : : memory_order_relaxed ) ; <nl> - <nl> - cell . id = id ; <nl> - if ( dict_lifetime . min_sec ! = 0 & & dict_lifetime . max_sec ! = 0 ) <nl> - { <nl> - std : : uniform_int_distribution < UInt64 > distribution { dict_lifetime . min_sec , dict_lifetime . max_sec } ; <nl> - cell . setExpiresAt ( now + std : : chrono : : seconds { distribution ( rnd_engine ) } ) ; <nl> - } <nl> - else <nl> - cell . setExpiresAt ( std : : chrono : : time_point < std : : chrono : : system_clock > : : max ( ) ) ; <nl> - <nl> - / / / inform caller <nl> - on_cell_updated ( id , cell_idx ) ; <nl> - / / / mark corresponding id as found <nl> - remaining_ids [ id ] = 1 ; <nl> - } <nl> - } <nl> - <nl> - stream - > readSuffix ( ) ; <nl> - <nl> - error_count = 0 ; <nl> - last_exception = std : : exception_ptr { } ; <nl> - backoff_end_time = std : : chrono : : system_clock : : time_point { } ; <nl> - <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheRequestTimeNs , watch . elapsed ( ) ) ; <nl> - } <nl> - catch ( . . . ) <nl> - { <nl> - + + error_count ; <nl> - last_exception = std : : current_exception ( ) ; <nl> - backoff_end_time = now + std : : chrono : : seconds ( calculateDurationWithBackoff ( rnd_engine , error_count ) ) ; <nl> + tryPushToUpdateQueueOrThrow ( update_unit_ptr ) ; <nl> <nl> - tryLogException ( last_exception , log , " Could not update cache dictionary ' " + getFullName ( ) + <nl> - " ' , next update is scheduled at " + ext : : to_string ( backoff_end_time ) ) ; <nl> + / / / Do not return at this point , because there is some extra stuff to do at the end of this method . <nl> } <nl> } <nl>
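waitForCurrentUpdateFinish, called right after the synchronous pushes in both getters, is the other half of the contract from the header comment. A minimal sketch consistent with update_mutex, is_update_finished and the UpdateUnit fields (the real definition, not shown in this diff, may add timeouts and metrics):

void CacheDictionary::waitForCurrentUpdateFinish(UpdateUnitPtr & update_unit_ptr) const
{
    std::unique_lock<std::mutex> lock(update_mutex);
    // Block the querying thread until the background thread has executed this
    // unit's callbacks; only then is it safe to read the buffers they filled.
    is_update_finished.wait(lock, [&] { return update_unit_ptr->is_done.load(); });

    if (update_unit_ptr->current_exception)
        std::rethrow_exception(update_unit_ptr->current_exception);
}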
<nl> - size_t not_found_num = 0 , found_num = 0 ; <nl> - <nl> - / / / Check which ids have not been found and require setting null_value <nl> - for ( const auto & id_found_pair : remaining_ids ) <nl> + / / / Request new values synchronously . <nl> + / / / We have to request both cache_not_found_ids and cache_expired_ids . <nl> + if ( ! cache_not_found_ids . empty ( ) ) <nl> { <nl> - if ( id_found_pair . second ) <nl> + std : : vector < Key > required_ids ; <nl> + required_ids . reserve ( cache_not_found_ids . size ( ) + cache_expired_ids . size ( ) ) ; <nl> + std : : transform ( <nl> + std : : begin ( cache_not_found_ids ) , std : : end ( cache_not_found_ids ) , <nl> + std : : back_inserter ( required_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> + std : : transform ( <nl> + std : : begin ( cache_expired_ids ) , std : : end ( cache_expired_ids ) , <nl> + std : : back_inserter ( required_ids ) , [ ] ( auto & pair ) { return pair . first ; } ) ; <nl> + <nl> + auto on_cell_updated = [ & ] ( const auto id , const auto cell_idx ) <nl> { <nl> - + + found_num ; <nl> - continue ; <nl> - } <nl> - + + not_found_num ; <nl> - <nl> - const auto id = id_found_pair . first ; <nl> + const auto attribute_value = attribute_array [ cell_idx ] ; <nl> <nl> - const auto find_result = findCellIdx ( id , now ) ; <nl> - const auto & cell_idx = find_result . cell_idx ; <nl> - auto & cell = cells [ cell_idx ] ; <nl> + map [ id ] = String { attribute_value } ; <nl> + total_length + = ( attribute_value . size + 1 ) * cache_not_found_ids [ id ] . size ( ) ; <nl> + } ; <nl> <nl> - if ( error_count ) <nl> + auto on_id_not_found = [ & ] ( const auto id , const auto ) <nl> { <nl> - if ( find_result . outdated ) <nl> - { <nl> - / / / We have expired data for that ` id ` so we can continue using it . <nl> - bool was_default = cell . isDefault ( ) ; <nl> - cell . setExpiresAt ( backoff_end_time ) ; <nl> - if ( was_default ) <nl> - cell . setDefault ( ) ; <nl> - if ( was_default ) <nl> - on_id_not_found ( id , cell_idx ) ; <nl> - else <nl> - on_cell_updated ( id , cell_idx ) ; <nl> - continue ; <nl> - } <nl> - / / / We don ' t have expired data for that ` id ` so all we can do is to rethrow ` last_exception ` .
<nl> - std : : rethrow_exception ( last_exception ) ; <nl> - } <nl> + for ( const auto row : cache_not_found_ids [ id ] ) <nl> + total_length + = get_default ( row ) . size + 1 ; <nl> + } ; <nl> <nl> - / / / Check if cell had not been occupied before and increment element counter if it hadn ' t <nl> - if ( cell . id = = 0 & & cell_idx ! = zero_cell_idx ) <nl> - element_count . fetch_add ( 1 , std : : memory_order_relaxed ) ; <nl> + auto update_unit_ptr = std : : make_shared < UpdateUnit > ( required_ids , on_cell_updated , on_id_not_found ) ; <nl> <nl> - cell . id = id ; <nl> + tryPushToUpdateQueueOrThrow ( update_unit_ptr ) ; <nl> + waitForCurrentUpdateFinish ( update_unit_ptr ) ; <nl> + } <nl> <nl> - if ( dict_lifetime . min_sec ! = 0 & & dict_lifetime . max_sec ! = 0 ) <nl> - { <nl> - std : : uniform_int_distribution < UInt64 > distribution { dict_lifetime . min_sec , dict_lifetime . max_sec } ; <nl> - cell . setExpiresAt ( now + std : : chrono : : seconds { distribution ( rnd_engine ) } ) ; <nl> - } <nl> - else <nl> - cell . setExpiresAt ( std : : chrono : : time_point < std : : chrono : : system_clock > : : max ( ) ) ; <nl> + out - > getChars ( ) . reserve ( total_length ) ; <nl> <nl> - / / / Set null_value for each attribute <nl> - cell . setDefault ( ) ; <nl> - for ( auto & attribute : attributes ) <nl> - setDefaultAttributeValue ( attribute , cell_idx ) ; <nl> + for ( const auto row : ext : : range ( 0 , ext : : size ( ids ) ) ) <nl> + { <nl> + const auto id = ids [ row ] ; <nl> + const auto it = map . find ( id ) ; <nl> <nl> - / / / inform caller that the cell has not been found <nl> - on_id_not_found ( id , cell_idx ) ; <nl> + const auto string_ref = it ! = std : : end ( map ) ? StringRef { it - > second } : get_default ( row ) ; <nl> + out - > insertData ( string_ref . data , string_ref . size ) ; <nl> } <nl> - <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysRequestedMiss , not_found_num ) ; <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheKeysRequestedFound , found_num ) ; <nl> - ProfileEvents : : increment ( ProfileEvents : : DictCacheRequests ) ; <nl> } <nl> <nl> } <nl> mmm a / dbms / tests / config / ints_dictionary . xml <nl> ppp b / dbms / tests / config / ints_dictionary . 
xml <nl> <nl> < / attribute > <nl> < / structure > <nl> < / dictionary > <nl> - < / dictionaries > <nl> + <nl> + <nl> + < dictionary > <nl> + < name > one_cell_cache_ints < / name > <nl> + < source > <nl> + < clickhouse > <nl> + < host > localhost < / host > <nl> + < port > 9000 < / port > <nl> + < user > default < / user > <nl> + < password > < / password > <nl> + < db > test_01054 < / db > <nl> + < table > ints < / table > <nl> + < / clickhouse > <nl> + < / source > <nl> + < lifetime > 0 < / lifetime > <nl> + < layout > <nl> + < cache > < size_in_cells > 1 < / size_in_cells > < / cache > <nl> + < / layout > <nl> + < structure > <nl> + < id > <nl> + < name > key < / name > <nl> + < / id > <nl> + < attribute > <nl> + < name > i8 < / name > <nl> + < type > Int8 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > i16 < / name > <nl> + < type > Int16 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > i32 < / name > <nl> + < type > Int32 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > i64 < / name > <nl> + < type > Int64 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u8 < / name > <nl> + < type > UInt8 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u16 < / name > <nl> + < type > UInt16 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u32 < / name > <nl> + < type > UInt32 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u64 < / name > <nl> + < type > UInt64 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < / structure > <nl> + < / dictionary > <nl> + <nl> + <nl> + < dictionary > <nl> + < name > one_cell_cache_ints_overflow < / name > <nl> + < source > <nl> + < clickhouse > <nl> + < host > localhost < / host > <nl> + < port > 9000 < / port > <nl> + < user > default < / user > <nl> + < password > < / password > <nl> + < db > test_01054_overflow < / db > <nl> + < table > ints < / table > <nl> + < / clickhouse > <nl> + < / source > <nl> + < lifetime > 0 < / lifetime > <nl> + < layout > <nl> + < cache > < size_in_cells > 1 < / size_in_cells > < / cache > <nl> + < / layout > <nl> + < structure > <nl> + < id > <nl> + < name > key < / name > <nl> + < / id > <nl> + < attribute > <nl> + < name > i8 < / name > <nl> + < type > Int8 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > i16 < / name > <nl> + < type > Int16 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > i32 < / name > <nl> + < type > Int32 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > i64 < / name > <nl> + < type > Int64 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u8 < / name > <nl> + < type > UInt8 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u16 < / name > <nl> + < type > UInt16 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u32 < / name > <nl> + < type > UInt32 < / type > <nl> + < null_value 
> 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u64 < / name > <nl> + < type > UInt64 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < / structure > <nl> + < / dictionary > <nl> + <nl> + < / dictionaries > <nl> \ No newline at end of file <nl> mmm a / dbms / tests / integration / helpers / cluster . py <nl> ppp b / dbms / tests / integration / helpers / cluster . py <nl> def http_query ( self , sql , data = None , params = None , user = None ) : <nl> <nl> return urllib . urlopen ( url , data ) . read ( ) <nl> <nl> + def kill_clickhouse ( self , stop_start_wait_sec = 5 ) : <nl> + pid = self . get_process_pid ( " clickhouse " ) <nl> + if not pid : <nl> + raise Exception ( " No clickhouse found " ) <nl> + self . exec_in_container ( [ " bash " , " - c " , " kill - 9 { } " . format ( pid ) ] , user = ' root ' ) <nl> + time . sleep ( stop_start_wait_sec ) <nl> + <nl> + def restore_clickhouse ( self , retries = 100 ) : <nl> + pid = self . get_process_pid ( " clickhouse " ) <nl> + if pid : <nl> + raise Exception ( " ClickHouse has already started " ) <nl> + self . exec_in_container ( [ " bash " , " - c " , " { } - - daemon " . format ( CLICKHOUSE_START_COMMAND ) ] , user = str ( os . getuid ( ) ) ) <nl> + from helpers . test_tools import assert_eq_with_retry <nl> + # wait start <nl> + assert_eq_with_retry ( self , " select 1 " , " 1 " , retry_count = retries ) <nl> + <nl> def restart_clickhouse ( self , stop_start_wait_sec = 5 , kill = False ) : <nl> if not self . stay_alive : <nl> raise Exception ( " clickhouse can be restarted only with stay_alive = True instance " ) <nl> def create_dir ( self , destroy_dir = True ) : <nl> def destroy_dir ( self ) : <nl> if p . exists ( self . path ) : <nl> shutil . rmtree ( self . path ) <nl> + <nl> + <nl> + class ClickHouseKiller ( object ) : <nl> + def __init__ ( self , clickhouse_node ) : <nl> + self . clickhouse_node = clickhouse_node <nl> + <nl> + def __enter__ ( self ) : <nl> + self . clickhouse_node . kill_clickhouse ( ) <nl> + <nl> + def __exit__ ( self , exc_type , exc_val , exc_tb ) : <nl> + self . clickhouse_node . restore_clickhouse ( ) <nl> \ No newline at end of file <nl> mmm a / dbms / tests / integration / helpers / network . py <nl> ppp b / dbms / tests / integration / helpers / network . py <nl> def __del__ ( self ) : <nl> self . heal_all ( ) <nl> <nl> <nl> - class PartitionManagerDisbaler : <nl> + class PartitionManagerDisabler : <nl> def __init__ ( self , manager ) : <nl> self . manager = manager <nl> self . rules = self . manager . pop_rules ( ) <nl> mmm a / dbms / tests / integration / test_dictionaries_all_layouts_and_sources / dictionary . py <nl> ppp b / dbms / tests / integration / test_dictionaries_all_layouts_and_sources / dictionary . py <nl> def get_is_in_expressions ( self , dict_name , row , parent_row ) : <nl> <nl> <nl> class Dictionary ( object ) : <nl> - def __init__ ( self , name , structure , source , config_path , table_name , fields ) : <nl> + def __init__ ( self , name , structure , source , config_path , <nl> + table_name , fields , min_lifetime = 3 , max_lifetime = 5 ) : <nl> self . name = name <nl> self . structure = copy . deepcopy ( structure ) <nl> self . source = copy . deepcopy ( source ) <nl> self . config_path = config_path <nl> self . table_name = table_name <nl> self . fields = fields <nl> + self . min_lifetime = min_lifetime <nl> + self . max_lifetime = max_lifetime <nl> <nl> def generate_config ( self ) : <nl> with open ( self . 
config_path , ' w ' ) as result : <nl> def generate_config ( self ) : <nl> < yandex > <nl> < dictionary > <nl> < lifetime > <nl> - < min > 3 < / min > <nl> - < max > 5 < / max > <nl> + < min > { min_lifetime } < / min > <nl> + < max > { max_lifetime } < / max > <nl> < / lifetime > <nl> < name > { name } < / name > <nl> { structure } <nl> def generate_config ( self ) : <nl> < / dictionary > <nl> < / yandex > <nl> ' ' ' . format ( <nl> + min_lifetime = self . min_lifetime , <nl> + max_lifetime = self . max_lifetime , <nl> name = self . name , <nl> structure = self . structure . get_structure_str ( ) , <nl> source = self . source . get_source_str ( self . table_name ) , <nl> mmm a / dbms / tests / integration / test_dictionaries_all_layouts_and_sources / test . py <nl> ppp b / dbms / tests / integration / test_dictionaries_all_layouts_and_sources / test . py <nl> <nl> VALUES = { <nl> " simple " : [ <nl> [ 1 , 22 , 333 , 4444 , 55555 , - 6 , - 77 , <nl> - - 888 , - 999 , ' 550e8400 - e29b - 41d4 - a716 - 446655440003 ' , <nl> - ' 1973 - 06 - 28 ' , ' 1985 - 02 - 28 23 : 43 : 25 ' , ' hello ' , 22 . 543 , 3332154213 . 4 , 0 ] , <nl> + - 888 , - 999 , ' 550e8400 - e29b - 41d4 - a716 - 446655440003 ' , <nl> + ' 1973 - 06 - 28 ' , ' 1985 - 02 - 28 23 : 43 : 25 ' , ' hello ' , 22 . 543 , 3332154213 . 4 , 0 ] , <nl> [ 2 , 3 , 4 , 5 , 6 , - 7 , - 8 , <nl> - - 9 , - 10 , ' 550e8400 - e29b - 41d4 - a716 - 446655440002 ' , <nl> - ' 1978 - 06 - 28 ' , ' 1986 - 02 - 28 23 : 42 : 25 ' , ' hello ' , 21 . 543 , 3222154213 . 4 , 1 ] <nl> + - 9 , - 10 , ' 550e8400 - e29b - 41d4 - a716 - 446655440002 ' , <nl> + ' 1978 - 06 - 28 ' , ' 1986 - 02 - 28 23 : 42 : 25 ' , ' hello ' , 21 . 543 , 3222154213 . 4 , 1 ] <nl> ] , <nl> " complex " : [ <nl> [ 1 , ' world ' , 22 , 333 , 4444 , 55555 , - 6 , <nl> - - 77 , - 888 , - 999 , ' 550e8400 - e29b - 41d4 - a716 - 446655440003 ' , <nl> - ' 1973 - 06 - 28 ' , ' 1985 - 02 - 28 23 : 43 : 25 ' , <nl> - ' hello ' , 22 . 543 , 3332154213 . 4 ] , <nl> + - 77 , - 888 , - 999 , ' 550e8400 - e29b - 41d4 - a716 - 446655440003 ' , <nl> + ' 1973 - 06 - 28 ' , ' 1985 - 02 - 28 23 : 43 : 25 ' , <nl> + ' hello ' , 22 . 543 , 3332154213 . 4 ] , <nl> [ 2 , ' qwerty2 ' , 52 , 2345 , 6544 , 9191991 , - 2 , <nl> - - 717 , - 81818 , - 92929 , ' 550e8400 - e29b - 41d4 - a716 - 446655440007 ' , <nl> - ' 1975 - 09 - 28 ' , ' 2000 - 02 - 28 23 : 33 : 24 ' , <nl> - ' my ' , 255 . 543 , 3332221 . 44 ] <nl> + - 717 , - 81818 , - 92929 , ' 550e8400 - e29b - 41d4 - a716 - 446655440007 ' , <nl> + ' 1975 - 09 - 28 ' , ' 2000 - 02 - 28 23 : 33 : 24 ' , <nl> + ' my ' , 255 . 543 , 3332221 . 44 ] <nl> <nl> ] , <nl> " ranged " : [ <nl> [ 1 , ' 2019 - 02 - 10 ' , ' 2019 - 02 - 01 ' , ' 2019 - 02 - 28 ' , <nl> - 22 , 333 , 4444 , 55555 , - 6 , - 77 , - 888 , - 999 , <nl> - ' 550e8400 - e29b - 41d4 - a716 - 446655440003 ' , <nl> - ' 1973 - 06 - 28 ' , ' 1985 - 02 - 28 23 : 43 : 25 ' , ' hello ' , <nl> - 22 . 543 , 3332154213 . 4 ] , <nl> + 22 , 333 , 4444 , 55555 , - 6 , - 77 , - 888 , - 999 , <nl> + ' 550e8400 - e29b - 41d4 - a716 - 446655440003 ' , <nl> + ' 1973 - 06 - 28 ' , ' 1985 - 02 - 28 23 : 43 : 25 ' , ' hello ' , <nl> + 22 . 543 , 3332154213 . 4 ] , <nl> [ 2 , ' 2019 - 04 - 10 ' , ' 2019 - 04 - 01 ' , ' 2019 - 04 - 28 ' , <nl> - 11 , 3223 , 41444 , 52515 , - 65 , - 747 , - 8388 , - 9099 , <nl> - ' 550e8400 - e29b - 41d4 - a716 - 446655440004 ' , <nl> - ' 1973 - 06 - 29 ' , ' 2002 - 02 - 28 23 : 23 : 25 ' , ' ! ! ! ! ' , <nl> - 32 . 543 , 3332543 . 
4 ] <nl> + 11 , 3223 , 41444 , 52515 , - 65 , - 747 , - 8388 , - 9099 , <nl> + ' 550e8400 - e29b - 41d4 - a716 - 446655440004 ' , <nl> + ' 1973 - 06 - 29 ' , ' 2002 - 02 - 28 23 : 23 : 25 ' , ' ! ! ! ! ' , <nl> + 32 . 543 , 3332543 . 4 ] <nl> ] <nl> } <nl> <nl> new file mode 100644 <nl> index 00000000000 . . e69de29bb2d <nl> new file mode 100644 <nl> index 00000000000 . . a1518083be3 <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_dictionary_allow_read_expired_keys / configs / config . xml <nl> <nl> + < ? xml version = " 1 . 0 " ? > <nl> + < yandex > <nl> + < logger > <nl> + < level > trace < / level > <nl> + < log > / var / log / clickhouse - server / clickhouse - server . log < / log > <nl> + < errorlog > / var / log / clickhouse - server / clickhouse - server . err . log < / errorlog > <nl> + < size > 1000M < / size > <nl> + < count > 10 < / count > <nl> + < / logger > <nl> + <nl> + < tcp_port > 9000 < / tcp_port > <nl> + < listen_host > 127 . 0 . 0 . 1 < / listen_host > <nl> + <nl> + < openSSL > <nl> + < client > <nl> + < cacheSessions > true < / cacheSessions > <nl> + < verificationMode > none < / verificationMode > <nl> + < invalidCertificateHandler > <nl> + < name > AcceptCertificateHandler < / name > <nl> + < / invalidCertificateHandler > <nl> + < / client > <nl> + < / openSSL > <nl> + <nl> + < max_concurrent_queries > 500 < / max_concurrent_queries > <nl> + < mark_cache_size > 5368709120 < / mark_cache_size > <nl> + < path > . / clickhouse / < / path > <nl> + < users_config > users . xml < / users_config > <nl> + <nl> + < dictionaries_config > / etc / clickhouse - server / config . d / * . xml < / dictionaries_config > <nl> + < / yandex > <nl> new file mode 100644 <nl> index 00000000000 . . 3365741411d <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_dictionary_allow_read_expired_keys / configs / dictionaries / cache_ints_dictionary . 
xml <nl> <nl> + < yandex > <nl> + < dictionary > <nl> + < name > anime_dict < / name > <nl> + < source > <nl> + < clickhouse > <nl> + < host > dictionary_node < / host > <nl> + < port > 9000 < / port > <nl> + < user > default < / user > <nl> + < password > < / password > <nl> + < db > test < / db > <nl> + < table > ints < / table > <nl> + < / clickhouse > <nl> + < / source > <nl> + < lifetime > <nl> + < max > 2 < / max > <nl> + < min > 1 < / min > <nl> + < / lifetime > <nl> + < layout > <nl> + < cache > <nl> + < size_in_cells > 10000 < / size_in_cells > <nl> + < max_update_queue_size > 10000 < / max_update_queue_size > <nl> + < allow_read_expired_keys > 1 < / allow_read_expired_keys > <nl> + < update_queue_push_timeout_milliseconds > 10 < / update_queue_push_timeout_milliseconds > <nl> + < / cache > <nl> + < / layout > <nl> + < structure > <nl> + < id > <nl> + < name > key < / name > <nl> + < / id > <nl> + < attribute > <nl> + < name > i8 < / name > <nl> + < type > Int8 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > i16 < / name > <nl> + < type > Int16 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > i32 < / name > <nl> + < type > Int32 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > i64 < / name > <nl> + < type > Int64 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u8 < / name > <nl> + < type > UInt8 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u16 < / name > <nl> + < type > UInt16 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u32 < / name > <nl> + < type > UInt32 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < attribute > <nl> + < name > u64 < / name > <nl> + < type > UInt64 < / type > <nl> + < null_value > 0 < / null_value > <nl> + < / attribute > <nl> + < / structure > <nl> + < / dictionary > <nl> + < / yandex > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 6061af8e33d <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_dictionary_allow_read_expired_keys / configs / users . xml <nl> <nl> + < ? xml version = " 1 . 0 " ? > <nl> + < yandex > <nl> + < profiles > <nl> + < default > <nl> + < / default > <nl> + < / profiles > <nl> + <nl> + < users > <nl> + < default > <nl> + < password > < / password > <nl> + < networks incl = " networks " replace = " replace " > <nl> + < ip > : : / 0 < / ip > <nl> + < / networks > <nl> + < profile > default < / profile > <nl> + < quota > default < / quota > <nl> + < / default > <nl> + < / users > <nl> + <nl> + < quotas > <nl> + < default > <nl> + < / default > <nl> + < / quotas > <nl> + < / yandex > <nl> new file mode 100644 <nl> index 00000000000 . . 8da882679bd <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_dictionary_allow_read_expired_keys / test_default_reading . py <nl> <nl> + from __future__ import print_function <nl> + import pytest <nl> + import time <nl> + import os <nl> + from contextlib import contextmanager <nl> + <nl> + from helpers . cluster import ClickHouseCluster <nl> + from helpers . cluster import ClickHouseKiller <nl> + from helpers . network import PartitionManager <nl> + <nl> + SCRIPT_DIR = os . path . dirname ( os . path . 
realpath ( __file__ ) ) <nl> + cluster = ClickHouseCluster ( __file__ , base_configs_dir = os . path . join ( SCRIPT_DIR , ' configs ' ) ) <nl> + <nl> + dictionary_node = cluster . add_instance ( ' dictionary_node ' , stay_alive = True ) <nl> + main_node = cluster . add_instance ( ' main_node ' , main_configs = [ ' configs / dictionaries / cache_ints_dictionary . xml ' ] ) <nl> + <nl> + @ pytest . fixture ( scope = " module " ) <nl> + def started_cluster ( ) : <nl> + try : <nl> + cluster . start ( ) <nl> + dictionary_node . query ( " create database if not exists test ; " ) <nl> + dictionary_node . query ( " drop table if exists test . ints ; " ) <nl> + dictionary_node . query ( " create table test . ints " <nl> + " ( key UInt64 , " <nl> + " i8 Int8 , i16 Int16 , i32 Int32 , i64 Int64 , " <nl> + " u8 UInt8 , u16 UInt16 , u32 UInt32 , u64 UInt64 ) " <nl> + " Engine = Memory ; " ) <nl> + dictionary_node . query ( " insert into test . ints values ( 7 , 7 , 7 , 7 , 7 , 7 , 7 , 7 , 7 ) ; " ) <nl> + dictionary_node . query ( " insert into test . ints values ( 5 , 5 , 5 , 5 , 5 , 5 , 5 , 5 , 5 ) ; " ) <nl> + <nl> + yield cluster <nl> + finally : <nl> + cluster . shutdown ( ) <nl> + <nl> + # @ pytest . mark . skip ( reason = " debugging " ) <nl> + def test_default_reading ( started_cluster ) : <nl> + assert None ! = dictionary_node . get_process_pid ( " clickhouse " ) , " ClickHouse must be alive " <nl> + <nl> + # Key 13 is not in the dictionary , so the default value will be returned <nl> + <nl> + def test_helper ( ) : <nl> + assert ' 42 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' i8 ' , toUInt64 ( 13 ) , toInt8 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 42 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' i16 ' , toUInt64 ( 13 ) , toInt16 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 42 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' i32 ' , toUInt64 ( 13 ) , toInt32 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 42 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' i64 ' , toUInt64 ( 13 ) , toInt64 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 42 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' u8 ' , toUInt64 ( 13 ) , toUInt8 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 42 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' u16 ' , toUInt64 ( 13 ) , toUInt16 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 42 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' u32 ' , toUInt64 ( 13 ) , toUInt32 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 42 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' u64 ' , toUInt64 ( 13 ) , toUInt64 ( 42 ) ) ; " ) . rstrip ( ) <nl> + <nl> + test_helper ( ) <nl> + <nl> + with PartitionManager ( ) as pm , ClickHouseKiller ( dictionary_node ) : <nl> + assert None = = dictionary_node . get_process_pid ( " clickhouse " ) , " ClickHouse must be stopped " <nl> + <nl> + # Remove connection between main_node and dictionary for sure <nl> + pm . heal_all ( ) <nl> + pm . partition_instances ( main_node , dictionary_node ) <nl> + <nl> + # Dictionary max lifetime is 2 seconds . <nl> + time . sleep ( 3 ) <nl> + <nl> + test_helper ( ) <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . 6b0e1936259 <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_dictionary_allow_read_expired_keys / test_dict_get .
py <nl> <nl> + from __future__ import print_function <nl> + import pytest <nl> + import time <nl> + import os <nl> + from contextlib import contextmanager <nl> + <nl> + from helpers . cluster import ClickHouseCluster <nl> + from helpers . cluster import ClickHouseKiller <nl> + from helpers . network import PartitionManager <nl> + from helpers . network import PartitionManagerDisabler <nl> + <nl> + SCRIPT_DIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> + cluster = ClickHouseCluster ( __file__ , base_configs_dir = os . path . join ( SCRIPT_DIR , ' configs ' ) ) <nl> + <nl> + dictionary_node = cluster . add_instance ( ' dictionary_node ' , stay_alive = True ) <nl> + main_node = cluster . add_instance ( ' main_node ' , main_configs = [ ' configs / dictionaries / cache_ints_dictionary . xml ' ] ) <nl> + <nl> + @ pytest . fixture ( scope = " module " ) <nl> + def started_cluster ( ) : <nl> + try : <nl> + cluster . start ( ) <nl> + dictionary_node . query ( " create database if not exists test ; " ) <nl> + dictionary_node . query ( " drop table if exists test . ints ; " ) <nl> + dictionary_node . query ( " create table test . ints " <nl> + " ( key UInt64 , " <nl> + " i8 Int8 , i16 Int16 , i32 Int32 , i64 Int64 , " <nl> + " u8 UInt8 , u16 UInt16 , u32 UInt32 , u64 UInt64 ) " <nl> + " Engine = Memory ; " ) <nl> + dictionary_node . query ( " insert into test . ints values ( 7 , 7 , 7 , 7 , 7 , 7 , 7 , 7 , 7 ) ; " ) <nl> + dictionary_node . query ( " insert into test . ints values ( 5 , 5 , 5 , 5 , 5 , 5 , 5 , 5 , 5 ) ; " ) <nl> + <nl> + yield cluster <nl> + finally : <nl> + cluster . shutdown ( ) <nl> + <nl> + <nl> + # @ pytest . mark . skip ( reason = " debugging " ) <nl> + def test_simple_dict_get ( started_cluster ) : <nl> + assert None ! = dictionary_node . get_process_pid ( " clickhouse " ) , " ClickHouse must be alive " <nl> + <nl> + def test_helper ( ) : <nl> + assert ' 7 ' = = main_node . query ( " select dictGet ( ' anime_dict ' , ' i8 ' , toUInt64 ( 7 ) ) ; " ) . rstrip ( ) , " Wrong answer . " <nl> + assert ' 7 ' = = main_node . query ( " select dictGet ( ' anime_dict ' , ' i16 ' , toUInt64 ( 7 ) ) ; " ) . rstrip ( ) , " Wrong answer . " <nl> + assert ' 7 ' = = main_node . query ( " select dictGet ( ' anime_dict ' , ' i32 ' , toUInt64 ( 7 ) ) ; " ) . rstrip ( ) , " Wrong answer . " <nl> + assert ' 7 ' = = main_node . query ( " select dictGet ( ' anime_dict ' , ' i64 ' , toUInt64 ( 7 ) ) ; " ) . rstrip ( ) , " Wrong answer . " <nl> + assert ' 7 ' = = main_node . query ( " select dictGet ( ' anime_dict ' , ' u8 ' , toUInt64 ( 7 ) ) ; " ) . rstrip ( ) , " Wrong answer . " <nl> + assert ' 7 ' = = main_node . query ( " select dictGet ( ' anime_dict ' , ' u16 ' , toUInt64 ( 7 ) ) ; " ) . rstrip ( ) , " Wrong answer . " <nl> + assert ' 7 ' = = main_node . query ( " select dictGet ( ' anime_dict ' , ' u32 ' , toUInt64 ( 7 ) ) ; " ) . rstrip ( ) , " Wrong answer . " <nl> + assert ' 7 ' = = main_node . query ( " select dictGet ( ' anime_dict ' , ' u64 ' , toUInt64 ( 7 ) ) ; " ) . rstrip ( ) , " Wrong answer . " <nl> + <nl> + test_helper ( ) <nl> + <nl> + with PartitionManager ( ) as pm , ClickHouseKiller ( dictionary_node ) : <nl> + assert None = = dictionary_node . get_process_pid ( " clickhouse " ) <nl> + <nl> + # Remove connection between main_node and dictionary for sure <nl> + pm . heal_all ( ) <nl> + pm . partition_instances ( main_node , dictionary_node ) <nl> + <nl> + # Dictionary max lifetime is 2 seconds . <nl> + time . 
sleep ( 3 ) <nl> + <nl> + test_helper ( ) <nl> new file mode 100644 <nl> index 00000000000 . . 3fce7b7398d <nl> mmm / dev / null <nl> ppp b / dbms / tests / integration / test_dictionary_allow_read_expired_keys / test_dict_get_or_default . py <nl> <nl> + from __future__ import print_function <nl> + import pytest <nl> + import time <nl> + import os <nl> + from contextlib import contextmanager <nl> + <nl> + from helpers . cluster import ClickHouseCluster <nl> + from helpers . cluster import ClickHouseKiller <nl> + from helpers . network import PartitionManager <nl> + <nl> + SCRIPT_DIR = os . path . dirname ( os . path . realpath ( __file__ ) ) <nl> + cluster = ClickHouseCluster ( __file__ , base_configs_dir = os . path . join ( SCRIPT_DIR , ' configs ' ) ) <nl> + <nl> + dictionary_node = cluster . add_instance ( ' dictionary_node ' , stay_alive = True ) <nl> + main_node = cluster . add_instance ( ' main_node ' , main_configs = [ ' configs / dictionaries / cache_ints_dictionary . xml ' ] ) <nl> + <nl> + @ pytest . fixture ( scope = " module " ) <nl> + def started_cluster ( ) : <nl> + try : <nl> + cluster . start ( ) <nl> + dictionary_node . query ( " create database if not exists test ; " ) <nl> + dictionary_node . query ( " drop table if exists test . ints ; " ) <nl> + dictionary_node . query ( " create table test . ints " <nl> + " ( key UInt64 , " <nl> + " i8 Int8 , i16 Int16 , i32 Int32 , i64 Int64 , " <nl> + " u8 UInt8 , u16 UInt16 , u32 UInt32 , u64 UInt64 ) " <nl> + " Engine = Memory ; " ) <nl> + dictionary_node . query ( " insert into test . ints values ( 7 , 7 , 7 , 7 , 7 , 7 , 7 , 7 , 7 ) ; " ) <nl> + dictionary_node . query ( " insert into test . ints values ( 5 , 5 , 5 , 5 , 5 , 5 , 5 , 5 , 5 ) ; " ) <nl> + <nl> + yield cluster <nl> + finally : <nl> + cluster . shutdown ( ) <nl> + <nl> + # @ pytest . mark . skip ( reason = " debugging " ) <nl> + def test_simple_dict_get_or_default ( started_cluster ) : <nl> + assert None ! = dictionary_node . get_process_pid ( " clickhouse " ) , " ClickHouse must be alive " <nl> + <nl> + def test_helper ( ) : <nl> + assert ' 5 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' i8 ' , toUInt64 ( 5 ) , toInt8 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 5 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' i16 ' , toUInt64 ( 5 ) , toInt16 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 5 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' i32 ' , toUInt64 ( 5 ) , toInt32 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 5 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' i64 ' , toUInt64 ( 5 ) , toInt64 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 5 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' u8 ' , toUInt64 ( 5 ) , toUInt8 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 5 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' u16 ' , toUInt64 ( 5 ) , toUInt16 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 5 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' u32 ' , toUInt64 ( 5 ) , toUInt32 ( 42 ) ) ; " ) . rstrip ( ) <nl> + assert ' 5 ' = = main_node . query ( " select dictGetOrDefault ( ' anime_dict ' , ' u64 ' , toUInt64 ( 5 ) , toUInt64 ( 42 ) ) ; " ) . rstrip ( ) <nl> + <nl> + test_helper ( ) <nl> + <nl> + with PartitionManager ( ) as pm , ClickHouseKiller ( dictionary_node ) : <nl> + assert None = = dictionary_node . 
get_process_pid ( " clickhouse " ) <nl> + <nl> + # Remove connection between main_node and dictionary for sure <nl> + pm . partition_instances ( main_node , dictionary_node ) <nl> + <nl> + # Dictionary max lifetime is 2 seconds . <nl> + time . sleep ( 3 ) <nl> + <nl> + test_helper ( ) <nl> new file mode 100644 <nl> index 00000000000 . . d86bac9de59 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01054_cache_dictionary_bunch_update . reference <nl> @ @ - 0 , 0 + 1 @ @ <nl> + OK <nl> new file mode 100755 <nl> index 00000000000 . . 5ae6b9b4947 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01054_cache_dictionary_bunch_update . sh <nl> <nl> + # ! / usr / bin / env bash <nl> + <nl> + CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> + . $ CURDIR / . . / shell_config . sh <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query = " create database if not exists test_01054 ; " <nl> + $ CLICKHOUSE_CLIENT - - query = " drop table if exists test_01054 . ints ; " <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query = " create table test_01054 . ints <nl> + ( key UInt64 , i8 Int8 , i16 Int16 , i32 Int32 , i64 Int64 , u8 UInt8 , u16 UInt16 , u32 UInt32 , u64 UInt64 ) <nl> + Engine = Memory ; " <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query = " insert into test_01054 . ints values ( 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ) ; " <nl> + $ CLICKHOUSE_CLIENT - - query = " insert into test_01054 . ints values ( 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 ) ; " <nl> + $ CLICKHOUSE_CLIENT - - query = " insert into test_01054 . ints values ( 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 ) ; " <nl> + <nl> + function thread1 ( ) <nl> + { <nl> + for attempt_thread1 in { 1 . . 100 } <nl> + do <nl> + RAND_NUMBER_THREAD1 = $ ( $ CLICKHOUSE_CLIENT - - query = " SELECT rand ( ) % 100 ; " ) <nl> + $ CLICKHOUSE_CLIENT - - query = " select dictGet ( ' one_cell_cache_ints ' , ' i8 ' , toUInt64 ( $ RAND_NUMBER_THREAD1 ) ) ; " <nl> + done <nl> + } <nl> + <nl> + <nl> + function thread2 ( ) <nl> + { <nl> + for attempt_thread2 in { 1 . . 100 } <nl> + do <nl> + RAND_NUMBER_THREAD2 = $ ( $ CLICKHOUSE_CLIENT - - query = " SELECT rand ( ) % 100 ; " ) <nl> + $ CLICKHOUSE_CLIENT - - query = " select dictGet ( ' one_cell_cache_ints ' , ' i8 ' , toUInt64 ( $ RAND_NUMBER_THREAD2 ) ) ; " <nl> + done <nl> + } <nl> + <nl> + <nl> + function thread3 ( ) <nl> + { <nl> + for attempt_thread3 in { 1 . . 100 } <nl> + do <nl> + RAND_NUMBER_THREAD3 = $ ( $ CLICKHOUSE_CLIENT - - query = " SELECT rand ( ) % 100 ; " ) <nl> + $ CLICKHOUSE_CLIENT - - query = " select dictGet ( ' one_cell_cache_ints ' , ' i8 ' , toUInt64 ( $ RAND_NUMBER_THREAD3 ) ) ; " <nl> + done <nl> + } <nl> + <nl> + <nl> + function thread4 ( ) <nl> + { <nl> + for attempt_thread4 in { 1 . . 
100 } <nl> + do <nl> + RAND_NUMBER_THREAD4 = $ ( $ CLICKHOUSE_CLIENT - - query = " SELECT rand ( ) % 100 ; " ) <nl> + $ CLICKHOUSE_CLIENT - - query = " select dictGet ( ' one_cell_cache_ints ' , ' i8 ' , toUInt64 ( $ RAND_NUMBER_THREAD4 ) ) ; " <nl> + done <nl> + } <nl> + <nl> + <nl> + export - f thread1 ; <nl> + export - f thread2 ; <nl> + export - f thread3 ; <nl> + export - f thread4 ; <nl> + <nl> + TIMEOUT = 10 <nl> + <nl> + # shellcheck disable = SC2188 <nl> + timeout $ TIMEOUT bash - c thread1 > / dev / null 2 > & 1 & <nl> + timeout $ TIMEOUT bash - c thread2 > / dev / null 2 > & 1 & <nl> + timeout $ TIMEOUT bash - c thread3 > / dev / null 2 > & 1 & <nl> + timeout $ TIMEOUT bash - c thread4 > / dev / null 2 > & 1 & <nl> + <nl> + wait <nl> + <nl> + echo OK <nl> + <nl> + $ CLICKHOUSE_CLIENT - - query " DROP TABLE if exists test_01054 . ints " <nl> new file mode 100644 <nl> index 00000000000 . . 6b5418a7a60 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01054_cache_dictionary_overflow_cell . reference <nl> <nl> + 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 <nl> + [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 , 20 ] <nl> new file mode 100755 <nl> index 00000000000 . . b040a0e7a50 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 01054_cache_dictionary_overflow_cell . sql <nl> <nl> + <nl> + create database if not exists test_01054_overflow ; <nl> + drop table if exists test_01054_overflow . ints ; <nl> + <nl> + create table test_01054_overflow . ints ( key UInt64 , i8 Int8 , i16 Int16 , i32 Int32 , i64 Int64 , u8 UInt8 , u16 UInt16 , u32 UInt32 , u64 UInt64 ) Engine = Memory ; <nl> + <nl> + insert into test_01054_overflow . ints values ( 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ) ; <nl> + insert into test_01054_overflow . ints values ( 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 , 2 ) ; <nl> + insert into test_01054_overflow . ints values ( 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 , 3 ) ; <nl> + insert into test_01054_overflow . ints values ( 4 , 4 , 4 , 4 , 4 , 4 , 4 , 4 , 4 ) ; <nl> + insert into test_01054_overflow . ints values ( 5 , 5 , 5 , 5 , 5 , 5 , 5 , 5 , 5 ) ; <nl> + insert into test_01054_overflow . ints values ( 6 , 6 , 6 , 6 , 6 , 6 , 6 , 6 , 6 ) ; <nl> + insert into test_01054_overflow . ints values ( 7 , 7 , 7 , 7 , 7 , 7 , 7 , 7 , 7 ) ; <nl> + insert into test_01054_overflow . ints values ( 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 , 8 ) ; <nl> + insert into test_01054_overflow . ints values ( 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 , 9 ) ; <nl> + insert into test_01054_overflow . ints values ( 10 , 10 , 10 , 10 , 10 , 10 , 10 , 10 , 10 ) ; <nl> + insert into test_01054_overflow . ints values ( 11 , 11 , 11 , 11 , 11 , 11 , 11 , 11 , 11 ) ; <nl> + insert into test_01054_overflow . ints values ( 12 , 12 , 12 , 12 , 12 , 12 , 12 , 12 , 12 ) ; <nl> + insert into test_01054_overflow . ints values ( 13 , 13 , 13 , 13 , 13 , 13 , 13 , 13 , 13 ) ; <nl> + insert into test_01054_overflow . ints values ( 14 , 14 , 14 , 14 , 14 , 14 , 14 , 14 , 14 ) ; <nl> + insert into test_01054_overflow . ints values ( 15 , 15 , 15 , 15 , 15 , 15 , 15 , 15 , 15 ) ; <nl> + insert into test_01054_overflow . ints values ( 16 , 16 , 16 , 16 , 16 , 16 , 16 , 16 , 16 ) ; <nl> + insert into test_01054_overflow . ints values ( 17 , 17 , 17 , 17 , 17 , 17 , 17 , 17 , 17 ) ; <nl> + insert into test_01054_overflow . ints values ( 18 , 18 , 18 , 18 , 18 , 18 , 18 , 18 , 18 ) ; <nl> + insert into test_01054_overflow . 
ints values ( 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 , 19 ) ; <nl> + insert into test_01054_overflow . ints values ( 20 , 20 , 20 , 20 , 20 , 20 , 20 , 20 , 20 ) ; <nl> + <nl> + select <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 1 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 2 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 3 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 4 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 5 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 6 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 7 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 8 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 9 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 10 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 11 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 12 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 13 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 14 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 15 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 16 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 17 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 18 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 19 ) ) , <nl> + dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( 20 ) ) ; <nl> + <nl> + SELECT arrayMap ( x - > dictGet ( ' one_cell_cache_ints_overflow ' , ' i8 ' , toUInt64 ( x ) ) , array ) <nl> + FROM <nl> + ( <nl> + SELECT [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 , 20 ] AS array <nl> + ) ; <nl> + <nl> + DROP TABLE if exists test_01054 . ints ; <nl>
Merge pull request from nikitamikhaylov/cache-dictionary
ClickHouse/ClickHouse
dad4f03a8f64e89a9793e7a94c82b379a3ea4353
2020-02-05T15:36:38Z
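The shell test in this record hammers a single-cell cache dictionary from four concurrent clients for ten seconds and only checks that nothing crashes. As a rough C++ sketch of the access pattern being stressed (a hypothetical one-cell cache guarded by a mutex, not ClickHouse's CacheDictionary), note how every lookup contends for the same slot:

```cpp
#include <cstdint>
#include <iostream>
#include <mutex>
#include <optional>
#include <random>
#include <thread>
#include <utility>
#include <vector>

// Hypothetical one-cell cache: every lookup contends for the same slot,
// so concurrent readers keep evicting each other's entry.
struct OneCellCache {
    std::mutex mu;
    std::optional<std::pair<std::uint64_t, std::int8_t>> cell;  // (key, value)

    std::int8_t get(std::uint64_t key) {
        std::lock_guard<std::mutex> lock(mu);
        if (cell && cell->first == key)
            return cell->second;                         // cache hit
        auto value = static_cast<std::int8_t>(key);      // "load from the source table"
        cell = std::make_pair(key, value);               // evicts whatever was there
        return value;
    }
};

int main() {
    OneCellCache cache;
    std::vector<std::thread> threads;
    for (int t = 0; t < 4; ++t)                          // mirrors thread1..thread4
        threads.emplace_back([&cache, t] {
            std::mt19937 rng(t);
            for (int i = 0; i < 100; ++i)                // mirrors the {1..100} loops
                cache.get(rng() % 100);                  // like dictGet(..., rand() % 100)
        });
    for (auto & th : threads)
        th.join();
    std::cout << "OK\n";                                 // the test's reference output
}
```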
mmm a / Unity / AirLibWrapper / AirsimWrapper / Source / WorldSimApi . cpp <nl> ppp b / Unity / AirLibWrapper / AirsimWrapper / Source / WorldSimApi . cpp <nl> void WorldSimApi : : printLogMessage ( const std : : string & message , const std : : string & <nl> PrintLogMessage ( message . c_str ( ) , message_param . c_str ( ) , vehicle_name_ . c_str ( ) , severity ) ; <nl> } <nl> <nl> + std : : vector < std : : string > WorldSimApi : : listSceneObjects ( const std : : string & name_regex ) const <nl> + { <nl> + std : : vector < std : : string > result ; <nl> + throw std : : invalid_argument ( common_utils : : Utils : : stringf ( <nl> + " simListSceneObject is not supported on unity " ) . c_str ( ) ) ; <nl> + return result ; <nl> + } <nl> + <nl> WorldSimApi : : Pose WorldSimApi : : getObjectPose ( const std : : string & object_name ) const <nl> { <nl> AirSimUnity : : AirSimPose airSimPose = GetPose ( object_name . c_str ( ) ) ; <nl> mmm a / Unity / AirLibWrapper / AirsimWrapper / Source / WorldSimApi . h <nl> ppp b / Unity / AirLibWrapper / AirsimWrapper / Source / WorldSimApi . h <nl> class WorldSimApi : public msr : : airlib : : WorldSimApiBase <nl> virtual int getSegmentationObjectID ( const std : : string & mesh_name ) const override ; <nl> virtual void printLogMessage ( const std : : string & message , <nl> const std : : string & message_param = " " , unsigned char severity = 0 ) override ; <nl> + <nl> + virtual std : : vector < std : : string > listSceneObjects ( const std : : string & name_regex ) const override ; <nl> virtual Pose getObjectPose ( const std : : string & object_name ) const override ; <nl> virtual bool setObjectPose ( const std : : string & object_name , const Pose & pose , bool teleport ) override ; <nl> <nl>
Merge pull request from msb336/PR/1925-unity-compatibility
microsoft/AirSim
b0e145a20c68abc556ba3f034674961a75ef9986
2019-05-16T18:02:11Z
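The Unity wrapper satisfies the new `listSceneObjects` override by throwing rather than returning data. A minimal self-contained sketch of that "declared but unsupported" pattern follows; the interface here is a made-up stand-in, not the actual AirSim API:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical base interface with a feature not every backend implements.
struct SimApi {
    virtual ~SimApi() = default;
    virtual std::vector<std::string> listSceneObjects(const std::string & name_regex) const = 0;
};

// A backend that must provide the override to remain instantiable,
// but signals at runtime that the feature is unavailable.
struct UnityLikeSimApi : SimApi {
    std::vector<std::string> listSceneObjects(const std::string &) const override {
        throw std::invalid_argument("simListSceneObjects is not supported on this backend");
    }
};

int main() {
    UnityLikeSimApi api;
    try {
        api.listSceneObjects(".*");
    } catch (const std::invalid_argument & e) {
        std::cout << e.what() << '\n';
    }
}
```

In the actual patch the stub also declares a `result` vector and returns it after the throw; that return is unreachable and exists only to satisfy the signature.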
mmm a / emscripten - version . txt <nl> ppp b / emscripten - version . txt <nl> <nl> - 1 . 21 . 1 <nl> + 1 . 21 . 2 <nl> <nl>
1.21.2
emscripten-core/emscripten
b3e6e8077a4abd70f6acba98127e5788842a6341
2014-07-06T02:19:27Z
mmm a / dbms / src / Functions / FunctionsJSON . cpp <nl> ppp b / dbms / src / Functions / FunctionsJSON . cpp <nl> class JSONExtractImpl <nl> type <nl> } ; <nl> <nl> - if ( which . isNativeUInt ( ) ) <nl> - return std : : make_shared < DataTypeNullable > ( <nl> - std : : make_shared < DataTypeUInt64 > ( ) <nl> - ) ; <nl> - <nl> - if ( which . isNativeInt ( ) ) <nl> - return std : : make_shared < DataTypeNullable > ( <nl> - std : : make_shared < DataTypeInt64 > ( ) <nl> - ) ; <nl> - <nl> - if ( which . isFloat ( ) ) <nl> - return std : : make_shared < DataTypeNullable > ( <nl> - std : : make_shared < DataTypeFloat64 > ( ) <nl> - ) ; <nl> - <nl> if ( <nl> - which . isEnum ( ) <nl> + which . isNativeUInt ( ) <nl> + | | which . isNativeInt ( ) <nl> + | | which . isFloat ( ) <nl> + | | which . isEnum ( ) <nl> | | which . isDateOrDateTime ( ) <nl> | | which . isStringOrFixedString ( ) <nl> | | which . isInterval ( ) <nl> mmm a / dbms / src / Functions / FunctionsJSON . h <nl> ppp b / dbms / src / Functions / FunctionsJSON . h <nl> <nl> # include < Columns / ColumnConst . h > <nl> # include < Columns / ColumnsNumber . h > <nl> # include < Common / typeid_cast . h > <nl> + # include < DataTypes / DataTypeFactory . h > <nl> # include < DataTypes / DataTypesNumber . h > <nl> # include < Functions / FunctionHelpers . h > <nl> # include < Functions / IFunction . h > <nl> class FunctionJSONBase : public IFunction <nl> return true ; <nl> } <nl> <nl> - DataTypePtr getReturnTypeImpl ( const DataTypes & arguments ) const override <nl> + ColumnNumbers getArgumentsThatAreAlwaysConstant ( ) const override <nl> + { <nl> + if constexpr ( ExtraArg ) <nl> + return { 1 } ; <nl> + else <nl> + return { } ; <nl> + } <nl> + <nl> + DataTypePtr getReturnTypeImpl ( <nl> + const ColumnsWithTypeAndName & arguments <nl> + ) const override <nl> { <nl> if constexpr ( ExtraArg ) <nl> { <nl> class FunctionJSONBase : public IFunction <nl> ErrorCodes : : NUMBER_OF_ARGUMENTS_DOESNT_MATCH <nl> } ; <nl> <nl> - virtual_type = arguments [ 1 ] ; <nl> + auto col_type_const { <nl> + static_cast < const ColumnConst * > ( arguments [ 1 ] . column . get ( ) ) <nl> + } ; <nl> + <nl> + virtual_type = DataTypeFactory : : instance ( ) . get ( <nl> + col_type_const - > getValue < String > ( ) <nl> + ) ; <nl> } <nl> else <nl> { <nl> class FunctionJSONBase : public IFunction <nl> } ; <nl> } <nl> <nl> - if ( ! isString ( arguments [ 0 ] ) ) <nl> + if ( ! isString ( arguments [ 0 ] . type ) ) <nl> throw Exception { <nl> - " Illegal type " + arguments [ 0 ] - > getName ( ) <nl> + " Illegal type " + arguments [ 0 ] . type - > getName ( ) <nl> + " of argument of function " + getName ( ) , <nl> ErrorCodes : : ILLEGAL_TYPE_OF_ARGUMENT <nl> } ; <nl> class FunctionJSONBase : public IFunction <nl> <nl> for ( const auto i : ext : : range ( 1 + ExtraArg , arguments . size ( ) ) ) <nl> { <nl> - if ( isString ( arguments [ i ] ) ) <nl> + if ( isString ( arguments [ i ] . type ) ) <nl> actions . push_back ( Action : : key ) ; <nl> - else if ( isInteger ( arguments [ i ] ) ) <nl> + else if ( isInteger ( arguments [ i ] . type ) ) <nl> actions . push_back ( Action : : index ) ; <nl> else <nl> throw Exception { <nl> - " Illegal type " + arguments [ i ] - > getName ( ) <nl> + " Illegal type " + arguments [ i ] . type - > getName ( ) <nl> + " of argument of function " + getName ( ) , <nl> ErrorCodes : : ILLEGAL_TYPE_OF_ARGUMENT <nl> } ; <nl>
Update API
ClickHouse/ClickHouse
e1a236f55c63cd05e9f65d48be224a6f213eb699
2019-05-08T13:20:25Z
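The API change lets `getReturnTypeImpl` see argument columns, so the function can read its second, always-constant argument as a type name and resolve it through `DataTypeFactory`. Below is a minimal standalone sketch of that plan-time pattern with a hand-rolled factory in place of the ClickHouse one; all names are illustrative, not the real interfaces:

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

// Stand-in for DataTypePtr / IDataType.
struct DataType { std::string name; };
using DataTypePtr = std::shared_ptr<DataType>;

// Hypothetical factory keyed by type name, like DataTypeFactory::instance().get(...).
DataTypePtr typeFromName(const std::string & name) {
    static const std::map<std::string, std::function<DataTypePtr()>> factory = {
        {"Int64",   [] { return std::make_shared<DataType>(DataType{"Int64"}); }},
        {"Float64", [] { return std::make_shared<DataType>(DataType{"Float64"}); }},
        {"String",  [] { return std::make_shared<DataType>(DataType{"String"}); }},
    };
    auto it = factory.find(name);
    if (it == factory.end())
        throw std::invalid_argument("unknown type name: " + name);
    return it->second();
}

// The return type is decided before execution, from an argument that the
// planner guarantees to be constant (cf. getArgumentsThatAreAlwaysConstant).
DataTypePtr getReturnType(const std::string & constant_type_argument) {
    return typeFromName(constant_type_argument);
}

int main() {
    std::cout << getReturnType("Float64")->name << '\n';  // prints Float64
}
```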
mmm a / src / conversions . cc <nl> ppp b / src / conversions . cc <nl> static double InternalStringToInt ( UnicodeCache * unicode_cache , <nl> if ( * current = = ' + ' ) { <nl> / / Ignore leading sign ; skip following spaces . <nl> + + current ; <nl> - if ( ! AdvanceToNonspace ( unicode_cache , & current , end ) ) { <nl> + if ( current = = end ) { <nl> return JUNK_STRING_VALUE ; <nl> } <nl> } else if ( * current = = ' - ' ) { <nl> + + current ; <nl> - if ( ! AdvanceToNonspace ( unicode_cache , & current , end ) ) { <nl> + if ( current = = end ) { <nl> return JUNK_STRING_VALUE ; <nl> } <nl> negative = true ; <nl> new file mode 100644 <nl> index 00000000000 . . 9a9a0b0d2a3 <nl> mmm / dev / null <nl> ppp b / test / mjsunit / regress / regress - 955 . js <nl> <nl> + / / Copyright 2011 the V8 project authors . All rights reserved . <nl> + / / Redistribution and use in source and binary forms , with or without <nl> + / / modification , are permitted provided that the following conditions are <nl> + / / met : <nl> + / / <nl> + / / * Redistributions of source code must retain the above copyright <nl> + / / notice , this list of conditions and the following disclaimer . <nl> + / / * Redistributions in binary form must reproduce the above <nl> + / / copyright notice , this list of conditions and the following <nl> + / / disclaimer in the documentation and / or other materials provided <nl> + / / with the distribution . <nl> + / / * Neither the name of Google Inc . nor the names of its <nl> + / / contributors may be used to endorse or promote products derived <nl> + / / from this software without specific prior written permission . <nl> + / / <nl> + / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + / / See : http : / / code . google . com / p / v8 / issues / detail ? id = 955 <nl> + <nl> + / / Correctly parse signed numbers . <nl> + assertEquals ( - 0 , parseInt ( " - 0 " ) ) ; <nl> + assertEquals ( 0 , parseInt ( " + 0 " ) ) ; <nl> + <nl> + / / Don ' t allow whitespace after signs in parseInt . <nl> + assertEquals ( NaN , parseInt ( " - 0 " ) ) ; <nl> + assertEquals ( NaN , parseInt ( " + 0 " ) ) ; <nl> + assertEquals ( NaN , parseInt ( " - \ t0 " ) ) ; <nl> + assertEquals ( NaN , parseInt ( " + \ t0 " ) ) ; <nl> + <nl> + / / Do allow whitespace at start . <nl> + assertEquals ( - 0 , parseInt ( " - 0 " ) ) ; <nl> + assertEquals ( 0 , parseInt ( " + 0 " ) ) ; <nl> + assertEquals ( - 0 , parseInt ( " \ t - 0 " ) ) ; <nl> + assertEquals ( 0 , parseInt ( " \ t + 0 " ) ) ; <nl>
Don't allow whitespace after sign characters in parseInt.
v8/v8
d1411602a7840ab87e16309193b062ca1dc0a988
2011-05-03T07:11:17Z
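Previously `AdvanceToNonspace` skipped whitespace after a leading '+' or '-', so strings like "- 0" parsed as numbers; the patch only checks for end-of-string at that point, and the digit scan that follows then rejects any whitespace after the sign, while whitespace before the sign stays legal. A simplified, self-contained C++ parseInt sketch of the resulting rule (not the V8 code):

```cpp
#include <cctype>
#include <iostream>
#include <optional>
#include <string>

// Simplified base-10 parseInt: whitespace may precede the sign,
// but nothing may come between the sign and the first digit.
std::optional<long> parseIntStrictSign(const std::string & s) {
    size_t i = 0;
    while (i < s.size() && std::isspace(static_cast<unsigned char>(s[i])))
        ++i;                                  // leading whitespace is fine
    bool negative = false;
    if (i < s.size() && (s[i] == '+' || s[i] == '-')) {
        negative = (s[i] == '-');
        ++i;
        if (i == s.size())
            return std::nullopt;              // bare sign: junk
    }
    if (i == s.size() || !std::isdigit(static_cast<unsigned char>(s[i])))
        return std::nullopt;                  // e.g. "- 0": whitespace after sign
    long value = 0;
    while (i < s.size() && std::isdigit(static_cast<unsigned char>(s[i])))
        value = value * 10 + (s[i++] - '0');
    return negative ? -value : value;
}

int main() {
    std::cout << parseIntStrictSign("\t-0").has_value() << '\n';   // 1: allowed
    std::cout << parseIntStrictSign("- 0").has_value() << '\n';    // 0: rejected
    std::cout << parseIntStrictSign("+ \t0").has_value() << '\n';  // 0: rejected
}
```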
mmm a / example / mnist / README . md <nl> ppp b / example / mnist / README . md <nl> <nl> + # Training Neural Networks on MNIST <nl> <nl> - Machine : Dual Xeon E5 - 2680 2 . 8GHz , Dual GTX 980 , CUDA 7 . 0 <nl> - <nl> - | | 2 x E5 - 2680 | 1 x GTX 980 | 2 x GTX 980 | <nl> - | mmm | mmm | mmm | mmm | <nl> - | ` mlp . py ` | 40K img / sec | 103K img / sec | 60K img / sec | <nl> - <nl> - Dual GPUs slow down the performance due to the tiny size of workload . <nl> - <nl> - sample output using single GTX 980 <nl> - <nl> - ` ` ` bash <nl> - ~ / mxnet / example / mnist $ python mlp . py <nl> - [ 20 : 52 : 47 ] src / io / iter_mnist . cc : 84 : MNISTIter : load 60000 images , shuffle = 1 , shape = ( 100 , 784 ) <nl> - [ 20 : 52 : 47 ] src / io / iter_mnist . cc : 84 : MNISTIter : load 10000 images , shuffle = 1 , shape = ( 100 , 784 ) <nl> - INFO : root : Start training with 1 devices <nl> - INFO : root : Iteration [ 0 ] Train - accuracy = 0 . 920833 <nl> - INFO : root : Iteration [ 0 ] Time cost = 0 . 656 <nl> - INFO : root : Iteration [ 0 ] Validation - accuracy = 0 . 961100 <nl> - INFO : root : Iteration [ 1 ] Train - accuracy = 0 . 965317 <nl> - INFO : root : Iteration [ 1 ] Time cost = 0 . 576 <nl> - INFO : root : Iteration [ 1 ] Validation - accuracy = 0 . 963000 <nl> - INFO : root : Iteration [ 2 ] Train - accuracy = 0 . 974817 <nl> - INFO : root : Iteration [ 2 ] Time cost = 0 . 567 <nl> - INFO : root : Iteration [ 2 ] Validation - accuracy = 0 . 965800 <nl> - INFO : root : Iteration [ 3 ] Train - accuracy = 0 . 978433 <nl> - INFO : root : Iteration [ 3 ] Time cost = 0 . 590 <nl> - INFO : root : Iteration [ 3 ] Validation - accuracy = 0 . 970900 <nl> - INFO : root : Iteration [ 4 ] Train - accuracy = 0 . 982583 <nl> - INFO : root : Iteration [ 4 ] Time cost = 0 . 593 <nl> - INFO : root : Iteration [ 4 ] Validation - accuracy = 0 . 973100 <nl> - INFO : root : Iteration [ 5 ] Train - accuracy = 0 . 982217 <nl> - INFO : root : Iteration [ 5 ] Time cost = 0 . 592 <nl> - INFO : root : Iteration [ 5 ] Validation - accuracy = 0 . 971300 <nl> - INFO : root : Iteration [ 6 ] Train - accuracy = 0 . 985817 <nl> - INFO : root : Iteration [ 6 ] Time cost = 0 . 555 <nl> - INFO : root : Iteration [ 6 ] Validation - accuracy = 0 . 969400 <nl> - INFO : root : Iteration [ 7 ] Train - accuracy = 0 . 987033 <nl> - INFO : root : Iteration [ 7 ] Time cost = 0 . 546 <nl> - INFO : root : Iteration [ 7 ] Validation - accuracy = 0 . 974800 <nl> - INFO : root : Iteration [ 8 ] Train - accuracy = 0 . 988333 <nl> - INFO : root : Iteration [ 8 ] Time cost = 0 . 535 <nl> - INFO : root : Iteration [ 8 ] Validation - accuracy = 0 . 975900 <nl> - INFO : root : Iteration [ 9 ] Train - accuracy = 0 . 987983 <nl> - INFO : root : Iteration [ 9 ] Time cost = 0 . 531 <nl> - INFO : root : Iteration [ 9 ] Validation - accuracy = 0 . 968900 <nl> - ` ` ` <nl> + The [ MNIST ] ( http : / / yann . lecun . com / exdb / mnist / ) database of handwritten digits <nl> + has a training set of 60 , 000 examples , and a test set of 10 , 000 examples . Each <nl> + example is a 28 × 28 gray image . They are provided by Yann LeCun , Corinna <nl> + Cortes , and Christopher J . C . Burges . <nl> + <nl> + <nl> + # # Neural Networks <nl> + <nl> + - [ mlp . py ] ( mlp . py ) : multilayer perceptron with 3 fully connected layers <nl> + - [ lenet . py ] ( lenet . py ) : LeNet with 2 convolution layers followed by 2 fully <nl> + connected layers <nl> + <nl> + # # Results <nl> + <nl> + <nl> + Using 100 minibatch size and 20 data passes ( not fine tuned . 
) <nl> + <nl> + Machine : Dual Xeon E5 - 2680 2 . 8GHz , Dual GTX 980 , Intel MKL , and CUDA 7 . 0 <nl> + <nl> + | | val accuracy | 2 x E5 - 2680 | 1 x GTX 980 | 2 x GTX 980 | <nl> + | mmm | mmm : | mmm : | mmm : | mmm : | <nl> + | ` mlp . py ` | 97 . 8 % | 40K img / sec | 103K img / sec | 60K img / sec | <nl> + | ` lenet . py ` | 99 % | 368 img / sec | 22 . 5K img / sec | 33K img / sec | <nl> new file mode 100644 <nl> index 00000000000 . . 38f71263ea3 <nl> mmm / dev / null <nl> ppp b / example / mnist / data . py <nl> <nl> + # pylint : skip - file <nl> + " " " data iterator for mnist " " " <nl> + import sys <nl> + sys . path . insert ( 0 , " . . / . . / python / " ) <nl> + sys . path . append ( " . . / . . / tests / python / common " ) <nl> + import get_data <nl> + import mxnet as mx <nl> + <nl> + def mnist_iterator ( batch_size , input_shape ) : <nl> + " " " return train and val iterators for mnist " " " <nl> + # download data <nl> + get_data . GetMNIST_ubyte ( ) <nl> + flat = False if len ( input_shape ) = = 3 else True <nl> + <nl> + train_dataiter = mx . io . MNISTIter ( <nl> + image = " data / train - images - idx3 - ubyte " , <nl> + label = " data / train - labels - idx1 - ubyte " , <nl> + input_shape = input_shape , <nl> + batch_size = batch_size , <nl> + shuffle = True , <nl> + flat = flat ) <nl> + <nl> + val_dataiter = mx . io . MNISTIter ( <nl> + image = " data / t10k - images - idx3 - ubyte " , <nl> + label = " data / t10k - labels - idx1 - ubyte " , <nl> + input_shape = input_shape , <nl> + batch_size = batch_size , <nl> + flat = flat ) <nl> + <nl> + return ( train_dataiter , val_dataiter ) <nl> new file mode 100644 <nl> index 00000000000 . . d9b30b31641 <nl> mmm / dev / null <nl> ppp b / example / mnist / lenet . py <nl> <nl> + # pylint : skip - file <nl> + from data import mnist_iterator <nl> + import mxnet as mx <nl> + import logging <nl> + <nl> + # # define lenet <nl> + <nl> + # input <nl> + data = mx . symbol . Variable ( ' data ' ) <nl> + # first conv <nl> + conv1 = mx . symbol . Convolution ( data = data , kernel = ( 5 , 5 ) , num_filter = 20 ) <nl> + relu1 = mx . symbol . Activation ( data = conv1 , act_type = " relu " ) <nl> + pool1 = mx . symbol . Pooling ( data = relu1 , pool_type = " max " , <nl> + kernel = ( 2 , 2 ) , stride = ( 2 , 2 ) ) <nl> + # second conv <nl> + conv2 = mx . symbol . Convolution ( data = pool1 , kernel = ( 5 , 5 ) , num_filter = 50 ) <nl> + relu2 = mx . symbol . Activation ( data = conv2 , act_type = " relu " ) <nl> + pool2 = mx . symbol . Pooling ( data = relu2 , pool_type = " max " , <nl> + kernel = ( 2 , 2 ) , stride = ( 2 , 2 ) ) <nl> + # first fullc <nl> + flatten = mx . symbol . Flatten ( data = pool2 ) <nl> + fc1 = mx . symbol . FullyConnected ( data = flatten , num_hidden = 500 ) <nl> + relu3 = mx . symbol . Activation ( data = fc1 , act_type = " relu " ) <nl> + # second fullc <nl> + fc2 = mx . symbol . FullyConnected ( data = relu3 , num_hidden = 10 ) <nl> + # loss <nl> + lenet = mx . symbol . Softmax ( data = fc2 ) <nl> + <nl> + # # data <nl> + <nl> + train , val = mnist_iterator ( batch_size = 100 , input_shape = ( 1 , 28 , 28 ) ) <nl> + <nl> + # # train <nl> + <nl> + logging . basicConfig ( level = logging . DEBUG ) <nl> + <nl> + # dev = [ mx . gpu ( i ) for i in range ( 2 ) ] <nl> + dev = mx . gpu ( ) <nl> + <nl> + model = mx . model . FeedForward ( <nl> + ctx = dev , symbol = lenet , num_round = 20 , <nl> + learning_rate = 0 . 01 , momentum = 0 . 9 , wd = 0 . 00001 ) <nl> + <nl> + model . 
fit ( X = train , eval_data = val ) <nl> mmm a / example / mnist / mlp . py <nl> ppp b / example / mnist / mlp . py <nl> <nl> # pylint : skip - file <nl> - import sys <nl> - sys . path . insert ( 0 , " . . / . . / python / " ) <nl> - sys . path . append ( " . . / . . / tests / python / common " ) <nl> + from data import mnist_iterator <nl> import mxnet as mx <nl> import logging <nl> - import numpy as np <nl> - import get_data <nl> <nl> # define mlp <nl> <nl> <nl> <nl> # data <nl> <nl> - batch_size = 100 <nl> - <nl> - get_data . GetMNIST_ubyte ( ) <nl> - train_dataiter = mx . io . MNISTIter ( <nl> - image = " data / train - images - idx3 - ubyte " , <nl> - label = " data / train - labels - idx1 - ubyte " , <nl> - input_shape = ( 784 , ) , <nl> - batch_size = batch_size , shuffle = True , flat = True , silent = False , seed = 10 ) <nl> - val_dataiter = mx . io . MNISTIter ( <nl> - image = " data / t10k - images - idx3 - ubyte " , <nl> - label = " data / t10k - labels - idx1 - ubyte " , <nl> - input_shape = ( 784 , ) , <nl> - batch_size = batch_size , shuffle = True , flat = True , silent = False ) <nl> - <nl> + train , val = mnist_iterator ( batch_size = 100 , input_shape = ( 784 , ) ) <nl> <nl> # train <nl> <nl> logging . basicConfig ( level = logging . DEBUG ) <nl> <nl> - model = mx . model . FeedForward ( ctx = mx . cpu ( ) , <nl> - symbol = mlp , <nl> - num_round = 10 , <nl> - learning_rate = 0 . 1 , <nl> - momentum = 0 . 9 , <nl> - wd = 0 . 00001 ) <nl> + model = mx . model . FeedForward ( <nl> + ctx = mx . cpu ( ) , symbol = mlp , num_round = 20 , <nl> + learning_rate = 0 . 1 , momentum = 0 . 9 , wd = 0 . 00001 ) <nl> <nl> - model . fit ( X = train_dataiter , eval_data = val_dataiter ) <nl> + model . fit ( X = train , eval_data = val ) <nl> mmm a / python / mxnet / visualization . py <nl> ppp b / python / mxnet / visualization . py <nl> def plot_network ( title , symbol , shape = None ) : <nl> node_attr = { " shape " : " box " , " fixedsize " : " true " , <nl> " width " : " 1 . 3 " , " height " : " 0 . 8034 " , " style " : " filled " } <nl> dot = Digraph ( name = title ) <nl> + # color map <nl> + cm = ( " # 8dd3c7 " , " # fb8072 " , " # ffffb3 " , " # bebada " , " # 80b1d3 " , <nl> + " # fdb462 " , " # b3de69 " , " # fccde5 " ) <nl> + <nl> # make nodes <nl> for i in range ( len ( nodes ) ) : <nl> node = nodes [ i ] <nl> op = node [ " op " ] <nl> name = " % s_ % d " % ( op , i ) <nl> # input data <nl> - if i in heads and op = = " null " : <nl> - label = node [ " name " ] <nl> - attr = copy . deepcopy ( node_attr ) <nl> - dot . node ( name = name , label = label , * * attr ) <nl> + attr = copy . deepcopy ( node_attr ) <nl> + label = op <nl> + <nl> if op = = " null " : <nl> - continue <nl> + if i in heads : <nl> + label = node [ " name " ] <nl> + attr [ " fillcolor " ] = cm [ 0 ] <nl> + else : <nl> + continue <nl> elif op = = " Convolution " : <nl> label = " Convolution \ n % sx % s / % s , % s " % ( _str2tuple ( node [ " param " ] [ " kernel " ] ) [ 0 ] , <nl> _str2tuple ( node [ " param " ] [ " kernel " ] ) [ 1 ] , <nl> _str2tuple ( node [ " param " ] [ " stride " ] ) [ 0 ] , <nl> node [ " param " ] [ " num_filter " ] ) <nl> - attr = copy . deepcopy ( node_attr ) <nl> - attr [ " color " ] = " royalblue1 " <nl> - dot . node ( name = name , label = label , * * attr ) <nl> + attr [ " fillcolor " ] = cm [ 1 ] <nl> elif op = = " FullyConnected " : <nl> label = " FullyConnected \ n % s " % node [ " param " ] [ " num_hidden " ] <nl> - attr = copy . 
deepcopy ( node_attr ) <nl> - attr [ " color " ] = " royalblue1 " <nl> - dot . node ( name = name , label = label , * * attr ) <nl> + attr [ " fillcolor " ] = cm [ 1 ] <nl> elif op = = " BatchNorm " : <nl> - label = " BatchNorm " <nl> - attr = copy . deepcopy ( node_attr ) <nl> - attr [ " color " ] = " orchid1 " <nl> - dot . node ( name = name , label = label , * * attr ) <nl> - elif op = = " Concat " : <nl> - label = " Concat " <nl> - attr = copy . deepcopy ( node_attr ) <nl> - attr [ " color " ] = " seagreen1 " <nl> - dot . node ( name = name , label = label , * * attr ) <nl> - elif op = = " Flatten " : <nl> - label = " Flatten " <nl> - attr = copy . deepcopy ( node_attr ) <nl> - attr [ " color " ] = " seagreen1 " <nl> - dot . node ( name = name , label = label , * * attr ) <nl> - elif op = = " Reshape " : <nl> - label = " Reshape " <nl> - attr = copy . deepcopy ( node_attr ) <nl> - attr [ " color " ] = " seagreen1 " <nl> - dot . node ( name = name , label = label , * * attr ) <nl> + attr [ " fillcolor " ] = cm [ 3 ] <nl> + elif op = = " Activation " or op = = " LeakyReLU " : <nl> + label = " % s \ n % s " % ( op , node [ " param " ] [ " act_type " ] ) <nl> + attr [ " fillcolor " ] = cm [ 2 ] <nl> elif op = = " Pooling " : <nl> label = " Pooling \ n % s , % sx % s / % s " % ( node [ " param " ] [ " pool_type " ] , <nl> _str2tuple ( node [ " param " ] [ " kernel " ] ) [ 0 ] , <nl> _str2tuple ( node [ " param " ] [ " kernel " ] ) [ 1 ] , <nl> _str2tuple ( node [ " param " ] [ " stride " ] ) [ 0 ] ) <nl> - attr = copy . deepcopy ( node_attr ) <nl> - attr [ " color " ] = " firebrick2 " <nl> - dot . node ( name = name , label = label , * * attr ) <nl> - elif op = = " Activation " or op = = " LeakyReLU " : <nl> - label = " % s \ n % s " % ( op , node [ " param " ] [ " act_type " ] ) <nl> - attr = copy . deepcopy ( node_attr ) <nl> - attr [ " color " ] = " salmon " <nl> - dot . node ( name = name , label = label , * * attr ) <nl> + attr [ " fillcolor " ] = cm [ 4 ] <nl> + elif op = = " Concat " or op = = " Flatten " or op = = " Reshape " : <nl> + attr [ " fillcolor " ] = cm [ 5 ] <nl> + elif op = = " Softmax " : <nl> + attr [ " fillcolor " ] = cm [ 6 ] <nl> else : <nl> - label = op <nl> - attr = copy . deepcopy ( node_attr ) <nl> - attr [ " color " ] = " olivedrab1 " <nl> - dot . node ( name = name , label = label , * * attr ) <nl> + attr [ " fillcolor " ] = cm [ 7 ] <nl> + <nl> + dot . node ( name = name , label = label , * * attr ) <nl> <nl> # add edges <nl> for i in range ( len ( nodes ) ) : <nl> def plot_network ( title , symbol , shape = None ) : <nl> input_name = " % s_ % d " % ( input_node [ " op " ] , item [ 0 ] ) <nl> if input_node [ " op " ] ! = " null " or item [ 0 ] in heads : <nl> # add shape into label <nl> - attr = { " dir " : " back " } <nl> + attr = { " dir " : " back " , ' arrowtail ' : ' open ' } <nl> dot . edge ( tail_name = name , head_name = input_name , * * attr ) <nl> <nl> return dot <nl>
Merge pull request from mli/master
apache/incubator-mxnet
924aa2e75d75fb61cfcabcc3779255a615531cbb
2015-09-23T03:43:19Z
mmm a / src / compiler / graph - assembler . cc <nl> ppp b / src / compiler / graph - assembler . cc <nl> Node * GraphAssembler : : DeoptimizeUnless ( DeoptimizeReason reason , Node * condition , <nl> frame_state ) ; <nl> } <nl> <nl> - void GraphAssembler : : Branch ( Node * condition , <nl> - GraphAssemblerStaticLabel < 1 > * if_true , <nl> - GraphAssemblerStaticLabel < 1 > * if_false ) { <nl> + void GraphAssembler : : Branch ( Node * condition , GraphAssemblerLabel < 0u > * if_true , <nl> + GraphAssemblerLabel < 0u > * if_false ) { <nl> DCHECK_NOT_NULL ( current_control_ ) ; <nl> <nl> BranchHint hint = BranchHint : : kNone ; <nl> Operator const * GraphAssembler : : ToNumberOperator ( ) { <nl> return to_number_operator_ . get ( ) ; <nl> } <nl> <nl> - Node * GraphAssemblerLabel : : PhiAt ( size_t index ) { <nl> - DCHECK ( IsBound ( ) ) ; <nl> - return GetBindingsPtrFor ( index ) [ 0 ] ; <nl> - } <nl> - <nl> - GraphAssemblerLabel : : GraphAssemblerLabel ( GraphAssemblerLabelType is_deferred , <nl> - size_t merge_count , size_t var_count , <nl> - MachineRepresentation * representations , <nl> - Zone * zone ) <nl> - : is_deferred_ ( is_deferred = = GraphAssemblerLabelType : : kDeferred ) , <nl> - max_merge_count_ ( merge_count ) , <nl> - var_count_ ( var_count ) { <nl> - effects_ = zone - > NewArray < Node * > ( MaxMergeCount ( ) + 1 ) ; <nl> - for ( size_t i = 0 ; i < MaxMergeCount ( ) + 1 ; i + + ) { <nl> - effects_ [ i ] = nullptr ; <nl> - } <nl> - <nl> - controls_ = zone - > NewArray < Node * > ( MaxMergeCount ( ) ) ; <nl> - for ( size_t i = 0 ; i < MaxMergeCount ( ) ; i + + ) { <nl> - controls_ [ i ] = nullptr ; <nl> - } <nl> - <nl> - size_t num_bindings = ( MaxMergeCount ( ) + 1 ) * PhiCount ( ) + 1 ; <nl> - bindings_ = zone - > NewArray < Node * > ( num_bindings ) ; <nl> - for ( size_t i = 0 ; i < num_bindings ; i + + ) { <nl> - bindings_ [ i ] = nullptr ; <nl> - } <nl> - <nl> - representations_ = zone - > NewArray < MachineRepresentation > ( PhiCount ( ) + 1 ) ; <nl> - for ( size_t i = 0 ; i < PhiCount ( ) ; i + + ) { <nl> - representations_ [ i ] = representations [ i ] ; <nl> - } <nl> - } <nl> - <nl> - GraphAssemblerLabel : : ~ GraphAssemblerLabel ( ) { <nl> - DCHECK ( IsBound ( ) | | MergedCount ( ) = = 0 ) ; <nl> - } <nl> - <nl> - Node * * GraphAssemblerLabel : : GetBindingsPtrFor ( size_t phi_index ) { <nl> - DCHECK_LT ( phi_index , PhiCount ( ) ) ; <nl> - return & bindings_ [ phi_index * ( MaxMergeCount ( ) + 1 ) ] ; <nl> - } <nl> - <nl> - void GraphAssemblerLabel : : SetBinding ( size_t phi_index , size_t merge_index , <nl> - Node * binding ) { <nl> - DCHECK_LT ( phi_index , PhiCount ( ) ) ; <nl> - DCHECK_LT ( merge_index , MaxMergeCount ( ) ) ; <nl> - bindings_ [ phi_index * ( MaxMergeCount ( ) + 1 ) + merge_index ] = binding ; <nl> - } <nl> - <nl> - MachineRepresentation GraphAssemblerLabel : : GetRepresentationFor ( <nl> - size_t phi_index ) { <nl> - DCHECK_LT ( phi_index , PhiCount ( ) ) ; <nl> - return representations_ [ phi_index ] ; <nl> - } <nl> - <nl> - Node * * GraphAssemblerLabel : : GetControlsPtr ( ) { return controls_ ; } <nl> - <nl> - Node * * GraphAssemblerLabel : : GetEffectsPtr ( ) { return effects_ ; } <nl> - <nl> } / / namespace compiler <nl> } / / namespace internal <nl> } / / namespace v8 <nl> mmm a / src / compiler / graph - assembler . h <nl> ppp b / src / compiler / graph - assembler . 
h <nl> class GraphAssembler ; <nl> enum class GraphAssemblerLabelType { kDeferred , kNonDeferred } ; <nl> <nl> / / Label with statically known count of incoming branches and phis . <nl> - template < size_t MergeCount , size_t VarCount = 0u > <nl> - class GraphAssemblerStaticLabel { <nl> + template < size_t VarCount > <nl> + class GraphAssemblerLabel { <nl> public : <nl> Node * PhiAt ( size_t index ) ; <nl> <nl> template < typename . . . Reps > <nl> - explicit GraphAssemblerStaticLabel ( GraphAssemblerLabelType is_deferred , <nl> - Reps . . . reps ) <nl> + explicit GraphAssemblerLabel ( GraphAssemblerLabelType is_deferred , <nl> + size_t merge_count , Reps . . . reps ) <nl> : is_deferred_ ( is_deferred = = GraphAssemblerLabelType : : kDeferred ) { <nl> STATIC_ASSERT ( VarCount = = sizeof . . . ( reps ) ) ; <nl> MachineRepresentation reps_array [ ] = { MachineRepresentation : : kNone , <nl> class GraphAssemblerStaticLabel { <nl> } <nl> } <nl> <nl> - ~ GraphAssemblerStaticLabel ( ) { DCHECK ( IsBound ( ) | | MergedCount ( ) = = 0 ) ; } <nl> + ~ GraphAssemblerLabel ( ) { DCHECK ( IsBound ( ) | | merged_count_ = = 0 ) ; } <nl> <nl> private : <nl> friend class GraphAssembler ; <nl> <nl> void SetBound ( ) { <nl> DCHECK ( ! IsBound ( ) ) ; <nl> - DCHECK_EQ ( merged_count_ , MergeCount ) ; <nl> is_bound_ = true ; <nl> } <nl> bool IsBound ( ) const { return is_bound_ ; } <nl> - <nl> - size_t PhiCount ( ) const { return VarCount ; } <nl> - size_t MaxMergeCount ( ) const { return MergeCount ; } <nl> - size_t MergedCount ( ) const { return merged_count_ ; } <nl> bool IsDeferred ( ) const { return is_deferred_ ; } <nl> <nl> - / / For each phi , the buffer must have at least MaxMergeCount ( ) + 1 <nl> - / / node entries . <nl> - Node * * GetBindingsPtrFor ( size_t phi_index ) { <nl> - DCHECK_LT ( phi_index , PhiCount ( ) ) ; <nl> - return & bindings_ [ phi_index * ( MergeCount + 1 ) ] ; <nl> - } <nl> - void SetBinding ( size_t phi_index , size_t merge_index , Node * binding ) { <nl> - DCHECK_LT ( phi_index , PhiCount ( ) ) ; <nl> - DCHECK_LT ( merge_index , MergeCount ) ; <nl> - bindings_ [ phi_index * ( MergeCount + 1 ) + merge_index ] = binding ; <nl> - } <nl> - MachineRepresentation GetRepresentationFor ( size_t phi_index ) { <nl> - DCHECK_LT ( phi_index , PhiCount ( ) ) ; <nl> - return representations_ [ phi_index ] ; <nl> - } <nl> - / / The controls buffer must have at least MaxMergeCount ( ) entries . <nl> - Node * * GetControlsPtr ( ) { return controls_ ; } <nl> - / / The effects buffer must have at least MaxMergeCount ( ) + 1 entries . <nl> - Node * * GetEffectsPtr ( ) { return effects_ ; } <nl> - void IncrementMergedCount ( ) { merged_count_ + + ; } <nl> - <nl> bool is_bound_ = false ; <nl> bool is_deferred_ ; <nl> size_t merged_count_ = 0 ; <nl> - Node * effects_ [ MergeCount + 1 ] ; / / Extra element for control edge , <nl> - / / so that we can use the array to <nl> - / / construct EffectPhi . <nl> - Node * controls_ [ MergeCount ] ; <nl> - Node * bindings_ [ ( MergeCount + 1 ) * VarCount + 1 ] ; <nl> + Node * effect_ ; <nl> + Node * control_ ; <nl> + Node * bindings_ [ VarCount + 1 ] ; <nl> MachineRepresentation representations_ [ VarCount + 1 ] ; <nl> } ; <nl> <nl> - / / General label ( with zone allocated buffers for incoming branches and phi <nl> - / / inputs ) . 
<nl> - class GraphAssemblerLabel { <nl> - public : <nl> - Node * PhiAt ( size_t index ) ; <nl> - <nl> - GraphAssemblerLabel ( GraphAssemblerLabelType is_deferred , size_t merge_count , <nl> - size_t var_count , MachineRepresentation * representations , <nl> - Zone * zone ) ; <nl> - <nl> - ~ GraphAssemblerLabel ( ) ; <nl> - <nl> - private : <nl> - friend class GraphAssembler ; <nl> - <nl> - void SetBound ( ) { <nl> - DCHECK ( ! is_bound_ ) ; <nl> - is_bound_ = true ; <nl> - } <nl> - bool IsBound ( ) const { return is_bound_ ; } <nl> - size_t PhiCount ( ) const { return var_count_ ; } <nl> - size_t MaxMergeCount ( ) const { return max_merge_count_ ; } <nl> - size_t MergedCount ( ) const { return merged_count_ ; } <nl> - bool IsDeferred ( ) const { return is_deferred_ ; } <nl> - <nl> - / / For each phi , the buffer must have at least MaxMergeCount ( ) + 1 <nl> - / / node entries . <nl> - Node * * GetBindingsPtrFor ( size_t phi_index ) ; <nl> - void SetBinding ( size_t phi_index , size_t merge_index , Node * binding ) ; <nl> - MachineRepresentation GetRepresentationFor ( size_t phi_index ) ; <nl> - / / The controls buffer must have at least MaxMergeCount ( ) entries . <nl> - Node * * GetControlsPtr ( ) ; <nl> - / / The effects buffer must have at least MaxMergeCount ( ) + 1 entries . <nl> - Node * * GetEffectsPtr ( ) ; <nl> - void IncrementMergedCount ( ) { merged_count_ + + ; } <nl> - <nl> - bool is_bound_ = false ; <nl> - bool is_deferred_ ; <nl> - size_t merged_count_ = 0 ; <nl> - size_t max_merge_count_ ; <nl> - size_t var_count_ ; <nl> - Node * * effects_ = nullptr ; <nl> - Node * * controls_ = nullptr ; <nl> - Node * * bindings_ = nullptr ; <nl> - MachineRepresentation * representations_ = nullptr ; <nl> - } ; <nl> - <nl> class GraphAssembler { <nl> public : <nl> GraphAssembler ( JSGraph * jsgraph , Node * effect , Node * control , Zone * zone ) ; <nl> class GraphAssembler { <nl> / / Create non - deferred label with statically known number of incoming <nl> / / gotos / branches . <nl> template < size_t MergeCount , typename . . . Reps > <nl> - static GraphAssemblerStaticLabel < MergeCount , sizeof . . . ( Reps ) > MakeLabel ( <nl> - Reps . . . reps ) { <nl> - return GraphAssemblerStaticLabel < MergeCount , sizeof . . . ( Reps ) > ( <nl> - GraphAssemblerLabelType : : kNonDeferred , reps . . . ) ; <nl> + static GraphAssemblerLabel < sizeof . . . ( Reps ) > MakeLabel ( Reps . . . reps ) { <nl> + return GraphAssemblerLabel < sizeof . . . ( Reps ) > ( <nl> + GraphAssemblerLabelType : : kNonDeferred , MergeCount , reps . . . ) ; <nl> } <nl> <nl> / / Create deferred label with statically known number of incoming <nl> / / gotos / branches . <nl> template < size_t MergeCount , typename . . . Reps > <nl> - static GraphAssemblerStaticLabel < MergeCount , sizeof . . . ( Reps ) > <nl> - MakeDeferredLabel ( Reps . . . reps ) { <nl> - return GraphAssemblerStaticLabel < MergeCount , sizeof . . . ( Reps ) > ( <nl> - GraphAssemblerLabelType : : kDeferred , reps . . . ) ; <nl> + static GraphAssemblerLabel < sizeof . . . ( Reps ) > MakeDeferredLabel ( Reps . . . reps ) { <nl> + return GraphAssemblerLabel < sizeof . . . ( Reps ) > ( <nl> + GraphAssemblerLabelType : : kDeferred , MergeCount , reps . . . ) ; <nl> } <nl> <nl> / / Create label with number of incoming branches supplied at runtime . <nl> template < typename . . . Reps > <nl> - GraphAssemblerLabel MakeLabelFor ( GraphAssemblerLabelType is_deferred , <nl> - size_t merge_count , Reps . . . 
reps ) { <nl> - MachineRepresentation reps_array [ ] = { MachineRepresentation : : kNone , <nl> - reps . . . } ; <nl> - return GraphAssemblerLabel ( is_deferred , merge_count , sizeof . . . ( reps ) , <nl> - & ( reps_array [ 1 ] ) , temp_zone ( ) ) ; <nl> + GraphAssemblerLabel < sizeof . . . ( Reps ) > MakeLabelFor ( <nl> + GraphAssemblerLabelType is_deferred , size_t merge_count , Reps . . . reps ) { <nl> + return GraphAssemblerLabel < sizeof . . . ( Reps ) > ( is_deferred , merge_count , <nl> + reps . . . ) ; <nl> } <nl> <nl> / / Value creation . <nl> class GraphAssembler { <nl> Node * Call ( const Operator * op , Args . . . args ) ; <nl> <nl> / / Basic control operations . <nl> - template < class LabelType > <nl> - void Bind ( LabelType * label ) ; <nl> + template < size_t VarCount > <nl> + void Bind ( GraphAssemblerLabel < VarCount > * label ) ; <nl> <nl> - template < class LabelType , typename . . . vars > <nl> - void Goto ( LabelType * label , vars . . . ) ; <nl> + template < typename . . . Vars > <nl> + void Goto ( GraphAssemblerLabel < sizeof . . . ( Vars ) > * label , Vars . . . ) ; <nl> <nl> - void Branch ( Node * condition , GraphAssemblerStaticLabel < 1 > * if_true , <nl> - GraphAssemblerStaticLabel < 1 > * if_false ) ; <nl> + void Branch ( Node * condition , GraphAssemblerLabel < 0u > * if_true , <nl> + GraphAssemblerLabel < 0u > * if_false ) ; <nl> <nl> / / Control helpers . <nl> / / { GotoIf ( c , l ) } is equivalent to { Branch ( c , l , templ ) ; Bind ( templ ) } . <nl> - template < class LabelType , typename . . . vars > <nl> - void GotoIf ( Node * condition , LabelType * label , vars . . . ) ; <nl> + template < typename . . . Vars > <nl> + void GotoIf ( Node * condition , GraphAssemblerLabel < sizeof . . . ( Vars ) > * label , <nl> + Vars . . . ) ; <nl> <nl> / / { GotoUnless ( c , l ) } is equivalent to { Branch ( c , templ , l ) ; Bind ( templ ) } . <nl> - template < class LabelType , typename . . . vars > <nl> - void GotoUnless ( Node * condition , LabelType * label , vars . . . ) ; <nl> + template < typename . . . Vars > <nl> + void GotoUnless ( Node * condition , GraphAssemblerLabel < sizeof . . . ( Vars ) > * label , <nl> + Vars . . . ) ; <nl> <nl> / / Extractors ( should be only used when destructing / resetting the assembler ) . <nl> Node * ExtractCurrentControl ( ) ; <nl> Node * ExtractCurrentEffect ( ) ; <nl> <nl> private : <nl> - template < class LabelType , typename . . . Vars > <nl> - void MergeState ( LabelType label , Vars . . . vars ) ; <nl> + template < typename . . . Vars > <nl> + void MergeState ( GraphAssemblerLabel < sizeof . . . ( Vars ) > * label , Vars . . . vars ) ; <nl> <nl> Operator const * ToNumberOperator ( ) ; <nl> <nl> class GraphAssembler { <nl> Node * current_control_ ; <nl> } ; <nl> <nl> - template < size_t MergeCount , size_t VarCount > <nl> - Node * GraphAssemblerStaticLabel < MergeCount , VarCount > : : PhiAt ( size_t index ) { <nl> + template < size_t VarCount > <nl> + Node * GraphAssemblerLabel < VarCount > : : PhiAt ( size_t index ) { <nl> DCHECK ( IsBound ( ) ) ; <nl> - return GetBindingsPtrFor ( index ) [ 0 ] ; <nl> + DCHECK_LT ( index , VarCount ) ; <nl> + return bindings_ [ index ] ; <nl> } <nl> <nl> - template < class LabelType , typename . . . Vars > <nl> - void GraphAssembler : : MergeState ( LabelType label , Vars . . . vars ) { <nl> + template < typename . . . Vars > <nl> + void GraphAssembler : : MergeState ( GraphAssemblerLabel < sizeof . . . ( Vars ) > * label , <nl> + Vars . . . vars ) { <nl> DCHECK ( ! 
label - > IsBound ( ) ) ; <nl> - size_t merged_count = label - > MergedCount ( ) ; <nl> - DCHECK_LT ( merged_count , label - > MaxMergeCount ( ) ) ; <nl> - DCHECK_EQ ( label - > PhiCount ( ) , sizeof . . . ( vars ) ) ; <nl> - label - > GetEffectsPtr ( ) [ merged_count ] = current_effect_ ; <nl> - label - > GetControlsPtr ( ) [ merged_count ] = current_control_ ; <nl> - / / We need to start with nullptr to avoid 0 - length arrays . <nl> + <nl> + int merged_count = static_cast < int > ( label - > merged_count_ ) ; <nl> Node * var_array [ ] = { nullptr , vars . . . } ; <nl> - for ( size_t i = 0 ; i < sizeof . . . ( vars ) ; i + + ) { <nl> - label - > SetBinding ( i , merged_count , var_array [ i + 1 ] ) ; <nl> + if ( merged_count = = 0 ) { <nl> + / / Just set the control , effect and variables directly . <nl> + label - > control_ = current_control_ ; <nl> + label - > effect_ = current_effect_ ; <nl> + for ( size_t i = 0 ; i < sizeof . . . ( vars ) ; i + + ) { <nl> + label - > bindings_ [ i ] = var_array [ i + 1 ] ; <nl> + } <nl> + } else if ( merged_count = = 1 ) { <nl> + / / Create merge , effect phi and a phi for each variable . <nl> + label - > control_ = <nl> + graph ( ) - > NewNode ( common ( ) - > Merge ( 2 ) , label - > control_ , current_control_ ) ; <nl> + label - > effect_ = graph ( ) - > NewNode ( common ( ) - > EffectPhi ( 2 ) , label - > effect_ , <nl> + current_effect_ , label - > control_ ) ; <nl> + for ( size_t i = 0 ; i < sizeof . . . ( vars ) ; i + + ) { <nl> + label - > bindings_ [ i ] = graph ( ) - > NewNode ( <nl> + common ( ) - > Phi ( label - > representations_ [ i ] , 2 ) , label - > bindings_ [ i ] , <nl> + var_array [ i + 1 ] , label - > control_ ) ; <nl> + } <nl> + } else { <nl> + / / Append to the merge , effect phi and phis . <nl> + DCHECK_EQ ( IrOpcode : : kMerge , label - > control_ - > opcode ( ) ) ; <nl> + label - > control_ - > AppendInput ( graph ( ) - > zone ( ) , current_control_ ) ; <nl> + NodeProperties : : ChangeOp ( label - > control_ , <nl> + common ( ) - > Merge ( merged_count + 1 ) ) ; <nl> + <nl> + DCHECK_EQ ( IrOpcode : : kEffectPhi , label - > effect_ - > opcode ( ) ) ; <nl> + label - > effect_ - > ReplaceInput ( merged_count , current_effect_ ) ; <nl> + label - > effect_ - > AppendInput ( graph ( ) - > zone ( ) , label - > control_ ) ; <nl> + NodeProperties : : ChangeOp ( label - > effect_ , <nl> + common ( ) - > EffectPhi ( merged_count + 1 ) ) ; <nl> + <nl> + for ( size_t i = 0 ; i < sizeof . . . 
( vars ) ; i + + ) { <nl> + DCHECK_EQ ( IrOpcode : : kPhi , label - > bindings_ [ i ] - > opcode ( ) ) ; <nl> + label - > bindings_ [ i ] - > ReplaceInput ( merged_count , var_array [ i + 1 ] ) ; <nl> + label - > bindings_ [ i ] - > AppendInput ( graph ( ) - > zone ( ) , label - > control_ ) ; <nl> + NodeProperties : : ChangeOp ( <nl> + label - > bindings_ [ i ] , <nl> + common ( ) - > Phi ( label - > representations_ [ i ] , merged_count + 1 ) ) ; <nl> + } <nl> } <nl> - label - > IncrementMergedCount ( ) ; <nl> + label - > merged_count_ + + ; <nl> } <nl> <nl> - template < class LabelType > <nl> - void GraphAssembler : : Bind ( LabelType * label ) { <nl> + template < size_t VarCount > <nl> + void GraphAssembler : : Bind ( GraphAssemblerLabel < VarCount > * label ) { <nl> DCHECK ( current_control_ = = nullptr ) ; <nl> DCHECK ( current_effect_ = = nullptr ) ; <nl> - DCHECK ( label - > MaxMergeCount ( ) > 0 ) ; <nl> - DCHECK_EQ ( label - > MaxMergeCount ( ) , label - > MergedCount ( ) ) ; <nl> - <nl> - int merge_count = static_cast < int > ( label - > MaxMergeCount ( ) ) ; <nl> - if ( merge_count = = 1 ) { <nl> - current_control_ = label - > GetControlsPtr ( ) [ 0 ] ; <nl> - current_effect_ = label - > GetEffectsPtr ( ) [ 0 ] ; <nl> - label - > SetBound ( ) ; <nl> - return ; <nl> - } <nl> + DCHECK ( label - > merged_count_ > 0 ) ; <nl> <nl> - current_control_ = graph ( ) - > NewNode ( common ( ) - > Merge ( merge_count ) , merge_count , <nl> - label - > GetControlsPtr ( ) ) ; <nl> - <nl> - Node * * effects = label - > GetEffectsPtr ( ) ; <nl> - current_effect_ = effects [ 0 ] ; <nl> - for ( size_t i = 1 ; i < label - > MaxMergeCount ( ) ; i + + ) { <nl> - if ( current_effect_ ! = effects [ i ] ) { <nl> - effects [ label - > MaxMergeCount ( ) ] = current_control_ ; <nl> - current_effect_ = graph ( ) - > NewNode ( common ( ) - > EffectPhi ( merge_count ) , <nl> - merge_count + 1 , effects ) ; <nl> - break ; <nl> - } <nl> - } <nl> - <nl> - for ( size_t var = 0 ; var < label - > PhiCount ( ) ; var + + ) { <nl> - Node * * bindings = label - > GetBindingsPtrFor ( var ) ; <nl> - bindings [ label - > MaxMergeCount ( ) ] = current_control_ ; <nl> - bindings [ 0 ] = graph ( ) - > NewNode ( <nl> - common ( ) - > Phi ( label - > GetRepresentationFor ( var ) , merge_count ) , <nl> - merge_count + 1 , bindings ) ; <nl> - } <nl> + current_control_ = label - > control_ ; <nl> + current_effect_ = label - > effect_ ; <nl> <nl> label - > SetBound ( ) ; <nl> } <nl> <nl> - template < class LabelType , typename . . . Vars > <nl> - void GraphAssembler : : Goto ( LabelType * label , Vars . . . vars ) { <nl> + template < typename . . . Vars > <nl> + void GraphAssembler : : Goto ( GraphAssemblerLabel < sizeof . . . ( Vars ) > * label , <nl> + Vars . . . vars ) { <nl> DCHECK_NOT_NULL ( current_control_ ) ; <nl> DCHECK_NOT_NULL ( current_effect_ ) ; <nl> MergeState ( label , vars . . . ) ; <nl> void GraphAssembler : : Goto ( LabelType * label , Vars . . . vars ) { <nl> current_effect_ = nullptr ; <nl> } <nl> <nl> - template < class LabelType , typename . . . Vars > <nl> - void GraphAssembler : : GotoIf ( Node * condition , LabelType * label , Vars . . . vars ) { <nl> + template < typename . . . Vars > <nl> + void GraphAssembler : : GotoIf ( Node * condition , <nl> + GraphAssemblerLabel < sizeof . . . ( Vars ) > * label , <nl> + Vars . . . vars ) { <nl> BranchHint hint = <nl> label - > IsDeferred ( ) ? 
BranchHint : : kFalse : BranchHint : : kNone ; <nl> Node * branch = <nl> void GraphAssembler : : GotoIf ( Node * condition , LabelType * label , Vars . . . vars ) { <nl> current_control_ = graph ( ) - > NewNode ( common ( ) - > IfFalse ( ) , branch ) ; <nl> } <nl> <nl> - template < class LabelType , typename . . . Vars > <nl> - void GraphAssembler : : GotoUnless ( Node * condition , LabelType * label , <nl> + template < typename . . . Vars > <nl> + void GraphAssembler : : GotoUnless ( Node * condition , <nl> + GraphAssemblerLabel < sizeof . . . ( Vars ) > * label , <nl> Vars . . . vars ) { <nl> BranchHint hint = label - > IsDeferred ( ) ? BranchHint : : kTrue : BranchHint : : kNone ; <nl> Node * branch = <nl>
[turbofan] Simplify graph assembler label.
v8/v8
3e8ff5cbae719293825eec7d99ed110b951c2db7
2017-08-28T16:49:18Z
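The simplification replaces per-label arrays sized by a static MergeCount with incremental construction in MergeState: the first incoming branch stores effect, control and bindings directly; the second materializes a two-input Merge/EffectPhi/Phi; every later branch appends an input and widens the operator. A toy C++ model of that three-case growth, with plain vectors standing in for TurboFan nodes:

```cpp
#include <cassert>
#include <iostream>
#include <vector>

// Toy "phi": just the list of values flowing in from each predecessor.
struct Phi { std::vector<int> inputs; };

struct Label {
    int merged_count = 0;
    int first_value = 0;   // used while there is only one predecessor
    Phi phi;               // materialized lazily at the second predecessor
};

// Mirrors the three cases in GraphAssembler::MergeState.
void mergeState(Label & label, int value) {
    if (label.merged_count == 0) {
        label.first_value = value;               // case 1: record directly
    } else if (label.merged_count == 1) {
        label.phi.inputs = {label.first_value,   // case 2: build a 2-input phi
                            value};
    } else {
        label.phi.inputs.push_back(value);       // case 3: append an input
    }
    ++label.merged_count;
}

int main() {
    Label done;
    mergeState(done, 10);   // Goto from branch A
    mergeState(done, 20);   // Goto from branch B: creates the 2-input phi
    mergeState(done, 30);   // Goto from branch C: appends a third input
    assert(done.phi.inputs.size() == 3);
    for (int v : done.phi.inputs)
        std::cout << v << ' ';                   // prints: 10 20 30
    std::cout << '\n';
}
```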
mmm a / doc / syntax . rst <nl> ppp b / doc / syntax . rst <nl> described in the next section . <nl> <nl> A * format_spec * field can also include nested replacement fields in certain <nl> positions within it . These nested replacement fields can contain only an <nl> - argument index ; format specifications are not allowed . This allows the <nl> + argument id ; format specifications are not allowed . This allows the <nl> formatting of a value to be dynamically specified . <nl> <nl> See the : ref : ` formatexamples ` section for some examples . <nl>
argument index -> argument id
fmtlib/fmt
7ce7def515a38ea87517a1cd6406e4b773da241b
2016-11-02T00:00:59Z
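For context on the sentence being edited: a nested replacement field inside a format_spec may name an argument (positionally or automatically) but may not carry its own spec, which is what makes dynamic widths and precisions work. A short example against the {fmt} API, assuming a reasonably current release:

```cpp
#include <fmt/format.h>
#include <iostream>

int main() {
    // The inner {} in the format_spec names the argument that supplies the
    // width dynamically; only an argument id is allowed there, no spec.
    std::cout << fmt::format("{:>{}}", "x", 10) << '\n';        // right-align in width 10
    // Explicit argument ids work too: width taken from argument 1.
    std::cout << fmt::format("{0:>{1}}", "x", 10) << '\n';
    // Dynamic precision for floating point: precision from argument 1.
    std::cout << fmt::format("{0:.{1}f}", 3.14159, 2) << '\n';  // 3.14
}
```

C++20's std::format inherited the same nested-field syntax.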
mmm a / tensorflow / compiler / tf2xla / graph_compiler . cc <nl> ppp b / tensorflow / compiler / tf2xla / graph_compiler . cc <nl> Status PrepareArguments ( XlaOpKernelContext * ctx , Graph * graph , <nl> } <nl> } / / namespace <nl> Status GraphCompiler : : Compile ( ) { <nl> - std : : vector < NodeBinding > bindings ( graph_ - > num_node_ids ( ) ) ; <nl> + OutputRegistry output_registry ( graph_ - > num_node_ids ( ) ) ; <nl> std : : vector < Node * > topo_sorted_nodes ; <nl> / / XLA requires determinism , generate a stable ordering from DFS . <nl> GetReversePostOrder ( * graph_ , & topo_sorted_nodes , <nl> Status GraphCompiler : : Compile ( ) { <nl> PartiallySetupParams ( & params ) ; <nl> <nl> for ( Node * n : topo_sorted_nodes ) { <nl> - / / Set up bindings . <nl> - NodeBinding & binding = bindings [ n - > id ( ) ] ; <nl> - binding . node = n ; <nl> - Status s = flib_ - > CreateKernel ( n - > def ( ) , & binding . op_kernel ) ; <nl> - binding . output_attrs . resize ( n - > num_outputs ( ) ) ; <nl> + NodeOutputs node_outputs ; <nl> + OpKernel * op_kernel_raw = nullptr ; <nl> + Status s = flib_ - > CreateKernel ( n - > def ( ) , & op_kernel_raw ) ; <nl> + / / Transfer ownership of the kernel to a local smart pointer . <nl> + std : : unique_ptr < OpKernel > op_kernel ( op_kernel_raw ) ; <nl> + <nl> if ( ! s . ok ( ) ) { <nl> - binding . op_kernel = nullptr ; <nl> s = AttachDef ( s , * n ) ; <nl> LOG ( ERROR ) < < " Executor failed to create kernel . " < < s ; <nl> return s ; <nl> } <nl> - } <nl> - <nl> - / / Bindings are initialized by the size of graph_ - > num_node_ids . However , the <nl> - / / graph may contain dead nodes that still hold a valid node id . Thus <nl> - / / graph_ - > num_node_ids could be larger than number of topo sorted nodes . <nl> - TF_RET_CHECK ( bindings . size ( ) > = topo_sorted_nodes . size ( ) ) ; <nl> <nl> - for ( Node * n : topo_sorted_nodes ) { <nl> TF_RET_CHECK ( ! n - > IsRecv ( ) & & ! n - > IsSend ( ) & & ! n - > IsSwitch ( ) ) <nl> < < " Not supported node : " < < n - > DebugString ( ) ; <nl> - NodeBinding & binding = bindings [ n - > id ( ) ] ; <nl> - params . op_kernel = binding . op_kernel ; <nl> - params . output_attr_array = binding . output_attrs . data ( ) ; <nl> + params . op_kernel = op_kernel . get ( ) ; <nl> + gtl : : InlinedVector < AllocatorAttributes , 4 > output_attr ( n - > num_outputs ( ) ) ; <nl> + params . output_attr_array = output_attr . data ( ) ; <nl> <nl> / / tensor_inputs_ is a buffer reused across graph traversal . We clean up and <nl> / / reinitialize the buffer before we visit a new node . <nl> Status GraphCompiler : : Compile ( ) { <nl> for ( auto * e : n - > in_edges ( ) ) { <nl> if ( e - > IsControlEdge ( ) ) continue ; <nl> Node * src = e - > src ( ) ; <nl> - tensor_inputs_ [ e - > dst_input ( ) ] = <nl> - bindings [ src - > id ( ) ] . tensor_values [ e - > src_output ( ) ] ; <nl> + TF_RET_CHECK ( src - > id ( ) < output_registry . size ( ) ) ; <nl> + const NodeOutputs & outputs = output_registry [ src - > id ( ) ] ; <nl> + <nl> + tensor_inputs_ [ e - > dst_input ( ) ] = outputs . values [ e - > src_output ( ) ] ; <nl> } <nl> <nl> OpKernelContext op_context ( & params , n - > num_outputs ( ) ) ; <nl> Status GraphCompiler : : Compile ( ) { <nl> ( * op_context . is_output_dead ( ) ? " ( dead ) " : " " ) , <nl> SummarizeNode ( * n ) ) ; <nl> } <nl> - binding . tensor_values . push_back ( tensor_val ) ; <nl> - } <nl> - } <nl> - <nl> - / / Clean up tensor data and op kernels . 
<nl> - for ( NodeBinding & binding : bindings ) { <nl> - delete binding . op_kernel ; <nl> - for ( auto & t : binding . tensor_values ) { <nl> - if ( ! t . is_ref ( ) ) { <nl> - delete t . tensor ; <nl> - } <nl> + / / Set up outputs <nl> + output_registry [ n - > id ( ) ] . values . push_back ( tensor_val ) ; <nl> } <nl> } <nl> return Status : : OK ( ) ; <nl> mmm a / tensorflow / compiler / tf2xla / graph_compiler . h <nl> ppp b / tensorflow / compiler / tf2xla / graph_compiler . h <nl> class GraphCompiler { <nl> Status Compile ( ) ; <nl> <nl> private : <nl> - / / NodeBinding is a wrapper on a ` Node ` that also contains computed <nl> - / / TensorValue . <nl> - struct NodeBinding { <nl> - const Node * node ; <nl> - / / Kernel for this node , to be filled by CreateKernel . <nl> - / / TODO ( yunxing ) : Switching this to unique_ptr and understand why it crashes <nl> - / / on GPU devices . <nl> - OpKernel * op_kernel ; <nl> + / / NodeOutputs is a wrapper over TensorValues that represents outputs of a <nl> + / / node . <nl> + struct NodeOutputs { <nl> + ~ NodeOutputs ( ) { <nl> + for ( auto & v : values ) { <nl> + CHECK ( ! v . is_ref ( ) ) ; <nl> + delete v . tensor ; <nl> + } <nl> + } <nl> + <nl> / / Output values of this node . <nl> - std : : vector < TensorValue > tensor_values ; <nl> - / / Attributes of the outputs . <nl> - gtl : : InlinedVector < AllocatorAttributes , 4 > output_attrs ; <nl> + std : : vector < TensorValue > values ; <nl> } ; <nl> <nl> + / / A mapping from node id to node output . <nl> + using OutputRegistry = std : : vector < NodeOutputs > ; <nl> + <nl> / / Partially sets params . This partially set params can be reused <nl> / / across multple nodes visit . <nl> void PartiallySetupParams ( OpKernelContext : : Params * params ) ; <nl> mmm a / tensorflow / compiler / tf2xla / xla_jit_compiled_cpu_function_test . cc <nl> ppp b / tensorflow / compiler / tf2xla / xla_jit_compiled_cpu_function_test . cc <nl> TEST ( XlaJitCompiledCpuFunction , Sum ) { <nl> EXPECT_TRUE ( ShapeUtil : : Compatible ( result0 , s32 ) ) ; <nl> } <nl> <nl> + / / Test when a graph compilation terminates early , resources are properly <nl> + / / reclaimed . <nl> + TEST ( XlaJitCompiledCpuFunction , SumWithJunkAttr ) { <nl> + GraphDef graph_def = SumGraph ( ) ; <nl> + <nl> + ( * graph_def . mutable_node ( 2 ) - > mutable_attr ( ) ) [ " junk " ] = <nl> + TypeAttrValue ( DT_INT32 ) ; <nl> + <nl> + tf2xla : : Config config = SumConfig ( ) ; <nl> + EXPECT_FALSE ( XlaJitCompiledCpuFunction : : Compile ( graph_def , config , <nl> + xla : : ExecutableBuildOptions ( ) ) <nl> + . ok ( ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace tensorflow <nl>
Fix a memory leak in graph compiler.
tensorflow/tensorflow
b2f5acd2c3fbcccb580d6393c0ce77a32ad01279
2017-10-18T22:09:56Z
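The leak existed because kernels and output tensors were freed only by a cleanup loop at the end of Compile(), which every early error return skipped; the fix hands ownership to a std::unique_ptr and a NodeOutputs destructor. A minimal before/after sketch of that RAII move, with a hypothetical Kernel type rather than the TensorFlow classes:

```cpp
#include <iostream>
#include <memory>

struct Kernel {                      // hypothetical stand-in for OpKernel
    Kernel()  { std::cout << "kernel created\n"; }
    ~Kernel() { std::cout << "kernel destroyed\n"; }
};

// Leaky shape: a raw pointer freed only at the end, so the early
// return on error skips the delete (the shape of the original code).
bool compileLeaky(bool fail) {
    Kernel * k = new Kernel();
    if (fail)
        return false;                // leak: the destructor never runs
    delete k;
    return true;
}

// Fixed shape: unique_ptr releases the kernel on every exit path,
// mirroring the transfer into a local smart pointer in the patch.
bool compileRaii(bool fail) {
    auto k = std::make_unique<Kernel>();
    if (fail)
        return false;                // k is destroyed here automatically
    return true;
}

int main() {
    std::cout << "-- leaky --\n";
    compileLeaky(true);              // prints "created" only
    std::cout << "-- raii --\n";
    compileRaii(true);               // prints "created" then "destroyed"
}
```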
mmm a / TARGETS <nl> ppp b / TARGETS <nl> cpp_library ( <nl> # out of the final binary ! <nl> link_whole = True , <nl> supported_platforms_regex = " glibc " , <nl> + undefined_symbols = True , # TODO ( T23121628 ) : fix deps and remove <nl> deps = [ <nl> " : err " , <nl> " : headers " , <nl> cpp_library ( <nl> # out of the final binary ! <nl> link_whole = True , <nl> supported_platforms_regex = " glibc " , <nl> + undefined_symbols = True , # TODO ( T23121628 ) : fix deps and remove <nl> deps = [ <nl> " : eden_watcher " , <nl> " : err " , <nl> cpp_library ( <nl> " log . cpp " , <nl> ] , <nl> compiler_flags = compiler_flags , <nl> + undefined_symbols = True , # TODO ( T23121628 ) : fix deps and remove <nl> deps = [ <nl> " : headers " , <nl> " @ / watchman / thirdparty / jansson : jansson " , <nl> cpp_library ( <nl> " root / warnerr . cpp " , <nl> ] , <nl> compiler_flags = compiler_flags , <nl> + undefined_symbols = True , # TODO ( T23121628 ) : fix deps and remove <nl> deps = [ " : headers " ] , <nl> ) <nl> <nl> cpp_library ( <nl> srcs = [ " query / pcre . cpp " ] , <nl> compiler_flags = [ " - DHAVE_PCRE_H " ] + compiler_flags , <nl> link_whole = True , <nl> + undefined_symbols = True , # TODO ( T23121628 ) : fix deps and remove <nl> deps = [ " : headers " ] , <nl> external_deps = [ " pcre " ] , <nl> ) <nl> cpp_library ( <nl> # out of the final binary ! <nl> link_whole = True , <nl> os_linker_flags = os_linker_flags , <nl> + undefined_symbols = True , # TODO ( T23121628 ) : fix deps and remove <nl> deps = [ <nl> " : err " , <nl> " : headers " , <nl>
codemod: add `undefined_symbols` to rules with improper deps
facebook/watchman
45177cc2ee4f8e6314bec2915484b98b06f171f3
2017-11-30T01:36:30Z
mmm a / imgui . cpp <nl> ppp b / imgui . cpp <nl> namespace IMGUI_STB_NAMESPACE <nl> # ifndef IMGUI_DISABLE_STB_TRUETYPE_IMPLEMENTATION <nl> # define STBTT_STATIC <nl> # define STB_TRUETYPE_IMPLEMENTATION <nl> + # else <nl> + # define STBTT_DEF extern <nl> # endif <nl> # include " stb_truetype . h " <nl> <nl>
Define STBTT_DEF as extern when STBTT is not compiled with ImGui.
ocornut/imgui
cd9244ab158c8f73b76351b4963e6ebd176cff57
2015-05-27T23:12:52Z
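Context for the one-line patch: in an stb-style single-header library, exactly one translation unit defines the implementation macro; every other includer needs the functions declared extern, which is what defining STBTT_DEF as extern arranges when ImGui is told not to compile stb_truetype itself. A self-contained sketch of the pattern with a made-up mini library:

```cpp
#include <iostream>

// --- mini_lib.h (inlined): a hypothetical stb-style single-header library ---
// A consumer that does NOT compile the implementation defines MINI_DEF as
// extern (the analogue of "#define STBTT_DEF extern" in the patch); the one
// translation unit that does compile it would define MINI_IMPLEMENTATION.
#ifndef MINI_DEF
#define MINI_DEF extern
#endif
MINI_DEF int mini_add(int a, int b);
// --- end of header ---

// Elsewhere in the program, exactly one TU provides the definition.
int mini_add(int a, int b) { return a + b; }

int main() {
    std::cout << mini_add(2, 3) << '\n';  // prints 5
}
```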
mmm a / yoga / Yoga . cpp <nl> ppp b / yoga / Yoga . cpp <nl> static float YGNodeCalculateAvailableInnerDim ( <nl> return availableInnerDim ; <nl> } <nl> <nl> + static void YGNodeComputeFlexBasisForChildren ( <nl> + const YGNodeRef node , <nl> + const float availableInnerWidth , <nl> + const float availableInnerHeight , <nl> + YGMeasureMode widthMeasureMode , <nl> + YGMeasureMode heightMeasureMode , <nl> + YGDirection direction , <nl> + YGFlexDirection mainAxis , <nl> + const YGConfigRef config , <nl> + bool performLayout , <nl> + float & totalOuterFlexBasis ) { <nl> + YGNodeRef singleFlexChild = nullptr ; <nl> + YGVector children = node - > getChildren ( ) ; <nl> + YGMeasureMode measureModeMainDim = <nl> + YGFlexDirectionIsRow ( mainAxis ) ? widthMeasureMode : heightMeasureMode ; <nl> + / / If there is only one child with flexGrow + flexShrink it means we can set <nl> + / / the computedFlexBasis to 0 instead of measuring and shrinking / flexing the <nl> + / / child to exactly match the remaining space <nl> + if ( measureModeMainDim = = YGMeasureModeExactly ) { <nl> + for ( auto child : children ) { <nl> + if ( singleFlexChild ! = nullptr ) { <nl> + if ( YGNodeIsFlex ( child ) ) { <nl> + / / There is already a flexible child , abort <nl> + singleFlexChild = nullptr ; <nl> + break ; <nl> + } <nl> + } else if ( <nl> + child - > resolveFlexGrow ( ) > 0 . 0f & & <nl> + child - > resolveFlexShrink ( ) > 0 . 0f ) { <nl> + singleFlexChild = child ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + for ( auto child : children ) { <nl> + child - > resolveDimension ( ) ; <nl> + if ( child - > getStyle ( ) . display = = YGDisplayNone ) { <nl> + YGZeroOutLayoutRecursivly ( child ) ; <nl> + child - > setHasNewLayout ( true ) ; <nl> + child - > setDirty ( false ) ; <nl> + continue ; <nl> + } <nl> + if ( child - > getStyle ( ) . positionType = = YGPositionTypeAbsolute ) { <nl> + continue ; <nl> + } <nl> + if ( performLayout ) { <nl> + / / Set the initial position ( relative to the parent ) . <nl> + const YGDirection childDirection = <nl> + YGNodeResolveDirection ( child , direction ) ; <nl> + const float mainDim = YGFlexDirectionIsRow ( mainAxis ) <nl> + ? availableInnerWidth <nl> + : availableInnerHeight ; <nl> + const float crossDim = YGFlexDirectionIsRow ( mainAxis ) <nl> + ? availableInnerHeight <nl> + : availableInnerWidth ; <nl> + child - > setPosition ( <nl> + childDirection , mainDim , crossDim , availableInnerWidth ) ; <nl> + } <nl> + if ( child = = singleFlexChild ) { <nl> + child - > setLayoutComputedFlexBasisGeneration ( gCurrentGenerationCount ) ; <nl> + child - > setLayoutComputedFlexBasis ( 0 ) ; <nl> + } else { <nl> + YGNodeComputeFlexBasisForChild ( <nl> + node , <nl> + child , <nl> + availableInnerWidth , <nl> + widthMeasureMode , <nl> + availableInnerHeight , <nl> + availableInnerWidth , <nl> + availableInnerHeight , <nl> + heightMeasureMode , <nl> + direction , <nl> + config ) ; <nl> + } <nl> + <nl> + totalOuterFlexBasis + = child - > getLayout ( ) . computedFlexBasis + <nl> + YGNodeMarginForAxis ( child , mainAxis , availableInnerWidth ) ; <nl> + ; <nl> + } <nl> + } <nl> + <nl> / / <nl> / / This is the main routine that implements a subset of the flexbox layout <nl> / / algorithm <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> const float minInnerMainDim = isMainAxisRow ? minInnerWidth : minInnerHeight ; <nl> const float maxInnerMainDim = isMainAxisRow ? 
maxInnerWidth : maxInnerHeight ; <nl> <nl> - / / STEP 2 : DETERMINE AVAILABLE SIZE IN MAIN AND CROSS DIRECTIONS <nl> - <nl> - float availableInnerWidth = YGNodeCalculateAvailableInnerDim ( <nl> - node , YGFlexDirectionRow , availableWidth , parentWidth ) ; <nl> - float availableInnerHeight = YGNodeCalculateAvailableInnerDim ( <nl> - node , YGFlexDirectionColumn , availableHeight , parentHeight ) ; <nl> - <nl> - float availableInnerMainDim = isMainAxisRow ? availableInnerWidth : availableInnerHeight ; <nl> - const float availableInnerCrossDim = isMainAxisRow ? availableInnerHeight : availableInnerWidth ; <nl> - <nl> - / / If there is only one child with flexGrow + flexShrink it means we can set the <nl> - / / computedFlexBasis to 0 instead of measuring and shrinking / flexing the child to exactly <nl> - / / match the remaining space <nl> - YGNodeRef singleFlexChild = nullptr ; <nl> - if ( measureModeMainDim = = YGMeasureModeExactly ) { <nl> - for ( uint32_t i = 0 ; i < childCount ; i + + ) { <nl> - const YGNodeRef child = YGNodeGetChild ( node , i ) ; <nl> - if ( singleFlexChild ) { <nl> - if ( YGNodeIsFlex ( child ) ) { <nl> - / / There is already a flexible child , abort . <nl> - singleFlexChild = nullptr ; <nl> - break ; <nl> - } <nl> - } else if ( <nl> - child - > resolveFlexGrow ( ) > 0 . 0f & & <nl> - child - > resolveFlexShrink ( ) > 0 . 0f ) { <nl> - singleFlexChild = child ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - float totalOuterFlexBasis = 0 ; <nl> - <nl> - / / STEP 3 : DETERMINE FLEX BASIS FOR EACH ITEM <nl> - for ( uint32_t i = 0 ; i < childCount ; i + + ) { <nl> - const YGNodeRef child = node - > getChild ( i ) ; <nl> - if ( child - > getStyle ( ) . display = = YGDisplayNone ) { <nl> - YGZeroOutLayoutRecursivly ( child ) ; <nl> - child - > setHasNewLayout ( true ) ; <nl> - child - > setDirty ( false ) ; <nl> - continue ; <nl> - } <nl> - child - > resolveDimension ( ) ; <nl> - if ( performLayout ) { <nl> - / / Set the initial position ( relative to the parent ) . <nl> - const YGDirection childDirection = YGNodeResolveDirection ( child , direction ) ; <nl> - child - > setPosition ( <nl> - childDirection , <nl> - availableInnerMainDim , <nl> - availableInnerCrossDim , <nl> - availableInnerWidth ) ; <nl> - } <nl> - <nl> + / / Make a private linkedlist of absolutely positioned child <nl> + for ( auto child : node - > getChildren ( ) ) { <nl> / / Absolute - positioned children don ' t participate in flex layout . Add them <nl> / / to a list that we can process later . <nl> if ( child - > getStyle ( ) . positionType = = YGPositionTypeAbsolute ) { <nl> static void YGNodelayoutImpl ( const YGNodeRef node , <nl> } <nl> currentAbsoluteChild = child ; <nl> child - > setNextChild ( nullptr ) ; <nl> - } else { <nl> - if ( child = = singleFlexChild ) { <nl> - child - > setLayoutComputedFlexBasisGeneration ( gCurrentGenerationCount ) ; <nl> - child - > setLayoutComputedFlexBasis ( 0 ) ; <nl> - } else { <nl> - YGNodeComputeFlexBasisForChild ( node , <nl> - child , <nl> - availableInnerWidth , <nl> - widthMeasureMode , <nl> - availableInnerHeight , <nl> - availableInnerWidth , <nl> - availableInnerHeight , <nl> - heightMeasureMode , <nl> - direction , <nl> - config ) ; <nl> - } <nl> } <nl> - <nl> - totalOuterFlexBasis + = child - > getLayout ( ) . 
computedFlexBasis + <nl> - YGNodeMarginForAxis ( child , mainAxis , availableInnerWidth ) ; <nl> - ; <nl> } <nl> <nl> + / / STEP 2 : DETERMINE AVAILABLE SIZE IN MAIN AND CROSS DIRECTIONS <nl> + <nl> + float availableInnerWidth = YGNodeCalculateAvailableInnerDim ( <nl> + node , YGFlexDirectionRow , availableWidth , parentWidth ) ; <nl> + float availableInnerHeight = YGNodeCalculateAvailableInnerDim ( <nl> + node , YGFlexDirectionColumn , availableHeight , parentHeight ) ; <nl> + <nl> + float availableInnerMainDim = <nl> + isMainAxisRow ? availableInnerWidth : availableInnerHeight ; <nl> + const float availableInnerCrossDim = <nl> + isMainAxisRow ? availableInnerHeight : availableInnerWidth ; <nl> + <nl> + float totalOuterFlexBasis = 0 ; <nl> + <nl> + / / STEP 3 : DETERMINE FLEX BASIS FOR EACH ITEM <nl> + <nl> + YGNodeComputeFlexBasisForChildren ( <nl> + node , <nl> + availableInnerWidth , <nl> + availableInnerHeight , <nl> + widthMeasureMode , <nl> + heightMeasureMode , <nl> + direction , <nl> + mainAxis , <nl> + config , <nl> + performLayout , <nl> + totalOuterFlexBasis ) ; <nl> + <nl> const bool flexBasisOverflows = measureModeMainDim = = YGMeasureModeUndefined <nl> ? false <nl> : totalOuterFlexBasis > availableInnerMainDim ; <nl> if ( isNodeFlexWrap & & flexBasisOverflows & & measureModeMainDim = = YGMeasureModeAtMost ) { <nl> measureModeMainDim = YGMeasureModeExactly ; <nl> } <nl> - <nl> / / STEP 4 : COLLECT FLEX ITEMS INTO FLEX LINES <nl> <nl> / / Indexes of children that represent the first and last items in the line . <nl>
Refactored step 3 of the flexbox algorithm
facebook/yoga
a9dd5277480d4c8115ab9768abff74e5fb7d2a4b
2018-01-15T18:15:12Z
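The refactor above extracts the per-child flex-basis pass out of `YGNodelayoutImpl` into `YGNodeComputeFlexBasisForChildren`, which reports its total through an accumulator passed by reference. A stripped-down sketch of that shape, with a hypothetical `Child` struct standing in for Yoga's node type:

```cpp
#include <vector>

// Illustrative stand-in for a Yoga child node; not Yoga's real API.
struct Child {
  float flexBasis;  // computed flex basis of the child
  float margin;     // outer margin along the main axis
};

// The extracted helper: walks the children once and accumulates the total
// outer flex basis via the reference parameter, exactly the calling
// convention YGNodeComputeFlexBasisForChildren uses for totalOuterFlexBasis.
static void computeFlexBasisForChildren(const std::vector<Child>& children,
                                        float& totalOuterFlexBasis) {
  for (const Child& child : children) {
    totalOuterFlexBasis += child.flexBasis + child.margin;
  }
}
```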
mmm a / src / Processors / Formats / Impl / MsgPackRowInputFormat . h <nl> ppp b / src / Processors / Formats / Impl / MsgPackRowInputFormat . h <nl> class MsgPackVisitor : public msgpack : : null_visitor <nl> bool end_array ( ) ; <nl> <nl> / / / This function will be called if error occurs in parsing <nl> - void parse_error ( size_t parsed_offset , size_t error_offset ) ; <nl> + [ [ noreturn ] ] void parse_error ( size_t parsed_offset , size_t error_offset ) ; <nl> <nl> / / / Update info_stack <nl> void set_info ( IColumn & column , DataTypePtr type ) ; <nl>
Fix build error
ClickHouse/ClickHouse
96f4d5b7c6f41d9e4797b4c8d86a9ebe69a7dfe5
2020-04-17T20:51:53Z
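The one-line fix above works because `[[noreturn]]` promises the compiler that control never comes back from `parse_error`, so call sites that would otherwise appear to fall off the end of a value-returning function stop triggering warnings promoted by -Werror. A self-contained sketch; the throwing body is an assumption, since the actual ClickHouse implementation is not shown in the diff:

```cpp
#include <cstddef>
#include <stdexcept>

// Marked [[noreturn]]: the compiler may assume nothing after a call to this
// function executes, which silences "control reaches end of non-void
// function" diagnostics in callers.
[[noreturn]] void parse_error(std::size_t parsed_offset,
                              std::size_t error_offset) {
  (void)parsed_offset;
  (void)error_offset;
  throw std::runtime_error("msgpack parse error");
}

int statusAfterParse(bool ok) {
  if (ok)
    return 0;
  parse_error(0, 1);  // no trailing return needed thanks to [[noreturn]]
}
```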
mmm a / folly / experimental / coro / Error . h <nl> ppp b / folly / experimental / coro / Error . h <nl> <nl> namespace folly { <nl> namespace coro { <nl> <nl> - class co_error { <nl> + class co_error final { <nl> public : <nl> template < <nl> typename . . . A , <nl>
Mark co_error as final
facebook/folly
186cd82394aed95b2bb436b017a225d1cf21587b
2019-12-21T01:43:45Z
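For context on the change above: `final` closes a class to derivation, which both documents intent and lets the compiler devirtualize more aggressively. A minimal sketch with an illustrative payload (the real folly `co_error` wraps an exception, which the diff does not show):

```cpp
// final forbids subclassing: the commented-out class below is rejected at
// compile time.
class co_error final {
 public:
  explicit co_error(int code) : code_(code) {}
  int code() const { return code_; }

 private:
  int code_;
};

// class derived_error : public co_error {};  // error: base 'co_error' is final
```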
new file mode 100644 <nl> index 000000000000 . . b2e68c712dd6 <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers / 28723 - unreachable - executed - at - swift - lib - sema - csdiag - cpp - 4012 . swift <nl> <nl> + / / This source file is part of the Swift . org open source project <nl> + / / Copyright ( c ) 2014 - 2017 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See https : / / swift . org / LICENSE . txt for license information <nl> + / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + <nl> + / / RUN : not - - crash % target - swift - frontend % s - emit - ir <nl> + func t ( UInt = __FUNCTION__ <nl> + func & t ( <nl>
[ swiftc ( 27 vs . 5507 ) ] Add crasher in swift : : constraints : : ConstraintSystem : : diagnoseFailureForExpr ( . . . )
apple/swift
71845c060b2aa49ee0acfc4ebe874ccb0b692f3a
2017-03-16T09:48:51Z
mmm a / src / compiler / control - reducer . cc <nl> ppp b / src / compiler / control - reducer . cc <nl> class ControlReducerImpl { <nl> / / Gather all nodes backwards - reachable from end ( through inputs ) . <nl> ReachabilityMarker marked ( graph ( ) ) ; <nl> NodeVector nodes ( zone_ ) ; <nl> - AddNodesReachableFromEnd ( marked , nodes ) ; <nl> + AddNodesReachableFromRoots ( marked , nodes ) ; <nl> <nl> / / Walk forward through control nodes , looking for back edges to nodes <nl> / / that are not connected to end . Those are non - terminating loops ( NTLs ) . <nl> class ControlReducerImpl { <nl> } <nl> <nl> / / Trim references from dead nodes to live nodes first . <nl> - jsgraph_ - > GetCachedNodes ( & nodes ) ; <nl> TrimNodes ( marked , nodes ) ; <nl> <nl> / / Any control nodes not reachable from start are dead , even loops . <nl> class ControlReducerImpl { <nl> return ret ; <nl> } <nl> <nl> - void AddNodesReachableFromEnd ( ReachabilityMarker & marked , NodeVector & nodes ) { <nl> + void AddNodesReachableFromRoots ( ReachabilityMarker & marked , <nl> + NodeVector & nodes ) { <nl> + jsgraph_ - > GetCachedNodes ( & nodes ) ; / / Consider cached nodes roots . <nl> Node * end = graph ( ) - > end ( ) ; <nl> marked . SetReachableFromEnd ( end ) ; <nl> - if ( ! end - > IsDead ( ) ) { <nl> - nodes . push_back ( end ) ; <nl> - AddBackwardsReachableNodes ( marked , nodes , nodes . size ( ) - 1 ) ; <nl> - } <nl> + if ( ! end - > IsDead ( ) ) nodes . push_back ( end ) ; / / Consider end to be a root . <nl> + for ( Node * node : nodes ) marked . SetReachableFromEnd ( node ) ; <nl> + AddBackwardsReachableNodes ( marked , nodes , 0 ) ; <nl> } <nl> <nl> void AddBackwardsReachableNodes ( ReachabilityMarker & marked , NodeVector & nodes , <nl> class ControlReducerImpl { <nl> / / Gather all nodes backwards - reachable from end through inputs . <nl> ReachabilityMarker marked ( graph ( ) ) ; <nl> NodeVector nodes ( zone_ ) ; <nl> - AddNodesReachableFromEnd ( marked , nodes ) ; <nl> - <nl> - / / Process cached nodes in the JSGraph too . <nl> jsgraph_ - > GetCachedNodes ( & nodes ) ; <nl> + AddNodesReachableFromRoots ( marked , nodes ) ; <nl> TrimNodes ( marked , nodes ) ; <nl> } <nl> <nl> mmm a / src / compiler / js - graph . cc <nl> ppp b / src / compiler / js - graph . cc <nl> Node * JSGraph : : ImmovableHeapConstant ( Handle < HeapObject > object ) { <nl> } <nl> <nl> <nl> + # define CACHED ( name , expr ) \ <nl> + cached_nodes_ [ name ] ? cached_nodes_ [ name ] : ( cached_nodes_ [ name ] = ( expr ) ) <nl> + <nl> + <nl> Node * JSGraph : : CEntryStubConstant ( int result_size ) { <nl> if ( result_size = = 1 ) { <nl> - if ( ! c_entry_stub_constant_ . is_set ( ) ) { <nl> - c_entry_stub_constant_ . set ( <nl> - ImmovableHeapConstant ( CEntryStub ( isolate ( ) , 1 ) . GetCode ( ) ) ) ; <nl> - } <nl> - return c_entry_stub_constant_ . get ( ) ; <nl> + return CACHED ( kCEntryStubConstant , <nl> + ImmovableHeapConstant ( CEntryStub ( isolate ( ) , 1 ) . GetCode ( ) ) ) ; <nl> } <nl> - <nl> return ImmovableHeapConstant ( CEntryStub ( isolate ( ) , result_size ) . GetCode ( ) ) ; <nl> } <nl> <nl> <nl> Node * JSGraph : : UndefinedConstant ( ) { <nl> - if ( ! undefined_constant_ . is_set ( ) ) { <nl> - undefined_constant_ . set ( <nl> - ImmovableHeapConstant ( factory ( ) - > undefined_value ( ) ) ) ; <nl> - } <nl> - return undefined_constant_ . 
get ( ) ; <nl> + return CACHED ( kUndefinedConstant , <nl> + ImmovableHeapConstant ( factory ( ) - > undefined_value ( ) ) ) ; <nl> } <nl> <nl> <nl> Node * JSGraph : : TheHoleConstant ( ) { <nl> - if ( ! the_hole_constant_ . is_set ( ) ) { <nl> - the_hole_constant_ . set ( ImmovableHeapConstant ( factory ( ) - > the_hole_value ( ) ) ) ; <nl> - } <nl> - return the_hole_constant_ . get ( ) ; <nl> + return CACHED ( kTheHoleConstant , <nl> + ImmovableHeapConstant ( factory ( ) - > the_hole_value ( ) ) ) ; <nl> } <nl> <nl> <nl> Node * JSGraph : : TrueConstant ( ) { <nl> - if ( ! true_constant_ . is_set ( ) ) { <nl> - true_constant_ . set ( ImmovableHeapConstant ( factory ( ) - > true_value ( ) ) ) ; <nl> - } <nl> - return true_constant_ . get ( ) ; <nl> + return CACHED ( kTrueConstant , ImmovableHeapConstant ( factory ( ) - > true_value ( ) ) ) ; <nl> } <nl> <nl> <nl> Node * JSGraph : : FalseConstant ( ) { <nl> - if ( ! false_constant_ . is_set ( ) ) { <nl> - false_constant_ . set ( ImmovableHeapConstant ( factory ( ) - > false_value ( ) ) ) ; <nl> - } <nl> - return false_constant_ . get ( ) ; <nl> + return CACHED ( kFalseConstant , <nl> + ImmovableHeapConstant ( factory ( ) - > false_value ( ) ) ) ; <nl> } <nl> <nl> <nl> Node * JSGraph : : NullConstant ( ) { <nl> - if ( ! null_constant_ . is_set ( ) ) { <nl> - null_constant_ . set ( ImmovableHeapConstant ( factory ( ) - > null_value ( ) ) ) ; <nl> - } <nl> - return null_constant_ . get ( ) ; <nl> + return CACHED ( kNullConstant , ImmovableHeapConstant ( factory ( ) - > null_value ( ) ) ) ; <nl> } <nl> <nl> <nl> Node * JSGraph : : ZeroConstant ( ) { <nl> - if ( ! zero_constant_ . is_set ( ) ) zero_constant_ . set ( NumberConstant ( 0 . 0 ) ) ; <nl> - return zero_constant_ . get ( ) ; <nl> + return CACHED ( kZeroConstant , NumberConstant ( 0 . 0 ) ) ; <nl> } <nl> <nl> <nl> Node * JSGraph : : OneConstant ( ) { <nl> - if ( ! one_constant_ . is_set ( ) ) one_constant_ . set ( NumberConstant ( 1 . 0 ) ) ; <nl> - return one_constant_ . get ( ) ; <nl> + return CACHED ( kOneConstant , NumberConstant ( 1 . 0 ) ) ; <nl> } <nl> <nl> <nl> Node * JSGraph : : NaNConstant ( ) { <nl> - if ( ! nan_constant_ . is_set ( ) ) { <nl> - nan_constant_ . set ( NumberConstant ( std : : numeric_limits < double > : : quiet_NaN ( ) ) ) ; <nl> - } <nl> - return nan_constant_ . get ( ) ; <nl> + return CACHED ( kNaNConstant , <nl> + NumberConstant ( std : : numeric_limits < double > : : quiet_NaN ( ) ) ) ; <nl> } <nl> <nl> <nl> Node * JSGraph : : ExternalConstant ( ExternalReference reference ) { <nl> <nl> <nl> Node * JSGraph : : EmptyFrameState ( ) { <nl> - if ( ! empty_frame_state_ . is_set ( ) ) { <nl> + if ( cached_nodes_ [ kEmptyFrameState ] = = nullptr ) { <nl> Node * values = graph ( ) - > NewNode ( common ( ) - > StateValues ( 0 ) ) ; <nl> Node * state_node = graph ( ) - > NewNode ( <nl> common ( ) - > FrameState ( JS_FRAME , BailoutId : : None ( ) , <nl> OutputFrameStateCombine : : Ignore ( ) ) , <nl> values , values , values , NoContextConstant ( ) , UndefinedConstant ( ) ) ; <nl> - empty_frame_state_ . set ( state_node ) ; <nl> + cached_nodes_ [ kEmptyFrameState ] = state_node ; <nl> } <nl> - return empty_frame_state_ . get ( ) ; <nl> + return cached_nodes_ [ kEmptyFrameState ] ; <nl> } <nl> <nl> <nl> Node * JSGraph : : DeadControl ( ) { <nl> - if ( ! dead_control_ . is_set ( ) ) { <nl> - Node * dead_node = graph ( ) - > NewNode ( common ( ) - > Dead ( ) ) ; <nl> - dead_control_ . set ( dead_node ) ; <nl> - } <nl> - return dead_control_ . 
get ( ) ; <nl> + return CACHED ( kDeadControl , graph ( ) - > NewNode ( common ( ) - > Dead ( ) ) ) ; <nl> } <nl> <nl> <nl> void JSGraph : : GetCachedNodes ( NodeVector * nodes ) { <nl> cache_ . GetCachedNodes ( nodes ) ; <nl> - SetOncePointer < Node > * ptrs [ ] = { <nl> - & c_entry_stub_constant_ , & undefined_constant_ , & the_hole_constant_ , <nl> - & true_constant_ , & false_constant_ , & null_constant_ , <nl> - & zero_constant_ , & one_constant_ , & nan_constant_ } ; <nl> - for ( size_t i = 0 ; i < arraysize ( ptrs ) ; i + + ) { <nl> - if ( ptrs [ i ] - > is_set ( ) ) nodes - > push_back ( ptrs [ i ] - > get ( ) ) ; <nl> + for ( size_t i = 0 ; i < arraysize ( cached_nodes_ ) ; i + + ) { <nl> + if ( cached_nodes_ [ i ] ) nodes - > push_back ( cached_nodes_ [ i ] ) ; <nl> } <nl> } <nl> <nl> mmm a / src / compiler / js - graph . h <nl> ppp b / src / compiler / js - graph . h <nl> class JSGraph : public ZoneObject { <nl> common_ ( common ) , <nl> javascript_ ( javascript ) , <nl> machine_ ( machine ) , <nl> - cache_ ( zone ( ) ) { } <nl> + cache_ ( zone ( ) ) { <nl> + for ( int i = 0 ; i < kNumCachedNodes ; i + + ) cached_nodes_ [ i ] = nullptr ; <nl> + } <nl> <nl> / / Canonicalized global constants . <nl> Node * CEntryStubConstant ( int result_size ) ; <nl> class JSGraph : public ZoneObject { <nl> void GetCachedNodes ( NodeVector * nodes ) ; <nl> <nl> private : <nl> + enum CachedNode { <nl> + kCEntryStubConstant , <nl> + kUndefinedConstant , <nl> + kTheHoleConstant , <nl> + kTrueConstant , <nl> + kFalseConstant , <nl> + kNullConstant , <nl> + kZeroConstant , <nl> + kOneConstant , <nl> + kNaNConstant , <nl> + kEmptyFrameState , <nl> + kDeadControl , <nl> + kNumCachedNodes / / Must remain last . <nl> + } ; <nl> + <nl> Isolate * isolate_ ; <nl> Graph * graph_ ; <nl> CommonOperatorBuilder * common_ ; <nl> JSOperatorBuilder * javascript_ ; <nl> MachineOperatorBuilder * machine_ ; <nl> - <nl> - / / TODO ( titzer ) : make this into a simple array . <nl> - SetOncePointer < Node > c_entry_stub_constant_ ; <nl> - SetOncePointer < Node > undefined_constant_ ; <nl> - SetOncePointer < Node > the_hole_constant_ ; <nl> - SetOncePointer < Node > true_constant_ ; <nl> - SetOncePointer < Node > false_constant_ ; <nl> - SetOncePointer < Node > null_constant_ ; <nl> - SetOncePointer < Node > zero_constant_ ; <nl> - SetOncePointer < Node > one_constant_ ; <nl> - SetOncePointer < Node > nan_constant_ ; <nl> - SetOncePointer < Node > empty_frame_state_ ; <nl> - SetOncePointer < Node > dead_control_ ; <nl> - <nl> CommonNodeCache cache_ ; <nl> + Node * cached_nodes_ [ kNumCachedNodes ] ; <nl> <nl> Node * ImmovableHeapConstant ( Handle < HeapObject > value ) ; <nl> Node * NumberConstant ( double value ) ; <nl> mmm a / test / cctest / compiler / test - control - reducer . cc <nl> ppp b / test / cctest / compiler / test - control - reducer . cc <nl> TEST ( Trim_constants ) { <nl> } <nl> <nl> <nl> + TEST ( Trim_EmptyFrameState1 ) { <nl> + ControlReducerTester T ; <nl> + <nl> + Node * node = T . jsgraph . EmptyFrameState ( ) ; <nl> + T . Trim ( ) ; <nl> + <nl> + for ( Node * input : node - > inputs ( ) ) { <nl> + CHECK_NOT_NULL ( input ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( Trim_EmptyFrameState2 ) { <nl> + ControlReducerTester T ; <nl> + CheckTrimConstant ( & T , T . jsgraph . EmptyFrameState ( ) ) ; <nl> + } <nl> + <nl> + <nl> TEST ( CReducePhi1 ) { <nl> ControlReducerTester R ; <nl> <nl>
[ turbofan ] Clean up cached nodes in JSGraph .
v8/v8
addb10633c99b2b9b799cfc8cd0b796043adb1c9
2015-04-16T08:41:34Z
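The JSGraph cleanup above replaces a pile of `SetOncePointer` fields with one enum-indexed array, so filling a slot on first use and enumerating every cached node both become trivial. A sketch of that pattern; `Node` and `NodeCache` are illustrative stand-ins, not V8's types:

```cpp
#include <array>

struct Node { double value; };  // stand-in for a graph node

// Slot names index the array; kNumCachedNodes must remain last, as in the
// real enum, so it doubles as the array size.
enum CachedNode { kZeroConstant, kOneConstant, kNumCachedNodes };

class NodeCache {
 public:
  // Lazily create the node for a slot on first request, reuse it afterwards.
  Node* get(CachedNode slot, double value) {
    if (cached_nodes_[slot] == nullptr)
      cached_nodes_[slot] = new Node{value};
    return cached_nodes_[slot];
  }

  // Enumerating cached nodes is now a plain loop instead of a hand-written
  // list of SetOncePointer fields (the motivation for GetCachedNodes).
  template <typename F>
  void forEachCached(F f) {
    for (Node* n : cached_nodes_)
      if (n != nullptr) f(n);
  }

 private:
  std::array<Node*, kNumCachedNodes> cached_nodes_{};  // zero-initialized
};
```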
mmm a / stdlib / public / SDK / Foundation / NSError . swift <nl> ppp b / stdlib / public / SDK / Foundation / NSError . swift <nl> public extension CustomNSError { <nl> } <nl> } <nl> <nl> - extension CustomNSError where Self : RawRepresentable , Self . RawValue : SignedInteger { <nl> - / / The error code of Error with integral raw values is the raw value . <nl> - public var errorCode : Int { <nl> - return numericCast ( self . rawValue ) <nl> - } <nl> - } <nl> - <nl> - extension CustomNSError where Self : RawRepresentable , Self . RawValue : UnsignedInteger { <nl> + extension CustomNSError <nl> + where Self : RawRepresentable , Self . RawValue : FixedWidthInteger { <nl> / / The error code of Error with integral raw values is the raw value . <nl> public var errorCode : Int { <nl> return numericCast ( self . rawValue ) <nl> public extension Error where Self : CustomNSError { <nl> } <nl> <nl> public extension Error where Self : CustomNSError , Self : RawRepresentable , <nl> - Self . RawValue : SignedInteger { <nl> - / / / Default implementation for customized NSErrors . <nl> - var _code : Int { return self . errorCode } <nl> - } <nl> - <nl> - public extension Error where Self : CustomNSError , Self : RawRepresentable , <nl> - Self . RawValue : UnsignedInteger { <nl> + Self . RawValue : FixedWidthInteger { <nl> / / / Default implementation for customized NSErrors . <nl> var _code : Int { return self . errorCode } <nl> } <nl> extension _BridgedNSError { <nl> public var _domain : String { return Self . _nsErrorDomain } <nl> } <nl> <nl> - extension _BridgedNSError where Self . RawValue : SignedInteger { <nl> + extension _BridgedNSError where Self . RawValue : FixedWidthInteger { <nl> public var _code : Int { return Int ( rawValue ) } <nl> <nl> public init ? ( _bridgedNSError : NSError ) { <nl> extension _BridgedNSError where Self . RawValue : SignedInteger { <nl> public var hashValue : Int { return _code } <nl> } <nl> <nl> - extension _BridgedNSError where Self . RawValue : UnsignedInteger { <nl> - public var _code : Int { <nl> - return Int ( bitPattern : UInt ( rawValue ) ) <nl> - } <nl> - <nl> - public init ? ( _bridgedNSError : NSError ) { <nl> - if _bridgedNSError . domain ! = Self . _nsErrorDomain { <nl> - return nil <nl> - } <nl> - <nl> - self . init ( rawValue : RawValue ( UInt ( bitPattern : _bridgedNSError . code ) ) ) <nl> - } <nl> - <nl> - public var hashValue : Int { return _code } <nl> - } <nl> - <nl> / / / Describes a bridged error that stores the underlying NSError , so <nl> / / / it can be queried . <nl> public protocol _BridgedStoredNSError : <nl> mmm a / stdlib / public / core / ErrorType . swift <nl> ppp b / stdlib / public / core / ErrorType . swift <nl> extension Error { <nl> } <nl> } <nl> <nl> - extension Error where Self : RawRepresentable , Self . RawValue : SignedInteger { <nl> - / / The error code of Error with integral raw values is the raw value . <nl> - public var _code : Int { <nl> - return numericCast ( self . rawValue ) <nl> - } <nl> - } <nl> - <nl> - extension Error where Self : RawRepresentable , Self . RawValue : UnsignedInteger { <nl> + extension Error where Self : RawRepresentable , Self . RawValue : FixedWidthInteger { <nl> / / The error code of Error with integral raw values is the raw value . <nl> public var _code : Int { <nl> return numericCast ( self . rawValue ) <nl>
[ Foundation ] Collapse ( SignedInteger | UnsignedInteger ) requirements into FixedWidthInteger .
apple/swift
bd799d4b432a0d9af518f9ee5556f3718c33225b
2018-08-28T17:49:01Z
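A rough C++ analogue of the consolidation above, not the Swift code itself: where the stdlib previously kept near-identical extensions constrained on SignedInteger and UnsignedInteger, one FixedWidthInteger constraint now covers both. The C++20 sketch below shows the same move, a single constrained template replacing two overloads:

```cpp
#include <concepts>

// One constrained template instead of separate signed/unsigned overloads;
// std::integral plays the role FixedWidthInteger plays in the Swift change.
template <std::integral T>
int errorCode(T rawValue) {
  return static_cast<int>(rawValue);  // analogue of Swift's numericCast
}
```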
mmm a / include / mlir / EDSC / Builders . h <nl> ppp b / include / mlir / EDSC / Builders . h <nl> struct index_t { <nl> } ; <nl> <nl> class BlockHandle ; <nl> + class CapturableHandle ; <nl> class NestedBuilder ; <nl> class ValueHandle ; <nl> <nl> class LoopBuilder : public NestedBuilder { <nl> / / / In order to be admissible in a nested ArrayRef < ValueHandle > , operator ( ) <nl> / / / returns a ValueHandle : : null ( ) that cannot be captured . <nl> / / TODO ( ntv ) : when loops return escaping ssa - values , this should be adapted . <nl> - ValueHandle operator ( ) ( ArrayRef < ValueHandle > stmts ) ; <nl> + ValueHandle operator ( ) ( ArrayRef < CapturableHandle > stmts ) ; <nl> } ; <nl> <nl> / / / Explicit nested LoopBuilder . Offers a compressed multi - loop builder to avoid <nl> class LoopNestBuilder { <nl> ArrayRef < ValueHandle > ubs , ArrayRef < int64_t > steps ) ; <nl> <nl> / / TODO ( ntv ) : when loops return escaping ssa - values , this should be adapted . <nl> - ValueHandle operator ( ) ( ArrayRef < ValueHandle > stmts ) ; <nl> + ValueHandle operator ( ) ( ArrayRef < CapturableHandle > stmts ) ; <nl> <nl> private : <nl> SmallVector < LoopBuilder , 4 > loops ; <nl> class BlockBuilder : public NestedBuilder { <nl> / / / The only purpose of this operator is to serve as a sequence point so that <nl> / / / the evaluation of ` stmts ` ( which build IR snippets in a scoped fashion ) is <nl> / / / sequenced strictly after the constructor of BlockBuilder . <nl> - void operator ( ) ( ArrayRef < ValueHandle > stmts ) ; <nl> + void operator ( ) ( ArrayRef < CapturableHandle > stmts ) ; <nl> <nl> private : <nl> BlockBuilder ( const BlockBuilder & ) = delete ; <nl> BlockBuilder & operator = ( const BlockBuilder & other ) = delete ; <nl> } ; <nl> <nl> + / / / Base class for Handles that cannot be constructed explicitly by a user of <nl> + / / / the API . <nl> + struct CapturableHandle { <nl> + protected : <nl> + CapturableHandle ( ) = default ; <nl> + } ; <nl> + <nl> / / / ValueHandle implements a ( potentially " delayed " ) typed Value abstraction . <nl> / / / ValueHandle should be captured by pointer but otherwise passed by Value <nl> / / / everywhere . <nl> class BlockBuilder : public NestedBuilder { <nl> / / / 2 . delayed state ( empty value ) , in which case it represents an eagerly <nl> / / / typed " delayed " value that can be hold a Value in the future ; <nl> / / / 3 . constructed state , in which case it holds a Value . <nl> - class ValueHandle { <nl> + / / / <nl> + / / / A ValueHandle is meant to capture a single Value * and should be used for <nl> + / / / instructions that have a single result . For convenience of use , we also <nl> + / / / include AffineForOp in this category although it does not return a value . <nl> + / / / In the case of AffineForOp , the captured Value * is the loop induction <nl> + / / / variable . <nl> + class ValueHandle : public CapturableHandle { <nl> public : <nl> / / / A ValueHandle in a null state can never be captured ; <nl> static ValueHandle null ( ) { return ValueHandle ( ) ; } <nl> class ValueHandle { <nl> <nl> / / / ValueHandle is a value type , the assignment operator typechecks before <nl> / / / assigning . <nl> - / / / ` ` ` <nl> ValueHandle & operator = ( const ValueHandle & other ) ; <nl> <nl> / / / Implicit conversion useful for automatic conversion to Container < Value * > . <nl> operator Value * ( ) const { return getValue ( ) ; } <nl> <nl> / / / Generic mlir : : Op create . 
This is the key to being extensible to the whole <nl> - / / / of MLIR without duplicating the type system or the AST . <nl> + / / / of MLIR without duplicating the type system or the op definitions . <nl> template < typename Op , typename . . . Args > <nl> static ValueHandle create ( Args . . . args ) ; <nl> <nl> class ValueHandle { <nl> static ValueHandle createComposedAffineApply ( AffineMap map , <nl> ArrayRef < Value * > operands ) ; <nl> <nl> + / / / Generic create for a named instruction producing a single value . <nl> + static ValueHandle create ( StringRef name , ArrayRef < ValueHandle > operands , <nl> + ArrayRef < Type > resultTypes , <nl> + ArrayRef < NamedAttribute > attributes = { } ) ; <nl> + <nl> bool hasValue ( ) const { return v ! = nullptr ; } <nl> Value * getValue ( ) const { return v ; } <nl> bool hasType ( ) const { return t ! = Type ( ) ; } <nl> class ValueHandle { <nl> Value * v ; <nl> } ; <nl> <nl> + / / / An InstructionHandle can be used in lieu of ValueHandle to capture the <nl> + / / / instruction in cases when one does not care about , or cannot extract , a <nl> + / / / unique Value * from the instruction . <nl> + / / / This can be used for capturing zero result instructions as well as <nl> + / / / multi - result instructions that are not supported by ValueHandle . <nl> + / / / We do not distinguish further between zero and multi - result instructions at <nl> + / / / this time . <nl> + struct InstructionHandle : public CapturableHandle { <nl> + InstructionHandle ( ) : inst ( nullptr ) { } <nl> + InstructionHandle ( Instruction * inst ) : inst ( inst ) { } <nl> + <nl> + InstructionHandle ( const InstructionHandle & ) = default ; <nl> + InstructionHandle & operator = ( const InstructionHandle & ) = default ; <nl> + <nl> + / / / Generic mlir : : Op create . This is the key to being extensible to the whole <nl> + / / / of MLIR without duplicating the type system or the op definitions . <nl> + template < typename Op , typename . . . Args > <nl> + static InstructionHandle create ( Args . . . args ) ; <nl> + <nl> + / / / Generic create for a named instruction . <nl> + static InstructionHandle create ( StringRef name , <nl> + ArrayRef < ValueHandle > operands , <nl> + ArrayRef < Type > resultTypes , <nl> + ArrayRef < NamedAttribute > attributes = { } ) ; <nl> + <nl> + operator Instruction * ( ) { return inst ; } <nl> + <nl> + private : <nl> + Instruction * inst ; <nl> + } ; <nl> + <nl> + / / / Simple wrapper to build a generic instruction without successor blocks . <nl> + template < typename HandleType > struct CustomInstruction { <nl> + CustomInstruction ( StringRef name ) : name ( name ) { <nl> + static_assert ( std : : is_same < HandleType , ValueHandle > ( ) | | <nl> + std : : is_same < HandleType , InstructionHandle > ( ) , <nl> + " Only CustomInstruction < ValueHandle > or " <nl> + " CustomInstruction < InstructionHandle > can be constructed . " ) ; <nl> + } <nl> + HandleType operator ( ) ( ArrayRef < ValueHandle > operands = { } , <nl> + ArrayRef < Type > resultTypes = { } , <nl> + ArrayRef < NamedAttribute > attributes = { } ) { <nl> + return HandleType : : create ( name , operands , resultTypes , attributes ) ; <nl> + } <nl> + std : : string name ; <nl> + } ; <nl> + <nl> / / / A BlockHandle represents a ( potentially " delayed " ) Block abstraction . <nl> / / / This extra abstraction is necessary because an mlir : : Block is not an <nl> / / / mlir : : Value . 
<nl> / / / A BlockHandle should be captured by pointer but otherwise passed by Value <nl> / / / everywhere . <nl> - class BlockHandle { <nl> + class BlockHandle : public CapturableHandle { <nl> public : <nl> / / / A BlockHandle constructed without an mlir : : Block * represents a " delayed " <nl> / / / Block . A delayed Block represents the declaration ( in the PL sense ) of a <nl> class BlockHandle { <nl> mlir : : Block * block ; <nl> } ; <nl> <nl> + template < typename Op , typename . . . Args > <nl> + InstructionHandle InstructionHandle : : create ( Args . . . args ) { <nl> + return InstructionHandle ( <nl> + ScopedContext : : getBuilder ( ) <nl> + - > create < Op > ( ScopedContext : : getLocation ( ) , args . . . ) <nl> + - > getInstruction ( ) ) ; <nl> + } <nl> + <nl> template < typename Op , typename . . . Args > <nl> ValueHandle ValueHandle : : create ( Args . . . args ) { <nl> Instruction * inst = ScopedContext : : getBuilder ( ) <nl> ValueHandle ValueHandle : : create ( Args . . . args ) { <nl> f - > createBody ( ) ; <nl> return ValueHandle ( f - > getInductionVar ( ) ) ; <nl> } <nl> - return ValueHandle ( ) ; <nl> } <nl> - llvm_unreachable ( " unsupported inst with > 1 results " ) ; <nl> + llvm_unreachable ( " unsupported instruction , use an InstructionHandle instead " ) ; <nl> } <nl> <nl> namespace op { <nl> mmm a / include / mlir / EDSC / Helpers . h <nl> ppp b / include / mlir / EDSC / Helpers . h <nl> struct IndexedValue { <nl> <nl> / / / Emits a ` store ` . <nl> / / NOLINTNEXTLINE : unconventional - assign - operator <nl> - ValueHandle operator = ( ValueHandle rhs ) { <nl> + InstructionHandle operator = ( ValueHandle rhs ) { <nl> return intrinsics : : STORE ( rhs , getBase ( ) , indices ) ; <nl> } <nl> <nl> struct IndexedValue { <nl> ValueHandle operator - ( ValueHandle e ) ; <nl> ValueHandle operator * ( ValueHandle e ) ; <nl> ValueHandle operator / ( ValueHandle e ) ; <nl> - ValueHandle operator + = ( ValueHandle e ) ; <nl> - ValueHandle operator - = ( ValueHandle e ) ; <nl> - ValueHandle operator * = ( ValueHandle e ) ; <nl> - ValueHandle operator / = ( ValueHandle e ) ; <nl> + InstructionHandle operator + = ( ValueHandle e ) ; <nl> + InstructionHandle operator - = ( ValueHandle e ) ; <nl> + InstructionHandle operator * = ( ValueHandle e ) ; <nl> + InstructionHandle operator / = ( ValueHandle e ) ; <nl> ValueHandle operator + ( IndexedValue e ) { <nl> return * this + static_cast < ValueHandle > ( e ) ; <nl> } <nl> struct IndexedValue { <nl> ValueHandle operator / ( IndexedValue e ) { <nl> return * this / static_cast < ValueHandle > ( e ) ; <nl> } <nl> - ValueHandle operator + = ( IndexedValue e ) { <nl> + InstructionHandle operator + = ( IndexedValue e ) { <nl> return this - > operator + = ( static_cast < ValueHandle > ( e ) ) ; <nl> } <nl> - ValueHandle operator - = ( IndexedValue e ) { <nl> + InstructionHandle operator - = ( IndexedValue e ) { <nl> return this - > operator - = ( static_cast < ValueHandle > ( e ) ) ; <nl> } <nl> - ValueHandle operator * = ( IndexedValue e ) { <nl> + InstructionHandle operator * = ( IndexedValue e ) { <nl> return this - > operator * = ( static_cast < ValueHandle > ( e ) ) ; <nl> } <nl> - ValueHandle operator / = ( IndexedValue e ) { <nl> + InstructionHandle operator / = ( IndexedValue e ) { <nl> return this - > operator / = ( static_cast < ValueHandle > ( e ) ) ; <nl> } <nl> <nl> mmm a / include / mlir / EDSC / Intrinsics . h <nl> ppp b / include / mlir / EDSC / Intrinsics . 
h <nl> namespace mlir { <nl> namespace edsc { <nl> <nl> class BlockHandle ; <nl> + class InstructionHandle ; <nl> class ValueHandle ; <nl> <nl> / / / Provides a set of first class intrinsics . <nl> namespace intrinsics { <nl> / / / <nl> / / / Prerequisites : <nl> / / / All Handles have already captured previously constructed IR objects . <nl> - ValueHandle BR ( BlockHandle bh , ArrayRef < ValueHandle > operands ) ; <nl> + InstructionHandle BR ( BlockHandle bh , ArrayRef < ValueHandle > operands ) ; <nl> <nl> / / / Creates a new mlir : : Block * and branches to it from the current block . <nl> / / / Argument types are specified by ` operands ` . <nl> ValueHandle BR ( BlockHandle bh , ArrayRef < ValueHandle > operands ) ; <nl> / / / All ` operands ` have already captured an mlir : : Value * <nl> / / / captures . size ( ) = = operands . size ( ) <nl> / / / captures and operands are pairwise of the same type . <nl> - ValueHandle BR ( BlockHandle * bh , ArrayRef < ValueHandle * > captures , <nl> - ArrayRef < ValueHandle > operands ) ; <nl> + InstructionHandle BR ( BlockHandle * bh , ArrayRef < ValueHandle * > captures , <nl> + ArrayRef < ValueHandle > operands ) ; <nl> <nl> / / / Branches into the mlir : : Block * captured by BlockHandle ` trueBranch ` with <nl> / / / ` trueOperands ` if ` cond ` evaluates to ` true ` ( resp . ` falseBranch ` and <nl> ValueHandle BR ( BlockHandle * bh , ArrayRef < ValueHandle * > captures , <nl> / / / <nl> / / / Prerequisites : <nl> / / / All Handles have captured previouly constructed IR objects . <nl> - ValueHandle COND_BR ( ValueHandle cond , BlockHandle trueBranch , <nl> - ArrayRef < ValueHandle > trueOperands , BlockHandle falseBranch , <nl> - ArrayRef < ValueHandle > falseOperands ) ; <nl> + InstructionHandle COND_BR ( ValueHandle cond , BlockHandle trueBranch , <nl> + ArrayRef < ValueHandle > trueOperands , <nl> + BlockHandle falseBranch , <nl> + ArrayRef < ValueHandle > falseOperands ) ; <nl> <nl> / / / Eagerly creates new mlir : : Block * with argument types specified by <nl> / / / ` trueOperands ` / ` falseOperands ` . <nl> ValueHandle COND_BR ( ValueHandle cond , BlockHandle trueBranch , <nl> / / / ` falseCaptures ` . size ( ) = = ` falseOperands ` . size ( ) <nl> / / / ` trueCaptures ` and ` trueOperands ` are pairwise of the same type <nl> / / / ` falseCaptures ` and ` falseOperands ` are pairwise of the same type . <nl> - ValueHandle COND_BR ( ValueHandle cond , BlockHandle * trueBranch , <nl> - ArrayRef < ValueHandle * > trueCaptures , <nl> - ArrayRef < ValueHandle > trueOperands , <nl> - BlockHandle * falseBranch , <nl> - ArrayRef < ValueHandle * > falseCaptures , <nl> - ArrayRef < ValueHandle > falseOperands ) ; <nl> + InstructionHandle COND_BR ( ValueHandle cond , BlockHandle * trueBranch , <nl> + ArrayRef < ValueHandle * > trueCaptures , <nl> + ArrayRef < ValueHandle > trueOperands , <nl> + BlockHandle * falseBranch , <nl> + ArrayRef < ValueHandle * > falseCaptures , <nl> + ArrayRef < ValueHandle > falseOperands ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / TODO ( ntv ) : Intrinsics below this line should be TableGen ' d . <nl> ValueHandle LOAD ( ValueHandle base , llvm : : ArrayRef < ValueHandle > indices ) ; <nl> / / / Builds an mlir : : ReturnOp with the proper ` operands ` that each must have <nl> / / / captured an mlir : : Value * . <nl> / / / Returns an empty ValueHandle . 
<nl> - ValueHandle RETURN ( llvm : : ArrayRef < ValueHandle > operands ) ; <nl> + InstructionHandle RETURN ( llvm : : ArrayRef < ValueHandle > operands ) ; <nl> <nl> / / / Builds an mlir : : StoreOp with the proper ` operands ` that each must have <nl> / / / captured an mlir : : Value * . <nl> / / / Returns an empty ValueHandle . <nl> - ValueHandle STORE ( ValueHandle value , ValueHandle base , <nl> - llvm : : ArrayRef < ValueHandle > indices ) ; <nl> + InstructionHandle STORE ( ValueHandle value , ValueHandle base , <nl> + llvm : : ArrayRef < ValueHandle > indices ) ; <nl> <nl> } / / namespace intrinsics <nl> <nl> mmm a / lib / EDSC / Builders . cpp <nl> ppp b / lib / EDSC / Builders . cpp <nl> mlir : : edsc : : ValueHandle : : createComposedAffineApply ( AffineMap map , <nl> return ValueHandle ( inst - > getResult ( 0 ) ) ; <nl> } <nl> <nl> + ValueHandle ValueHandle : : create ( StringRef name , ArrayRef < ValueHandle > operands , <nl> + ArrayRef < Type > resultTypes , <nl> + ArrayRef < NamedAttribute > attributes ) { <nl> + Instruction * inst = <nl> + InstructionHandle : : create ( name , operands , resultTypes , attributes ) ; <nl> + if ( auto f = inst - > dyn_cast < AffineForOp > ( ) ) { <nl> + / / Immediately create the loop body so we can just insert instructions right <nl> + / / away . <nl> + f - > createBody ( ) ; <nl> + return ValueHandle ( f - > getInductionVar ( ) ) ; <nl> + } <nl> + if ( inst - > getNumResults ( ) = = 1 ) { <nl> + return ValueHandle ( inst - > getResult ( 0 ) ) ; <nl> + } <nl> + llvm_unreachable ( " unsupported instruction , use an InstructionHandle instead " ) ; <nl> + } <nl> + <nl> + InstructionHandle <nl> + InstructionHandle : : create ( StringRef name , ArrayRef < ValueHandle > operands , <nl> + ArrayRef < Type > resultTypes , <nl> + ArrayRef < NamedAttribute > attributes ) { <nl> + OperationState state ( ScopedContext : : getContext ( ) , <nl> + ScopedContext : : getLocation ( ) , name ) ; <nl> + SmallVector < Value * , 4 > ops ( operands . begin ( ) , operands . end ( ) ) ; <nl> + state . addOperands ( ops ) ; <nl> + state . addTypes ( resultTypes ) ; <nl> + for ( const auto & attr : attributes ) { <nl> + state . addAttribute ( attr . first , attr . second ) ; <nl> + } <nl> + return InstructionHandle ( ScopedContext : : getBuilder ( ) - > createOperation ( state ) ) ; <nl> + } <nl> + <nl> BlockHandle mlir : : edsc : : BlockHandle : : create ( ArrayRef < Type > argTypes ) { <nl> BlockHandle res ; <nl> res . block = ScopedContext : : getBuilder ( ) - > createBlock ( ) ; <nl> mlir : : edsc : : LoopBuilder : : LoopBuilder ( ValueHandle * iv , <nl> enter ( body ) ; <nl> } <nl> <nl> - ValueHandle mlir : : edsc : : LoopBuilder : : operator ( ) ( ArrayRef < ValueHandle > stmts ) { <nl> + ValueHandle <nl> + mlir : : edsc : : LoopBuilder : : operator ( ) ( ArrayRef < CapturableHandle > stmts ) { <nl> / / Call to ` exit ` must be explicit and asymmetric ( cannot happen in the <nl> / / destructor ) because of ordering wrt comma operator . <nl> / / / The particular use case concerns nested blocks : <nl> mlir : : edsc : : LoopNestBuilder : : LoopNestBuilder ( ArrayRef < ValueHandle * > ivs , <nl> } <nl> <nl> ValueHandle <nl> - mlir : : edsc : : LoopNestBuilder : : operator ( ) ( ArrayRef < ValueHandle > stmts ) { <nl> + mlir : : edsc : : LoopNestBuilder : : operator ( ) ( ArrayRef < CapturableHandle > stmts ) { <nl> / / Iterate on the calling operator ( ) on all the loops in the nest . 
<nl> / / The iteration order is from innermost to outermost because enter / exit needs <nl> / / to be asymmetric ( i . e . enter ( ) occurs on LoopBuilder construction , exit ( ) <nl> mlir : : edsc : : BlockBuilder : : BlockBuilder ( BlockHandle * bh , <nl> <nl> / / / Only serves as an ordering point between entering nested block and creating <nl> / / / stmts . <nl> - void mlir : : edsc : : BlockBuilder : : operator ( ) ( ArrayRef < ValueHandle > stmts ) { <nl> + void mlir : : edsc : : BlockBuilder : : operator ( ) ( ArrayRef < CapturableHandle > stmts ) { <nl> / / Call to ` exit ` must be explicit and asymmetric ( cannot happen in the <nl> / / destructor ) because of ordering wrt comma operator . <nl> exit ( ) ; <nl> mmm a / lib / EDSC / Helpers . cpp <nl> ppp b / lib / EDSC / Helpers . cpp <nl> ValueHandle mlir : : edsc : : IndexedValue : : operator / ( ValueHandle e ) { <nl> return static_cast < ValueHandle > ( * this ) / e ; <nl> } <nl> <nl> - ValueHandle mlir : : edsc : : IndexedValue : : operator + = ( ValueHandle e ) { <nl> + InstructionHandle mlir : : edsc : : IndexedValue : : operator + = ( ValueHandle e ) { <nl> using op : : operator + ; <nl> return intrinsics : : STORE ( * this + e , getBase ( ) , indices ) ; <nl> } <nl> - ValueHandle mlir : : edsc : : IndexedValue : : operator - = ( ValueHandle e ) { <nl> + InstructionHandle mlir : : edsc : : IndexedValue : : operator - = ( ValueHandle e ) { <nl> using op : : operator - ; <nl> return intrinsics : : STORE ( * this - e , getBase ( ) , indices ) ; <nl> } <nl> - ValueHandle mlir : : edsc : : IndexedValue : : operator * = ( ValueHandle e ) { <nl> + InstructionHandle mlir : : edsc : : IndexedValue : : operator * = ( ValueHandle e ) { <nl> using op : : operator * ; <nl> return intrinsics : : STORE ( * this * e , getBase ( ) , indices ) ; <nl> } <nl> - ValueHandle mlir : : edsc : : IndexedValue : : operator / = ( ValueHandle e ) { <nl> + InstructionHandle mlir : : edsc : : IndexedValue : : operator / = ( ValueHandle e ) { <nl> using op : : operator / ; <nl> return intrinsics : : STORE ( * this / e , getBase ( ) , indices ) ; <nl> } <nl> mmm a / lib / EDSC / Intrinsics . cpp <nl> ppp b / lib / EDSC / Intrinsics . cpp <nl> <nl> using namespace mlir ; <nl> using namespace mlir : : edsc ; <nl> <nl> - ValueHandle mlir : : edsc : : intrinsics : : BR ( BlockHandle bh , <nl> - ArrayRef < ValueHandle > operands ) { <nl> + InstructionHandle mlir : : edsc : : intrinsics : : BR ( BlockHandle bh , <nl> + ArrayRef < ValueHandle > operands ) { <nl> assert ( bh & & " Expected already captured BlockHandle " ) ; <nl> for ( auto & o : operands ) { <nl> ( void ) o ; <nl> assert ( o & & " Expected already captured ValueHandle " ) ; <nl> } <nl> SmallVector < Value * , 4 > ops ( operands . begin ( ) , operands . end ( ) ) ; <nl> - return ValueHandle : : create < BranchOp > ( bh . getBlock ( ) , ops ) ; <nl> + return InstructionHandle : : create < BranchOp > ( bh . 
getBlock ( ) , ops ) ; <nl> } <nl> static void enforceEmptyCapturesMatchOperands ( ArrayRef < ValueHandle * > captures , <nl> ArrayRef < ValueHandle > operands ) { <nl> static void enforceEmptyCapturesMatchOperands ( ArrayRef < ValueHandle * > captures , <nl> } <nl> } <nl> <nl> - ValueHandle mlir : : edsc : : intrinsics : : BR ( BlockHandle * bh , <nl> - ArrayRef < ValueHandle * > captures , <nl> - ArrayRef < ValueHandle > operands ) { <nl> + InstructionHandle mlir : : edsc : : intrinsics : : BR ( BlockHandle * bh , <nl> + ArrayRef < ValueHandle * > captures , <nl> + ArrayRef < ValueHandle > operands ) { <nl> assert ( ! * bh & & " Unexpected already captured BlockHandle " ) ; <nl> enforceEmptyCapturesMatchOperands ( captures , operands ) ; <nl> { / / Clone the scope explicitly to avoid modifying the insertion point in the <nl> ValueHandle mlir : : edsc : : intrinsics : : BR ( BlockHandle * bh , <nl> BlockBuilder ( bh , captures ) ( { / * no body * / } ) ; <nl> } / / Release before adding the branch to the eagerly created block . <nl> SmallVector < Value * , 4 > ops ( operands . begin ( ) , operands . end ( ) ) ; <nl> - return ValueHandle : : create < BranchOp > ( bh - > getBlock ( ) , ops ) ; <nl> + return InstructionHandle : : create < BranchOp > ( bh - > getBlock ( ) , ops ) ; <nl> } <nl> <nl> - ValueHandle <nl> + InstructionHandle <nl> mlir : : edsc : : intrinsics : : COND_BR ( ValueHandle cond , BlockHandle trueBranch , <nl> ArrayRef < ValueHandle > trueOperands , <nl> BlockHandle falseBranch , <nl> ArrayRef < ValueHandle > falseOperands ) { <nl> SmallVector < Value * , 4 > trueOps ( trueOperands . begin ( ) , trueOperands . end ( ) ) ; <nl> SmallVector < Value * , 4 > falseOps ( falseOperands . begin ( ) , falseOperands . end ( ) ) ; <nl> - return ValueHandle : : create < CondBranchOp > ( cond , trueBranch . getBlock ( ) , trueOps , <nl> - falseBranch . getBlock ( ) , falseOps ) ; <nl> + return InstructionHandle : : create < CondBranchOp > ( <nl> + cond , trueBranch . getBlock ( ) , trueOps , falseBranch . getBlock ( ) , falseOps ) ; <nl> } <nl> <nl> - ValueHandle mlir : : edsc : : intrinsics : : COND_BR ( <nl> + InstructionHandle mlir : : edsc : : intrinsics : : COND_BR ( <nl> ValueHandle cond , BlockHandle * trueBranch , <nl> ArrayRef < ValueHandle * > trueCaptures , ArrayRef < ValueHandle > trueOperands , <nl> BlockHandle * falseBranch , ArrayRef < ValueHandle * > falseCaptures , <nl> ValueHandle mlir : : edsc : : intrinsics : : COND_BR ( <nl> } / / Release before adding the branch to the eagerly created block . <nl> SmallVector < Value * , 4 > trueOps ( trueOperands . begin ( ) , trueOperands . end ( ) ) ; <nl> SmallVector < Value * , 4 > falseOps ( falseOperands . begin ( ) , falseOperands . end ( ) ) ; <nl> - return ValueHandle : : create < CondBranchOp > ( <nl> + return InstructionHandle : : create < CondBranchOp > ( <nl> cond , trueBranch - > getBlock ( ) , trueOps , falseBranch - > getBlock ( ) , falseOps ) ; <nl> } <nl> <nl> mlir : : edsc : : intrinsics : : LOAD ( ValueHandle base , <nl> return ValueHandle : : create < LoadOp > ( base . getValue ( ) , ops ) ; <nl> } <nl> <nl> - ValueHandle mlir : : edsc : : intrinsics : : RETURN ( ArrayRef < ValueHandle > operands ) { <nl> + InstructionHandle <nl> + mlir : : edsc : : intrinsics : : RETURN ( ArrayRef < ValueHandle > operands ) { <nl> SmallVector < Value * , 4 > ops ( operands . begin ( ) , operands . 
end ( ) ) ; <nl> - return ValueHandle : : create < ReturnOp > ( ops ) ; <nl> + return InstructionHandle : : create < ReturnOp > ( ops ) ; <nl> } <nl> <nl> - ValueHandle <nl> + InstructionHandle <nl> mlir : : edsc : : intrinsics : : STORE ( ValueHandle value , ValueHandle base , <nl> llvm : : ArrayRef < ValueHandle > indices = { } ) { <nl> SmallVector < Value * , 4 > ops ( indices . begin ( ) , indices . end ( ) ) ; <nl> - return ValueHandle : : create < StoreOp > ( value . getValue ( ) , base . getValue ( ) , ops ) ; <nl> + return InstructionHandle : : create < StoreOp > ( value . getValue ( ) , base . getValue ( ) , <nl> + ops ) ; <nl> } <nl> mmm a / test / EDSC / builder - api - test . cpp <nl> ppp b / test / EDSC / builder - api - test . cpp <nl> TEST_FUNC ( builder_helpers ) { <nl> f - > print ( llvm : : outs ( ) ) ; <nl> } <nl> <nl> + TEST_FUNC ( custom_ops ) { <nl> + using namespace edsc ; <nl> + using namespace edsc : : intrinsics ; <nl> + using namespace edsc : : op ; <nl> + auto indexType = IndexType : : get ( & globalContext ( ) ) ; <nl> + auto f = makeFunction ( " custom_ops " , { } , { indexType , indexType } ) ; <nl> + <nl> + ScopedContext scope ( f . get ( ) ) ; <nl> + CustomInstruction < ValueHandle > MY_CUSTOM_OP ( " my_custom_op " ) ; <nl> + CustomInstruction < InstructionHandle > MY_CUSTOM_INST_0 ( " my_custom_inst_0 " ) ; <nl> + CustomInstruction < InstructionHandle > MY_CUSTOM_INST_2 ( " my_custom_inst_2 " ) ; <nl> + <nl> + / / clang - format off <nl> + ValueHandle vh ( indexType ) ; <nl> + InstructionHandle ih0 , ih2 ; <nl> + IndexHandle m , n , M ( f - > getArgument ( 0 ) ) , N ( f - > getArgument ( 1 ) ) ; <nl> + IndexHandle ten ( index_t ( 10 ) ) , twenty ( index_t ( 20 ) ) ; <nl> + LoopNestBuilder ( { & m , & n } , { M , N } , { M + ten , N + twenty } , { 1 , 1 } ) ( { <nl> + vh = MY_CUSTOM_OP ( { m , m + n } , { indexType } , { } ) , <nl> + ih0 = MY_CUSTOM_INST_0 ( { m , m + n } , { } ) , <nl> + ih2 = MY_CUSTOM_INST_2 ( { m , m + n } , { indexType , indexType } ) , <nl> + } ) ; <nl> + <nl> + / / CHECK - LABEL : @ custom_ops <nl> + / / CHECK : for % i0 { { . * } } <nl> + / / CHECK : for % i1 { { . * } } <nl> + / / CHECK : { { . * } } = " my_custom_op " { { . * } } : ( index , index ) - > index <nl> + / / CHECK : " my_custom_inst_0 " { { . * } } : ( index , index ) - > ( ) <nl> + / / CHECK : { { . * } } = " my_custom_inst_2 " { { . * } } : ( index , index ) - > ( index , index ) <nl> + / / clang - format on <nl> + f - > print ( llvm : : outs ( ) ) ; <nl> + } <nl> + <nl> int main ( ) { <nl> RUN_TESTS ( ) ; <nl> return 0 ; <nl>
Add support for custom ops in declarative builders .
tensorflow/tensorflow
629ea3702625de5b384bfe4844c48b2b8c227641
2019-03-30T00:09:05Z
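The EDSC change above hinges on a small class hierarchy: a base with a protected constructor marks what a builder's `operator()(ArrayRef<CapturableHandle>)` may capture, while two derived handles split the single-result case from the zero- and multi-result case. A minimal mirror of that shape, with `void*` standing in for the MLIR pointer types:

```cpp
// Users cannot construct the base directly; only the derived handles are
// admissible in a builder's capture list, which is the whole point of the
// CapturableHandle layer.
struct CapturableHandle {
 protected:
  CapturableHandle() = default;
};

struct ValueHandle : CapturableHandle {
  void* value = nullptr;  // stand-in for mlir::Value* (single result)
};

struct InstructionHandle : CapturableHandle {
  void* inst = nullptr;  // stand-in for mlir::Instruction* (0 or N results)
};
```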
mmm a / include / swift / AST / Decl . h <nl> ppp b / include / swift / AST / Decl . h <nl> class alignas ( 1 < < DeclAlignInBits ) Decl { <nl> IsIncompatibleWithWeakReferences : 1 <nl> ) ; <nl> <nl> - SWIFT_INLINE_BITFIELD ( StructDecl , NominalTypeDecl , 1 , <nl> + SWIFT_INLINE_BITFIELD ( StructDecl , NominalTypeDecl , 1 + 1 , <nl> / / / True if this struct has storage for fields that aren ' t accessible in <nl> / / / Swift . <nl> - HasUnreferenceableStorage : 1 <nl> + HasUnreferenceableStorage : 1 , <nl> + / / / True if this struct is imported from C + + and not trivially copyable . <nl> + IsCxxNotTriviallyCopyable : 1 <nl> ) ; <nl> <nl> SWIFT_INLINE_BITFIELD ( EnumDecl , NominalTypeDecl , 2 + 1 , <nl> class StructDecl final : public NominalTypeDecl { <nl> void setHasUnreferenceableStorage ( bool v ) { <nl> Bits . StructDecl . HasUnreferenceableStorage = v ; <nl> } <nl> + <nl> + bool isCxxNotTriviallyCopyable ( ) const { <nl> + return Bits . StructDecl . IsCxxNotTriviallyCopyable ; <nl> + } <nl> + <nl> + void setIsCxxNotTriviallyCopyable ( bool v ) { <nl> + Bits . StructDecl . IsCxxNotTriviallyCopyable = v ; <nl> + } <nl> } ; <nl> <nl> / / / This is the base type for AncestryOptions . Each flag describes possible <nl> mmm a / lib / AST / Decl . cpp <nl> ppp b / lib / AST / Decl . cpp <nl> StructDecl : : StructDecl ( SourceLoc StructLoc , Identifier Name , SourceLoc NameLoc , <nl> StructLoc ( StructLoc ) <nl> { <nl> Bits . StructDecl . HasUnreferenceableStorage = false ; <nl> + Bits . StructDecl . IsCxxNotTriviallyCopyable = false ; <nl> } <nl> <nl> bool NominalTypeDecl : : hasMemberwiseInitializer ( ) const { <nl> mmm a / lib / ClangImporter / ImportDecl . cpp <nl> ppp b / lib / ClangImporter / ImportDecl . cpp <nl> <nl> # include " clang / AST / DeclCXX . h " <nl> # include " clang / Basic / CharInfo . h " <nl> # include " swift / Basic / Statistic . h " <nl> + # include " clang / Basic / Specifiers . h " <nl> # include " clang / Basic / TargetInfo . h " <nl> # include " clang / Lex / Preprocessor . h " <nl> # include " clang / Sema / Lookup . h " <nl> namespace { <nl> <nl> result - > setHasUnreferenceableStorage ( hasUnreferenceableStorage ) ; <nl> <nl> + if ( auto cxxRecordDecl = dyn_cast < clang : : CXXRecordDecl > ( decl ) ) { <nl> + result - > setIsCxxNotTriviallyCopyable ( <nl> + ! cxxRecordDecl - > isTriviallyCopyable ( ) ) ; <nl> + <nl> + for ( auto ctor : cxxRecordDecl - > ctors ( ) ) { <nl> + if ( ctor - > isCopyConstructor ( ) & & <nl> + ( ctor - > isDeleted ( ) | | ctor - > getAccess ( ) ! = clang : : AS_public ) ) { <nl> + result - > setIsCxxNotTriviallyCopyable ( true ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + if ( auto dtor = cxxRecordDecl - > getDestructor ( ) ) { <nl> + if ( dtor - > isDeleted ( ) | | dtor - > getAccess ( ) ! = clang : : AS_public ) { <nl> + result - > setIsCxxNotTriviallyCopyable ( true ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> return result ; <nl> } <nl> <nl> mmm a / lib / SIL / IR / TypeLowering . cpp <nl> ppp b / lib / SIL / IR / TypeLowering . cpp <nl> namespace { <nl> if ( handleResilience ( structType , D , properties ) ) <nl> return handleAddressOnly ( structType , properties ) ; <nl> <nl> + if ( D - > isCxxNotTriviallyCopyable ( ) ) { <nl> + properties . setAddressOnly ( ) ; <nl> + } <nl> + <nl> auto subMap = structType - > getContextSubstitutionMap ( & TC . M , D ) ; <nl> <nl> / / Classify the type according to its stored properties . <nl> new file mode 100644 <nl> index 000000000000 . . 
c2698961c4b4 <nl> mmm / dev / null <nl> ppp b / test / Interop / Cxx / class / Inputs / loadable - types . h <nl> <nl> + # ifndef TEST_INTEROP_CXX_CLASS_INPUTS_LOADABLE_TYPES_H <nl> + # define TEST_INTEROP_CXX_CLASS_INPUTS_LOADABLE_TYPES_H <nl> + <nl> + struct EmptyStruct { } ; <nl> + <nl> + / / Tests for individual special members <nl> + <nl> + struct StructWithDefaultConstructor { <nl> + StructWithDefaultConstructor ( ) { } <nl> + } ; <nl> + <nl> + struct StructWithAdditionalConstructor { <nl> + StructWithAdditionalConstructor ( ) { } <nl> + StructWithAdditionalConstructor ( int parameter ) { } <nl> + } ; <nl> + <nl> + struct StructWithCopyConstructor { <nl> + StructWithCopyConstructor ( const StructWithCopyConstructor & ) { } <nl> + } ; <nl> + <nl> + struct StructWithInheritedCopyConstructor : StructWithCopyConstructor { } ; <nl> + <nl> + struct StructWithSubobjectCopyConstructor { <nl> + StructWithCopyConstructor subobject ; <nl> + } ; <nl> + <nl> + struct StructWithDefaultedCopyConstructor { <nl> + StructWithDefaultedCopyConstructor ( <nl> + const StructWithDefaultedCopyConstructor & ) = default ; <nl> + } ; <nl> + <nl> + struct StructWithInheritedDefaultedCopyConstructor <nl> + : StructWithDefaultedCopyConstructor { } ; <nl> + <nl> + struct StructWithSubobjectDefaultedCopyConstructor { <nl> + StructWithDefaultedCopyConstructor subobject ; <nl> + } ; <nl> + <nl> + struct StructWithPrivateDefaultedCopyConstructor { <nl> + private : <nl> + StructWithPrivateDefaultedCopyConstructor ( <nl> + const StructWithPrivateDefaultedCopyConstructor & ) = default ; <nl> + } ; <nl> + <nl> + struct StructWithInheritedPrivateDefaultedCopyConstructor <nl> + : StructWithPrivateDefaultedCopyConstructor { } ; <nl> + <nl> + struct StructWithSubobjectPrivateDefaultedCopyConstructor { <nl> + StructWithPrivateDefaultedCopyConstructor subobject ; <nl> + } ; <nl> + <nl> + struct StructWithMoveConstructor { <nl> + StructWithMoveConstructor ( StructWithMoveConstructor & & ) { } <nl> + } ; <nl> + <nl> + struct StructWithInheritedMoveConstructor : StructWithMoveConstructor { } ; <nl> + <nl> + struct StructWithSubobjectMoveConstructor { <nl> + StructWithMoveConstructor subobject ; <nl> + } ; <nl> + <nl> + struct StructWithCopyAssignment { <nl> + StructWithCopyAssignment & operator = ( const StructWithCopyAssignment & ) { } <nl> + } ; <nl> + <nl> + struct StructWithInheritedCopyAssignment : StructWithCopyAssignment { } ; <nl> + <nl> + struct StructWithSubobjectCopyAssignment { <nl> + StructWithCopyAssignment subobject ; <nl> + } ; <nl> + <nl> + struct StructWithMoveAssignment { <nl> + StructWithMoveAssignment & operator = ( StructWithMoveAssignment & & ) { } <nl> + } ; <nl> + <nl> + struct StructWithInheritedMoveAssignment : StructWithMoveAssignment { } ; <nl> + <nl> + struct StructWithSubobjectMoveAssignment { <nl> + StructWithMoveAssignment subobject ; <nl> + } ; <nl> + <nl> + struct StructWithDestructor { <nl> + ~ StructWithDestructor ( ) { } <nl> + } ; <nl> + <nl> + struct StructWithInheritedDestructor : StructWithDestructor { } ; <nl> + <nl> + struct StructWithSubobjectDestructor { <nl> + StructWithDestructor subobject ; <nl> + } ; <nl> + <nl> + struct StructWithDefaultedDestructor { <nl> + ~ StructWithDefaultedDestructor ( ) = default ; <nl> + } ; <nl> + <nl> + struct StructWithInheritedDefaultedDestructor : StructWithDefaultedDestructor { <nl> + } ; <nl> + <nl> + struct StructWithSubobjectDefaultedDestructor { <nl> + StructWithDefaultedDestructor subobject ; <nl> + } ; <nl> + <nl> + struct 
StructWithPrivateDefaultedDestructor { <nl> + private : <nl> + ~ StructWithPrivateDefaultedDestructor ( ) = default ; <nl> + } ; <nl> + <nl> + struct StructWithInheritedPrivateDefaultedDestructor <nl> + : StructWithPrivateDefaultedDestructor { } ; <nl> + <nl> + struct StructWithSubobjectPrivateDefaultedDestructor { <nl> + StructWithPrivateDefaultedDestructor subobject ; <nl> + } ; <nl> + <nl> + / / Tests for common sets of special member functions . <nl> + <nl> + struct StructTriviallyCopyableMovable { <nl> + StructTriviallyCopyableMovable ( const StructTriviallyCopyableMovable & ) = <nl> + default ; <nl> + StructTriviallyCopyableMovable ( StructTriviallyCopyableMovable & & ) = default ; <nl> + StructTriviallyCopyableMovable & <nl> + operator = ( const StructTriviallyCopyableMovable & ) = default ; <nl> + StructTriviallyCopyableMovable & <nl> + operator = ( StructTriviallyCopyableMovable & & ) = default ; <nl> + ~ StructTriviallyCopyableMovable ( ) = default ; <nl> + } ; <nl> + <nl> + struct StructNonCopyableTriviallyMovable { <nl> + StructNonCopyableTriviallyMovable ( const StructNonCopyableTriviallyMovable & ) = <nl> + delete ; <nl> + StructNonCopyableTriviallyMovable ( StructNonCopyableTriviallyMovable & & ) = <nl> + default ; <nl> + StructNonCopyableTriviallyMovable & <nl> + operator = ( const StructNonCopyableTriviallyMovable & ) = delete ; <nl> + StructNonCopyableTriviallyMovable & <nl> + operator = ( StructNonCopyableTriviallyMovable & & ) = default ; <nl> + ~ StructNonCopyableTriviallyMovable ( ) = default ; <nl> + } ; <nl> + <nl> + struct StructNonCopyableNonMovable { <nl> + StructNonCopyableNonMovable ( const StructNonCopyableNonMovable & ) = delete ; <nl> + StructNonCopyableNonMovable ( StructNonCopyableNonMovable & & ) = default ; <nl> + StructNonCopyableNonMovable & <nl> + operator = ( const StructNonCopyableNonMovable & ) = delete ; <nl> + StructNonCopyableNonMovable & <nl> + operator = ( StructNonCopyableNonMovable & & ) = default ; <nl> + ~ StructNonCopyableNonMovable ( ) = default ; <nl> + } ; <nl> + <nl> + struct StructDeletedDestructor { <nl> + StructDeletedDestructor ( const StructDeletedDestructor & ) = default ; <nl> + StructDeletedDestructor ( StructDeletedDestructor & & ) = default ; <nl> + StructDeletedDestructor & operator = ( const StructDeletedDestructor & ) = default ; <nl> + StructDeletedDestructor & operator = ( StructDeletedDestructor & & ) = default ; <nl> + ~ StructDeletedDestructor ( ) = delete ; <nl> + } ; <nl> + <nl> + # endif <nl> mmm a / test / Interop / Cxx / class / Inputs / module . modulemap <nl> ppp b / test / Interop / Cxx / class / Inputs / module . modulemap <nl> module AccessSpecifiers { <nl> header " access - specifiers . h " <nl> } <nl> <nl> + module LoadableTypes { <nl> + header " loadable - types . h " <nl> + } <nl> + <nl> module MemberwiseInitializer { <nl> header " memberwise - initializer . h " <nl> } <nl> new file mode 100644 <nl> index 000000000000 . . 7611f5d73d74 <nl> mmm / dev / null <nl> ppp b / test / Interop / Cxx / class / loadable - types - silgen . swift <nl> <nl> + / / RUN : % target - swift - emit - silgen - I % S / Inputs - enable - cxx - interop % s | % FileCheck % s <nl> + <nl> + / / This test checks that we classify C + + types as loadable and address - only <nl> + / / correctly . <nl> + <nl> + import LoadableTypes <nl> + <nl> + / / Tests for individual special members <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . 
* [ ( ] } } EmptyStruct ) <nl> + func pass ( s : EmptyStruct ) { <nl> + / / CHECK : bb0 ( % 0 : $ EmptyStruct ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithDefaultConstructor ) <nl> + func pass ( s : StructWithDefaultConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ StructWithDefaultConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithAdditionalConstructor ) <nl> + func pass ( s : StructWithAdditionalConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ StructWithAdditionalConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithCopyConstructor ) <nl> + func pass ( s : StructWithCopyConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithCopyConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithInheritedCopyConstructor ) <nl> + func pass ( s : StructWithInheritedCopyConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithInheritedCopyConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithSubobjectCopyConstructor ) <nl> + func pass ( s : StructWithSubobjectCopyConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithSubobjectCopyConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithDefaultedCopyConstructor ) <nl> + func pass ( s : StructWithDefaultedCopyConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ StructWithDefaultedCopyConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithInheritedDefaultedCopyConstructor ) <nl> + func pass ( s : StructWithInheritedDefaultedCopyConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ StructWithInheritedDefaultedCopyConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithSubobjectDefaultedCopyConstructor ) <nl> + func pass ( s : StructWithSubobjectDefaultedCopyConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ StructWithSubobjectDefaultedCopyConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithPrivateDefaultedCopyConstructor ) <nl> + func pass ( s : StructWithPrivateDefaultedCopyConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithPrivateDefaultedCopyConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithInheritedPrivateDefaultedCopyConstructor ) <nl> + func pass ( s : StructWithInheritedPrivateDefaultedCopyConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithInheritedPrivateDefaultedCopyConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithSubobjectPrivateDefaultedCopyConstructor ) <nl> + func pass ( s : StructWithSubobjectPrivateDefaultedCopyConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithSubobjectPrivateDefaultedCopyConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithMoveConstructor ) <nl> + func pass ( s : StructWithMoveConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithMoveConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . 
* [ ( ] } } StructWithInheritedMoveConstructor ) <nl> + func pass ( s : StructWithInheritedMoveConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithInheritedMoveConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithSubobjectMoveConstructor ) <nl> + func pass ( s : StructWithSubobjectMoveConstructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithSubobjectMoveConstructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithCopyAssignment ) <nl> + func pass ( s : StructWithCopyAssignment ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithCopyAssignment ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithInheritedCopyAssignment ) <nl> + func pass ( s : StructWithInheritedCopyAssignment ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithInheritedCopyAssignment ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithSubobjectCopyAssignment ) <nl> + func pass ( s : StructWithSubobjectCopyAssignment ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithSubobjectCopyAssignment ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithMoveAssignment ) <nl> + func pass ( s : StructWithMoveAssignment ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithMoveAssignment ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithInheritedMoveAssignment ) <nl> + func pass ( s : StructWithInheritedMoveAssignment ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithInheritedMoveAssignment ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithSubobjectMoveAssignment ) <nl> + func pass ( s : StructWithSubobjectMoveAssignment ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithSubobjectMoveAssignment ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithDestructor ) <nl> + func pass ( s : StructWithDestructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithDestructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithInheritedDestructor ) <nl> + func pass ( s : StructWithInheritedDestructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithInheritedDestructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithSubobjectDestructor ) <nl> + func pass ( s : StructWithSubobjectDestructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithSubobjectDestructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithDefaultedDestructor ) <nl> + func pass ( s : StructWithDefaultedDestructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ StructWithDefaultedDestructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithInheritedDefaultedDestructor ) <nl> + func pass ( s : StructWithInheritedDefaultedDestructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ StructWithInheritedDefaultedDestructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . 
* [ ( ] } } StructWithSubobjectDefaultedDestructor ) <nl> + func pass ( s : StructWithSubobjectDefaultedDestructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ StructWithSubobjectDefaultedDestructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithPrivateDefaultedDestructor ) <nl> + func pass ( s : StructWithPrivateDefaultedDestructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithPrivateDefaultedDestructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithInheritedPrivateDefaultedDestructor ) <nl> + func pass ( s : StructWithInheritedPrivateDefaultedDestructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithInheritedPrivateDefaultedDestructor ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructWithSubobjectPrivateDefaultedDestructor ) <nl> + func pass ( s : StructWithSubobjectPrivateDefaultedDestructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructWithSubobjectPrivateDefaultedDestructor ) : <nl> + } <nl> + <nl> + / / Tests for common sets of special member functions . <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructTriviallyCopyableMovable ) <nl> + func pass ( s : StructTriviallyCopyableMovable ) { <nl> + / / CHECK : bb0 ( % 0 : $ StructTriviallyCopyableMovable ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructNonCopyableTriviallyMovable ) <nl> + func pass ( s : StructNonCopyableTriviallyMovable ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructNonCopyableTriviallyMovable ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructNonCopyableNonMovable ) <nl> + func pass ( s : StructNonCopyableNonMovable ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructNonCopyableNonMovable ) : <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil hidden [ ossa ] @ $ s4main4pass { { . * [ ( ] } } StructDeletedDestructor ) <nl> + func pass ( s : StructDeletedDestructor ) { <nl> + / / CHECK : bb0 ( % 0 : $ * StructDeletedDestructor ) : <nl> + } <nl>
Classify C + + structs as loadable or address - only ( )
apple/swift
e69abeba531bfbf5302ab13d70b7393551d4d04b
2020-05-13T15:16:47Z
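The pattern the SIL test above encodes is mechanical: a C++ struct lowers to a loadable SIL value (`bb0(%0 : $T)`) when its copy/move constructors and destructor are all trivial or trivially defaulted, and to an address-only value passed by address (`bb0(%0 : $*T)`) as soon as any special member is user-provided, private, or deleted, or when such a member is inherited from a base or pulled in through a subobject. A minimal standalone C++ sketch of the underlying triviality rule follows; the type names and `static_assert` framing are illustrative, not part of the commit, and `std::is_trivially_copyable` only approximates the importer's per-member check:

```cpp
#include <type_traits>

// Mirrors StructWithDefaultedCopyConstructor: a defaulted special member
// keeps the type trivially copyable, so it can stay loadable.
struct Defaulted {
  Defaulted() = default;
  Defaulted(const Defaulted &) = default;
};

// Mirrors StructWithCopyConstructor: a user-provided copy constructor makes
// the type non-trivial, which the test expects to lower as address-only.
struct UserProvided {
  UserProvided() = default;
  UserProvided(const UserProvided &) {}
};

// Mirrors StructWithSubobjectCopyConstructor: non-triviality propagates
// from members to the enclosing type.
struct Subobject {
  UserProvided member;
};

static_assert(std::is_trivially_copyable<Defaulted>::value,
              "defaulted special members preserve trivial copyability");
static_assert(!std::is_trivially_copyable<UserProvided>::value,
              "a user-provided copy constructor removes it");
static_assert(!std::is_trivially_copyable<Subobject>::value,
              "and a non-trivial member removes it transitively");

int main() { return 0; }
```

The private and deleted cases in the header exercise the same rule from the accessibility side: a copy constructor or destructor that exists but cannot be called still blocks the loadable lowering, which is why those structs also check for `$*` in the SIL test.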
mmm a / src / rdb_protocol / func . cc <nl> ppp b / src / rdb_protocol / func . cc <nl> counted_t < val_t > func_t : : call ( const std : : vector < counted_t < const datum_t > > & args <nl> result ) ; <nl> } else { <nl> r_sanity_check ( body . has ( ) & & source . has ( ) & & js_env = = NULL ) ; <nl> - rcheck ( args . size ( ) = = static_cast < size_t > ( argptrs . size ( ) ) <nl> - | | argptrs . size ( ) = = 0 , <nl> + rcheck ( args . size ( ) = = argptrs . size ( ) | | argptrs . size ( ) = = 0 , <nl> base_exc_t : : GENERIC , <nl> strprintf ( " Expected % zd argument ( s ) but found % zu . " , <nl> argptrs . size ( ) , args . size ( ) ) ) ; <nl>
Removed an unnecessary static_cast .
rethinkdb/rethinkdb
9f36fe777f643db82f3745e1bae59d22238e3e44
2013-08-29T23:39:25Z
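The cast was dead weight because both operands of the comparison already have the same type: `args` and `argptrs` are `std::vector`s, so both `size()` calls return `std::vector::size_type`, i.e. `std::size_t`, and the comparison is exact without any conversion. A small self-contained sketch of the before/after behavior, with hypothetical vectors standing in for the function's argument lists:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<int> args = {1, 2, 3};        // stand-in for the datum arguments
  std::vector<void *> argptrs(3, nullptr);  // stand-in for the argument slots

  // Before the patch: a size_t was cast to size_t before the comparison.
  bool before = args.size() == static_cast<std::size_t>(argptrs.size()) ||
                argptrs.size() == 0;

  // After the patch: both sides are already std::size_t, so the cast adds
  // nothing and the shorter form behaves identically.
  bool after = args.size() == argptrs.size() || argptrs.size() == 0;

  assert(before == after);
  return 0;
}
```

A related nit the diff leaves alone: the surrounding `strprintf` context line still prints `argptrs.size()` with `%zd`, the signed `ssize_t` specifier, where `%zu` would match the unsigned `size_t` value.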