Dataset schema (one record per entry below):
  text                string, length 5 to 22M
  id                  string, length 12 to 177
  metadata            dict
  __index_level_0__   int64, 0 to 1.37k
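For orientation, here is one record of this dump rendered as a Python dict matching the schema above. Every value is copied verbatim from the hyphen entry further down; only the long text field is cut short.

```python
# One record of this dump as a plain dict, matching the schema above.
# Values are copied from the hyphen entry below; the text field is truncated.
record = {
    "text": "<?xml version='1.0' encoding='UTF-8'?> <glyph name=\"hyphen\" format=\"2\"> ...",
    "id": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/hyphen.glif/0",
    "metadata": {
        "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/hyphen.glif",
        "repo_id": "cascadia-code",
        "token_count": 270,
    },
    "__index_level_0__": 419,
}
```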
<?xml version='1.0' encoding='UTF-8'?> <glyph name="ghainDotbelow-ar.fina" format="2"> <advance width="1200"/> <outline> <component base="ain-ar.fina"/> <component base="dotabove-ar" xOffset="10" yOffset="273"/> <component base="dotcenter-ar" xOffset="32" yOffset="-469"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ghainD_otbelow-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ghainD_otbelow-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 190 }
418
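The text column holds raw UFO .glif XML, as in the ghainDotbelow-ar.fina record above. As a minimal sketch (fontTools is an assumed dependency, and the XML below is abbreviated from that record), such a composite glyph can be parsed and its component references listed like this:

```python
# Sketch: parse a .glif record with fontTools and list its component references.
from fontTools.ufoLib.glifLib import readGlyphFromString
from fontTools.pens.recordingPen import RecordingPointPen

GLIF = """<?xml version='1.0' encoding='UTF-8'?>
<glyph name="ghainDotbelow-ar.fina" format="2">
  <advance width="1200"/>
  <outline>
    <component base="ain-ar.fina"/>
    <component base="dotabove-ar" xOffset="10" yOffset="273"/>
    <component base="dotcenter-ar" xOffset="32" yOffset="-469"/>
  </outline>
</glyph>"""

class Glyph:
    """Bare container; readGlyphFromString sets width, unicodes, lib, ... on it."""

glyph = Glyph()
pen = RecordingPointPen()              # records beginPath/addPoint/addComponent calls
readGlyphFromString(GLIF, glyphObject=glyph, pointPen=pen)

print("advance width:", glyph.width)   # 1200
for op, args, kwargs in pen.value:
    if op == "addComponent":
        base, transform = args
        print("component:", base, transform)   # e.g. ('dotabove-ar', (1, 0, 0, 1, 10, 273))
```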
<?xml version='1.0' encoding='UTF-8'?> <glyph name="hyphen" format="2"> <advance width="1200"/> <unicode hex="002D"/> <outline> <component base="minus"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>minus</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/hyphen.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/hyphen.glif", "repo_id": "cascadia-code", "token_count": 270 }
419
<?xml version='1.0' encoding='UTF-8'?> <glyph name="kehehDotabove-ar.init" format="2"> <advance width="1200"/> <outline> <component base="keheh-ar.init"/> <component base="dotabove-ar" xOffset="-167" yOffset="601"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/kehehD_otabove-ar.init.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/kehehD_otabove-ar.init.glif", "repo_id": "cascadia-code", "token_count": 168 }
420
<?xml version='1.0' encoding='UTF-8'?> <glyph name="kehehTwodotshorizontalabove-ar.init" format="2"> <advance width="1200"/> <anchor x="385" y="1494" name="top"/> <outline> <component base="keheh-ar.init"/> <component base="twodotshorizontalabove-ar.v2" xScale="0.7" yScale="0.7" xOffset="-15" yOffset="750"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/kehehT_wodotshorizontalabove-ar.init.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/kehehT_wodotshorizontalabove-ar.init.glif", "repo_id": "cascadia-code", "token_count": 212 }
421
<?xml version='1.0' encoding='UTF-8'?> <glyph name="lamDotabove-ar.medi" format="2"> <advance width="1200"/> <guideline x="304" y="835" angle="0"/> <outline> <component base="lam-ar.medi"/> <component base="dotabove-ar" xOffset="101" yOffset="925"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/lamD_otabove-ar.medi.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/lamD_otabove-ar.medi.glif", "repo_id": "cascadia-code", "token_count": 182 }
422
<?xml version='1.0' encoding='UTF-8'?> <glyph name="lamThreedotsbelow-ar.init" format="2"> <advance width="1200"/> <outline> <component base="lam-ar.init"/> <component base="threedotsdownbelow-ar" xOffset="257" yOffset="-4"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/lamT_hreedotsbelow-ar.init.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/lamT_hreedotsbelow-ar.init.glif", "repo_id": "cascadia-code", "token_count": 168 }
423
<?xml version='1.0' encoding='UTF-8'?> <glyph name="noonThreedotsabove-ar" format="2"> <advance width="1200"/> <unicode hex="06BD"/> <outline> <component base="noonghunna-ar"/> <component base="threedotsupabove-ar" xOffset="4" yOffset="11"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/noonT_hreedotsabove-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/noonT_hreedotsabove-ar.glif", "repo_id": "cascadia-code", "token_count": 176 }
424
<?xml version='1.0' encoding='UTF-8'?> <glyph name="numbersign_bracketleft.liga" format="2"> <advance width="1200"/> <outline> <contour> <point x="842" y="0" type="line"/> <point x="1084" y="0" type="line"/> <point x="1204" y="1420" type="line"/> <point x="962" y="1420" type="line"/> </contour> <contour> <point x="406" y="0" type="line"/> <point x="648" y="0" type="line"/> <point x="768" y="1420" type="line"/> <point x="526" y="1420" type="line"/> </contour> <contour> <point x="263" y="310" type="line"/> <point x="1607" y="310" type="line"/> <point x="1607" y="558" type="line"/> <point x="263" y="558" type="line"/> </contour> <contour> <point x="263" y="862" type="line"/> <point x="1607" y="862" type="line"/> <point x="1607" y="1110" type="line"/> <point x="263" y="1110" type="line"/> </contour> <component base="bracketleft" xOffset="1164"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>bracketleft</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/numbersign_bracketleft.liga.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/numbersign_bracketleft.liga.glif", "repo_id": "cascadia-code", "token_count": 677 }
425
<?xml version='1.0' encoding='UTF-8'?> <glyph name="onedotenleader" format="2"> <advance width="1200"/> <unicode hex="2024"/> <outline> <component base="period"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>period</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/onedotenleader.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/onedotenleader.glif", "repo_id": "cascadia-code", "token_count": 272 }
426
<?xml version='1.0' encoding='UTF-8'?> <glyph name="ruble" format="2"> <advance width="1200"/> <unicode hex="20BD"/> <outline> <contour> <point x="0" y="111" type="line"/> <point x="788" y="111" type="line"/> <point x="788" y="316" type="line"/> <point x="0" y="316" type="line"/> </contour> <contour> <point x="0" y="407" type="line"/> <point x="437" y="407" type="line"/> <point x="437" y="647" type="line"/> <point x="0" y="647" type="line"/> </contour> <component base="P"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>P</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ruble.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ruble.glif", "repo_id": "cascadia-code", "token_count": 464 }
427
<?xml version='1.0' encoding='UTF-8'?> <glyph name="tilde" format="2"> <advance width="1200"/> <unicode hex="02DC"/> <outline> <component base="tildecomb"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>tildecomb</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/tilde.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/tilde.glif", "repo_id": "cascadia-code", "token_count": 276 }
428
<?xml version='1.0' encoding='UTF-8'?> <glyph name="wawHamzaabove-ar.fina" format="2"> <advance width="1200"/> <outline> <component base="waw-ar.fina"/> <component base="hamzaabove-ar" xOffset="50" yOffset="-334"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/wawH_amzaabove-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/wawH_amzaabove-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 169 }
429
<?xml version='1.0' encoding='UTF-8'?> <glyph name="Abreveacute" format="2"> <advance width="1200"/> <unicode hex="1EAE"/> <outline> <component base="A"/> <component base="brevecomb.case" xOffset="-1"/> <component base="acutecomb.case" xOffset="69" yOffset="400"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-BoldItalic.ufo/glyphs/A_breveacute.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-BoldItalic.ufo/glyphs/A_breveacute.glif", "repo_id": "cascadia-code", "token_count": 124 }
430
<?xml version='1.0' encoding='UTF-8'?> <glyph name="beeh-ar.init.alt" format="2"> <advance width="1200"/> <guideline x="104" y="-120" angle="0"/> <guideline x="181" y="520" angle="0"/> <anchor x="0" y="0" name="overlap"/> <outline> <component base="behDotless-ar.init.alt"/> <component base="twodotsverticalbelow-ar" xOffset="250" yOffset="-24"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>bottom.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>twodotsverticalbelow-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/beeh-ar.init.alt.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/beeh-ar.init.alt.glif", "repo_id": "cascadia-code", "token_count": 407 }
431
<?xml version='1.0' encoding='UTF-8'?> <glyph name="sadTwodotsbelow-ar.medi" format="2"> <advance width="1200"/> <guideline x="1561" y="-129" angle="0"/> <outline> <component base="sad-ar.medi"/> <component base="twodotshorizontalbelow-ar" xOffset="50" yOffset="-24"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/sadT_wodotsbelow-ar.medi.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/sadT_wodotsbelow-ar.medi.glif", "repo_id": "cascadia-code", "token_count": 192 }
432
<?xml version='1.0' encoding='UTF-8'?> <glyph name="tchehDotabove-ar.medi" format="2"> <advance width="1200"/> <outline> <component base="tcheh-ar.medi"/> <component base="dotabove-ar" xOffset="-34" yOffset="332"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/tchehD_otabove-ar.medi.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/tchehD_otabove-ar.medi.glif", "repo_id": "cascadia-code", "token_count": 168 }
433
<?xml version='1.0' encoding='UTF-8'?> <glyph name="ainTwodotshorizontalabove-ar.medi" format="2"> <advance width="1200"/> <outline> <component base="ain-ar.medi"/> <component base="twodotshorizontalabove-ar" xOffset="10" yOffset="283"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>twodotshorizontalabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/ainT_wodotshorizontalabove-ar.medi.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/ainT_wodotshorizontalabove-ar.medi.glif", "repo_id": "cascadia-code", "token_count": 352 }
434
<?xml version='1.0' encoding='UTF-8'?> <glyph name="dalVinvertedabove-ar.fina" format="2"> <advance width="1200"/> <outline> <component base="dal-ar.fina"/> <component base="vinvertedabove-ar" xOffset="36" yOffset="492"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>vinvertedabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/dalV_invertedabove-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/dalV_invertedabove-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 343 }
435
<?xml version='1.0' encoding='UTF-8'?> <glyph name="ddahal-ar" format="2"> <advance width="1200"/> <unicode hex="068D"/> <outline> <component base="dal-ar"/> <component base="twodotshorizontalbelow-ar" xOffset="-30" yOffset="-24"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>bottom.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>twodotshorizontalbelow-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/ddahal-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/ddahal-ar.glif", "repo_id": "cascadia-code", "token_count": 356 }
436
<?xml version='1.0' encoding='UTF-8'?> <glyph name="gafThreedots-ar.init" format="2"> <advance width="1200"/> <anchor x="420" y="1850" name="top"/> <outline> <component base="gaf-ar.init"/> <component base="threedotsupabove-ar.v2" xScale="0.8" yScale="0.8" xOffset="-13" yOffset="912"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>threedotsupabove-ar.v2</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/gafT_hreedots-ar.init.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/gafT_hreedots-ar.init.glif", "repo_id": "cascadia-code", "token_count": 386 }
437
<?xml version='1.0' encoding='UTF-8'?> <glyph name="gueh-ar.init" format="2"> <advance width="1200"/> <outline> <component base="gaf-ar.init"/> <component base="twodotsverticalbelow-ar" xOffset="-33" yOffset="-24"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>bottom.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>twodotsverticalbelow-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/gueh-ar.init.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/gueh-ar.init.glif", "repo_id": "cascadia-code", "token_count": 347 }
438
<?xml version='1.0' encoding='UTF-8'?> <glyph name="kehehThreedotsbelow-ar.medi" format="2"> <advance width="1200"/> <outline> <component base="keheh-ar.medi"/> <component base="threedotsdownbelow-ar" xOffset="49" yOffset="-18"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>bottom.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>threedotsdownbelow-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/kehehT_hreedotsbelow-ar.medi.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/kehehT_hreedotsbelow-ar.medi.glif", "repo_id": "cascadia-code", "token_count": 350 }
439
<?xml version='1.0' encoding='UTF-8'?> <glyph name="nine-arsuperior" format="2"> <advance width="1200"/> <outline> <component base="nine-arinferior" yOffset="801"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>nine-arinferior</string> </dict> </array> <key>com.schriftgestaltung.Glyphs.category</key> <string>Number</string> <key>com.schriftgestaltung.Glyphs.script</key> <string>arabic</string> <key>com.schriftgestaltung.Glyphs.subCategory</key> <string>Small</string> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/nine-arsuperior.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/nine-arsuperior.glif", "repo_id": "cascadia-code", "token_count": 437 }
440
<?xml version='1.0' encoding='UTF-8'?> <glyph name="qaf-ar.init" format="2"> <advance width="1200"/> <outline> <component base="fehDotless-ar.init"/> <component base="twodotshorizontalabove-ar" xOffset="65" yOffset="470"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>twodotshorizontalabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/qaf-ar.init.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/qaf-ar.init.glif", "repo_id": "cascadia-code", "token_count": 350 }
441
<?xml version='1.0' encoding='UTF-8'?> <glyph name="tahThreedots-ar.medi" format="2"> <advance width="1200"/> <outline> <component base="tah-ar.medi"/> <component base="threedotsupabove-ar" xOffset="160" yOffset="343"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>threedotsupabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/tahT_hreedots-ar.medi.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/tahT_hreedots-ar.medi.glif", "repo_id": "cascadia-code", "token_count": 346 }
442
<?xml version='1.0' encoding='UTF-8'?> <glyph name="thal-ar" format="2"> <advance width="1200"/> <unicode hex="0630"/> <outline> <component base="dal-ar"/> <component base="dotabove-ar" xOffset="16" yOffset="492"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>dotabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/thal-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/thal-ar.glif", "repo_id": "cascadia-code", "token_count": 341 }
443
<?xml version='1.0' encoding='UTF-8'?> <glyph name="yehTwodotsbelowHamzaabove-ar" format="2"> <advance width="1200"/> <unicode hex="08A8"/> <outline> <component base="yeh-ar"/> <component base="hamzaabove-ar" xOffset="-436" yOffset="-664"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>hamzaabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/yehT_wodotsbelowH_amzaabove-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Regular.ufo/glyphs/yehT_wodotsbelowH_amzaabove-ar.glif", "repo_id": "cascadia-code", "token_count": 356 }
444
feature subs { # automatic sub zero by zeroinferior; sub one by oneinferior; sub two by twoinferior; sub three by threeinferior; sub four by fourinferior; sub five by fiveinferior; sub six by sixinferior; sub seven by seveninferior; sub eight by eightinferior; sub nine by nineinferior; sub eight-ar by eight-arinferior; sub eight-persian by eight-persianinferior; sub five-ar by five-arinferior; sub five-persian by five-persianinferior; sub four-ar by four-arinferior; sub four-persian by four-persianinferior; sub nine-ar by nine-arinferior; sub nine-persian by nine-persianinferior; sub one-ar by one-arinferior; sub one-persian by one-persianinferior; sub seven-ar by seven-arinferior; sub seven-persian by seven-persianinferior; sub six-ar by six-arinferior; sub six-persian by six-persianinferior; sub three-ar by three-arinferior; sub three-persian by three-persianinferior; sub two-ar by two-arinferior; sub two-persian by two-persianinferior; sub four-persian.urdu by four-persian.urduinferior; sub seven-persian.urdu by seven-persian.urduinferior; sub zero-ar by zero-arinferior; sub zero-persian by zero-persianinferior; } subs; feature sinf { # automatic sub zero by zeroinferior; sub one by oneinferior; sub two by twoinferior; sub three by threeinferior; sub four by fourinferior; sub five by fiveinferior; sub six by sixinferior; sub seven by seveninferior; sub eight by eightinferior; sub nine by nineinferior; sub eight-ar by eight-arinferior; sub eight-persian by eight-persianinferior; sub five-ar by five-arinferior; sub five-persian by five-persianinferior; sub four-ar by four-arinferior; sub four-persian by four-persianinferior; sub nine-ar by nine-arinferior; sub nine-persian by nine-persianinferior; sub one-ar by one-arinferior; sub one-persian by one-persianinferior; sub seven-ar by seven-arinferior; sub seven-persian by seven-persianinferior; sub six-ar by six-arinferior; sub six-persian by six-persianinferior; sub three-ar by three-arinferior; sub three-persian by three-persianinferior; sub two-ar by two-arinferior; sub two-persian by two-persianinferior; sub four-persian.urdu by four-persian.urduinferior; sub seven-persian.urdu by seven-persian.urduinferior; sub zero-ar by zero-arinferior; sub zero-persian by zero-persianinferior; } sinf; feature sups { # automatic sub zero by zerosuperior; sub one by onesuperior; sub two by twosuperior; sub three by threesuperior; sub four by foursuperior; sub five by fivesuperior; sub six by sixsuperior; sub seven by sevensuperior; sub eight by eightsuperior; sub nine by ninesuperior; sub eight-ar by eight-arsuperior; sub eight-persian by eight-persiansuperior; sub five-ar by five-arsuperior; sub five-persian by five-persiansuperior; sub four-ar by four-arsuperior; sub four-persian by four-persiansuperior; sub nine-ar by nine-arsuperior; sub nine-persian by nine-persiansuperior; sub one-ar by one-arsuperior; sub one-persian by one-persiansuperior; sub seven-ar by seven-arsuperior; sub seven-persian by seven-persiansuperior; sub six-ar by six-arsuperior; sub six-persian by six-persiansuperior; sub three-ar by three-arsuperior; sub three-persian by three-persiansuperior; sub two-ar by two-arsuperior; sub two-persian by two-persiansuperior; sub four-persian.urdu by four-persian.urdusuperior; sub seven-persian.urdu by seven-persian.urdusuperior; sub zero-ar by zero-arsuperior; sub zero-persian by zero-persiansuperior; } sups; feature numr { # automatic sub zero by zero.numr; sub one by one.numr; sub two by two.numr; sub three by three.numr; sub four by 
four.numr; sub five by five.numr; sub six by six.numr; sub seven by seven.numr; sub eight by eight.numr; sub nine by nine.numr; } numr; feature dnom { # automatic sub zero by zero.dnom; sub one by one.dnom; sub two by two.dnom; sub three by three.dnom; sub four by four.dnom; sub five by five.dnom; sub six by six.dnom; sub seven by seven.dnom; sub eight by eight.dnom; sub nine by nine.dnom; } dnom;
cascadia-code/sources/features/figures.fea/0
{ "file_path": "cascadia-code/sources/features/figures.fea", "repo_id": "cascadia-code", "token_count": 1441 }
445
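The figures.fea record above builds the subs, sinf, sups, numr, and dnom figure features entirely from one-to-one GSUB substitutions. Purely as an illustration (this is not how the font is compiled), applying such a feature amounts to a glyph-name lookup; the entries below are copied from the sups rules in the record.

```python
# Illustration only: a one-to-one GSUB substitution behaves like a dict lookup
# over glyph names. Entries copied from the `sups` feature above.
SUPS = {
    "zero": "zerosuperior",
    "one": "onesuperior",
    "two": "twosuperior",
    "nine-ar": "nine-arsuperior",          # composed glyph shown in record 440 above
    "zero-persian": "zero-persiansuperior",
}

def apply_sups(glyph_names):
    """Replace each glyph with its superior form where one is defined."""
    return [SUPS.get(name, name) for name in glyph_names]

# "period" has no sups mapping in the feature, so it passes through unchanged.
print(apply_sups(["one", "period", "two"]))   # ['onesuperior', 'period', 'twosuperior']
```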
feature ss02 { # notes: # Name: Alternate not equals sub asciitilde_equal.liga by exclam_equal.liga; } ss02;
cascadia-code/sources/features/ss02.fea/0
{ "file_path": "cascadia-code/sources/features/ss02.fea", "repo_id": "cascadia-code", "token_count": 40 }
446
<?xml version='1.0' encoding='UTF-8'?> <glyph name="uniE0A0" format="2"> <advance width="1200"/> <unicode hex="E0A0"/> <note> uniE0A0 </note> <outline> <contour> <point x="168" y="-480" type="line"/> <point x="444" y="-480" type="line"/> <point x="444" y="48" type="line" smooth="yes"/> <point x="444" y="307"/> <point x="527" y="350"/> <point x="788" y="528" type="curve" smooth="yes"/> <point x="946" y="636"/> <point x="1001" y="742"/> <point x="1001" y="934" type="curve" smooth="yes"/> <point x="1001" y="1576" type="line"/> <point x="1210" y="1576" type="line"/> <point x="919" y="1918" type="line"/> <point x="626" y="1576" type="line"/> <point x="835" y="1576" type="line"/> <point x="835" y="990" type="line" smooth="yes"/> <point x="835" y="865"/> <point x="798" y="777"/> <point x="669" y="702" type="curve" smooth="yes"/> <point x="216" y="439"/> <point x="168" y="348"/> <point x="168" y="48" type="curve"/> </contour> <contour> <point x="168" y="682" type="line"/> <point x="444" y="860" type="line"/> <point x="444" y="2226" type="line"/> <point x="168" y="2226" type="line"/> </contour> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.lastChange</key> <string>2020/07/23 18:20:43</string> <key>public.markColor</key> <string>0.65,0.48,0.2,1</string> </dict> </lib> </glyph>
cascadia-code/sources/nerdfonts/NerdfontsPL-Regular.ufo/glyphs/uniE_0A_0.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/NerdfontsPL-Regular.ufo/glyphs/uniE_0A_0.glif", "repo_id": "cascadia-code", "token_count": 744 }
447
<?xml version='1.0' encoding='UTF-8'?> <glyph name="uniE0BD.stypo" format="2"> <advance width="1200"/> <note> uniE0BD </note> <outline> <contour> <point x="0" y="-480" type="line"/> <point x="56" y="-480" type="line"/> <point x="1200" y="1900" type="line"/> <point x="1144" y="1900" type="line"/> </contour> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.lastChange</key> <string>2020/07/23 18:20:43</string> <key>public.markColor</key> <string>0.65,0.48,0.2,1</string> </dict> </lib> </glyph>
cascadia-code/sources/nerdfonts/NerdfontsPL-Regular.ufo/glyphs/uniE_0B_D_.stypo.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/NerdfontsPL-Regular.ufo/glyphs/uniE_0B_D_.stypo.glif", "repo_id": "cascadia-code", "token_count": 288 }
448
<?xml version='1.0' encoding='UTF-8'?> <glyph name="checkbox-blank-circle" format="2"> <advance width="512"/> <unicode hex="F012F"/> <unicode hex="F0765"/> <note> checkbox-blank-circle </note> <outline> <contour> <point x="298" y="405"/> <point x="377" y="373"/> <point x="437" y="313"/> <point x="469" y="234"/> <point x="469" y="150"/> <point x="437" y="71"/> <point x="377" y="11"/> <point x="298" y="-21"/> <point x="214" y="-21"/> <point x="135" y="11"/> <point x="75" y="71"/> <point x="43" y="150"/> <point x="43" y="234"/> <point x="75" y="313"/> <point x="135" y="373"/> <point x="214" y="405"/> <point x="256" y="405" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/checkbox-blank-circle.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/checkbox-blank-circle.glif", "repo_id": "cascadia-code", "token_count": 397 }
449
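Unlike the composite glyphs earlier, the Material Design icon records such as checkbox-blank-circle above are contour-only quadratic outlines. A small sketch (again assuming fontTools, with the XML abbreviated from that record) that reads the outline and reports its control-point bounding box:

```python
# Sketch: read a contour-only .glif record and report its control-point bounds.
from fontTools.ufoLib.glifLib import readGlyphFromString
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.pointPen import PointToSegmentPen

GLIF = """<?xml version='1.0' encoding='UTF-8'?>
<glyph name="checkbox-blank-circle" format="2">
  <advance width="512"/>
  <outline>
    <contour>
      <point x="298" y="405"/> <point x="377" y="373"/> <point x="437" y="313"/>
      <point x="469" y="234"/> <point x="469" y="150"/> <point x="437" y="71"/>
      <point x="377" y="11"/> <point x="298" y="-21"/> <point x="214" y="-21"/>
      <point x="135" y="11"/> <point x="75" y="71"/> <point x="43" y="150"/>
      <point x="43" y="234"/> <point x="75" y="313"/> <point x="135" y="373"/>
      <point x="214" y="405"/> <point x="256" y="405" type="qcurve" smooth="yes"/>
    </contour>
  </outline>
</glyph>"""

bounds_pen = ControlBoundsPen(glyphSet=None)   # no components, so no glyphSet needed
readGlyphFromString(GLIF, pointPen=PointToSegmentPen(bounds_pen))
print(bounds_pen.bounds)                       # (43, -21, 469, 405)
```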
<?xml version='1.0' encoding='UTF-8'?> <glyph name="ev-plug-type1" format="2"> <advance width="512"/> <unicode hex="F151D"/> <note> ev-plug-type1 </note> <outline> <contour> <point x="277" y="274"/> <point x="302" y="299"/> <point x="338" y="299"/> <point x="363" y="274"/> <point x="363" y="238"/> <point x="338" y="213"/> <point x="302" y="213"/> <point x="277" y="238"/> <point x="277" y="256" type="qcurve" smooth="yes"/> </contour> <contour> <point x="149" y="274"/> <point x="174" y="299"/> <point x="210" y="299"/> <point x="235" y="274"/> <point x="235" y="238"/> <point x="210" y="213"/> <point x="174" y="213"/> <point x="149" y="238"/> <point x="149" y="256" type="qcurve" smooth="yes"/> </contour> <contour> <point x="192" y="156"/> <point x="176" y="141"/> <point x="157" y="141"/> <point x="141" y="157"/> <point x="141" y="176"/> <point x="157" y="192"/> <point x="176" y="192"/> <point x="192" y="176"/> <point x="192" y="166" type="qcurve" smooth="yes"/> </contour> <contour> <point x="299" y="89"/> <point x="274" y="64"/> <point x="238" y="64"/> <point x="213" y="89"/> <point x="213" y="124"/> <point x="238" y="149"/> <point x="274" y="149"/> <point x="299" y="124"/> <point x="299" y="107" type="qcurve" smooth="yes"/> </contour> <contour> <point x="210" y="363"/> <point x="131" y="317"/> <point x="85" y="238"/> <point x="85" y="146"/> <point x="131" y="67"/> <point x="210" y="21"/> <point x="302" y="21"/> <point x="381" y="67"/> <point x="427" y="146"/> <point x="427" y="238"/> <point x="381" y="317"/> <point x="302" y="363"/> <point x="256" y="363" type="qcurve" smooth="yes"/> </contour> <contour> <point x="314" y="405"/> <point x="412" y="348"/> <point x="469" y="250"/> <point x="469" y="134"/> <point x="412" y="36"/> <point x="314" y="-21"/> <point x="198" y="-21"/> <point x="100" y="36"/> <point x="43" y="134"/> <point x="43" y="250"/> <point x="100" y="348"/> <point x="198" y="405"/> <point x="256" y="405" type="qcurve" smooth="yes"/> </contour> <contour> <point x="373" y="155"/> <point x="357" y="139"/> <point x="338" y="141"/> <point x="322" y="157"/> <point x="322" y="176"/> <point x="337" y="192"/> <point x="358" y="192"/> <point x="373" y="177"/> <point x="373" y="166" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/ev-plug-type1.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/ev-plug-type1.glif", "repo_id": "cascadia-code", "token_count": 1406 }
450
<?xml version='1.0' encoding='UTF-8'?> <glyph name="google-assistant" format="2"> <advance width="512"/> <unicode hex="F07CD"/> <note> google-assistant </note> <outline> <contour> <point x="184" y="405"/> <point x="243" y="371"/> <point x="277" y="312"/> <point x="277" y="277" type="qcurve" smooth="yes"/> <point x="277" y="252"/> <point x="258" y="205"/> <point x="222" y="169"/> <point x="175" y="149"/> <point x="124" y="149"/> <point x="77" y="169"/> <point x="41" y="205"/> <point x="21" y="252"/> <point x="21" y="277" type="qcurve" smooth="yes"/> <point x="21" y="312"/> <point x="56" y="371"/> <point x="115" y="405"/> <point x="149" y="405" type="qcurve" smooth="yes"/> </contour> <contour> <point x="472" y="320"/> <point x="491" y="301"/> <point x="491" y="275"/> <point x="472" y="256"/> <point x="445" y="256"/> <point x="427" y="275"/> <point x="427" y="301"/> <point x="445" y="320"/> <point x="459" y="320" type="qcurve" smooth="yes"/> </contour> <contour> <point x="389" y="277"/> <point x="427" y="240"/> <point x="427" y="187"/> <point x="389" y="149"/> <point x="336" y="149"/> <point x="299" y="187"/> <point x="299" y="240"/> <point x="336" y="277"/> <point x="363" y="277" type="qcurve" smooth="yes"/> </contour> <contour> <point x="394" y="128"/> <point x="437" y="84"/> <point x="437" y="22"/> <point x="394" y="-21"/> <point x="332" y="-21"/> <point x="288" y="22"/> <point x="288" y="84"/> <point x="332" y="128"/> <point x="363" y="128" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/google-assistant.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/google-assistant.glif", "repo_id": "cascadia-code", "token_count": 912 }
451
<?xml version='1.0' encoding='UTF-8'?> <glyph name="grain" format="2"> <advance width="512"/> <unicode hex="F0D7C"/> <note> grain </note> <outline> <contour> <point x="231" y="192"/> <point x="256" y="167"/> <point x="256" y="132"/> <point x="231" y="107"/> <point x="196" y="107"/> <point x="171" y="132"/> <point x="171" y="167"/> <point x="196" y="192"/> <point x="213" y="192" type="qcurve" smooth="yes"/> </contour> <contour> <point x="146" y="277"/> <point x="171" y="252"/> <point x="171" y="217"/> <point x="146" y="192"/> <point x="110" y="192"/> <point x="85" y="217"/> <point x="85" y="252"/> <point x="110" y="277"/> <point x="128" y="277" type="qcurve" smooth="yes"/> </contour> <contour> <point x="146" y="107"/> <point x="171" y="82"/> <point x="171" y="46"/> <point x="146" y="21"/> <point x="110" y="21"/> <point x="85" y="46"/> <point x="85" y="82"/> <point x="110" y="107"/> <point x="128" y="107" type="qcurve" smooth="yes"/> </contour> <contour> <point x="366" y="277"/> <point x="341" y="302"/> <point x="341" y="338"/> <point x="366" y="363"/> <point x="402" y="363"/> <point x="427" y="338"/> <point x="427" y="302"/> <point x="402" y="277"/> <point x="384" y="277" type="qcurve" smooth="yes"/> </contour> <contour> <point x="316" y="107"/> <point x="341" y="82"/> <point x="341" y="46"/> <point x="316" y="21"/> <point x="281" y="21"/> <point x="256" y="46"/> <point x="256" y="82"/> <point x="281" y="107"/> <point x="299" y="107" type="qcurve" smooth="yes"/> </contour> <contour> <point x="402" y="192"/> <point x="427" y="167"/> <point x="427" y="132"/> <point x="402" y="107"/> <point x="366" y="107"/> <point x="341" y="132"/> <point x="341" y="167"/> <point x="366" y="192"/> <point x="384" y="192" type="qcurve" smooth="yes"/> </contour> <contour> <point x="316" y="277"/> <point x="341" y="252"/> <point x="341" y="217"/> <point x="316" y="192"/> <point x="281" y="192"/> <point x="256" y="217"/> <point x="256" y="252"/> <point x="281" y="277"/> <point x="299" y="277" type="qcurve" smooth="yes"/> </contour> <contour> <point x="231" y="363"/> <point x="256" y="338"/> <point x="256" y="302"/> <point x="231" y="277"/> <point x="196" y="277"/> <point x="171" y="302"/> <point x="171" y="338"/> <point x="196" y="363"/> <point x="213" y="363" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/grain.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/grain.glif", "repo_id": "cascadia-code", "token_count": 1438 }
452
<?xml version='1.0' encoding='UTF-8'?> <glyph name="plus-circle" format="2"> <advance width="512"/> <unicode hex="F0417"/> <note> plus-circle </note> <outline> <contour> <point x="363" y="213" type="line"/> <point x="277" y="213" type="line"/> <point x="277" y="299" type="line"/> <point x="235" y="299" type="line"/> <point x="235" y="213" type="line"/> <point x="149" y="213" type="line"/> <point x="149" y="171" type="line"/> <point x="235" y="171" type="line"/> <point x="235" y="85" type="line"/> <point x="277" y="85" type="line"/> <point x="277" y="171" type="line"/> <point x="363" y="171" type="line"/> </contour> <contour> <point x="298" y="405"/> <point x="377" y="373"/> <point x="437" y="313"/> <point x="469" y="234"/> <point x="469" y="150"/> <point x="437" y="71"/> <point x="377" y="11"/> <point x="298" y="-21"/> <point x="214" y="-21"/> <point x="135" y="11"/> <point x="75" y="71"/> <point x="43" y="150"/> <point x="43" y="234"/> <point x="75" y="313"/> <point x="135" y="373"/> <point x="214" y="405"/> <point x="256" y="405" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/plus-circle.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/plus-circle.glif", "repo_id": "cascadia-code", "token_count": 636 }
453
<?xml version='1.0' encoding='UTF-8'?> <glyph name="protocol" format="2"> <advance width="512"/> <unicode hex="F0FD8"/> <note> protocol </note> <outline> <contour> <point x="469" y="363" type="line"/> <point x="384" y="363" type="line"/> <point x="299" y="21" type="line"/> <point x="384" y="21" type="line"/> </contour> <contour> <point x="256" y="21" type="line"/> <point x="171" y="21" type="line"/> <point x="256" y="363" type="line"/> <point x="341" y="363" type="line"/> </contour> <contour> <point x="43" y="118"/> <point x="74" y="149"/> <point x="118" y="149"/> <point x="149" y="118"/> <point x="149" y="74"/> <point x="118" y="43"/> <point x="74" y="43"/> <point x="43" y="74"/> <point x="43" y="96" type="qcurve" smooth="yes"/> </contour> <contour> <point x="43" y="267"/> <point x="74" y="299"/> <point x="118" y="299"/> <point x="149" y="267"/> <point x="149" y="223"/> <point x="118" y="192"/> <point x="74" y="192"/> <point x="43" y="223"/> <point x="43" y="245" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/protocol.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/protocol.glif", "repo_id": "cascadia-code", "token_count": 611 }
454
<?xml version='1.0' encoding='UTF-8'?> <glyph name="tumble-dryer-off" format="2"> <advance width="512"/> <unicode hex="F11BB"/> <note> tumble-dryer-off </note> <outline> <contour> <point x="445" y="-37" type="line"/> <point x="415" y="-7" type="line"/> <point x="403" y="-21"/> <point x="384" y="-21" type="qcurve" smooth="yes"/> <point x="128" y="-21" type="line" smooth="yes"/> <point x="110" y="-21"/> <point x="85" y="4"/> <point x="85" y="21" type="qcurve" smooth="yes"/> <point x="85" y="322" type="line"/> <point x="24" y="384" type="line"/> <point x="51" y="411" type="line"/> <point x="472" y="-10" type="line"/> </contour> <contour> <point x="283" y="21"/> <point x="330" y="42"/> <point x="348" y="60" type="qcurve"/> <point x="333" y="75" type="line"/> <point x="299" y="75" type="line"/> <point x="302" y="94"/> <point x="299" y="108" type="qcurve"/> <point x="212" y="195" type="line"/> <point x="214" y="177"/> <point x="226" y="164" type="qcurve" smooth="yes"/> <point x="262" y="129"/> <point x="254" y="75" type="qcurve"/> <point x="213" y="75" type="line"/> <point x="217" y="96"/> <point x="211" y="124"/> <point x="201" y="134" type="qcurve" smooth="yes"/> <point x="165" y="169"/> <point x="173" y="224" type="qcurve"/> <point x="184" y="224" type="line"/> <point x="167" y="241" type="line"/> <point x="149" y="223"/> <point x="128" y="176"/> <point x="128" y="149" type="qcurve" smooth="yes"/> <point x="128" y="115"/> <point x="162" y="56"/> <point x="221" y="21"/> <point x="256" y="21" type="qcurve" smooth="yes"/> </contour> <contour> <point x="114" y="403" type="line"/> <point x="121" y="405"/> <point x="128" y="405" type="qcurve" smooth="yes"/> <point x="384" y="405" type="line" smooth="yes"/> <point x="402" y="405"/> <point x="427" y="380"/> <point x="427" y="363" type="qcurve" smooth="yes"/> <point x="427" y="90" type="line"/> <point x="383" y="133" type="line"/> <point x="384" y="139"/> <point x="384" y="149" type="qcurve" smooth="yes"/> <point x="384" y="184"/> <point x="350" y="243"/> <point x="291" y="277"/> <point x="256" y="277" type="qcurve" smooth="yes"/> <point x="245" y="277"/> <point x="240" y="276" type="qcurve"/> </contour> <contour> <point x="205" y="363"/> <point x="192" y="350"/> <point x="192" y="333"/> <point x="205" y="320"/> <point x="222" y="320"/> <point x="235" y="333"/> <point x="235" y="350"/> <point x="222" y="363"/> <point x="213" y="363" type="qcurve" smooth="yes"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/tumble-dryer-off.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/original/MaterialDesignIconsDesktop.ufo/glyphs/tumble-dryer-off.glif", "repo_id": "cascadia-code", "token_count": 1415 }
455
<?xml version='1.0' encoding='UTF-8'?> <glyph name="access-point" format="2"> <advance width="1200"/> <unicode hex="F0003"/> <note> access-point </note> <outline> <contour> <point x="271" y="1039" type="qcurve"/> <point x="189" y="1121" type="line"/> <point x="110" y="1039"/> <point x="20" y="827"/> <point x="20" y="593"/> <point x="110" y="381"/> <point x="189" y="299" type="qcurve"/> <point x="271" y="381" type="line"/> <point x="208" y="446"/> <point x="134" y="615"/> <point x="134" y="805"/> <point x="208" y="974"/> </contour> <contour> <point x="1090" y="1039"/> <point x="1011" y="1121" type="qcurve"/> <point x="929" y="1039" type="line"/> <point x="992" y="974"/> <point x="1066" y="805"/> <point x="1066" y="615"/> <point x="992" y="446"/> <point x="929" y="381" type="qcurve"/> <point x="1011" y="299" type="line"/> <point x="1090" y="381"/> <point x="1180" y="593"/> <point x="1180" y="827"/> </contour> <contour> <point x="437" y="873" type="qcurve"/> <point x="355" y="955" type="line"/> <point x="306" y="909"/> <point x="251" y="781"/> <point x="251" y="639"/> <point x="306" y="511"/> <point x="355" y="465" type="qcurve"/> <point x="437" y="547" type="line"/> <point x="369" y="615"/> <point x="369" y="805"/> </contour> <contour> <point x="894" y="909"/> <point x="845" y="955" type="qcurve"/> <point x="763" y="873" type="line"/> <point x="831" y="805"/> <point x="831" y="615"/> <point x="763" y="547" type="qcurve"/> <point x="845" y="465" type="line"/> <point x="894" y="511"/> <point x="949" y="639"/> <point x="949" y="781"/> </contour> <contour> <point x="649" y="827"/> <point x="600" y="827" type="qcurve" smooth="yes"/> <point x="551" y="827"/> <point x="483" y="759"/> <point x="483" y="661"/> <point x="551" y="593"/> <point x="649" y="593"/> <point x="717" y="661"/> <point x="717" y="759"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/access-point.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/access-point.glif", "repo_id": "cascadia-code", "token_count": 1137 }
456
<?xml version='1.0' encoding='UTF-8'?> <glyph name="blur-radial" format="2"> <advance width="1200"/> <unicode hex="F00B8"/> <note> blur-radial </note> <outline> <contour> <point x="742" y="653"/> <point x="717" y="653" type="qcurve" smooth="yes"/> <point x="693" y="653"/> <point x="657" y="617"/> <point x="657" y="568"/> <point x="693" y="536"/> <point x="742" y="536"/> <point x="774" y="568"/> <point x="774" y="617"/> </contour> <contour> <point x="728" y="449"/> <point x="717" y="449" type="qcurve" smooth="yes"/> <point x="703" y="449"/> <point x="687" y="432"/> <point x="687" y="408"/> <point x="703" y="391"/> <point x="728" y="391"/> <point x="744" y="408"/> <point x="744" y="432"/> </contour> <contour> <point x="725" y="244"/> <point x="600" y="244" type="qcurve" smooth="yes"/> <point x="475" y="244"/> <point x="260" y="370"/> <point x="134" y="585"/> <point x="134" y="835"/> <point x="260" y="1050"/> <point x="475" y="1176"/> <point x="725" y="1176"/> <point x="940" y="1050"/> <point x="1066" y="835"/> <point x="1066" y="585"/> <point x="940" y="370"/> </contour> <contour> <point x="714" y="1290"/> <point x="600" y="1290" type="qcurve" smooth="yes"/> <point x="486" y="1290"/> <point x="271" y="1203"/> <point x="107" y="1039"/> <point x="20" y="824"/> <point x="20" y="596"/> <point x="107" y="381"/> <point x="271" y="217"/> <point x="486" y="130"/> <point x="714" y="130"/> <point x="929" y="217"/> <point x="1093" y="381"/> <point x="1180" y="596"/> <point x="1180" y="824"/> <point x="1093" y="1039"/> <point x="929" y="1203"/> </contour> <contour> <point x="902" y="854"/> <point x="891" y="854" type="qcurve" smooth="yes"/> <point x="878" y="854"/> <point x="861" y="838"/> <point x="861" y="813"/> <point x="878" y="797"/> <point x="902" y="797"/> <point x="919" y="813"/> <point x="919" y="838"/> </contour> <contour> <point x="902" y="623"/> <point x="891" y="623" type="qcurve" smooth="yes"/> <point x="878" y="623"/> <point x="861" y="607"/> <point x="861" y="582"/> <point x="878" y="566"/> <point x="902" y="566"/> <point x="919" y="582"/> <point x="919" y="607"/> </contour> <contour> <point x="703" y="971"/> <point x="717" y="971" type="qcurve" smooth="yes"/> <point x="728" y="971"/> <point x="744" y="988"/> <point x="744" y="1012"/> <point x="728" y="1029"/> <point x="703" y="1029"/> <point x="687" y="1012"/> <point x="687" y="988"/> </contour> <contour> <point x="742" y="884"/> <point x="717" y="884" type="qcurve" smooth="yes"/> <point x="693" y="884"/> <point x="657" y="852"/> <point x="657" y="803"/> <point x="693" y="767"/> <point x="742" y="767"/> <point x="774" y="803"/> <point x="774" y="852"/> </contour> <contour> <point x="472" y="971"/> <point x="483" y="971" type="qcurve" smooth="yes"/> <point x="497" y="971"/> <point x="513" y="988"/> <point x="513" y="1012"/> <point x="497" y="1029"/> <point x="472" y="1029"/> <point x="456" y="1012"/> <point x="456" y="988"/> </contour> <contour> <point x="322" y="623"/> <point x="309" y="623" type="qcurve" smooth="yes"/> <point x="298" y="623"/> <point x="281" y="607"/> <point x="281" y="582"/> <point x="298" y="566"/> <point x="322" y="566"/> <point x="339" y="582"/> <point x="339" y="607"/> </contour> <contour> <point x="497" y="449"/> <point x="483" y="449" type="qcurve" smooth="yes"/> <point x="472" y="449"/> <point x="456" y="432"/> <point x="456" y="408"/> <point x="472" y="391"/> <point x="497" y="391"/> <point x="513" y="408"/> <point x="513" y="432"/> </contour> <contour> <point x="322" y="854"/> <point x="309" y="854" 
type="qcurve" smooth="yes"/> <point x="298" y="854"/> <point x="281" y="838"/> <point x="281" y="813"/> <point x="298" y="797"/> <point x="322" y="797"/> <point x="339" y="813"/> <point x="339" y="838"/> </contour> <contour> <point x="507" y="653"/> <point x="483" y="653" type="qcurve" smooth="yes"/> <point x="458" y="653"/> <point x="426" y="617"/> <point x="426" y="568"/> <point x="458" y="536"/> <point x="507" y="536"/> <point x="543" y="568"/> <point x="543" y="617"/> </contour> <contour> <point x="507" y="884"/> <point x="483" y="884" type="qcurve" smooth="yes"/> <point x="458" y="884"/> <point x="426" y="852"/> <point x="426" y="803"/> <point x="458" y="767"/> <point x="507" y="767"/> <point x="543" y="803"/> <point x="543" y="852"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/blur-radial.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/blur-radial.glif", "repo_id": "cascadia-code", "token_count": 2754 }
457
<?xml version='1.0' encoding='UTF-8'?> <glyph name="briefcase-account" format="2"> <advance width="1200"/> <unicode hex="F0CF0"/> <note> briefcase-account </note> <outline> <contour> <point x="1112" y="1030"/> <point x="1066" y="1030" type="qcurve" smooth="yes"/> <point x="831" y="1030" type="line"/> <point x="831" y="1147" type="line" smooth="yes"/> <point x="831" y="1193"/> <point x="763" y="1261"/> <point x="717" y="1261" type="qcurve" smooth="yes"/> <point x="483" y="1261" type="line" smooth="yes"/> <point x="437" y="1261"/> <point x="369" y="1193"/> <point x="369" y="1147" type="qcurve" smooth="yes"/> <point x="369" y="1030" type="line"/> <point x="134" y="1030" type="line" smooth="yes"/> <point x="88" y="1030"/> <point x="20" y="962"/> <point x="20" y="913" type="qcurve" smooth="yes"/> <point x="20" y="276" type="line" smooth="yes"/> <point x="20" y="227"/> <point x="88" y="159"/> <point x="134" y="159" type="qcurve" smooth="yes"/> <point x="1066" y="159" type="line" smooth="yes"/> <point x="1112" y="159"/> <point x="1180" y="227"/> <point x="1180" y="276" type="qcurve" smooth="yes"/> <point x="1180" y="913" type="line" smooth="yes"/> <point x="1180" y="962"/> </contour> <contour> <point x="483" y="1030" type="line"/> <point x="483" y="1147" type="line"/> <point x="717" y="1147" type="line"/> <point x="717" y="1030" type="line"/> </contour> <contour> <point x="540" y="856"/> <point x="600" y="856" type="qcurve" smooth="yes"/> <point x="660" y="856"/> <point x="744" y="771"/> <point x="744" y="651"/> <point x="660" y="564"/> <point x="540" y="564"/> <point x="456" y="651"/> <point x="456" y="771"/> </contour> <contour> <point x="891" y="346" type="qcurve" smooth="yes"/> <point x="891" y="276" type="line"/> <point x="309" y="276" type="line"/> <point x="309" y="346" type="line" smooth="yes"/> <point x="309" y="406"/> <point x="480" y="494"/> <point x="720" y="494"/> <point x="891" y="406"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/briefcase-account.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/briefcase-account.glif", "repo_id": "cascadia-code", "token_count": 1115 }
458
<?xml version='1.0' encoding='UTF-8'?> <glyph name="chat-outline" format="2"> <advance width="1200"/> <unicode hex="F0EDE"/> <note> chat-outline </note> <outline> <contour> <point x="758" y="1233"/> <point x="600" y="1233" type="qcurve" smooth="yes"/> <point x="442" y="1233"/> <point x="175" y="1108"/> <point x="20" y="895"/> <point x="20" y="767" type="qcurve" smooth="yes"/> <point x="20" y="675"/> <point x="104" y="506"/> <point x="178" y="449" type="qcurve"/> <point x="178" y="410"/> <point x="151" y="356" type="qcurve" smooth="yes"/> <point x="107" y="277"/> <point x="20" y="187" type="qcurve"/> <point x="121" y="193"/> <point x="314" y="266"/> <point x="396" y="332" type="qcurve"/> <point x="497" y="304"/> <point x="600" y="304" type="qcurve" smooth="yes"/> <point x="758" y="304"/> <point x="1025" y="430"/> <point x="1180" y="642"/> <point x="1180" y="895"/> <point x="1025" y="1108"/> </contour> <contour> <point x="725" y="419"/> <point x="600" y="419" type="qcurve" smooth="yes"/> <point x="475" y="419"/> <point x="260" y="514"/> <point x="134" y="675"/> <point x="134" y="862"/> <point x="260" y="1023"/> <point x="475" y="1116"/> <point x="725" y="1116"/> <point x="940" y="1023"/> <point x="1066" y="862"/> <point x="1066" y="675"/> <point x="940" y="514"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/chat-outline.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/chat-outline.glif", "repo_id": "cascadia-code", "token_count": 778 }
459
<?xml version='1.0' encoding='UTF-8'?> <glyph name="circle-half" format="2"> <advance width="1200"/> <unicode hex="F1395"/> <note> circle-half </note> <outline> <contour> <point x="955" y="0" type="qcurve"/> <point x="955" y="1420" type="line"/> <point x="815" y="1420"/> <point x="552" y="1313"/> <point x="352" y="1113"/> <point x="245" y="850"/> <point x="245" y="570"/> <point x="352" y="307"/> <point x="552" y="107"/> <point x="815" y="0"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/circle-half.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/circle-half.glif", "repo_id": "cascadia-code", "token_count": 272 }
460
<?xml version='1.0' encoding='UTF-8'?> <glyph name="dialpad" format="2"> <advance width="1200"/> <unicode hex="F061C"/> <note> dialpad </note> <outline> <contour> <point x="654" y="260"/> <point x="600" y="260" type="qcurve" smooth="yes"/> <point x="546" y="260"/> <point x="470" y="184"/> <point x="470" y="76"/> <point x="546" y="0"/> <point x="654" y="0"/> <point x="730" y="76"/> <point x="730" y="184"/> </contour> <contour> <point x="268" y="1420"/> <point x="213" y="1420" type="qcurve" smooth="yes"/> <point x="159" y="1420"/> <point x="83" y="1344"/> <point x="83" y="1236"/> <point x="159" y="1160"/> <point x="268" y="1160"/> <point x="343" y="1236"/> <point x="343" y="1344"/> </contour> <contour> <point x="268" y="1033"/> <point x="213" y="1033" type="qcurve" smooth="yes"/> <point x="159" y="1033"/> <point x="83" y="958"/> <point x="83" y="849"/> <point x="159" y="773"/> <point x="268" y="773"/> <point x="343" y="849"/> <point x="343" y="958"/> </contour> <contour> <point x="268" y="647"/> <point x="213" y="647" type="qcurve" smooth="yes"/> <point x="159" y="647"/> <point x="83" y="571"/> <point x="83" y="462"/> <point x="159" y="387"/> <point x="268" y="387"/> <point x="343" y="462"/> <point x="343" y="571"/> </contour> <contour> <point x="932" y="1160"/> <point x="987" y="1160" type="qcurve" smooth="yes"/> <point x="1041" y="1160"/> <point x="1117" y="1236"/> <point x="1117" y="1344"/> <point x="1041" y="1420"/> <point x="932" y="1420"/> <point x="857" y="1344"/> <point x="857" y="1236"/> </contour> <contour> <point x="654" y="647"/> <point x="600" y="647" type="qcurve" smooth="yes"/> <point x="546" y="647"/> <point x="470" y="571"/> <point x="470" y="462"/> <point x="546" y="387"/> <point x="654" y="387"/> <point x="730" y="462"/> <point x="730" y="571"/> </contour> <contour> <point x="1041" y="647"/> <point x="987" y="647" type="qcurve" smooth="yes"/> <point x="932" y="647"/> <point x="857" y="571"/> <point x="857" y="462"/> <point x="932" y="387"/> <point x="1041" y="387"/> <point x="1117" y="462"/> <point x="1117" y="571"/> </contour> <contour> <point x="1041" y="1033"/> <point x="987" y="1033" type="qcurve" smooth="yes"/> <point x="932" y="1033"/> <point x="857" y="958"/> <point x="857" y="849"/> <point x="932" y="773"/> <point x="1041" y="773"/> <point x="1117" y="849"/> <point x="1117" y="958"/> </contour> <contour> <point x="654" y="1033"/> <point x="600" y="1033" type="qcurve" smooth="yes"/> <point x="546" y="1033"/> <point x="470" y="958"/> <point x="470" y="849"/> <point x="546" y="773"/> <point x="654" y="773"/> <point x="730" y="849"/> <point x="730" y="958"/> </contour> <contour> <point x="654" y="1420"/> <point x="600" y="1420" type="qcurve" smooth="yes"/> <point x="546" y="1420"/> <point x="470" y="1344"/> <point x="470" y="1236"/> <point x="546" y="1160"/> <point x="654" y="1160"/> <point x="730" y="1236"/> <point x="730" y="1344"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/dialpad.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/dialpad.glif", "repo_id": "cascadia-code", "token_count": 1851 }
461
<?xml version='1.0' encoding='UTF-8'?> <glyph name="google-circles-communities" format="2"> <advance width="1200"/> <unicode hex="F02B1"/> <note> google-circles-communities </note> <outline> <contour> <point x="823" y="710"/> <point x="774" y="710" type="qcurve" smooth="yes"/> <point x="725" y="710"/> <point x="657" y="642"/> <point x="657" y="547"/> <point x="725" y="479"/> <point x="823" y="479"/> <point x="891" y="547"/> <point x="891" y="642"/> </contour> <contour> <point x="725" y="244"/> <point x="600" y="244" type="qcurve" smooth="yes"/> <point x="475" y="244"/> <point x="260" y="370"/> <point x="134" y="585"/> <point x="134" y="835"/> <point x="260" y="1050"/> <point x="475" y="1176"/> <point x="725" y="1176"/> <point x="940" y="1050"/> <point x="1066" y="835"/> <point x="1066" y="585"/> <point x="940" y="370"/> </contour> <contour> <point x="714" y="1290"/> <point x="600" y="1290" type="qcurve" smooth="yes"/> <point x="486" y="1290"/> <point x="271" y="1203"/> <point x="107" y="1039"/> <point x="20" y="824"/> <point x="20" y="596"/> <point x="107" y="381"/> <point x="271" y="217"/> <point x="486" y="130"/> <point x="714" y="130"/> <point x="929" y="217"/> <point x="1093" y="381"/> <point x="1180" y="596"/> <point x="1180" y="824"/> <point x="1093" y="1039"/> <point x="929" y="1203"/> </contour> <contour> <point x="717" y="835"/> <point x="717" y="884" type="qcurve" smooth="yes"/> <point x="717" y="933"/> <point x="649" y="1001"/> <point x="551" y="1001"/> <point x="483" y="933"/> <point x="483" y="835"/> <point x="551" y="767"/> <point x="649" y="767"/> </contour> <contour> <point x="475" y="710"/> <point x="426" y="710" type="qcurve" smooth="yes"/> <point x="377" y="710"/> <point x="309" y="642"/> <point x="309" y="547"/> <point x="377" y="479"/> <point x="475" y="479"/> <point x="543" y="547"/> <point x="543" y="642"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/google-circles-communities.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/google-circles-communities.glif", "repo_id": "cascadia-code", "token_count": 1173 }
462
<?xml version='1.0' encoding='UTF-8'?> <glyph name="keyboard-settings-outline" format="2"> <advance width="1200"/> <unicode hex="F09FA"/> <note> keyboard-settings-outline </note> <outline> <contour> <point x="309" y="159" type="line"/> <point x="426" y="159" type="line"/> <point x="426" y="276" type="line"/> <point x="309" y="276" type="line"/> </contour> <contour> <point x="543" y="159" type="line"/> <point x="657" y="159" type="line"/> <point x="657" y="276" type="line"/> <point x="543" y="276" type="line"/> </contour> <contour> <point x="774" y="159" type="line"/> <point x="891" y="159" type="line"/> <point x="891" y="276" type="line"/> <point x="774" y="276" type="line"/> </contour> <contour> <point x="1066" y="1261" type="qcurve" smooth="yes"/> <point x="134" y="1261" type="line" smooth="yes"/> <point x="88" y="1261"/> <point x="20" y="1193"/> <point x="20" y="1147" type="qcurve" smooth="yes"/> <point x="20" y="564" type="line" smooth="yes"/> <point x="20" y="518"/> <point x="88" y="450"/> <point x="134" y="450" type="qcurve" smooth="yes"/> <point x="1066" y="450" type="line" smooth="yes"/> <point x="1112" y="450"/> <point x="1180" y="518"/> <point x="1180" y="564" type="qcurve" smooth="yes"/> <point x="1180" y="1147" type="line" smooth="yes"/> <point x="1180" y="1193"/> <point x="1112" y="1261"/> </contour> <contour> <point x="134" y="564" type="line"/> <point x="134" y="1147" type="line"/> <point x="1066" y="1147" type="line"/> <point x="1066" y="564" type="line"/> </contour> <contour> <point x="309" y="1087" type="line"/> <point x="194" y="1087" type="line"/> <point x="194" y="973" type="line"/> <point x="309" y="973" type="line"/> </contour> <contour> <point x="483" y="1087" type="line"/> <point x="369" y="1087" type="line"/> <point x="369" y="973" type="line"/> <point x="483" y="973" type="line"/> </contour> <contour> <point x="657" y="1087" type="line"/> <point x="543" y="1087" type="line"/> <point x="543" y="973" type="line"/> <point x="657" y="973" type="line"/> </contour> <contour> <point x="831" y="1087" type="line"/> <point x="717" y="1087" type="line"/> <point x="717" y="973" type="line"/> <point x="831" y="973" type="line"/> </contour> <contour> <point x="1006" y="1087" type="line"/> <point x="891" y="1087" type="line"/> <point x="891" y="973" type="line"/> <point x="1006" y="973" type="line"/> </contour> <contour> <point x="309" y="913" type="line"/> <point x="194" y="913" type="line"/> <point x="194" y="798" type="line"/> <point x="309" y="798" type="line"/> </contour> <contour> <point x="483" y="913" type="line"/> <point x="369" y="913" type="line"/> <point x="369" y="798" type="line"/> <point x="483" y="798" type="line"/> </contour> <contour> <point x="657" y="913" type="line"/> <point x="543" y="913" type="line"/> <point x="543" y="798" type="line"/> <point x="657" y="798" type="line"/> </contour> <contour> <point x="831" y="913" type="line"/> <point x="717" y="913" type="line"/> <point x="717" y="798" type="line"/> <point x="831" y="798" type="line"/> </contour> <contour> <point x="1006" y="913" type="line"/> <point x="891" y="913" type="line"/> <point x="891" y="798" type="line"/> <point x="1006" y="798" type="line"/> </contour> <contour> <point x="831" y="739" type="line"/> <point x="369" y="739" type="line"/> <point x="369" y="624" type="line"/> <point x="831" y="624" type="line"/> </contour> </outline> </glyph>
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/keyboard-settings-outline.glif/0
{ "file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/keyboard-settings-outline.glif", "repo_id": "cascadia-code", "token_count": 1956 }
463
# Generated by Powerlevel10k configuration wizard on 2022-04-20 at 10:33 BST. # Based on romkatv/powerlevel10k/config/p10k-lean.zsh, checksum 08860. # Wizard options: nerdfont-complete + powerline, small icons, unicode, lean, 2 lines, # disconnected, no frame, compact, many icons, concise, instant_prompt=verbose. # Type `p10k configure` to generate another config. # # Config for Powerlevel10k with lean prompt style. Type `p10k configure` to generate # your own config based on it. # # Tip: Looking for a nice color? Here's a one-liner to print colormap. # # for i in {0..255}; do print -Pn "%K{$i} %k%F{$i}${(l:3::0:)i}%f " ${${(M)$((i%6)):#3}:+$'\n'}; done # Temporarily change options. 'builtin' 'local' '-a' 'p10k_config_opts' [[ ! -o 'aliases' ]] || p10k_config_opts+=('aliases') [[ ! -o 'sh_glob' ]] || p10k_config_opts+=('sh_glob') [[ ! -o 'no_brace_expand' ]] || p10k_config_opts+=('no_brace_expand') 'builtin' 'setopt' 'no_aliases' 'no_sh_glob' 'brace_expand' () { emulate -L zsh -o extended_glob # Unset all configuration options. This allows you to apply configuration changes without # restarting zsh. Edit ~/.p10k.zsh and type `source ~/.p10k.zsh`. unset -m '(POWERLEVEL9K_*|DEFAULT_USER)~POWERLEVEL9K_GITSTATUS_DIR' # Zsh >= 5.1 is required. autoload -Uz is-at-least && is-at-least 5.1 || return # The list of segments shown on the left. Fill it with the most important segments. typeset -g POWERLEVEL9K_LEFT_PROMPT_ELEMENTS=( # =========================[ Line #1 ]========================= os_icon # os identifier dir # current directory vcs # git status # =========================[ Line #2 ]========================= newline # \n prompt_char # prompt symbol ) # The list of segments shown on the right. Fill it with less important segments. # Right prompt on the last prompt line (where you are typing your commands) gets # automatically hidden when the input line reaches it. Right prompt above the # last prompt line gets hidden if it would overlap with left prompt. 
typeset -g POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS=( # =========================[ Line #1 ]========================= status # exit code of the last command command_execution_time # duration of the last command background_jobs # presence of background jobs direnv # direnv status (https://direnv.net/) asdf # asdf version manager (https://github.com/asdf-vm/asdf) virtualenv # python virtual environment (https://docs.python.org/3/library/venv.html) anaconda # conda environment (https://conda.io/) pyenv # python environment (https://github.com/pyenv/pyenv) goenv # go environment (https://github.com/syndbg/goenv) nodenv # node.js version from nodenv (https://github.com/nodenv/nodenv) nvm # node.js version from nvm (https://github.com/nvm-sh/nvm) nodeenv # node.js environment (https://github.com/ekalinin/nodeenv) # node_version # node.js version # go_version # go version (https://golang.org) # rust_version # rustc version (https://www.rust-lang.org) # dotnet_version # .NET version (https://dotnet.microsoft.com) # php_version # php version (https://www.php.net/) # laravel_version # laravel php framework version (https://laravel.com/) # java_version # java version (https://www.java.com/) # package # name@version from package.json (https://docs.npmjs.com/files/package.json) rbenv # ruby version from rbenv (https://github.com/rbenv/rbenv) rvm # ruby version from rvm (https://rvm.io) fvm # flutter version management (https://github.com/leoafarias/fvm) luaenv # lua version from luaenv (https://github.com/cehoffman/luaenv) jenv # java version from jenv (https://github.com/jenv/jenv) plenv # perl version from plenv (https://github.com/tokuhirom/plenv) perlbrew # perl version from perlbrew (https://github.com/gugod/App-perlbrew) phpenv # php version from phpenv (https://github.com/phpenv/phpenv) scalaenv # scala version from scalaenv (https://github.com/scalaenv/scalaenv) haskell_stack # haskell version from stack (https://haskellstack.org/) kubecontext # current kubernetes context (https://kubernetes.io/) terraform # terraform workspace (https://www.terraform.io) # terraform_version # terraform version (https://www.terraform.io) aws # aws profile (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) aws_eb_env # aws elastic beanstalk environment (https://aws.amazon.com/elasticbeanstalk/) azure # azure account name (https://docs.microsoft.com/en-us/cli/azure) gcloud # google cloud cli account and project (https://cloud.google.com/) google_app_cred # google application credentials (https://cloud.google.com/docs/authentication/production) toolbox # toolbox name (https://github.com/containers/toolbox) context # user@hostname nordvpn # nordvpn connection status, linux only (https://nordvpn.com/) ranger # ranger shell (https://github.com/ranger/ranger) nnn # nnn shell (https://github.com/jarun/nnn) xplr # xplr shell (https://github.com/sayanarijit/xplr) vim_shell # vim shell indicator (:sh) midnight_commander # midnight commander shell (https://midnight-commander.org/) nix_shell # nix shell (https://nixos.org/nixos/nix-pills/developing-with-nix-shell.html) # vpn_ip # virtual private network indicator # load # CPU load # disk_usage # disk usage # ram # free RAM # swap # used swap todo # todo items (https://github.com/todotxt/todo.txt-cli) timewarrior # timewarrior tracking status (https://timewarrior.net/) taskwarrior # taskwarrior task count (https://taskwarrior.org/) # time # current time # =========================[ Line #2 ]========================= newline # ip # ip address and 
bandwidth usage for a specified network interface # public_ip # public IP address # proxy # system-wide http/https/ftp proxy # battery # internal battery # wifi # wifi speed # example # example user-defined segment (see prompt_example function below) ) # Defines character set used by powerlevel10k. It's best to let `p10k configure` set it for you. typeset -g POWERLEVEL9K_MODE=nerdfont-complete # When set to `moderate`, some icons will have an extra space after them. This is meant to avoid # icon overlap when using non-monospace fonts. When set to `none`, spaces are not added. typeset -g POWERLEVEL9K_ICON_PADDING=none # Basic style options that define the overall look of your prompt. You probably don't want to # change them. typeset -g POWERLEVEL9K_BACKGROUND= # transparent background typeset -g POWERLEVEL9K_{LEFT,RIGHT}_{LEFT,RIGHT}_WHITESPACE= # no surrounding whitespace typeset -g POWERLEVEL9K_{LEFT,RIGHT}_SUBSEGMENT_SEPARATOR=' ' # separate segments with a space typeset -g POWERLEVEL9K_{LEFT,RIGHT}_SEGMENT_SEPARATOR= # no end-of-line symbol # When set to true, icons appear before content on both sides of the prompt. When set # to false, icons go after content. If empty or not set, icons go before content in the left # prompt and after content in the right prompt. # # You can also override it for a specific segment: # # POWERLEVEL9K_STATUS_ICON_BEFORE_CONTENT=false # # Or for a specific segment in specific state: # # POWERLEVEL9K_DIR_NOT_WRITABLE_ICON_BEFORE_CONTENT=false typeset -g POWERLEVEL9K_ICON_BEFORE_CONTENT=true # Add an empty line before each prompt. typeset -g POWERLEVEL9K_PROMPT_ADD_NEWLINE=false # Connect left prompt lines with these symbols. typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_PREFIX= typeset -g POWERLEVEL9K_MULTILINE_NEWLINE_PROMPT_PREFIX= typeset -g POWERLEVEL9K_MULTILINE_LAST_PROMPT_PREFIX= # Connect right prompt lines with these symbols. typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_SUFFIX= typeset -g POWERLEVEL9K_MULTILINE_NEWLINE_PROMPT_SUFFIX= typeset -g POWERLEVEL9K_MULTILINE_LAST_PROMPT_SUFFIX= # The left end of left prompt. typeset -g POWERLEVEL9K_LEFT_PROMPT_FIRST_SEGMENT_START_SYMBOL= # The right end of right prompt. typeset -g POWERLEVEL9K_RIGHT_PROMPT_LAST_SEGMENT_END_SYMBOL= # Ruler, a.k.a. the horizontal line before each prompt. If you set it to true, you'll # probably want to set POWERLEVEL9K_PROMPT_ADD_NEWLINE=false above and # POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_CHAR=' ' below. typeset -g POWERLEVEL9K_SHOW_RULER=false typeset -g POWERLEVEL9K_RULER_CHAR='─' # reasonable alternative: '·' typeset -g POWERLEVEL9K_RULER_FOREGROUND=242 # Filler between left and right prompt on the first prompt line. You can set it to '·' or '─' # to make it easier to see the alignment between left and right prompt and to separate prompt # from command output. It serves the same purpose as ruler (see above) without increasing # the number of prompt lines. You'll probably want to set POWERLEVEL9K_SHOW_RULER=false # if using this. You might also like POWERLEVEL9K_PROMPT_ADD_NEWLINE=false for more compact # prompt. typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_CHAR=' ' if [[ $POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_CHAR != ' ' ]]; then # The color of the filler. typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_FOREGROUND=242 # Add a space between the end of left prompt and the filler. typeset -g POWERLEVEL9K_LEFT_PROMPT_LAST_SEGMENT_END_SYMBOL=' ' # Add a space between the filler and the start of right prompt. 
typeset -g POWERLEVEL9K_RIGHT_PROMPT_FIRST_SEGMENT_START_SYMBOL=' ' # Start filler from the edge of the screen if there are no left segments on the first line. typeset -g POWERLEVEL9K_EMPTY_LINE_LEFT_PROMPT_FIRST_SEGMENT_END_SYMBOL='%{%}' # End filler on the edge of the screen if there are no right segments on the first line. typeset -g POWERLEVEL9K_EMPTY_LINE_RIGHT_PROMPT_FIRST_SEGMENT_START_SYMBOL='%{%}' fi #################################[ os_icon: os identifier ]################################## # OS identifier color. typeset -g POWERLEVEL9K_OS_ICON_FOREGROUND= # Custom icon. # typeset -g POWERLEVEL9K_OS_ICON_CONTENT_EXPANSION='⭐' ################################[ prompt_char: prompt symbol ]################################ # Green prompt symbol if the last command succeeded. typeset -g POWERLEVEL9K_PROMPT_CHAR_OK_{VIINS,VICMD,VIVIS,VIOWR}_FOREGROUND=76 # Red prompt symbol if the last command failed. typeset -g POWERLEVEL9K_PROMPT_CHAR_ERROR_{VIINS,VICMD,VIVIS,VIOWR}_FOREGROUND=196 # Default prompt symbol. typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VIINS_CONTENT_EXPANSION='❯' # Prompt symbol in command vi mode. typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VICMD_CONTENT_EXPANSION='❮' # Prompt symbol in visual vi mode. typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VIVIS_CONTENT_EXPANSION='V' # Prompt symbol in overwrite vi mode. typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VIOWR_CONTENT_EXPANSION='▶' typeset -g POWERLEVEL9K_PROMPT_CHAR_OVERWRITE_STATE=true # No line terminator if prompt_char is the last segment. typeset -g POWERLEVEL9K_PROMPT_CHAR_LEFT_PROMPT_LAST_SEGMENT_END_SYMBOL='' # No line introducer if prompt_char is the first segment. typeset -g POWERLEVEL9K_PROMPT_CHAR_LEFT_PROMPT_FIRST_SEGMENT_START_SYMBOL= ##################################[ dir: current directory ]################################## # Default current directory color. typeset -g POWERLEVEL9K_DIR_FOREGROUND=31 # If directory is too long, shorten some of its segments to the shortest possible unique # prefix. The shortened directory can be tab-completed to the original. typeset -g POWERLEVEL9K_SHORTEN_STRATEGY=truncate_to_unique # Replace removed segment suffixes with this symbol. typeset -g POWERLEVEL9K_SHORTEN_DELIMITER= # Color of the shortened directory segments. typeset -g POWERLEVEL9K_DIR_SHORTENED_FOREGROUND=103 # Color of the anchor directory segments. Anchor segments are never shortened. The first # segment is always an anchor. typeset -g POWERLEVEL9K_DIR_ANCHOR_FOREGROUND=39 # Display anchor directory segments in bold. typeset -g POWERLEVEL9K_DIR_ANCHOR_BOLD=true # Don't shorten directories that contain any of these files. They are anchors. local anchor_files=( .bzr .citc .git .hg .node-version .python-version .go-version .ruby-version .lua-version .java-version .perl-version .php-version .tool-version .shorten_folder_marker .svn .terraform CVS Cargo.toml composer.json go.mod package.json stack.yaml ) typeset -g POWERLEVEL9K_SHORTEN_FOLDER_MARKER="(${(j:|:)anchor_files})" # If set to "first" ("last"), remove everything before the first (last) subdirectory that contains # files matching $POWERLEVEL9K_SHORTEN_FOLDER_MARKER. For example, when the current directory is # /foo/bar/git_repo/nested_git_repo/baz, prompt will display git_repo/nested_git_repo/baz (first) # or nested_git_repo/baz (last). This assumes that git_repo and nested_git_repo contain markers # and other directories don't. # # Optionally, "first" and "last" can be followed by ":<offset>" where <offset> is an integer. 
# This moves the truncation point to the right (positive offset) or to the left (negative offset) # relative to the marker. Plain "first" and "last" are equivalent to "first:0" and "last:0" # respectively. typeset -g POWERLEVEL9K_DIR_TRUNCATE_BEFORE_MARKER=false # Don't shorten this many last directory segments. They are anchors. typeset -g POWERLEVEL9K_SHORTEN_DIR_LENGTH=1 # Shorten directory if it's longer than this even if there is space for it. The value can # be either absolute (e.g., '80') or a percentage of terminal width (e.g, '50%'). If empty, # directory will be shortened only when prompt doesn't fit or when other parameters demand it # (see POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS and POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS_PCT below). # If set to `0`, directory will always be shortened to its minimum length. typeset -g POWERLEVEL9K_DIR_MAX_LENGTH=80 # When `dir` segment is on the last prompt line, try to shorten it enough to leave at least this # many columns for typing commands. typeset -g POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS=40 # When `dir` segment is on the last prompt line, try to shorten it enough to leave at least # COLUMNS * POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS_PCT * 0.01 columns for typing commands. typeset -g POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS_PCT=50 # If set to true, embed a hyperlink into the directory. Useful for quickly # opening a directory in the file manager simply by clicking the link. # Can also be handy when the directory is shortened, as it allows you to see # the full directory that was used in previous commands. typeset -g POWERLEVEL9K_DIR_HYPERLINK=false # Enable special styling for non-writable and non-existent directories. See POWERLEVEL9K_LOCK_ICON # and POWERLEVEL9K_DIR_CLASSES below. typeset -g POWERLEVEL9K_DIR_SHOW_WRITABLE=v3 # The default icon shown next to non-writable and non-existent directories when # POWERLEVEL9K_DIR_SHOW_WRITABLE is set to v3. # typeset -g POWERLEVEL9K_LOCK_ICON='⭐' # POWERLEVEL9K_DIR_CLASSES allows you to specify custom icons and colors for different # directories. It must be an array with 3 * N elements. Each triplet consists of: # # 1. A pattern against which the current directory ($PWD) is matched. Matching is done with # extended_glob option enabled. # 2. Directory class for the purpose of styling. # 3. An empty string. # # Triplets are tried in order. The first triplet whose pattern matches $PWD wins. # # If POWERLEVEL9K_DIR_SHOW_WRITABLE is set to v3, non-writable and non-existent directories # acquire class suffix _NOT_WRITABLE and NON_EXISTENT respectively. # # For example, given these settings: # # typeset -g POWERLEVEL9K_DIR_CLASSES=( # '~/work(|/*)' WORK '' # '~(|/*)' HOME '' # '*' DEFAULT '') # # Whenever the current directory is ~/work or a subdirectory of ~/work, it gets styled with one # of the following classes depending on its writability and existence: WORK, WORK_NOT_WRITABLE or # WORK_NON_EXISTENT. # # Simply assigning classes to directories doesn't have any visible effects. It merely gives you an # option to define custom colors and icons for different directory classes. # # # Styling for WORK. # typeset -g POWERLEVEL9K_DIR_WORK_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_DIR_WORK_FOREGROUND=31 # typeset -g POWERLEVEL9K_DIR_WORK_SHORTENED_FOREGROUND=103 # typeset -g POWERLEVEL9K_DIR_WORK_ANCHOR_FOREGROUND=39 # # # Styling for WORK_NOT_WRITABLE. 
# typeset -g POWERLEVEL9K_DIR_WORK_NOT_WRITABLE_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_DIR_WORK_NOT_WRITABLE_FOREGROUND=31 # typeset -g POWERLEVEL9K_DIR_WORK_NOT_WRITABLE_SHORTENED_FOREGROUND=103 # typeset -g POWERLEVEL9K_DIR_WORK_NOT_WRITABLE_ANCHOR_FOREGROUND=39 # # # Styling for WORK_NON_EXISTENT. # typeset -g POWERLEVEL9K_DIR_WORK_NON_EXISTENT_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_DIR_WORK_NON_EXISTENT_FOREGROUND=31 # typeset -g POWERLEVEL9K_DIR_WORK_NON_EXISTENT_SHORTENED_FOREGROUND=103 # typeset -g POWERLEVEL9K_DIR_WORK_NON_EXISTENT_ANCHOR_FOREGROUND=39 # # If a styling parameter isn't explicitly defined for some class, it falls back to the classless # parameter. For example, if POWERLEVEL9K_DIR_WORK_NOT_WRITABLE_FOREGROUND is not set, it falls # back to POWERLEVEL9K_DIR_FOREGROUND. # # typeset -g POWERLEVEL9K_DIR_CLASSES=() # Custom prefix. # typeset -g POWERLEVEL9K_DIR_PREFIX='%fin ' #####################################[ vcs: git status ]###################################### # Branch icon. Set this parameter to '\uF126 ' for the popular Powerline branch icon. typeset -g POWERLEVEL9K_VCS_BRANCH_ICON='\uF126 ' # Untracked files icon. It's really a question mark, your font isn't broken. # Change the value of this parameter to show a different icon. typeset -g POWERLEVEL9K_VCS_UNTRACKED_ICON='?' # Formatter for Git status. # # Example output: master wip ⇣42⇡42 *42 merge ~42 +42 !42 ?42. # # You can edit the function to customize how Git status looks. # # VCS_STATUS_* parameters are set by gitstatus plugin. See reference: # https://github.com/romkatv/gitstatus/blob/master/gitstatus.plugin.zsh. function my_git_formatter() { emulate -L zsh if [[ -n $P9K_CONTENT ]]; then # If P9K_CONTENT is not empty, use it. It's either "loading" or from vcs_info (not from # gitstatus plugin). VCS_STATUS_* parameters are not available in this case. typeset -g my_git_format=$P9K_CONTENT return fi if (( $1 )); then # Styling for up-to-date Git status. local meta='%f' # default foreground local clean='%76F' # green foreground local modified='%178F' # yellow foreground local untracked='%39F' # blue foreground local conflicted='%196F' # red foreground else # Styling for incomplete and stale Git status. local meta='%244F' # grey foreground local clean='%244F' # grey foreground local modified='%244F' # grey foreground local untracked='%244F' # grey foreground local conflicted='%244F' # grey foreground fi local res if [[ -n $VCS_STATUS_LOCAL_BRANCH ]]; then local branch=${(V)VCS_STATUS_LOCAL_BRANCH} # If local branch name is at most 32 characters long, show it in full. # Otherwise show the first 12 … the last 12. # Tip: To always show local branch name in full without truncation, delete the next line. (( $#branch > 32 )) && branch[13,-13]="…" # <-- this line res+="${clean}${(g::)POWERLEVEL9K_VCS_BRANCH_ICON}${branch//\%/%%}" fi if [[ -n $VCS_STATUS_TAG # Show tag only if not on a branch. # Tip: To always show tag, delete the next line. && -z $VCS_STATUS_LOCAL_BRANCH # <-- this line ]]; then local tag=${(V)VCS_STATUS_TAG} # If tag name is at most 32 characters long, show it in full. # Otherwise show the first 12 … the last 12. # Tip: To always show tag name in full without truncation, delete the next line. (( $#tag > 32 )) && tag[13,-13]="…" # <-- this line res+="${meta}#${clean}${tag//\%/%%}" fi # Display the current Git commit if there is no branch and no tag. # Tip: To always display the current Git commit, delete the next line. 
[[ -z $VCS_STATUS_LOCAL_BRANCH && -z $VCS_STATUS_TAG ]] && # <-- this line res+="${meta}@${clean}${VCS_STATUS_COMMIT[1,8]}" # Show tracking branch name if it differs from local branch. if [[ -n ${VCS_STATUS_REMOTE_BRANCH:#$VCS_STATUS_LOCAL_BRANCH} ]]; then res+="${meta}:${clean}${(V)VCS_STATUS_REMOTE_BRANCH//\%/%%}" fi # Display "wip" if the latest commit's summary contains "wip" or "WIP". if [[ $VCS_STATUS_COMMIT_SUMMARY == (|*[^[:alnum:]])(wip|WIP)(|[^[:alnum:]]*) ]]; then res+=" ${modified}wip" fi # ⇣42 if behind the remote. (( VCS_STATUS_COMMITS_BEHIND )) && res+=" ${clean}⇣${VCS_STATUS_COMMITS_BEHIND}" # ⇡42 if ahead of the remote; no leading space if also behind the remote: ⇣42⇡42. (( VCS_STATUS_COMMITS_AHEAD && !VCS_STATUS_COMMITS_BEHIND )) && res+=" " (( VCS_STATUS_COMMITS_AHEAD )) && res+="${clean}⇡${VCS_STATUS_COMMITS_AHEAD}" # ⇠42 if behind the push remote. (( VCS_STATUS_PUSH_COMMITS_BEHIND )) && res+=" ${clean}⇠${VCS_STATUS_PUSH_COMMITS_BEHIND}" (( VCS_STATUS_PUSH_COMMITS_AHEAD && !VCS_STATUS_PUSH_COMMITS_BEHIND )) && res+=" " # ⇢42 if ahead of the push remote; no leading space if also behind: ⇠42⇢42. (( VCS_STATUS_PUSH_COMMITS_AHEAD )) && res+="${clean}⇢${VCS_STATUS_PUSH_COMMITS_AHEAD}" # *42 if have stashes. (( VCS_STATUS_STASHES )) && res+=" ${clean}*${VCS_STATUS_STASHES}" # 'merge' if the repo is in an unusual state. [[ -n $VCS_STATUS_ACTION ]] && res+=" ${conflicted}${VCS_STATUS_ACTION}" # ~42 if have merge conflicts. (( VCS_STATUS_NUM_CONFLICTED )) && res+=" ${conflicted}~${VCS_STATUS_NUM_CONFLICTED}" # +42 if have staged changes. (( VCS_STATUS_NUM_STAGED )) && res+=" ${modified}+${VCS_STATUS_NUM_STAGED}" # !42 if have unstaged changes. (( VCS_STATUS_NUM_UNSTAGED )) && res+=" ${modified}!${VCS_STATUS_NUM_UNSTAGED}" # ?42 if have untracked files. It's really a question mark, your font isn't broken. # See POWERLEVEL9K_VCS_UNTRACKED_ICON above if you want to use a different icon. # Remove the next line if you don't want to see untracked files at all. (( VCS_STATUS_NUM_UNTRACKED )) && res+=" ${untracked}${(g::)POWERLEVEL9K_VCS_UNTRACKED_ICON}${VCS_STATUS_NUM_UNTRACKED}" # "─" if the number of unstaged files is unknown. This can happen due to # POWERLEVEL9K_VCS_MAX_INDEX_SIZE_DIRTY (see below) being set to a non-negative number lower # than the number of files in the Git index, or due to bash.showDirtyState being set to false # in the repository config. The number of staged and untracked files may also be unknown # in this case. (( VCS_STATUS_HAS_UNSTAGED == -1 )) && res+=" ${modified}─" typeset -g my_git_format=$res } functions -M my_git_formatter 2>/dev/null # Don't count the number of unstaged, untracked and conflicted files in Git repositories with # more than this many files in the index. Negative value means infinity. # # If you are working in Git repositories with tens of millions of files and seeing performance # sagging, try setting POWERLEVEL9K_VCS_MAX_INDEX_SIZE_DIRTY to a number lower than the output # of `git ls-files | wc -l`. Alternatively, add `bash.showDirtyState = false` to the repository's # config: `git config bash.showDirtyState false`. typeset -g POWERLEVEL9K_VCS_MAX_INDEX_SIZE_DIRTY=-1 # Don't show Git status in prompt for repositories whose workdir matches this pattern. # For example, if set to '~', the Git repository at $HOME/.git will be ignored. # Multiple patterns can be combined with '|': '~(|/foo)|/bar/baz/*'. typeset -g POWERLEVEL9K_VCS_DISABLED_WORKDIR_PATTERN='~' # Disable the default Git status formatting. 
typeset -g POWERLEVEL9K_VCS_DISABLE_GITSTATUS_FORMATTING=true # Install our own Git status formatter. typeset -g POWERLEVEL9K_VCS_CONTENT_EXPANSION='${$((my_git_formatter(1)))+${my_git_format}}' typeset -g POWERLEVEL9K_VCS_LOADING_CONTENT_EXPANSION='${$((my_git_formatter(0)))+${my_git_format}}' # Enable counters for staged, unstaged, etc. typeset -g POWERLEVEL9K_VCS_{STAGED,UNSTAGED,UNTRACKED,CONFLICTED,COMMITS_AHEAD,COMMITS_BEHIND}_MAX_NUM=-1 # Icon color. typeset -g POWERLEVEL9K_VCS_VISUAL_IDENTIFIER_COLOR=76 typeset -g POWERLEVEL9K_VCS_LOADING_VISUAL_IDENTIFIER_COLOR=244 # Custom icon. # typeset -g POWERLEVEL9K_VCS_VISUAL_IDENTIFIER_EXPANSION='⭐' # Custom prefix. # typeset -g POWERLEVEL9K_VCS_PREFIX='%fon ' # Show status of repositories of these types. You can add svn and/or hg if you are # using them. If you do, your prompt may become slow even when your current directory # isn't in an svn or hg repository. typeset -g POWERLEVEL9K_VCS_BACKENDS=(git) # These settings are used for repositories other than Git or when gitstatusd fails and # Powerlevel10k has to fall back to using vcs_info. typeset -g POWERLEVEL9K_VCS_CLEAN_FOREGROUND=76 typeset -g POWERLEVEL9K_VCS_UNTRACKED_FOREGROUND=76 typeset -g POWERLEVEL9K_VCS_MODIFIED_FOREGROUND=178 ##########################[ status: exit code of the last command ]########################### # Enable OK_PIPE, ERROR_PIPE and ERROR_SIGNAL status states to allow us to enable, disable and # style them independently from the regular OK and ERROR state. typeset -g POWERLEVEL9K_STATUS_EXTENDED_STATES=true # Status on success. No content, just an icon. No need to show it if prompt_char is enabled as # it will signify success by turning green. typeset -g POWERLEVEL9K_STATUS_OK=false typeset -g POWERLEVEL9K_STATUS_OK_FOREGROUND=70 typeset -g POWERLEVEL9K_STATUS_OK_VISUAL_IDENTIFIER_EXPANSION='✔' # Status when some part of a pipe command fails but the overall exit status is zero. It may look # like this: 1|0. typeset -g POWERLEVEL9K_STATUS_OK_PIPE=true typeset -g POWERLEVEL9K_STATUS_OK_PIPE_FOREGROUND=70 typeset -g POWERLEVEL9K_STATUS_OK_PIPE_VISUAL_IDENTIFIER_EXPANSION='✔' # Status when it's just an error code (e.g., '1'). No need to show it if prompt_char is enabled as # it will signify error by turning red. typeset -g POWERLEVEL9K_STATUS_ERROR=false typeset -g POWERLEVEL9K_STATUS_ERROR_FOREGROUND=160 typeset -g POWERLEVEL9K_STATUS_ERROR_VISUAL_IDENTIFIER_EXPANSION='✘' # Status when the last command was terminated by a signal. typeset -g POWERLEVEL9K_STATUS_ERROR_SIGNAL=true typeset -g POWERLEVEL9K_STATUS_ERROR_SIGNAL_FOREGROUND=160 # Use terse signal names: "INT" instead of "SIGINT(2)". typeset -g POWERLEVEL9K_STATUS_VERBOSE_SIGNAME=false typeset -g POWERLEVEL9K_STATUS_ERROR_SIGNAL_VISUAL_IDENTIFIER_EXPANSION='✘' # Status when some part of a pipe command fails and the overall exit status is also non-zero. # It may look like this: 1|0. typeset -g POWERLEVEL9K_STATUS_ERROR_PIPE=true typeset -g POWERLEVEL9K_STATUS_ERROR_PIPE_FOREGROUND=160 typeset -g POWERLEVEL9K_STATUS_ERROR_PIPE_VISUAL_IDENTIFIER_EXPANSION='✘' ###################[ command_execution_time: duration of the last command ]################### # Show duration of the last command if it takes at least this many seconds. typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_THRESHOLD=3 # Show this many fractional digits. Zero means round to seconds. typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_PRECISION=0 # Execution time color. 
typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_FOREGROUND=101 # Duration format: 1d 2h 3m 4s. typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_FORMAT='d h m s' # Custom icon. # typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_VISUAL_IDENTIFIER_EXPANSION='⭐' # Custom prefix. # typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_PREFIX='%ftook ' #######################[ background_jobs: presence of background jobs ]####################### # Don't show the number of background jobs. typeset -g POWERLEVEL9K_BACKGROUND_JOBS_VERBOSE=false # Background jobs color. typeset -g POWERLEVEL9K_BACKGROUND_JOBS_FOREGROUND=70 # Custom icon. # typeset -g POWERLEVEL9K_BACKGROUND_JOBS_VISUAL_IDENTIFIER_EXPANSION='⭐' #######################[ direnv: direnv status (https://direnv.net/) ]######################## # Direnv color. typeset -g POWERLEVEL9K_DIRENV_FOREGROUND=178 # Custom icon. # typeset -g POWERLEVEL9K_DIRENV_VISUAL_IDENTIFIER_EXPANSION='⭐' ###############[ asdf: asdf version manager (https://github.com/asdf-vm/asdf) ]############### # Default asdf color. Only used to display tools for which there is no color override (see below). # Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_FOREGROUND. typeset -g POWERLEVEL9K_ASDF_FOREGROUND=66 # There are four parameters that can be used to hide asdf tools. Each parameter describes # conditions under which a tool gets hidden. Parameters can hide tools but not unhide them. If at # least one parameter decides to hide a tool, that tool gets hidden. If no parameter decides to # hide a tool, it gets shown. # # Special note on the difference between POWERLEVEL9K_ASDF_SOURCES and # POWERLEVEL9K_ASDF_PROMPT_ALWAYS_SHOW. Consider the effect of the following commands: # # asdf local python 3.8.1 # asdf global python 3.8.1 # # After running both commands the current python version is 3.8.1 and its source is "local" as # it takes precedence over "global". If POWERLEVEL9K_ASDF_PROMPT_ALWAYS_SHOW is set to false, # it'll hide python version in this case because 3.8.1 is the same as the global version. # POWERLEVEL9K_ASDF_SOURCES will hide python version only if the value of this parameter doesn't # contain "local". # Hide tool versions that don't come from one of these sources. # # Available sources: # # - shell `asdf current` says "set by ASDF_${TOOL}_VERSION environment variable" # - local `asdf current` says "set by /some/not/home/directory/file" # - global `asdf current` says "set by /home/username/file" # # Note: If this parameter is set to (shell local global), it won't hide tools. # Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_SOURCES. typeset -g POWERLEVEL9K_ASDF_SOURCES=(shell local global) # If set to false, hide tool versions that are the same as global. # # Note: The name of this parameter doesn't reflect its meaning at all. # Note: If this parameter is set to true, it won't hide tools. # Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_PROMPT_ALWAYS_SHOW. typeset -g POWERLEVEL9K_ASDF_PROMPT_ALWAYS_SHOW=false # If set to false, hide tool versions that are equal to "system". # # Note: If this parameter is set to true, it won't hide tools. # Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_SHOW_SYSTEM. typeset -g POWERLEVEL9K_ASDF_SHOW_SYSTEM=true # If set to non-empty value, hide tools unless there is a file matching the specified file pattern # in the current directory, or its parent directory, or its grandparent directory, and so on. 
# # Note: If this parameter is set to empty value, it won't hide tools. # Note: SHOW_ON_UPGLOB isn't specific to asdf. It works with all prompt segments. # Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_SHOW_ON_UPGLOB. # # Example: Hide nodejs version when there is no package.json and no *.js files in the current # directory, in `..`, in `../..` and so on. # # typeset -g POWERLEVEL9K_ASDF_NODEJS_SHOW_ON_UPGLOB='*.js|package.json' typeset -g POWERLEVEL9K_ASDF_SHOW_ON_UPGLOB= # Ruby version from asdf. typeset -g POWERLEVEL9K_ASDF_RUBY_FOREGROUND=168 # typeset -g POWERLEVEL9K_ASDF_RUBY_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_RUBY_SHOW_ON_UPGLOB='*.foo|*.bar' # Python version from asdf. typeset -g POWERLEVEL9K_ASDF_PYTHON_FOREGROUND=37 # typeset -g POWERLEVEL9K_ASDF_PYTHON_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_PYTHON_SHOW_ON_UPGLOB='*.foo|*.bar' # Go version from asdf. typeset -g POWERLEVEL9K_ASDF_GOLANG_FOREGROUND=37 # typeset -g POWERLEVEL9K_ASDF_GOLANG_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_GOLANG_SHOW_ON_UPGLOB='*.foo|*.bar' # Node.js version from asdf. typeset -g POWERLEVEL9K_ASDF_NODEJS_FOREGROUND=70 # typeset -g POWERLEVEL9K_ASDF_NODEJS_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_NODEJS_SHOW_ON_UPGLOB='*.foo|*.bar' # Rust version from asdf. typeset -g POWERLEVEL9K_ASDF_RUST_FOREGROUND=37 # typeset -g POWERLEVEL9K_ASDF_RUST_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_RUST_SHOW_ON_UPGLOB='*.foo|*.bar' # .NET Core version from asdf. typeset -g POWERLEVEL9K_ASDF_DOTNET_CORE_FOREGROUND=134 # typeset -g POWERLEVEL9K_ASDF_DOTNET_CORE_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_DOTNET_SHOW_ON_UPGLOB='*.foo|*.bar' # Flutter version from asdf. typeset -g POWERLEVEL9K_ASDF_FLUTTER_FOREGROUND=38 # typeset -g POWERLEVEL9K_ASDF_FLUTTER_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_FLUTTER_SHOW_ON_UPGLOB='*.foo|*.bar' # Lua version from asdf. typeset -g POWERLEVEL9K_ASDF_LUA_FOREGROUND=32 # typeset -g POWERLEVEL9K_ASDF_LUA_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_LUA_SHOW_ON_UPGLOB='*.foo|*.bar' # Java version from asdf. typeset -g POWERLEVEL9K_ASDF_JAVA_FOREGROUND=32 # typeset -g POWERLEVEL9K_ASDF_JAVA_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_JAVA_SHOW_ON_UPGLOB='*.foo|*.bar' # Perl version from asdf. typeset -g POWERLEVEL9K_ASDF_PERL_FOREGROUND=67 # typeset -g POWERLEVEL9K_ASDF_PERL_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_PERL_SHOW_ON_UPGLOB='*.foo|*.bar' # Erlang version from asdf. typeset -g POWERLEVEL9K_ASDF_ERLANG_FOREGROUND=125 # typeset -g POWERLEVEL9K_ASDF_ERLANG_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_ERLANG_SHOW_ON_UPGLOB='*.foo|*.bar' # Elixir version from asdf. typeset -g POWERLEVEL9K_ASDF_ELIXIR_FOREGROUND=129 # typeset -g POWERLEVEL9K_ASDF_ELIXIR_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_ELIXIR_SHOW_ON_UPGLOB='*.foo|*.bar' # Postgres version from asdf. typeset -g POWERLEVEL9K_ASDF_POSTGRES_FOREGROUND=31 # typeset -g POWERLEVEL9K_ASDF_POSTGRES_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_POSTGRES_SHOW_ON_UPGLOB='*.foo|*.bar' # PHP version from asdf. typeset -g POWERLEVEL9K_ASDF_PHP_FOREGROUND=99 # typeset -g POWERLEVEL9K_ASDF_PHP_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_PHP_SHOW_ON_UPGLOB='*.foo|*.bar' # Haskell version from asdf. 
typeset -g POWERLEVEL9K_ASDF_HASKELL_FOREGROUND=172 # typeset -g POWERLEVEL9K_ASDF_HASKELL_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_HASKELL_SHOW_ON_UPGLOB='*.foo|*.bar' # Julia version from asdf. typeset -g POWERLEVEL9K_ASDF_JULIA_FOREGROUND=70 # typeset -g POWERLEVEL9K_ASDF_JULIA_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_ASDF_JULIA_SHOW_ON_UPGLOB='*.foo|*.bar' ##########[ nordvpn: nordvpn connection status, linux only (https://nordvpn.com/) ]########### # NordVPN connection indicator color. typeset -g POWERLEVEL9K_NORDVPN_FOREGROUND=39 # Hide NordVPN connection indicator when not connected. typeset -g POWERLEVEL9K_NORDVPN_{DISCONNECTED,CONNECTING,DISCONNECTING}_CONTENT_EXPANSION= typeset -g POWERLEVEL9K_NORDVPN_{DISCONNECTED,CONNECTING,DISCONNECTING}_VISUAL_IDENTIFIER_EXPANSION= # Custom icon. # typeset -g POWERLEVEL9K_NORDVPN_VISUAL_IDENTIFIER_EXPANSION='⭐' #################[ ranger: ranger shell (https://github.com/ranger/ranger) ]################## # Ranger shell color. typeset -g POWERLEVEL9K_RANGER_FOREGROUND=178 # Custom icon. # typeset -g POWERLEVEL9K_RANGER_VISUAL_IDENTIFIER_EXPANSION='⭐' ######################[ nnn: nnn shell (https://github.com/jarun/nnn) ]####################### # Nnn shell color. typeset -g POWERLEVEL9K_NNN_FOREGROUND=72 # Custom icon. # typeset -g POWERLEVEL9K_NNN_VISUAL_IDENTIFIER_EXPANSION='⭐' ##################[ xplr: xplr shell (https://github.com/sayanarijit/xplr) ]################## # xplr shell color. typeset -g POWERLEVEL9K_XPLR_FOREGROUND=72 # Custom icon. # typeset -g POWERLEVEL9K_XPLR_VISUAL_IDENTIFIER_EXPANSION='⭐' ###########################[ vim_shell: vim shell indicator (:sh) ]########################### # Vim shell indicator color. typeset -g POWERLEVEL9K_VIM_SHELL_FOREGROUND=34 # Custom icon. # typeset -g POWERLEVEL9K_VIM_SHELL_VISUAL_IDENTIFIER_EXPANSION='⭐' ######[ midnight_commander: midnight commander shell (https://midnight-commander.org/) ]###### # Midnight Commander shell color. typeset -g POWERLEVEL9K_MIDNIGHT_COMMANDER_FOREGROUND=178 # Custom icon. # typeset -g POWERLEVEL9K_MIDNIGHT_COMMANDER_VISUAL_IDENTIFIER_EXPANSION='⭐' #[ nix_shell: nix shell (https://nixos.org/nixos/nix-pills/developing-with-nix-shell.html) ]## # Nix shell color. typeset -g POWERLEVEL9K_NIX_SHELL_FOREGROUND=74 # Tip: If you want to see just the icon without "pure" and "impure", uncomment the next line. # typeset -g POWERLEVEL9K_NIX_SHELL_CONTENT_EXPANSION= # Custom icon. # typeset -g POWERLEVEL9K_NIX_SHELL_VISUAL_IDENTIFIER_EXPANSION='⭐' ##################################[ disk_usage: disk usage ]################################## # Colors for different levels of disk usage. typeset -g POWERLEVEL9K_DISK_USAGE_NORMAL_FOREGROUND=35 typeset -g POWERLEVEL9K_DISK_USAGE_WARNING_FOREGROUND=220 typeset -g POWERLEVEL9K_DISK_USAGE_CRITICAL_FOREGROUND=160 # Thresholds for different levels of disk usage (percentage points). typeset -g POWERLEVEL9K_DISK_USAGE_WARNING_LEVEL=90 typeset -g POWERLEVEL9K_DISK_USAGE_CRITICAL_LEVEL=95 # If set to true, hide disk usage when below $POWERLEVEL9K_DISK_USAGE_WARNING_LEVEL percent. typeset -g POWERLEVEL9K_DISK_USAGE_ONLY_WARNING=false # Custom icon. # typeset -g POWERLEVEL9K_DISK_USAGE_VISUAL_IDENTIFIER_EXPANSION='⭐' ######################################[ ram: free RAM ]####################################### # RAM color. typeset -g POWERLEVEL9K_RAM_FOREGROUND=66 # Custom icon. 
# typeset -g POWERLEVEL9K_RAM_VISUAL_IDENTIFIER_EXPANSION='⭐' #####################################[ swap: used swap ]###################################### # Swap color. typeset -g POWERLEVEL9K_SWAP_FOREGROUND=96 # Custom icon. # typeset -g POWERLEVEL9K_SWAP_VISUAL_IDENTIFIER_EXPANSION='⭐' ######################################[ load: CPU load ]###################################### # Show average CPU load over this many last minutes. Valid values are 1, 5 and 15. typeset -g POWERLEVEL9K_LOAD_WHICH=5 # Load color when load is under 50%. typeset -g POWERLEVEL9K_LOAD_NORMAL_FOREGROUND=66 # Load color when load is between 50% and 70%. typeset -g POWERLEVEL9K_LOAD_WARNING_FOREGROUND=178 # Load color when load is over 70%. typeset -g POWERLEVEL9K_LOAD_CRITICAL_FOREGROUND=166 # Custom icon. # typeset -g POWERLEVEL9K_LOAD_VISUAL_IDENTIFIER_EXPANSION='⭐' ################[ todo: todo items (https://github.com/todotxt/todo.txt-cli) ]################ # Todo color. typeset -g POWERLEVEL9K_TODO_FOREGROUND=110 # Hide todo when the total number of tasks is zero. typeset -g POWERLEVEL9K_TODO_HIDE_ZERO_TOTAL=true # Hide todo when the number of tasks after filtering is zero. typeset -g POWERLEVEL9K_TODO_HIDE_ZERO_FILTERED=false # Todo format. The following parameters are available within the expansion. # # - P9K_TODO_TOTAL_TASK_COUNT The total number of tasks. # - P9K_TODO_FILTERED_TASK_COUNT The number of tasks after filtering. # # These variables correspond to the last line of the output of `todo.sh -p ls`: # # TODO: 24 of 42 tasks shown # # Here 24 is P9K_TODO_FILTERED_TASK_COUNT and 42 is P9K_TODO_TOTAL_TASK_COUNT. # # typeset -g POWERLEVEL9K_TODO_CONTENT_EXPANSION='$P9K_TODO_FILTERED_TASK_COUNT' # Custom icon. # typeset -g POWERLEVEL9K_TODO_VISUAL_IDENTIFIER_EXPANSION='⭐' ###########[ timewarrior: timewarrior tracking status (https://timewarrior.net/) ]############ # Timewarrior color. typeset -g POWERLEVEL9K_TIMEWARRIOR_FOREGROUND=110 # If the tracked task is longer than 24 characters, truncate and append "…". # Tip: To always display tasks without truncation, delete the following parameter. # Tip: To hide task names and display just the icon when time tracking is enabled, set the # value of the following parameter to "". typeset -g POWERLEVEL9K_TIMEWARRIOR_CONTENT_EXPANSION='${P9K_CONTENT:0:24}${${P9K_CONTENT:24}:+…}' # Custom icon. # typeset -g POWERLEVEL9K_TIMEWARRIOR_VISUAL_IDENTIFIER_EXPANSION='⭐' ##############[ taskwarrior: taskwarrior task count (https://taskwarrior.org/) ]############## # Taskwarrior color. typeset -g POWERLEVEL9K_TASKWARRIOR_FOREGROUND=74 # Taskwarrior segment format. The following parameters are available within the expansion. # # - P9K_TASKWARRIOR_PENDING_COUNT The number of pending tasks: `task +PENDING count`. # - P9K_TASKWARRIOR_OVERDUE_COUNT The number of overdue tasks: `task +OVERDUE count`. # # Zero values are represented as empty parameters. # # The default format: # # '${P9K_TASKWARRIOR_OVERDUE_COUNT:+"!$P9K_TASKWARRIOR_OVERDUE_COUNT/"}$P9K_TASKWARRIOR_PENDING_COUNT' # # typeset -g POWERLEVEL9K_TASKWARRIOR_CONTENT_EXPANSION='$P9K_TASKWARRIOR_PENDING_COUNT' # Custom icon. # typeset -g POWERLEVEL9K_TASKWARRIOR_VISUAL_IDENTIFIER_EXPANSION='⭐' ##################################[ context: user@hostname ]################################## # Context color when running with privileges. typeset -g POWERLEVEL9K_CONTEXT_ROOT_FOREGROUND=178 # Context color in SSH without privileges. 
typeset -g POWERLEVEL9K_CONTEXT_{REMOTE,REMOTE_SUDO}_FOREGROUND=180 # Default context color (no privileges, no SSH). typeset -g POWERLEVEL9K_CONTEXT_FOREGROUND=180 # Context format when running with privileges: bold user@hostname. typeset -g POWERLEVEL9K_CONTEXT_ROOT_TEMPLATE='%B%n@%m' # Context format when in SSH without privileges: user@hostname. typeset -g POWERLEVEL9K_CONTEXT_{REMOTE,REMOTE_SUDO}_TEMPLATE='%n@%m' # Default context format (no privileges, no SSH): user@hostname. typeset -g POWERLEVEL9K_CONTEXT_TEMPLATE='%n@%m' # Don't show context unless running with privileges or in SSH. # Tip: Remove the next line to always show context. typeset -g POWERLEVEL9K_CONTEXT_{DEFAULT,SUDO}_{CONTENT,VISUAL_IDENTIFIER}_EXPANSION= # Custom icon. # typeset -g POWERLEVEL9K_CONTEXT_VISUAL_IDENTIFIER_EXPANSION='⭐' # Custom prefix. # typeset -g POWERLEVEL9K_CONTEXT_PREFIX='%fwith ' ###[ virtualenv: python virtual environment (https://docs.python.org/3/library/venv.html) ]### # Python virtual environment color. typeset -g POWERLEVEL9K_VIRTUALENV_FOREGROUND=37 # Don't show Python version next to the virtual environment name. typeset -g POWERLEVEL9K_VIRTUALENV_SHOW_PYTHON_VERSION=false # If set to "false", won't show virtualenv if pyenv is already shown. # If set to "if-different", won't show virtualenv if it's the same as pyenv. typeset -g POWERLEVEL9K_VIRTUALENV_SHOW_WITH_PYENV=false # Separate environment name from Python version only with a space. typeset -g POWERLEVEL9K_VIRTUALENV_{LEFT,RIGHT}_DELIMITER= # Custom icon. # typeset -g POWERLEVEL9K_VIRTUALENV_VISUAL_IDENTIFIER_EXPANSION='⭐' #####################[ anaconda: conda environment (https://conda.io/) ]###################### # Anaconda environment color. typeset -g POWERLEVEL9K_ANACONDA_FOREGROUND=37 # Anaconda segment format. The following parameters are available within the expansion. # # - CONDA_PREFIX Absolute path to the active Anaconda/Miniconda environment. # - CONDA_DEFAULT_ENV Name of the active Anaconda/Miniconda environment. # - CONDA_PROMPT_MODIFIER Configurable prompt modifier (see below). # - P9K_ANACONDA_PYTHON_VERSION Current python version (python --version). # # CONDA_PROMPT_MODIFIER can be configured with the following command: # # conda config --set env_prompt '({default_env}) ' # # The last argument is a Python format string that can use the following variables: # # - prefix The same as CONDA_PREFIX. # - default_env The same as CONDA_DEFAULT_ENV. # - name The last segment of CONDA_PREFIX. # - stacked_env Comma-separated list of names in the environment stack. The first element is # always the same as default_env. # # Note: '({default_env}) ' is the default value of env_prompt. # # The default value of POWERLEVEL9K_ANACONDA_CONTENT_EXPANSION expands to $CONDA_PROMPT_MODIFIER # without the surrounding parentheses, or to the last path component of CONDA_PREFIX if the former # is empty. typeset -g POWERLEVEL9K_ANACONDA_CONTENT_EXPANSION='${${${${CONDA_PROMPT_MODIFIER#\(}% }%\)}:-${CONDA_PREFIX:t}}' # Custom icon. # typeset -g POWERLEVEL9K_ANACONDA_VISUAL_IDENTIFIER_EXPANSION='⭐' ################[ pyenv: python environment (https://github.com/pyenv/pyenv) ]################ # Pyenv color. typeset -g POWERLEVEL9K_PYENV_FOREGROUND=37 # Hide python version if it doesn't come from one of these sources. typeset -g POWERLEVEL9K_PYENV_SOURCES=(shell local global) # If set to false, hide python version if it's the same as global: # $(pyenv version-name) == $(pyenv global). 
typeset -g POWERLEVEL9K_PYENV_PROMPT_ALWAYS_SHOW=false # If set to false, hide python version if it's equal to "system". typeset -g POWERLEVEL9K_PYENV_SHOW_SYSTEM=true # Pyenv segment format. The following parameters are available within the expansion. # # - P9K_CONTENT Current pyenv environment (pyenv version-name). # - P9K_PYENV_PYTHON_VERSION Current python version (python --version). # # The default format has the following logic: # # 1. Display just "$P9K_CONTENT" if it's equal to "$P9K_PYENV_PYTHON_VERSION" or # starts with "$P9K_PYENV_PYTHON_VERSION/". # 2. Otherwise display "$P9K_CONTENT $P9K_PYENV_PYTHON_VERSION". typeset -g POWERLEVEL9K_PYENV_CONTENT_EXPANSION='${P9K_CONTENT}${${P9K_CONTENT:#$P9K_PYENV_PYTHON_VERSION(|/*)}:+ $P9K_PYENV_PYTHON_VERSION}' # Custom icon. # typeset -g POWERLEVEL9K_PYENV_VISUAL_IDENTIFIER_EXPANSION='⭐' ################[ goenv: go environment (https://github.com/syndbg/goenv) ]################ # Goenv color. typeset -g POWERLEVEL9K_GOENV_FOREGROUND=37 # Hide go version if it doesn't come from one of these sources. typeset -g POWERLEVEL9K_GOENV_SOURCES=(shell local global) # If set to false, hide go version if it's the same as global: # $(goenv version-name) == $(goenv global). typeset -g POWERLEVEL9K_GOENV_PROMPT_ALWAYS_SHOW=false # If set to false, hide go version if it's equal to "system". typeset -g POWERLEVEL9K_GOENV_SHOW_SYSTEM=true # Custom icon. # typeset -g POWERLEVEL9K_GOENV_VISUAL_IDENTIFIER_EXPANSION='⭐' ##########[ nodenv: node.js version from nodenv (https://github.com/nodenv/nodenv) ]########## # Nodenv color. typeset -g POWERLEVEL9K_NODENV_FOREGROUND=70 # Hide node version if it doesn't come from one of these sources. typeset -g POWERLEVEL9K_NODENV_SOURCES=(shell local global) # If set to false, hide node version if it's the same as global: # $(nodenv version-name) == $(nodenv global). typeset -g POWERLEVEL9K_NODENV_PROMPT_ALWAYS_SHOW=false # If set to false, hide node version if it's equal to "system". typeset -g POWERLEVEL9K_NODENV_SHOW_SYSTEM=true # Custom icon. # typeset -g POWERLEVEL9K_NODENV_VISUAL_IDENTIFIER_EXPANSION='⭐' ##############[ nvm: node.js version from nvm (https://github.com/nvm-sh/nvm) ]############### # Nvm color. typeset -g POWERLEVEL9K_NVM_FOREGROUND=70 # Custom icon. # typeset -g POWERLEVEL9K_NVM_VISUAL_IDENTIFIER_EXPANSION='⭐' ############[ nodeenv: node.js environment (https://github.com/ekalinin/nodeenv) ]############ # Nodeenv color. typeset -g POWERLEVEL9K_NODEENV_FOREGROUND=70 # Don't show Node version next to the environment name. typeset -g POWERLEVEL9K_NODEENV_SHOW_NODE_VERSION=false # Separate environment name from Node version only with a space. typeset -g POWERLEVEL9K_NODEENV_{LEFT,RIGHT}_DELIMITER= # Custom icon. # typeset -g POWERLEVEL9K_NODEENV_VISUAL_IDENTIFIER_EXPANSION='⭐' ##############################[ node_version: node.js version ]############################### # Node version color. typeset -g POWERLEVEL9K_NODE_VERSION_FOREGROUND=70 # Show node version only when in a directory tree containing package.json. typeset -g POWERLEVEL9K_NODE_VERSION_PROJECT_ONLY=true # Custom icon. # typeset -g POWERLEVEL9K_NODE_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐' #######################[ go_version: go version (https://golang.org) ]######################## # Go version color. typeset -g POWERLEVEL9K_GO_VERSION_FOREGROUND=37 # Show go version only when in a go project subdirectory. typeset -g POWERLEVEL9K_GO_VERSION_PROJECT_ONLY=true # Custom icon. 
# typeset -g POWERLEVEL9K_GO_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐' #################[ rust_version: rustc version (https://www.rust-lang.org) ]################## # Rust version color. typeset -g POWERLEVEL9K_RUST_VERSION_FOREGROUND=37 # Show rust version only when in a rust project subdirectory. typeset -g POWERLEVEL9K_RUST_VERSION_PROJECT_ONLY=true # Custom icon. # typeset -g POWERLEVEL9K_RUST_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐' ###############[ dotnet_version: .NET version (https://dotnet.microsoft.com) ]################ # .NET version color. typeset -g POWERLEVEL9K_DOTNET_VERSION_FOREGROUND=134 # Show .NET version only when in a .NET project subdirectory. typeset -g POWERLEVEL9K_DOTNET_VERSION_PROJECT_ONLY=true # Custom icon. # typeset -g POWERLEVEL9K_DOTNET_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐' #####################[ php_version: php version (https://www.php.net/) ]###################### # PHP version color. typeset -g POWERLEVEL9K_PHP_VERSION_FOREGROUND=99 # Show PHP version only when in a PHP project subdirectory. typeset -g POWERLEVEL9K_PHP_VERSION_PROJECT_ONLY=true # Custom icon. # typeset -g POWERLEVEL9K_PHP_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐' ##########[ laravel_version: laravel php framework version (https://laravel.com/) ]########### # Laravel version color. typeset -g POWERLEVEL9K_LARAVEL_VERSION_FOREGROUND=161 # Custom icon. # typeset -g POWERLEVEL9K_LARAVEL_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐' ####################[ java_version: java version (https://www.java.com/) ]#################### # Java version color. typeset -g POWERLEVEL9K_JAVA_VERSION_FOREGROUND=32 # Show java version only when in a java project subdirectory. typeset -g POWERLEVEL9K_JAVA_VERSION_PROJECT_ONLY=true # Show brief version. typeset -g POWERLEVEL9K_JAVA_VERSION_FULL=false # Custom icon. # typeset -g POWERLEVEL9K_JAVA_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐' ###[ package: name@version from package.json (https://docs.npmjs.com/files/package.json) ]#### # Package color. typeset -g POWERLEVEL9K_PACKAGE_FOREGROUND=117 # Package format. The following parameters are available within the expansion. # # - P9K_PACKAGE_NAME The value of `name` field in package.json. # - P9K_PACKAGE_VERSION The value of `version` field in package.json. # # typeset -g POWERLEVEL9K_PACKAGE_CONTENT_EXPANSION='${P9K_PACKAGE_NAME//\%/%%}@${P9K_PACKAGE_VERSION//\%/%%}' # Custom icon. # typeset -g POWERLEVEL9K_PACKAGE_VISUAL_IDENTIFIER_EXPANSION='⭐' #############[ rbenv: ruby version from rbenv (https://github.com/rbenv/rbenv) ]############## # Rbenv color. typeset -g POWERLEVEL9K_RBENV_FOREGROUND=168 # Hide ruby version if it doesn't come from one of these sources. typeset -g POWERLEVEL9K_RBENV_SOURCES=(shell local global) # If set to false, hide ruby version if it's the same as global: # $(rbenv version-name) == $(rbenv global). typeset -g POWERLEVEL9K_RBENV_PROMPT_ALWAYS_SHOW=false # If set to false, hide ruby version if it's equal to "system". typeset -g POWERLEVEL9K_RBENV_SHOW_SYSTEM=true # Custom icon. # typeset -g POWERLEVEL9K_RBENV_VISUAL_IDENTIFIER_EXPANSION='⭐' #######################[ rvm: ruby version from rvm (https://rvm.io) ]######################## # Rvm color. typeset -g POWERLEVEL9K_RVM_FOREGROUND=168 # Don't show @gemset at the end. typeset -g POWERLEVEL9K_RVM_SHOW_GEMSET=false # Don't show ruby- at the front. typeset -g POWERLEVEL9K_RVM_SHOW_PREFIX=false # Custom icon. 
# typeset -g POWERLEVEL9K_RVM_VISUAL_IDENTIFIER_EXPANSION='⭐' ###########[ fvm: flutter version management (https://github.com/leoafarias/fvm) ]############ # Fvm color. typeset -g POWERLEVEL9K_FVM_FOREGROUND=38 # Custom icon. # typeset -g POWERLEVEL9K_FVM_VISUAL_IDENTIFIER_EXPANSION='⭐' ##########[ luaenv: lua version from luaenv (https://github.com/cehoffman/luaenv) ]########### # Lua color. typeset -g POWERLEVEL9K_LUAENV_FOREGROUND=32 # Hide lua version if it doesn't come from one of these sources. typeset -g POWERLEVEL9K_LUAENV_SOURCES=(shell local global) # If set to false, hide lua version if it's the same as global: # $(luaenv version-name) == $(luaenv global). typeset -g POWERLEVEL9K_LUAENV_PROMPT_ALWAYS_SHOW=false # If set to false, hide lua version if it's equal to "system". typeset -g POWERLEVEL9K_LUAENV_SHOW_SYSTEM=true # Custom icon. # typeset -g POWERLEVEL9K_LUAENV_VISUAL_IDENTIFIER_EXPANSION='⭐' ###############[ jenv: java version from jenv (https://github.com/jenv/jenv) ]################ # Java color. typeset -g POWERLEVEL9K_JENV_FOREGROUND=32 # Hide java version if it doesn't come from one of these sources. typeset -g POWERLEVEL9K_JENV_SOURCES=(shell local global) # If set to false, hide java version if it's the same as global: # $(jenv version-name) == $(jenv global). typeset -g POWERLEVEL9K_JENV_PROMPT_ALWAYS_SHOW=false # If set to false, hide java version if it's equal to "system". typeset -g POWERLEVEL9K_JENV_SHOW_SYSTEM=true # Custom icon. # typeset -g POWERLEVEL9K_JENV_VISUAL_IDENTIFIER_EXPANSION='⭐' ###########[ plenv: perl version from plenv (https://github.com/tokuhirom/plenv) ]############ # Perl color. typeset -g POWERLEVEL9K_PLENV_FOREGROUND=67 # Hide perl version if it doesn't come from one of these sources. typeset -g POWERLEVEL9K_PLENV_SOURCES=(shell local global) # If set to false, hide perl version if it's the same as global: # $(plenv version-name) == $(plenv global). typeset -g POWERLEVEL9K_PLENV_PROMPT_ALWAYS_SHOW=false # If set to false, hide perl version if it's equal to "system". typeset -g POWERLEVEL9K_PLENV_SHOW_SYSTEM=true # Custom icon. # typeset -g POWERLEVEL9K_PLENV_VISUAL_IDENTIFIER_EXPANSION='⭐' ###########[ perlbrew: perl version from perlbrew (https://github.com/gugod/App-perlbrew) ]############ # Perlbrew color. typeset -g POWERLEVEL9K_PERLBREW_FOREGROUND=67 # Show perlbrew version only when in a perl project subdirectory. typeset -g POWERLEVEL9K_PERLBREW_PROJECT_ONLY=true # Don't show "perl-" at the front. typeset -g POWERLEVEL9K_PERLBREW_SHOW_PREFIX=false # Custom icon. # typeset -g POWERLEVEL9K_PERLBREW_VISUAL_IDENTIFIER_EXPANSION='⭐' ############[ phpenv: php version from phpenv (https://github.com/phpenv/phpenv) ]############ # PHP color. typeset -g POWERLEVEL9K_PHPENV_FOREGROUND=99 # Hide php version if it doesn't come from one of these sources. typeset -g POWERLEVEL9K_PHPENV_SOURCES=(shell local global) # If set to false, hide php version if it's the same as global: # $(phpenv version-name) == $(phpenv global). typeset -g POWERLEVEL9K_PHPENV_PROMPT_ALWAYS_SHOW=false # If set to false, hide php version if it's equal to "system". typeset -g POWERLEVEL9K_PHPENV_SHOW_SYSTEM=true # Custom icon. # typeset -g POWERLEVEL9K_PHPENV_VISUAL_IDENTIFIER_EXPANSION='⭐' #######[ scalaenv: scala version from scalaenv (https://github.com/scalaenv/scalaenv) ]####### # Scala color. typeset -g POWERLEVEL9K_SCALAENV_FOREGROUND=160 # Hide scala version if it doesn't come from one of these sources. 
typeset -g POWERLEVEL9K_SCALAENV_SOURCES=(shell local global) # If set to false, hide scala version if it's the same as global: # $(scalaenv version-name) == $(scalaenv global). typeset -g POWERLEVEL9K_SCALAENV_PROMPT_ALWAYS_SHOW=false # If set to false, hide scala version if it's equal to "system". typeset -g POWERLEVEL9K_SCALAENV_SHOW_SYSTEM=true # Custom icon. # typeset -g POWERLEVEL9K_SCALAENV_VISUAL_IDENTIFIER_EXPANSION='⭐' ##########[ haskell_stack: haskell version from stack (https://haskellstack.org/) ]########### # Haskell color. typeset -g POWERLEVEL9K_HASKELL_STACK_FOREGROUND=172 # Hide haskell version if it doesn't come from one of these sources. # # shell: version is set by STACK_YAML # local: version is set by stack.yaml up the directory tree # global: version is set by the implicit global project (~/.stack/global-project/stack.yaml) typeset -g POWERLEVEL9K_HASKELL_STACK_SOURCES=(shell local) # If set to false, hide haskell version if it's the same as in the implicit global project. typeset -g POWERLEVEL9K_HASKELL_STACK_ALWAYS_SHOW=true # Custom icon. # typeset -g POWERLEVEL9K_HASKELL_STACK_VISUAL_IDENTIFIER_EXPANSION='⭐' #############[ kubecontext: current kubernetes context (https://kubernetes.io/) ]############# # Show kubecontext only when the command you are typing invokes one of these tools. # Tip: Remove the next line to always show kubecontext. typeset -g POWERLEVEL9K_KUBECONTEXT_SHOW_ON_COMMAND='kubectl|helm|kubens|kubectx|oc|istioctl|kogito|k9s|helmfile|flux|fluxctl|stern|kubeseal|skaffold' # Kubernetes context classes for the purpose of using different colors, icons and expansions with # different contexts. # # POWERLEVEL9K_KUBECONTEXT_CLASSES is an array with even number of elements. The first element # in each pair defines a pattern against which the current kubernetes context gets matched. # More specifically, it's P9K_CONTENT prior to the application of context expansion (see below) # that gets matched. If you unset all POWERLEVEL9K_KUBECONTEXT_*CONTENT_EXPANSION parameters, # you'll see this value in your prompt. The second element of each pair in # POWERLEVEL9K_KUBECONTEXT_CLASSES defines the context class. Patterns are tried in order. The # first match wins. # # For example, given these settings: # # typeset -g POWERLEVEL9K_KUBECONTEXT_CLASSES=( # '*prod*' PROD # '*test*' TEST # '*' DEFAULT) # # If your current kubernetes context is "deathray-testing/default", its class is TEST # because "deathray-testing/default" doesn't match the pattern '*prod*' but does match '*test*'. # # You can define different colors, icons and content expansions for different classes: # # typeset -g POWERLEVEL9K_KUBECONTEXT_TEST_FOREGROUND=28 # typeset -g POWERLEVEL9K_KUBECONTEXT_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_KUBECONTEXT_TEST_CONTENT_EXPANSION='> ${P9K_CONTENT} <' typeset -g POWERLEVEL9K_KUBECONTEXT_CLASSES=( # '*prod*' PROD # These values are examples that are unlikely # '*test*' TEST # to match your needs. Customize them as needed. '*' DEFAULT) typeset -g POWERLEVEL9K_KUBECONTEXT_DEFAULT_FOREGROUND=134 # typeset -g POWERLEVEL9K_KUBECONTEXT_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐' # Use POWERLEVEL9K_KUBECONTEXT_CONTENT_EXPANSION to specify the content displayed by kubecontext # segment. Parameter expansions are very flexible and fast, too. See reference: # http://zsh.sourceforge.net/Doc/Release/Expansion.html#Parameter-Expansion. 
# # Within the expansion the following parameters are always available: # # - P9K_CONTENT The content that would've been displayed if there was no content # expansion defined. # - P9K_KUBECONTEXT_NAME The current context's name. Corresponds to column NAME in the # output of `kubectl config get-contexts`. # - P9K_KUBECONTEXT_CLUSTER The current context's cluster. Corresponds to column CLUSTER in the # output of `kubectl config get-contexts`. # - P9K_KUBECONTEXT_NAMESPACE The current context's namespace. Corresponds to column NAMESPACE # in the output of `kubectl config get-contexts`. If there is no # namespace, the parameter is set to "default". # - P9K_KUBECONTEXT_USER The current context's user. Corresponds to column AUTHINFO in the # output of `kubectl config get-contexts`. # # If the context points to Google Kubernetes Engine (GKE) or Elastic Kubernetes Service (EKS), # the following extra parameters are available: # # - P9K_KUBECONTEXT_CLOUD_NAME Either "gke" or "eks". # - P9K_KUBECONTEXT_CLOUD_ACCOUNT Account/project ID. # - P9K_KUBECONTEXT_CLOUD_ZONE Availability zone. # - P9K_KUBECONTEXT_CLOUD_CLUSTER Cluster. # # P9K_KUBECONTEXT_CLOUD_* parameters are derived from P9K_KUBECONTEXT_CLUSTER. For example, # if P9K_KUBECONTEXT_CLUSTER is "gke_my-account_us-east1-a_my-cluster-01": # # - P9K_KUBECONTEXT_CLOUD_NAME=gke # - P9K_KUBECONTEXT_CLOUD_ACCOUNT=my-account # - P9K_KUBECONTEXT_CLOUD_ZONE=us-east1-a # - P9K_KUBECONTEXT_CLOUD_CLUSTER=my-cluster-01 # # If P9K_KUBECONTEXT_CLUSTER is "arn:aws:eks:us-east-1:123456789012:cluster/my-cluster-01": # # - P9K_KUBECONTEXT_CLOUD_NAME=eks # - P9K_KUBECONTEXT_CLOUD_ACCOUNT=123456789012 # - P9K_KUBECONTEXT_CLOUD_ZONE=us-east-1 # - P9K_KUBECONTEXT_CLOUD_CLUSTER=my-cluster-01 typeset -g POWERLEVEL9K_KUBECONTEXT_DEFAULT_CONTENT_EXPANSION= # Show P9K_KUBECONTEXT_CLOUD_CLUSTER if it's not empty and fall back to P9K_KUBECONTEXT_NAME. POWERLEVEL9K_KUBECONTEXT_DEFAULT_CONTENT_EXPANSION+='${P9K_KUBECONTEXT_CLOUD_CLUSTER:-${P9K_KUBECONTEXT_NAME}}' # Append the current context's namespace if it's not "default". POWERLEVEL9K_KUBECONTEXT_DEFAULT_CONTENT_EXPANSION+='${${:-/$P9K_KUBECONTEXT_NAMESPACE}:#/default}' # Custom prefix. # typeset -g POWERLEVEL9K_KUBECONTEXT_PREFIX='%fat ' ################[ terraform: terraform workspace (https://www.terraform.io) ]################# # Don't show terraform workspace if it's literally "default". typeset -g POWERLEVEL9K_TERRAFORM_SHOW_DEFAULT=false # POWERLEVEL9K_TERRAFORM_CLASSES is an array with even number of elements. The first element # in each pair defines a pattern against which the current terraform workspace gets matched. # More specifically, it's P9K_CONTENT prior to the application of context expansion (see below) # that gets matched. If you unset all POWERLEVEL9K_TERRAFORM_*CONTENT_EXPANSION parameters, # you'll see this value in your prompt. The second element of each pair in # POWERLEVEL9K_TERRAFORM_CLASSES defines the workspace class. Patterns are tried in order. The # first match wins. # # For example, given these settings: # # typeset -g POWERLEVEL9K_TERRAFORM_CLASSES=( # '*prod*' PROD # '*test*' TEST # '*' OTHER) # # If your current terraform workspace is "project_test", its class is TEST because "project_test" # doesn't match the pattern '*prod*' but does match '*test*'. 
# # You can define different colors, icons and content expansions for different classes: # # typeset -g POWERLEVEL9K_TERRAFORM_TEST_FOREGROUND=28 # typeset -g POWERLEVEL9K_TERRAFORM_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_TERRAFORM_TEST_CONTENT_EXPANSION='> ${P9K_CONTENT} <' typeset -g POWERLEVEL9K_TERRAFORM_CLASSES=( # '*prod*' PROD # These values are examples that are unlikely # '*test*' TEST # to match your needs. Customize them as needed. '*' OTHER) typeset -g POWERLEVEL9K_TERRAFORM_OTHER_FOREGROUND=38 # typeset -g POWERLEVEL9K_TERRAFORM_OTHER_VISUAL_IDENTIFIER_EXPANSION='⭐' #############[ terraform_version: terraform version (https://www.terraform.io) ]############## # Terraform version color. typeset -g POWERLEVEL9K_TERRAFORM_VERSION_FOREGROUND=38 # Custom icon. # typeset -g POWERLEVEL9K_TERRAFORM_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐' #[ aws: aws profile (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) ]# # Show aws only when the command you are typing invokes one of these tools. # Tip: Remove the next line to always show aws. typeset -g POWERLEVEL9K_AWS_SHOW_ON_COMMAND='aws|awless|terraform|pulumi|terragrunt' # POWERLEVEL9K_AWS_CLASSES is an array with even number of elements. The first element # in each pair defines a pattern against which the current AWS profile gets matched. # More specifically, it's P9K_CONTENT prior to the application of context expansion (see below) # that gets matched. If you unset all POWERLEVEL9K_AWS_*CONTENT_EXPANSION parameters, # you'll see this value in your prompt. The second element of each pair in # POWERLEVEL9K_AWS_CLASSES defines the profile class. Patterns are tried in order. The # first match wins. # # For example, given these settings: # # typeset -g POWERLEVEL9K_AWS_CLASSES=( # '*prod*' PROD # '*test*' TEST # '*' DEFAULT) # # If your current AWS profile is "company_test", its class is TEST # because "company_test" doesn't match the pattern '*prod*' but does match '*test*'. # # You can define different colors, icons and content expansions for different classes: # # typeset -g POWERLEVEL9K_AWS_TEST_FOREGROUND=28 # typeset -g POWERLEVEL9K_AWS_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_AWS_TEST_CONTENT_EXPANSION='> ${P9K_CONTENT} <' typeset -g POWERLEVEL9K_AWS_CLASSES=( # '*prod*' PROD # These values are examples that are unlikely # '*test*' TEST # to match your needs. Customize them as needed. '*' DEFAULT) typeset -g POWERLEVEL9K_AWS_DEFAULT_FOREGROUND=208 # typeset -g POWERLEVEL9K_AWS_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐' # AWS segment format. The following parameters are available within the expansion. # # - P9K_AWS_PROFILE The name of the current AWS profile. # - P9K_AWS_REGION The region associated with the current AWS profile. typeset -g POWERLEVEL9K_AWS_CONTENT_EXPANSION='${P9K_AWS_PROFILE//\%/%%}${P9K_AWS_REGION:+ ${P9K_AWS_REGION//\%/%%}}' #[ aws_eb_env: aws elastic beanstalk environment (https://aws.amazon.com/elasticbeanstalk/) ]# # AWS Elastic Beanstalk environment color. typeset -g POWERLEVEL9K_AWS_EB_ENV_FOREGROUND=70 # Custom icon. # typeset -g POWERLEVEL9K_AWS_EB_ENV_VISUAL_IDENTIFIER_EXPANSION='⭐' ##########[ azure: azure account name (https://docs.microsoft.com/en-us/cli/azure) ]########## # Show azure only when the command you are typing invokes one of these tools. # Tip: Remove the next line to always show azure. typeset -g POWERLEVEL9K_AZURE_SHOW_ON_COMMAND='az|terraform|pulumi|terragrunt' # Azure account name color. 
typeset -g POWERLEVEL9K_AZURE_FOREGROUND=32 # Custom icon. # typeset -g POWERLEVEL9K_AZURE_VISUAL_IDENTIFIER_EXPANSION='⭐' ##########[ gcloud: google cloud account and project (https://cloud.google.com/) ]########### # Show gcloud only when the command you are typing invokes one of these tools. # Tip: Remove the next line to always show gcloud. typeset -g POWERLEVEL9K_GCLOUD_SHOW_ON_COMMAND='gcloud|gcs|gsutil' # Google cloud color. typeset -g POWERLEVEL9K_GCLOUD_FOREGROUND=32 # Google cloud format. Change the value of POWERLEVEL9K_GCLOUD_PARTIAL_CONTENT_EXPANSION and/or # POWERLEVEL9K_GCLOUD_COMPLETE_CONTENT_EXPANSION if the default is too verbose or not informative # enough. You can use the following parameters in the expansions. Each of them corresponds to the # output of `gcloud` tool. # # Parameter | Source # -------------------------|-------------------------------------------------------------------- # P9K_GCLOUD_CONFIGURATION | gcloud config configurations list --format='value(name)' # P9K_GCLOUD_ACCOUNT | gcloud config get-value account # P9K_GCLOUD_PROJECT_ID | gcloud config get-value project # P9K_GCLOUD_PROJECT_NAME | gcloud projects describe $P9K_GCLOUD_PROJECT_ID --format='value(name)' # # Note: ${VARIABLE//\%/%%} expands to ${VARIABLE} with all occurrences of '%' replaced with '%%'. # # Obtaining project name requires sending a request to Google servers. This can take a long time # and even fail. When project name is unknown, P9K_GCLOUD_PROJECT_NAME is not set and gcloud # prompt segment is in state PARTIAL. When project name gets known, P9K_GCLOUD_PROJECT_NAME gets # set and gcloud prompt segment transitions to state COMPLETE. # # You can customize the format, icon and colors of gcloud segment separately for states PARTIAL # and COMPLETE. You can also hide gcloud in state PARTIAL by setting # POWERLEVEL9K_GCLOUD_PARTIAL_VISUAL_IDENTIFIER_EXPANSION and # POWERLEVEL9K_GCLOUD_PARTIAL_CONTENT_EXPANSION to empty. typeset -g POWERLEVEL9K_GCLOUD_PARTIAL_CONTENT_EXPANSION='${P9K_GCLOUD_PROJECT_ID//\%/%%}' typeset -g POWERLEVEL9K_GCLOUD_COMPLETE_CONTENT_EXPANSION='${P9K_GCLOUD_PROJECT_NAME//\%/%%}' # Send a request to Google (by means of `gcloud projects describe ...`) to obtain project name # this often. Negative value disables periodic polling. In this mode project name is retrieved # only when the current configuration, account or project id changes. typeset -g POWERLEVEL9K_GCLOUD_REFRESH_PROJECT_NAME_SECONDS=60 # Custom icon. # typeset -g POWERLEVEL9K_GCLOUD_VISUAL_IDENTIFIER_EXPANSION='⭐' #[ google_app_cred: google application credentials (https://cloud.google.com/docs/authentication/production) ]# # Show google_app_cred only when the command you are typing invokes one of these tools. # Tip: Remove the next line to always show google_app_cred. typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_SHOW_ON_COMMAND='terraform|pulumi|terragrunt' # Google application credentials classes for the purpose of using different colors, icons and # expansions with different credentials. # # POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES is an array with even number of elements. The first # element in each pair defines a pattern against which the current kubernetes context gets # matched. More specifically, it's P9K_CONTENT prior to the application of context expansion # (see below) that gets matched. If you unset all POWERLEVEL9K_GOOGLE_APP_CRED_*CONTENT_EXPANSION # parameters, you'll see this value in your prompt. The second element of each pair in # POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES defines the context class. 
Patterns are tried in order. # The first match wins. # # For example, given these settings: # # typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES=( # '*:*prod*:*' PROD # '*:*test*:*' TEST # '*' DEFAULT) # # If your current Google application credentials is "service_account deathray-testing [email protected]", # its class is TEST because it doesn't match the pattern '* *prod* *' but does match '* *test* *'. # # You can define different colors, icons and content expansions for different classes: # # typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_TEST_FOREGROUND=28 # typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐' # typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_TEST_CONTENT_EXPANSION='$P9K_GOOGLE_APP_CRED_PROJECT_ID' typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES=( # '*:*prod*:*' PROD # These values are examples that are unlikely # '*:*test*:*' TEST # to match your needs. Customize them as needed. '*' DEFAULT) typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_DEFAULT_FOREGROUND=32 # typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐' # Use POWERLEVEL9K_GOOGLE_APP_CRED_CONTENT_EXPANSION to specify the content displayed by # google_app_cred segment. Parameter expansions are very flexible and fast, too. See reference: # http://zsh.sourceforge.net/Doc/Release/Expansion.html#Parameter-Expansion. # # You can use the following parameters in the expansion. Each of them corresponds to one of the # fields in the JSON file pointed to by GOOGLE_APPLICATION_CREDENTIALS. # # Parameter | JSON key file field # ---------------------------------+--------------- # P9K_GOOGLE_APP_CRED_TYPE | type # P9K_GOOGLE_APP_CRED_PROJECT_ID | project_id # P9K_GOOGLE_APP_CRED_CLIENT_EMAIL | client_email # # Note: ${VARIABLE//\%/%%} expands to ${VARIABLE} with all occurrences of '%' replaced by '%%'. typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_DEFAULT_CONTENT_EXPANSION='${P9K_GOOGLE_APP_CRED_PROJECT_ID//\%/%%}' ##############[ toolbox: toolbox name (https://github.com/containers/toolbox) ]############### # Toolbox color. typeset -g POWERLEVEL9K_TOOLBOX_FOREGROUND=178 # Don't display the name of the toolbox if it matches fedora-toolbox-*. typeset -g POWERLEVEL9K_TOOLBOX_CONTENT_EXPANSION='${P9K_TOOLBOX_NAME:#fedora-toolbox-*}' # Custom icon. # typeset -g POWERLEVEL9K_TOOLBOX_VISUAL_IDENTIFIER_EXPANSION='⭐' # Custom prefix. # typeset -g POWERLEVEL9K_TOOLBOX_PREFIX='%fin ' ###############################[ public_ip: public IP address ]############################### # Public IP color. typeset -g POWERLEVEL9K_PUBLIC_IP_FOREGROUND=94 # Custom icon. # typeset -g POWERLEVEL9K_PUBLIC_IP_VISUAL_IDENTIFIER_EXPANSION='⭐' ########################[ vpn_ip: virtual private network indicator ]######################### # VPN IP color. typeset -g POWERLEVEL9K_VPN_IP_FOREGROUND=81 # When on VPN, show just an icon without the IP address. # Tip: To display the private IP address when on VPN, remove the next line. typeset -g POWERLEVEL9K_VPN_IP_CONTENT_EXPANSION= # Regular expression for the VPN network interface. Run `ifconfig` or `ip -4 a show` while on VPN # to see the name of the interface. typeset -g POWERLEVEL9K_VPN_IP_INTERFACE='(gpd|wg|(.*tun)|tailscale)[0-9]*' # If set to true, show one segment per matching network interface. If set to false, show only # one segment corresponding to the first matching network interface. # Tip: If you set it to true, you'll probably want to unset POWERLEVEL9K_VPN_IP_CONTENT_EXPANSION. typeset -g POWERLEVEL9K_VPN_IP_SHOW_ALL=false # Custom icon. 
# typeset -g POWERLEVEL9K_VPN_IP_VISUAL_IDENTIFIER_EXPANSION='⭐' ###########[ ip: ip address and bandwidth usage for a specified network interface ]########### # IP color. typeset -g POWERLEVEL9K_IP_FOREGROUND=38 # The following parameters are accessible within the expansion: # # Parameter | Meaning # ----------------------+------------------------------------------- # P9K_IP_IP | IP address # P9K_IP_INTERFACE | network interface # P9K_IP_RX_BYTES | total number of bytes received # P9K_IP_TX_BYTES | total number of bytes sent # P9K_IP_RX_BYTES_DELTA | number of bytes received since last prompt # P9K_IP_TX_BYTES_DELTA | number of bytes sent since last prompt # P9K_IP_RX_RATE | receive rate (since last prompt) # P9K_IP_TX_RATE | send rate (since last prompt) typeset -g POWERLEVEL9K_IP_CONTENT_EXPANSION='$P9K_IP_IP${P9K_IP_RX_RATE:+ %70F⇣$P9K_IP_RX_RATE}${P9K_IP_TX_RATE:+ %215F⇡$P9K_IP_TX_RATE}' # Show information for the first network interface whose name matches this regular expression. # Run `ifconfig` or `ip -4 a show` to see the names of all network interfaces. typeset -g POWERLEVEL9K_IP_INTERFACE='[ew].*' # Custom icon. # typeset -g POWERLEVEL9K_IP_VISUAL_IDENTIFIER_EXPANSION='⭐' #########################[ proxy: system-wide http/https/ftp proxy ]########################## # Proxy color. typeset -g POWERLEVEL9K_PROXY_FOREGROUND=68 # Custom icon. # typeset -g POWERLEVEL9K_PROXY_VISUAL_IDENTIFIER_EXPANSION='⭐' ################################[ battery: internal battery ]################################# # Show battery in red when it's below this level and not connected to power supply. typeset -g POWERLEVEL9K_BATTERY_LOW_THRESHOLD=20 typeset -g POWERLEVEL9K_BATTERY_LOW_FOREGROUND=160 # Show battery in green when it's charging or fully charged. typeset -g POWERLEVEL9K_BATTERY_{CHARGING,CHARGED}_FOREGROUND=70 # Show battery in yellow when it's discharging. typeset -g POWERLEVEL9K_BATTERY_DISCONNECTED_FOREGROUND=178 # Battery pictograms going from low to high level of charge. typeset -g POWERLEVEL9K_BATTERY_STAGES='\uf58d\uf579\uf57a\uf57b\uf57c\uf57d\uf57e\uf57f\uf580\uf581\uf578' # Don't show the remaining time to charge/discharge. typeset -g POWERLEVEL9K_BATTERY_VERBOSE=false #####################################[ wifi: wifi speed ]##################################### # WiFi color. typeset -g POWERLEVEL9K_WIFI_FOREGROUND=68 # Custom icon. # typeset -g POWERLEVEL9K_WIFI_VISUAL_IDENTIFIER_EXPANSION='⭐' # Use different colors and icons depending on signal strength ($P9K_WIFI_BARS). # # # Wifi colors and icons for different signal strength levels (low to high). # typeset -g my_wifi_fg=(68 68 68 68 68) # <-- change these values # typeset -g my_wifi_icon=('WiFi' 'WiFi' 'WiFi' 'WiFi' 'WiFi') # <-- change these values # # typeset -g POWERLEVEL9K_WIFI_CONTENT_EXPANSION='%F{${my_wifi_fg[P9K_WIFI_BARS+1]}}$P9K_WIFI_LAST_TX_RATE Mbps' # typeset -g POWERLEVEL9K_WIFI_VISUAL_IDENTIFIER_EXPANSION='%F{${my_wifi_fg[P9K_WIFI_BARS+1]}}${my_wifi_icon[P9K_WIFI_BARS+1]}' # # The following parameters are accessible within the expansions: # # Parameter | Meaning # ----------------------+--------------- # P9K_WIFI_SSID | service set identifier, a.k.a. 
network name # P9K_WIFI_LINK_AUTH | authentication protocol such as "wpa2-psk" or "none"; empty if unknown # P9K_WIFI_LAST_TX_RATE | wireless transmit rate in megabits per second # P9K_WIFI_RSSI | signal strength in dBm, from -120 to 0 # P9K_WIFI_NOISE | noise in dBm, from -120 to 0 # P9K_WIFI_BARS | signal strength in bars, from 0 to 4 (derived from P9K_WIFI_RSSI and P9K_WIFI_NOISE) ####################################[ time: current time ]#################################### # Current time color. typeset -g POWERLEVEL9K_TIME_FOREGROUND=66 # Format for the current time: 09:51:02. See `man 3 strftime`. typeset -g POWERLEVEL9K_TIME_FORMAT='%D{%H:%M:%S}' # If set to true, time will update when you hit enter. This way prompts for the past # commands will contain the start times of their commands as opposed to the default # behavior where they contain the end times of their preceding commands. typeset -g POWERLEVEL9K_TIME_UPDATE_ON_COMMAND=false # Custom icon. # typeset -g POWERLEVEL9K_TIME_VISUAL_IDENTIFIER_EXPANSION='⭐' # Custom prefix. # typeset -g POWERLEVEL9K_TIME_PREFIX='%fat ' # Example of a user-defined prompt segment. Function prompt_example will be called on every # prompt if `example` prompt segment is added to POWERLEVEL9K_LEFT_PROMPT_ELEMENTS or # POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS. It displays an icon and orange text greeting the user. # # Type `p10k help segment` for documentation and a more sophisticated example. function prompt_example() { p10k segment -f 208 -i '⭐' -t 'hello, %n' } # User-defined prompt segments may optionally provide an instant_prompt_* function. Its job # is to generate the prompt segment for display in instant prompt. See # https://github.com/romkatv/powerlevel10k/blob/master/README.md#instant-prompt. # # Powerlevel10k will call instant_prompt_* at the same time as the regular prompt_* function # and will record all `p10k segment` calls it makes. When displaying instant prompt, Powerlevel10k # will replay these calls without actually calling instant_prompt_*. It is imperative that # instant_prompt_* always makes the same `p10k segment` calls regardless of environment. If this # rule is not observed, the content of instant prompt will be incorrect. # # Usually, you should either not define instant_prompt_* or simply call prompt_* from it. If # instant_prompt_* is not defined for a segment, the segment won't be shown in instant prompt. function instant_prompt_example() { # Since prompt_example always makes the same `p10k segment` calls, we can call it from # instant_prompt_example. This will give us the same `example` prompt segment in the instant # and regular prompts. prompt_example } # User-defined prompt segments can be customized the same way as built-in segments. # typeset -g POWERLEVEL9K_EXAMPLE_FOREGROUND=208 # typeset -g POWERLEVEL9K_EXAMPLE_VISUAL_IDENTIFIER_EXPANSION='⭐' # Transient prompt works similarly to the builtin transient_rprompt option. It trims down prompt # when accepting a command line. Supported values: # # - off: Don't change prompt when accepting a command line. # - always: Trim down prompt when accepting a command line. # - same-dir: Trim down prompt when accepting a command line unless this is the first command # typed after changing current working directory. typeset -g POWERLEVEL9K_TRANSIENT_PROMPT=off # Instant prompt mode. # # - off: Disable instant prompt. Choose this if you've tried instant prompt and found # it incompatible with your zsh configuration files. 
# - quiet: Enable instant prompt and don't print warnings when detecting console output # during zsh initialization. Choose this if you've read and understood # https://github.com/romkatv/powerlevel10k/blob/master/README.md#instant-prompt. # - verbose: Enable instant prompt and print a warning when detecting console output during # zsh initialization. Choose this if you've never tried instant prompt, haven't # seen the warning, or if you are unsure what this all means. typeset -g POWERLEVEL9K_INSTANT_PROMPT=verbose # Hot reload allows you to change POWERLEVEL9K options after Powerlevel10k has been initialized. # For example, you can type POWERLEVEL9K_BACKGROUND=red and see your prompt turn red. Hot reload # can slow down prompt by 1-2 milliseconds, so it's better to keep it turned off unless you # really need it. typeset -g POWERLEVEL9K_DISABLE_HOT_RELOAD=true # If p10k is already loaded, reload configuration. # This works even with POWERLEVEL9K_DISABLE_HOT_RELOAD=true. (( ! $+functions[p10k] )) || p10k reload } # Tell `p10k configure` which file it should overwrite. typeset -g POWERLEVEL9K_CONFIG_FILE=${${(%):-%x}:a} (( ${#p10k_config_opts} )) && setopt ${p10k_config_opts[@]} 'builtin' 'unset' 'p10k_config_opts'
causica/.devcontainer/.p10k.zsh/0
{ "file_path": "causica/.devcontainer/.p10k.zsh", "repo_id": "causica", "token_count": 32138 }
464
repos: - repo: local hooks: - id: black name: black entry: poetry run black language: system types_or: [python, jupyter] args: [--config=./pyproject.toml] - repo: local hooks: - id: isort name: isort entry: poetry run isort --resolve-all-configs language: system types: [python] - repo: local hooks: - id: poetry lock name: poetry lock check entry: poetry lock --check pass_filenames: false language: system - repo: local hooks: - id: mypy name: mypy entry: poetry run mypy pass_filenames: false language: system types: [python] args: ["--config-file=pyproject.toml", "."] - repo: local hooks: - id: pylint name: pylint entry: poetry run pylint language: system types: [python]
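# Usage sketch (hedged: assumes the pre-commit tool itself is available in the Poetry
# environment, which this file does not state):
#   poetry run pre-commit install          # register these hooks to run on every commit
#   poetry run pre-commit run --all-files  # run black, isort, the poetry lock check, mypy and pylint once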
causica/.pre-commit-config.yaml/0
{ "file_path": "causica/.pre-commit-config.yaml", "repo_id": "causica", "token_count": 321 }
465
# Neural Structure Learning with Stochastic Differential Equations (SCOTCH) [![Static Badge](https://img.shields.io/badge/paper-SCOTCH-brightgreen?style=plastic&label=Paper&labelColor=violet) ](https://openreview.net/forum?id=V1GM9xDvIY) [![Static Badge](https://img.shields.io/badge/Team-Causica-blue?style=plastic&label=Team&labelColor=purple) ]((https://www.microsoft.com/en-us/research/project/project_azua/people/)) This repo implements the SCOTCH proposed in the ICLR 2024 paper "Neural Structure Learning with Stochastic Differential Equations". SCOTCH is a structure learning method using neural stochastic differential equations (SDEs) for temporal data. SCOTCH, designed for the continuous-time processes, outperforms traditional discrete-time models, and is compatible with irregular sampling intervals. SCOTCH combines neural SDEs with variational inference over structures with theoretical guarantees regarding structure identifiability, establishing a new standard for structure learning. ## Dependency We use [Poetry](https://python-poetry.org/) to manage the project dependencies, they are specified in [pyproject](pyproject.toml) file. To install poetry, run: ```console curl -sSL https://install.python-poetry.org | python3 - ``` To install the environment, run `poetry install` in the directory of SCOTCH project. ## Prepare the data To reproduce the experiment results in the [paper](https://openreview.net/forum?id=V1GM9xDvIY), you need to either generate the synthetic data (Lorenz and Yeast glycolysis dataset) or download the raw data and process them (DREAM3 and Netsim dataset). ### Lorenz and Yeast data generation To generate the synthetic data, run the following command: ```console python -m scotch.dataset_generation.generate_and_save_data ``` This executes the [generate_and_save_data.py](src/scotch/dataset_generation/generate_and_save_data.py) to generate Lorenz and Yeast datasets with 5 seeds, both normalized and unnormalized data, different sub-sampling rates and different missing data probabilities to mimic irregular sampling intervals. The generated data will be saved in the `./data/lorenz96_processed`. ### DREAM3 We use the DREAM3 dataset from [DREAM challenge](https://gnw.sourceforge.net/dreamchallenge.html#dream3challenge). One can download the zip file and extract the content to the `./data`. ### Netsim One can download the Netsim dataset from [Netsim](https://www.fmrib.ox.ac.uk/datasets/netsim/), and unzip the file to the `./data`. Then, specify the path to the `sim3.mat` in the file [generate_and_save_data.py](src/scotch/dataset_generation/generate_and_save_data.py#L59) and set `gen_netsim_data = True`. Run the following command to process the data: ```console python -m scotch.dataset_generation.generate_and_save_data ``` The processed data will be saved in `./data/netsim_processed`. If this already exists, it will override the data in `./data/netsim_processed`. ## Run experiments In the [src/scotch/experiments](src/scotch/experiments/) directory, we provide scripts to run the experiments for each datasets reported in the paper (Lorenz, Yeast, DREAM3 and Netsim). For `Ecoli1` dataset of DREAM3, run the following command: ```console python -m scotch.experiments.dream3 --dimension 100 --name Ecoli1 --epoch 40000 --lr 0.001 --sparsity 200 --dt 0.05 --seed 0 --normalize --experiment_name Ecoli1_exp --deci_diffusion --res_connection --sigmoid_output --lr_warmup 100 ``` For other DREAM3 datasets, please refer to paper for hyperparameters. 
For `Netsim` dataset, run the following command: ```console python -m scotch.experiments.netsim --epoch 20000 --lr 0.001 --sparsity 1000 --dt 0.05 --seed 0 --res_connection --deci_diffusion --lr_warmup 100 --sigmoid_output --missing_prob 0.1 --experiment_name Netsim_missing_0.1 ``` For `Lorenz` dataset, run the following command: ```console python -m scotch.experiments.lorenz --epoch 40000 --lr 0.003 --sparsity 500 --dt 1 --seed 0 --experiment_name Lorenz_missing_0.3 --res_connection --deci_diffusion --lr_warmup 100 --sigmoid_output --missing_prob 0.3 --num_time_points 100 --train_size 10 ``` For `Yeast` dataset, run the following command: ```console python -m scotch.experiments.yeast --epoch 40000 --lr 0.001 --sparsity 200 --dt 1 --seed 0 --experiment_name Yeast_exp --res_connection --deci_diffusion --lr_warmup 100 --sigmoid_output --missing_prob 0.0 --num_time_points 100 --train_size 10 --normalize ```
causica/research_experiments/scotch/README.md/0
{ "file_path": "causica/research_experiments/scotch/README.md", "repo_id": "causica", "token_count": 1368 }
466
import torch
from torch import Tensor


def confusion_matrix(true_graph: torch.Tensor, pred_graph: torch.Tensor) -> Tensor:
    """Evaluate metrics for the difference between a true and predicted graph.

    Args:
        true_graph: The true graph; Tensor of shape (state_size, state_size).
        pred_graph: The predicted graph; Tensor of shape (state_size, state_size).

    Returns:
        confusion matrix: Tensor of shape (2, 2) representing the confusion matrix:
            Entry (0, 0) is the number of true negatives.
            Entry (0, 1) is the number of false positives.
            Entry (1, 0) is the number of false negatives.
            Entry (1, 1) is the number of true positives.
    """
    vec1 = torch.abs(true_graph) > 0
    vec2 = torch.abs(pred_graph) > 0

    tp = (vec1 & vec2).sum()
    tn = ((~vec1) & (~vec2)).sum()
    fp = ((~vec1) & (vec2)).sum()
    fn = ((vec1) & (~vec2)).sum()

    return torch.stack([tn, fp, fn, tp]).view(2, 2)


def confusion_matrix_batched(true_graph: torch.Tensor, pred_graphs: torch.Tensor) -> Tensor:
    """Evaluate metrics for the difference between a true and a set of predicted graphs.

    Args:
        true_graph: The true graph; Tensor of shape (state_size, state_size).
        pred_graphs: Set of predicted graphs; Tensor of shape (batch_size, state_size, state_size).

    Returns:
        confusion matrix: Tensor of shape (2, 2) representing the confusion matrix across all predicted graphs.
    """
    batched_confusion_matrix = torch.vmap(confusion_matrix, in_dims=(None, 0), out_dims=0)(true_graph, pred_graphs)
    return torch.sum(batched_confusion_matrix, dim=0)


def true_positive_rate(tp: int, fn: int) -> float:
    """Compute true positive rate given number of true positives and false negatives.

    Args:
        tp: Number of true positives.
        fn: Number of false negatives.

    Returns:
        True positive rate; -1 if tp + fn == 0.
    """
    return -1 if tp + fn == 0 else tp / (tp + fn)


def false_discovery_rate(tp: int, fp: int) -> float:
    """Compute false discovery rate given number of true positives and false positives.

    Args:
        tp: Number of true positives.
        fp: Number of false positives.

    Returns:
        False discovery rate; -1 if tp + fp == 0.
    """
    return -1 if tp + fp == 0 else fp / (tp + fp)


def f1_score(tp: int, fp: int, fn: int) -> float:
    """Compute F1 score given number of true positives, false positives, and false negatives.

    Args:
        tp: Number of true positives.
        fp: Number of false positives.
        fn: Number of false negatives.

    Returns:
        F1 score; -1 if tp + fp + fn == 0.
    """
    return -1 if tp + fp + fn == 0 else 2 * tp / (2 * tp + fp + fn)
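

# Illustrative usage sketch (hedged): the two 3-node graphs below are invented for
# demonstration only and show how the counts above feed the rate helpers.
if __name__ == "__main__":
    true_graph = torch.tensor([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]])
    pred_graph = torch.tensor([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]])

    # Entries follow the layout documented above: [[tn, fp], [fn, tp]].
    (tn, fp), (fn, tp) = confusion_matrix(true_graph, pred_graph).tolist()
    print("TPR:", true_positive_rate(tp, fn))  # 0.5 -> one of the two true edges was recovered
    print("FDR:", false_discovery_rate(tp, fp))  # 0.5 -> one of the two predicted edges is spurious
    print("F1:", f1_score(tp, fp, fn))  # 0.5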
causica/research_experiments/scotch/src/scotch/scotch_utils/graph_metrics.py/0
{ "file_path": "causica/research_experiments/scotch/src/scotch/scotch_utils/graph_metrics.py", "repo_id": "causica", "token_count": 1075 }
467
import torch from causica.data_generation.samplers.functional_relationships_sampler import FunctionalRelationshipsSampler from causica.data_generation.samplers.noise_dist_sampler import JointNoiseModuleSampler from causica.data_generation.samplers.sampler import Sampler from causica.distributions import AdjacencyDistribution from causica.sem.distribution_parameters_sem import DistributionParametersSEM class SEMSampler(Sampler[DistributionParametersSEM]): """Sample a SEM given adjacency, a JointNoiseModuleSampler and functional relationships distributions.""" def __init__( self, adjacency_dist: AdjacencyDistribution, joint_noise_module_sampler: JointNoiseModuleSampler, functional_relationships_sampler: FunctionalRelationshipsSampler, ): self.adjacency_dist = adjacency_dist self.joint_noise_module_sampler = joint_noise_module_sampler self.functional_relationships_sampler = functional_relationships_sampler self.shapes_dict: dict[str, torch.Size] = functional_relationships_sampler.shapes_dict def sample(self): adjacency_matrix = self.adjacency_dist.sample() functional_relationships = self.functional_relationships_sampler.sample() joint_noise_module = self.joint_noise_module_sampler.sample() return DistributionParametersSEM( graph=adjacency_matrix, noise_dist=joint_noise_module, func=functional_relationships )
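

# Illustrative composition sketch. The three sampler arguments below are placeholders
# (concrete AdjacencyDistribution / JointNoiseModuleSampler / FunctionalRelationshipsSampler
# instances live elsewhere in the package and are assumed here, not shown):
#
#     sampler = SEMSampler(
#         adjacency_dist=my_adjacency_dist,
#         joint_noise_module_sampler=my_joint_noise_sampler,
#         functional_relationships_sampler=my_functional_sampler,
#     )
#     sem = sampler.sample()  # a DistributionParametersSEM with a sampled graph, noise model and functions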
causica/src/causica/data_generation/samplers/sem_sampler.py/0
{ "file_path": "causica/src/causica/data_generation/samplers/sem_sampler.py", "repo_id": "causica", "token_count": 505 }
468
from functools import partial from typing import Callable, Type import torch import torch.distributions as td from causica.distributions.adjacency.adjacency_distributions import AdjacencyDistribution from causica.distributions.distribution_module import DistributionModule class ConstrainedAdjacencyDistribution(AdjacencyDistribution): """Adjacency distribution that applies hard constraints to a base distribution. Overrides elements produced from the base adjacency distribution with - 0 when the corresponding negative_constraint=0 - 1 when the corresponding positive constraint=1 - unmodified: all other elements """ arg_constraints = {"positive_constraints": td.constraints.boolean, "negative_constraints": td.constraints.boolean} def __init__( self, dist: AdjacencyDistribution, positive_constraints: torch.Tensor, negative_constraints: torch.Tensor ): """ Args: dist (AdjacencyDistribution): Base distribution, event shape matching the postive and negative constraints. positive_constraints (torch.Tensor): Positive constraints. 1 means edge is present. negative_constraints (torch.Tensor): Negative constraints. 0 means edge is not present. """ if not dist.event_shape == positive_constraints.shape == negative_constraints.shape: raise ValueError("The constraints must match the event shape of the distribution.") self.dist = dist self.positive_constraints = positive_constraints self.negative_constraints = negative_constraints super().__init__(self.dist.num_nodes) def relaxed_sample(self, sample_shape: torch.Size = torch.Size(), temperature: float = 0.0) -> torch.Tensor: """ Sample a binary adjacency matrix from the relaxed distribution and apply constraints. Args: sample_shape: the shape of the samples to return temperature: The temperature of the relaxed distribution Returns: A tensor of shape sample_shape + batch_shape + (num_nodes, num_nodes) """ return self._apply_constraints(self.dist.relaxed_sample(sample_shape=sample_shape, temperature=temperature)) def sample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor: """ Sample a binary adjacency matrix from the underlying distribution and apply constraints. Args: sample_shape: the shape of the samples to return Returns: A tensor of shape sample_shape + batch_shape + (num_nodes, num_nodes) """ return self._apply_constraints(self.dist.sample(sample_shape=sample_shape)) @property def mean(self) -> torch.Tensor: """ Return the mean of the underlying distribution and applies the constraints. This will be a matrix with all entries in the interval [0, 1]. Returns: A tensor of shape batch_shape + (num_nodes, num_nodes) """ return self._apply_constraints(self.dist.mean) @property def mode(self) -> torch.Tensor: """ Return the mode of the underlying distribution and applies the constraints. This will be an adjacency matrix. Returns: A tensor of shape batch_shape + (num_nodes, num_nodes) """ return self._apply_constraints(self.dist.mode) def entropy(self) -> torch.Tensor: """ Return the entropy of the underlying distribution. NOTE: This does not account for the constraints. Returns: A tensor of shape batch_shape, with the entropy of the distribution """ return self.dist.entropy() def log_prob(self, value: torch.Tensor) -> torch.Tensor: """ Get the log probability of each tensor from the sample space NOTE: This does not account for the constraints. Args: value: a binary matrix of shape value_shape + batch_shape + (n, n) Returns: A tensor of shape value_shape + batch_shape, with the log probabilities of each tensor in the batch. 
""" # entropy might need to be modified to account for constraints return self.dist.log_prob(value) def _apply_constraints(self, G: torch.Tensor) -> torch.Tensor: """Return G with the positive and negative constraints applied.""" return 1.0 - (1.0 - G * self.negative_constraints) * (~self.positive_constraints) def get_graph_constraint(graph_constraint_matrix: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: """Converts graph constraint matrix into a positive and negative matrix for easier usage. Args: graph_constraint_matrix: Graph constraints: 0 = no edge, 1 = edge, nan: no constraint. Should be a square matrix with the lengths matching the number of nodes in the graph. Returns: A tuple of (positive_constraints, negative_constraint). See ConstrainedAdjacencyDistribution for their interpretation. """ assert graph_constraint_matrix.ndim == 2, "Constraint matrix must be 2D." assert graph_constraint_matrix.shape[0] == graph_constraint_matrix.shape[1], "Constraint matrix must be square." # Mask self-edges mask = ~torch.eye(graph_constraint_matrix.shape[0], dtype=torch.bool, device=graph_constraint_matrix.device) positive_constraints = mask * torch.nan_to_num(graph_constraint_matrix, nan=0).to( dtype=torch.bool, non_blocking=True ) negative_constraints = torch.nan_to_num(graph_constraint_matrix, nan=1).to(dtype=torch.bool, non_blocking=True) return positive_constraints, negative_constraints def _create_distribution( dist_class: Type[AdjacencyDistribution], *args, graph_constraint_matrix: torch.Tensor, **kwargs ) -> ConstrainedAdjacencyDistribution: """Utility function for generating a constrained adjacency distribution with a base distribution. Args: dist_class (Type[AdjacencyDistribution]): Type of the base adjacency distribution. graph_constraint_matrix: Graph constraints: 0 = no edge, 1 = edge, nan: no constraint. Must match the event shape of the distribution. Returns: ConstrainedAdjacencyDistribution: Constrained adjacency distribution. """ positive_constraints, negative_constraints = get_graph_constraint(graph_constraint_matrix) dist = dist_class(*args, **kwargs) return ConstrainedAdjacencyDistribution( dist, positive_constraints=positive_constraints, negative_constraints=negative_constraints ) def constrained_adjacency( dist_class: Type[AdjacencyDistribution], ) -> Callable[..., ConstrainedAdjacencyDistribution]: """Utility function that returns a function constructing a constrained adjacency distribution. Args: dist_class: Type of the base adjacency distribution. graph_constraint_matrix: Graph constraints: 0 = no edge, 1 = edge, nan: no constraint. Must match the event shape of the distribution. Returns: Callable[..., ConstrainedAdjacencyDistribution]: Utility function creating a ConstrainedAdjacencyDistribution. """ return partial( _create_distribution, dist_class=dist_class, ) class ConstrainedAdjacency(DistributionModule[AdjacencyDistribution]): """A constrained adjacency distribution module where certain parts edges of in the adjacency matrix are locked.""" def __init__( self, adjacency_distribution: DistributionModule[AdjacencyDistribution], graph_constraint_matrix: torch.Tensor ): """ Args: adjacency_distribution: Underlying adjacency distribution module. graph_constraint_matrix: Constraint matrix with edges defined according to `get_graph_constraint`. 
""" super().__init__() self.adjacency_distribution = adjacency_distribution positive_constraints, negative_constraints = get_graph_constraint(graph_constraint_matrix) self.positive_constraints: torch.Tensor self.negative_constraints: torch.Tensor self.register_buffer("positive_constraints", positive_constraints) self.register_buffer("negative_constraints", negative_constraints) def forward(self) -> ConstrainedAdjacencyDistribution: return ConstrainedAdjacencyDistribution( self.adjacency_distribution(), positive_constraints=self.positive_constraints, negative_constraints=self.negative_constraints, )
causica/src/causica/distributions/adjacency/constrained_adjacency_distributions.py/0
{ "file_path": "causica/src/causica/distributions/adjacency/constrained_adjacency_distributions.py", "repo_id": "causica", "token_count": 3150 }
469
from typing import Optional, Union import torch import torch.distributions as td from torch import nn from causica.distributions.noise.noise import IndependentNoise, Noise, NoiseModule from causica.distributions.noise.spline.rational_quadratic_transform import PiecewiseRationalQuadraticTransform # Ordered inputs to `_create_composite_layer` SplineParams = tuple[torch.Tensor, ...] class SplineNoise(td.TransformedDistribution, Noise): """A Spline Based Noise Distribution. Parametrized as in in [Neural Spline Flows](https://arxiv.org/pdf/1906.04032.pdf). """ def __init__( self, base_loc: torch.Tensor, base_scale: torch.Tensor, spline_transforms: list[Union[td.AffineTransform, td.ComposeTransform]], ): """ Args: base_loc: Loc of base normal distribution. base_scale: Scale of base normal distribution. spline_transforms: Spline transforms, where the last transform bijects from noise to samples. """ self.base_loc = base_loc self.base_scale = base_scale last_transform = spline_transforms[-1] if spline_transforms and not (isinstance(last_transform, td.AffineTransform) or not last_transform.parts): # `td.identity` is just a `td.ComposeTransform` with no `parts`. It's used when there's no output shift. raise TypeError( "The last transformation must be `td.AffineTransform` or td.identity, but was " f"`{type(last_transform)}`." ) super().__init__( base_distribution=td.Normal(loc=self.base_loc, scale=self.base_scale), transforms=spline_transforms, ) def sample_to_noise(self, samples: torch.Tensor) -> torch.Tensor: """ Transform from the sample observations to corresponding noise variables. Args: samples: Tensor of shape sample_shape + batch_shape + event_shape Returns: The generated samples with shape sample_shape + batch_shape + event_shape """ output_affine_transform = self.transforms[-1] return output_affine_transform.inv(samples) def noise_to_sample(self, noise: torch.Tensor) -> torch.Tensor: """ Generate samples using the given exogenous noise. Args: noise: noise variable with shape sample_shape + batch_shape. Returns: The generated samples with shape sample_shape + batch_shape + event_shape """ output_affine_transform = self.transforms[-1] return output_affine_transform(noise) def create_spline_dist_params( dim: int, num_bins: int, flow_steps: int, knot_locations_scale: float, derivatives_scale: float, ) -> list[SplineParams]: """Create initial values for a spline distribution. Args: dim: Number of dimensions of the represented variable. num_bins: Number of spline bins. flow_steps: Number of flow steps. knot_locations_scale: Scale of random values used for `knot_locations` of `PiecewiseRationalQuadraticTransform`. derivatives_scale: Scale of random values for `derivatives` of `PiecewiseRationalQuadraticTransform`. Returns: A list of parameters for `CompositeSplineLayer`s. """ param_list: list[SplineParams] = [] for i in range(flow_steps + 1): log_scale = torch.zeros(dim) # this will be exponentiated when passed to the spline distribution loc = torch.zeros(dim) if i == flow_steps: param_list.append((loc, log_scale)) else: knot_locations = knot_locations_scale * torch.randn(dim, num_bins, 2) derivatives = derivatives_scale * torch.randn(dim, num_bins - 1) param_list.append((loc, log_scale, knot_locations, derivatives)) return param_list class CompositeSplineLayer(nn.Module): """A layer constructing an affine transformation potentially composed with a `PiecewiseRationalQuadraticTransform`. 
Encapsulates one of the layers or `flow_steps` of the [Neural Spline Flows](https://arxiv.org/pdf/1906.04032.pdf). """ def __init__( self, init_loc: torch.Tensor, init_log_scale: torch.Tensor, init_knot_locations: Optional[torch.Tensor] = None, init_derivatives: Optional[torch.Tensor] = None, ): """ Args: init_loc: Initial value of base normal distribution loc. init_scale: Initial value of base normal distribution scale. init_knot_locations: Initialization value for the knot locations of `PiecewiseRationalQuadraticTransform`. init_derivatives: Initialization value for the derivatives of `PiecewiseRationalQuadraticTransform`. """ super().__init__() self.loc = nn.Parameter(init_loc) self.log_scale = nn.Parameter(init_log_scale) if (init_knot_locations is None) != (init_derivatives is None): raise ValueError("Either both or none of `knot_location` and `derivatives` must be set.") self.knot_locations = nn.Parameter(init_knot_locations) if init_knot_locations is not None else None self.derivatives = nn.Parameter(init_derivatives) if init_derivatives is not None else None def __call__(self, *args, **kwargs) -> td.Transform: return super().__call__(*args, **kwargs) def forward(self) -> td.Transform: affine = td.AffineTransform(loc=self.loc, scale=torch.exp(self.log_scale)) if self.knot_locations is None or self.derivatives is None: # Test both to ensure correct typing below return affine return td.ComposeTransform([affine, PiecewiseRationalQuadraticTransform(self.knot_locations, self.derivatives)]) class SplineNoiseModule(NoiseModule[IndependentNoise[SplineNoise]]): """Implements Neural Spline Flow noise with learnable parameters. See [Neural Spline Flows](https://arxiv.org/pdf/1906.04032.pdf). """ def __init__( self, dim: int, num_bins: int = 8, flow_steps: int = 1, init_knot_locations_scale: float = 1e-2, init_derivatives_scale: float = 1e-2, ): """ Args: dim: Number of dimensions of the represented variable. num_bins: Number of spline bins. flow_steps: Number of flow steps. init_knot_locations_scale: Scale of random initialization values for `knot_locations` of `PiecewiseRationalQuadraticTransform`. init_derivatives_scale: Scale of random initialization values for `derivatives` of `PiecewiseRationalQuadraticTransform`. """ super().__init__() spline_params = create_spline_dist_params( dim=dim, num_bins=num_bins, flow_steps=flow_steps, knot_locations_scale=init_knot_locations_scale, derivatives_scale=init_derivatives_scale, ) self.composite_spline_layers = nn.ModuleList(CompositeSplineLayer(*params) for params in spline_params) self.base_scale: torch.Tensor self.base_loc: torch.Tensor self.register_buffer("base_loc", torch.zeros(dim)) self.register_buffer("base_scale", torch.ones(dim)) def forward(self, x: Optional[torch.Tensor] = None) -> IndependentNoise[SplineNoise]: transforms = [spline_layer() for spline_layer in self.composite_spline_layers] if x is not None: transforms.append(td.AffineTransform(loc=x, scale=torch.ones_like(x, device=x.device))) else: # Maintain the old behavior of having this transform biject between noise and samples. transforms.append(td.identity_transform) spline_dist = SplineNoise(base_loc=self.base_loc, base_scale=self.base_scale, spline_transforms=transforms) return IndependentNoise(spline_dist, 1)
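

# Illustrative usage sketch (the dimension and sample count are made up; this assumes the
# IndependentNoise wrapper exposes the usual torch.distributions sampling interface):
#
#     noise_module = SplineNoiseModule(dim=3, num_bins=8, flow_steps=1)
#     noise_dist = noise_module()                    # IndependentNoise[SplineNoise]
#     noise = noise_dist.sample(torch.Size([16]))    # shape (16, 3)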
causica/src/causica/distributions/noise/spline/spline.py/0
{ "file_path": "causica/src/causica/distributions/noise/spline/spline.py", "repo_id": "causica", "token_count": 3289 }
470
import torch

from causica.triangular_transformations import unfill_triangular


def adjacency_precision_recall(graph1: torch.Tensor, graph2: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    """Evaluate the precision and recall of edge existence for two adjacency matrices."""
    vec1 = torch.abs(_to_vector(graph1)) > 0
    vec2 = torch.abs(_to_vector(graph2)) > 0
    correspondence = (vec1 & vec2).sum()
    if (vec1_sum := vec1.sum()) != 0:
        recall = correspondence / vec1_sum
    else:
        recall = torch.tensor(0.0, device=graph1.device)
    if (vec2_sum := vec2.sum()) != 0:
        precision = correspondence / vec2_sum
    else:
        precision = torch.tensor(0.0, device=graph1.device)
    return precision, recall


def adjacency_f1(graph1: torch.Tensor, graph2: torch.Tensor) -> torch.Tensor:
    """Evaluate the f1 score of edge existence for two adjacency matrices."""
    return f1_score(*adjacency_precision_recall(graph1, graph2))


def orientation_precision_recall(graph1: torch.Tensor, graph2: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    """Evaluate the precision and recall of edge orientation for two adjacency matrices."""
    vec1 = _to_vector(graph1)
    vec2 = _to_vector(graph2)
    non_zero_vec1 = vec1 != 0
    non_zero_vec2 = vec2 != 0
    if (non_zero_vec1_sum := non_zero_vec1.sum()) != 0:
        recall = ((vec1 == vec2) & non_zero_vec1).sum() / non_zero_vec1_sum
    else:
        recall = torch.tensor(0.0, device=graph1.device)
    if (non_zero_vec2_sum := non_zero_vec2.sum()) != 0:
        precision = ((vec1 == vec2) & non_zero_vec2).sum() / non_zero_vec2_sum
    else:
        precision = torch.tensor(0.0, device=graph1.device)
    return precision, recall


def orientation_f1(graph1: torch.Tensor, graph2: torch.Tensor) -> torch.Tensor:
    """Evaluate the f1 score of edge orientation for two adjacency matrices."""
    return f1_score(*orientation_precision_recall(graph1, graph2))


def f1_score(precision: torch.Tensor, recall: torch.Tensor) -> torch.Tensor:
    """Calculate f1 score from precision and recall."""
    if torch.abs(denominator := precision + recall) < 1e-8:
        return torch.tensor(0.0, device=precision.device)
    return 2.0 * precision * recall / denominator


def _to_vector(graph: torch.Tensor) -> torch.Tensor:
    """
    Convert an adjacency matrix to a vector of length n(n-1)/2.

    There is a 0 for no edge, -1 or 1 for a single edge and 2 for both edges between a pair of nodes
    """
    lower_tri = unfill_triangular(graph, upper=False)
    upper_tri = unfill_triangular(graph, upper=True)
    diff = upper_tri - lower_tri
    return diff + (1 - torch.abs(diff)) * (upper_tri + lower_tri)
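

# Illustrative usage sketch: the two 3-node graphs below are invented for demonstration.
#
#     graph_true = torch.tensor([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]])
#     graph_pred = torch.tensor([[0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
#     adjacency_f1(graph_true, graph_pred)    # 1.0 here: both node pairs are connected, direction ignored
#     orientation_f1(graph_true, graph_pred)  # < 1.0: the 1 -> 2 edge is reversed in graph_pred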
causica/src/causica/graph/evaluation_metrics.py/0
{ "file_path": "causica/src/causica/graph/evaluation_metrics.py", "repo_id": "causica", "token_count": 1040 }
471
import torch from torch import nn class DECIEmbedNN(nn.Module): """ Defines the function f for the SEM. For each variable x_i we use f_i(x) = f(e_i, sum_{k in pa(i)} g(e_k, x_k)), where e_i is a learned embedding for node i. """ def __init__( self, group_mask: torch.Tensor, embedding_size: int, out_dim_g: int, num_layers_g: int, num_layers_zeta: int, ): """ Args: group_mask: A mask of shape (num_nodes, num_processed_cols) such that group_mask[i, j] = 1. when col j is in group i. embedding_size: Size of the embeddings used by each node. Uses the larger of 4 * concatenated_shape or embedding_size. out_dim_g: Output dimension of the "inner" NN, l. If none, default is embedding size. num_layers_g: Number of layers in the "inner" NN, l. num_layers_zeta: Number of layers in the "outer" NN, ζ. """ super().__init__() self.group_mask = group_mask num_nodes, concatenated_shape = group_mask.shape # Initialize embeddings uⱼ self.embeddings = nn.parameter.Parameter(0.01 * torch.randn(num_nodes, embedding_size), requires_grad=True) # Set value for out_dim_g # Set NNs sizes a = max(4 * concatenated_shape, embedding_size, 64) in_dim_g = embedding_size + concatenated_shape in_dim_f = embedding_size + out_dim_g self.l = _generate_fully_connected( input_dim=in_dim_g, output_dim=out_dim_g, hidden_dims=[ a, ] * num_layers_g, ) self.zeta = _generate_fully_connected( input_dim=in_dim_f, output_dim=concatenated_shape, hidden_dims=[ a, ] * num_layers_zeta, ) self.w = torch.nn.Parameter(torch.zeros((num_nodes, num_nodes)), requires_grad=True) def forward(self, samples: torch.Tensor, graphs: torch.Tensor) -> torch.Tensor: """ Computes non-linear function hᵢ(X, G) using the given adjacency matrix. hᵢ(x, G) = ζᵢ(Σⱼ Wᵢⱼ Gⱼᵢ lⱼ(xⱼ) We also use an embedding u so: hᵢ(x, G) = ζ(uᵢ, Σⱼ Wᵢⱼ Gⱼᵢ l(uⱼ, xⱼ)) l takes inputs of size batch_shape + (embedding_size + concatenated_shape) and outputs batch_shape + (out_dim_g) the input will be appropriately masked to correspond to one variable group ζ takes inputs of size batch_shape + (embedding_size + out_dim_g) and outputs batch_shape + (concatenated_shape) the ouptut is then masked to correspond to one variable Args: samples: tensor of shape batch_shape_x + batch_shape_f + batch_shape_g + [n_cols] graph: tensor of shape batch_shape_g + [n_nodes, n_nodes] Returns: tensor of shape batch_shape_x + batch_shape_f + batch_shape_g + [n_cols] """ batch_shape_samples = samples.shape[:-1] batch_shape_g = graphs.shape[:-2] if len(batch_shape_g) > 0 and batch_shape_samples[-len(batch_shape_g) :] != batch_shape_g: raise ValueError( f"Batch shape of samples and graph must match but got {batch_shape_samples} and {batch_shape_g}" ) # Shape batch_shape_x + batch_shape_f + batch_shape_g + (num_nodes, concatenated_shape) masked_samples = torch.einsum("...i,ji->...ji", samples, self.group_mask) # Shape batch_shape_x + batch_shape_f + batch_shape_g + (num_nodes, embedding_size) expanded_embed = self.embeddings.expand(*batch_shape_samples, -1, -1) # l(uⱼ, xⱼ) Shape batch_shape_x + batch_shape_f + batch_shape_g + (num_nodes, embedding_size + concatenated_shape) encoded_samples = self.l( torch.cat([masked_samples, expanded_embed], dim=-1) # (concatenate xⱼ and embeddings uⱼ) ) # Shape batch_shape_x + batch_shape_f + batch_shape_g + (num_nodes, out_dim_g) # Aggregate sum and generate input for f (concatenate X_aggr and embeddings) # Σⱼ Wᵢⱼ Gⱼᵢ l(uⱼ, xⱼ) Shape batch_shape_x + batch_shape_f + batch_shape_g + (num_nodes, out_dim_g) encoded_samples_aggr = torch.einsum("...jk,...jl->...lk", encoded_samples, 
self.w * graphs) # ζ(uᵢ, Σⱼ Wᵢⱼ Gⱼᵢ l(uⱼ, xⱼ)) Shape batch_shape_x + batch_shape_f + batch_shape_g + (num_nodes, concatenated_shape) decoded_samples = self.zeta(torch.cat([encoded_samples_aggr, expanded_embed], dim=-1)) # Mask and aggregate Shape batch_shape_x + batch_shape_f + batch_shape_g + (concatenated_shape) return torch.einsum("...ij,ij->...j", decoded_samples, self.group_mask) def _generate_fully_connected( input_dim: int, output_dim: int, hidden_dims: list[int], ) -> nn.Module: """ Generate a fully connected network. Args: input_dim: Int. Size of input to network. output_dim: Int. Size of output of network. hidden_dims: List of int. Sizes of internal hidden layers. i.e. [a, b] is three linear layers with shapes (input_dim, a), (a, b), (b, output_dim) Returns: Sequential object containing the desired network. """ layers: list[nn.Module] = [] prev_dim = input_dim for idx, hidden_dim in enumerate(hidden_dims): block: list[nn.Module] = [] if idx > 0: block.append(nn.LayerNorm(prev_dim)) block.extend([nn.Linear(prev_dim, hidden_dim), nn.LeakyReLU()]) seq_block: nn.Module = nn.Sequential(*block) if prev_dim == hidden_dim: seq_block = _ResBlock(seq_block) layers.append(seq_block) prev_dim = hidden_dim layers.extend([nn.LayerNorm(prev_dim), nn.Linear(prev_dim, output_dim)]) return nn.Sequential(*layers) class _ResBlock(nn.Module): """ Wraps an nn.Module, adding a skip connection to it. """ def __init__(self, block: nn.Module): """ Args: block: module to which skip connection will be added. The input dimension must match the output dimension. """ super().__init__() self.block = block def forward(self, x): return x + self.block(x)
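

# Illustrative shape walk-through (all sizes made up). Two variable groups over three
# processed columns: group 0 owns columns 0-1, group 1 owns column 2.
#
#     group_mask = torch.tensor([[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
#     func = DECIEmbedNN(group_mask, embedding_size=32, out_dim_g=32, num_layers_g=2, num_layers_zeta=2)
#     samples = torch.randn(16, 3)                    # 16 observations, 3 processed columns
#     graph = torch.tensor([[0.0, 1.0], [0.0, 0.0]])  # a single graph: node 0 -> node 1
#     out = func(samples, graph)                      # shape (16, 3): one prediction per processed column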
causica/src/causica/nn/deci_embed_nn.py/0
{ "file_path": "causica/src/causica/nn/deci_embed_nn.py", "repo_id": "causica", "token_count": 2931 }
472
import torch from tensordict import TensorDict from causica.datasets.normalization import fit_standardizer def test_standardizer(): data = TensorDict( { "x": torch.randn((100, 3)) * torch.arange(1, 4).float(), "y": torch.randn((100, 3)) + torch.arange(1, 4).float(), }, batch_size=100, ) standardizer = fit_standardizer(data) standardized_data = standardizer(data) # Test mean and std assert torch.allclose( standardized_data["x"].mean(dim=0), torch.zeros_like(standardized_data["x"].mean(dim=0)), atol=1e-6 ) assert torch.allclose( standardized_data["y"].mean(dim=0), torch.zeros_like(standardized_data["y"].mean(dim=0)), atol=1e-6 ) assert torch.allclose( standardized_data["x"].std(dim=0), torch.ones_like(standardized_data["x"].std(dim=0)), atol=1e-6 ) assert torch.allclose( standardized_data["y"].std(dim=0), torch.ones_like(standardized_data["y"].std(dim=0)), atol=1e-6 ) # Test specific values assert torch.allclose( standardized_data["x"], (data["x"] - data["x"].mean(dim=0, keepdim=True)) / data["x"].std(dim=0, keepdim=True), atol=1e-6, ) assert torch.allclose( standardized_data["y"], (data["y"] - data["y"].mean(dim=0, keepdim=True)) / data["y"].std(dim=0, keepdim=True), atol=1e-6, ) # Test Inverse assert torch.allclose(standardizer.inv(standardized_data)["x"], data["x"], atol=1e-6) assert torch.allclose(standardizer.inv(standardized_data)["y"], data["y"], atol=1e-6) def test_standardizer_subset(): data = TensorDict( { "x": torch.randn((100, 3)) * torch.arange(1, 4).float(), "y": torch.randn((100, 3)) + torch.arange(1, 4).float(), }, batch_size=100, ) standardizer = fit_standardizer(data.select("x")) standardized_data = standardizer(data) assert torch.allclose( standardized_data["x"], (data["x"] - data["x"].mean(dim=0, keepdim=True)) / data["x"].std(dim=0, keepdim=True), ) assert torch.allclose(standardized_data["y"], data["y"]) # Test mean and std assert torch.allclose( standardized_data["x"].mean(dim=0), torch.zeros_like(standardized_data["x"].mean(dim=0)), atol=1e-6 ) assert torch.allclose( standardized_data["x"].std(dim=0), torch.ones_like(standardized_data["x"].std(dim=0)), atol=1e-6 ) def test_standardizer_with_zero_std(): data = TensorDict( { "x": torch.ones((100, 3)), }, batch_size=100, ) standardizer = fit_standardizer(data) standardized_data = standardizer(data) assert torch.allclose(standardized_data["x"], torch.zeros_like(data["x"])) assert torch.allclose(standardizer.inv(standardized_data)["x"], data["x"], atol=1e-6)
causica/test/datasets/test_standardizer.py/0
{ "file_path": "causica/test/datasets/test_standardizer.py", "repo_id": "causica", "token_count": 1290 }
473
import pandas as pd import torch from causica.datasets.causica_dataset_format import Variable from causica.lightning.data_modules.basic_data_module import BasicDECIDataModule def test_basic_data_module(): """Test Basic Data Module functionality""" df = pd.DataFrame( { "x0_0": { 0: -0.5569139122962952, 1: 0.5148046016693115, 2: -0.29866957664489746, 3: -0.9968659877777101, 4: 2.916576147079468, 5: -0.05121749639511108, 6: 0.4290653467178345, 7: -0.7042781710624695, 8: -0.06559652090072632, 9: -0.9346157908439635, }, "x0_1": { 0: 0.06510090827941895, 1: 1.4334404468536377, 2: -0.9826208949089049, 3: -0.2685583829879761, 4: 1.1215617656707764, 5: 0.7001379728317261, 6: -0.1907709240913391, 7: 0.49415969848632807, 8: -0.2164681553840637, 9: 0.5424903631210327, }, "x1_0": { 0: 0.008327394723892214, 1: -0.3806004524230957, 2: -0.2750669717788696, 3: -0.2773442268371582, 4: 4.198153972625732, 5: -0.7430528402328491, 6: -0.07149887084960938, 7: 0.5431607961654663, 8: 0.2636866867542267, 9: -0.5033876895904541, }, "x1_1": { 0: 0.33135163784027094, 1: 0.03788989782333375, 2: -1.343551516532898, 3: -0.39070096611976624, 4: 0.04580897092819214, 5: -0.41362786293029785, 6: -0.6606348752975464, 7: 0.021188572049140927, 8: -0.8007687330245972, 9: 0.4804639220237732, }, } ) variables_list = [ Variable( group_name="x0", lower=-0.9997121691703796, name="x0_0", upper=9.77190113067627, ), Variable( group_name="x0", lower=-0.9997121691703796, name="x0_1", upper=9.77190113067627, ), Variable( group_name="x1", lower=-1.3532483577728271, name="x1_0", upper=6.881451606750488, ), Variable( group_name="x1", lower=-1.3532483577728271, name="x1_1", upper=6.881451606750488, ), ] data_module = BasicDECIDataModule(df, variables_list, batch_size=2) assert list(data_module.train_dataloader())[0].batch_size == torch.Size([2]) assert data_module.dataset_train.batch_size == torch.Size([10]) assert data_module.dataset_train["x0"].shape == torch.Size([10, 2])
causica/test/lightning/test_basic_data_module.py/0
{ "file_path": "causica/test/lightning/test_basic_data_module.py", "repo_id": "causica", "token_count": 1930 }
474
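The data module under test groups the flat columns `x0_0`/`x0_1` and `x1_0`/`x1_1` into the 2-D variables `x0` and `x1`, which is why `dataset_train["x0"]` has shape `[10, 2]`. A rough, standalone sketch of that grouping step — independent of the `BasicDECIDataModule` internals, with a made-up helper name:

```python
import pandas as pd
import torch


def group_columns(df, groups):
    """Stack the listed columns of each group into one (num_rows, group_size) tensor."""
    return {
        name: torch.tensor(df[cols].to_numpy(), dtype=torch.float32)
        for name, cols in groups.items()
    }


df = pd.DataFrame({"x0_0": [0.1, 0.2], "x0_1": [1.0, 2.0], "x1_0": [3.0, 4.0], "x1_1": [5.0, 6.0]})
tensors = group_columns(df, {"x0": ["x0_0", "x0_1"], "x1": ["x1_0", "x1_1"]})
assert tensors["x0"].shape == torch.Size([2, 2])
```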
# Support ## How to file issues and get help This project uses GitHub Issues to track bugs and feature requests. Please search the existing issues before filing new issues to avoid duplicates. For new issues, file your bug or feature request as a new Issue. For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER CHANNEL. WHERE WILL YOU HELP PEOPLE?**. ## Microsoft Support Policy Support for this **PROJECT or PRODUCT** is limited to the resources listed above.
cliffordlayers/SUPPORT.md/0
{ "file_path": "cliffordlayers/SUPPORT.md", "repo_id": "cliffordlayers", "token_count": 151 }
475
#####################################################################################
# Major ideas of complex batch norm taken from https://github.com/ivannz/cplxmodule
# MIT License
#####################################################################################
from typing import Optional, Union

import torch
import torch.nn as nn


def whiten_data(
    x: torch.Tensor,
    training: bool = True,
    running_mean: Optional[torch.Tensor] = None,
    running_cov: Optional[torch.Tensor] = None,
    momentum: float = 0.1,
    eps: float = 1e-5,
) -> torch.Tensor:
    """Jointly whiten features in tensors `(B, C, *D, I)`: take n_blades(I)-dim vectors
    and whiten individually for each channel dimension C over `(B, *D)`.
    I is the number of blades in the respective Clifford algebra, e.g. I = 2 for complex numbers.

    Args:
        x (torch.Tensor): The tensor to whiten.
        training (bool, optional): Whether to update the running mean and covariance. Defaults to `True`.
        running_mean (torch.Tensor, optional): The running mean of shape `(I, C)`. Defaults to `None`.
        running_cov (torch.Tensor, optional): The running covariance of shape `(I, I, C)`. Defaults to `None`.
        momentum (float, optional): The momentum to use for the running mean and covariance. Defaults to `0.1`.
        eps (float, optional): A small number to add to the covariance. Defaults to 1e-5.

    Returns:
        (torch.Tensor): Whitened data of shape `(B, C, *D, I)`.
    """
    assert x.dim() >= 3
    # Get whitening shape of [1, C, ...]
    _, C, *_, I = x.shape
    B_dim, C_dim, *D_dims, I_dim = range(len(x.shape))
    shape = 1, C, *([1] * (x.dim() - 3))

    # Get feature mean.
    if not (running_mean is None or running_mean.shape == (I, C)):
        raise ValueError(f"Running_mean expected to be none, or of shape ({I}, {C}).")
    if training or running_mean is None:
        mean = x.mean(dim=(B_dim, *D_dims))
        if running_mean is not None:
            running_mean += momentum * (mean.data.permute(1, 0) - running_mean)
    else:
        mean = running_mean.permute(1, 0)

    # Get feature covariance.
x = x - mean.reshape(*shape, I) if not (running_cov is None or running_cov.shape == (I, I, C)): raise ValueError(f"Running_cov expected to be none, or of shape ({I}, {I}, {C}).") if training or running_cov is None: # B, C, *D, I -> C, I, B, *D X = x.permute(C_dim, I_dim, B_dim, *D_dims).flatten(2, -1) # Covariance XX^T matrix of shape C x I x I cov = torch.matmul(X, X.transpose(-1, -2)) / X.shape[-1] if running_cov is not None: running_cov += momentum * (cov.data.permute(1, 2, 0) - running_cov) else: cov = running_cov.permute(2, 0, 1) # Upper triangle Cholesky decomposition of covariance matrix: U^T U = Cov # eye = eps * torch.eye(I, device=cov.device, dtype=cov.dtype).unsqueeze(0) # Modified the scale of eps to help prevent the occurence of negative-definite matrices # 1e-5 may not fit the scale of matrices with large numbers max_values = torch.amax(cov, dim=(1, 2)) A = torch.eye(cov.shape[-1], device=cov.device, dtype=cov.dtype) eye = eps * torch.einsum('ij,k->kij', A, max_values) U = torch.linalg.cholesky(cov + eye).mH # Invert Cholesky decomposition, returns tensor of shape [B, C, *D, I] x_whiten = torch.linalg.solve_triangular( U.reshape(*shape, I, I), x.unsqueeze(-1), upper=True, ).squeeze(-1) return x_whiten def complex_batch_norm( x: torch.Tensor, running_mean: Optional[torch.Tensor] = None, running_cov: Optional[torch.Tensor] = None, weight: Optional[Union[torch.Tensor, nn.Parameter]] = None, bias: Optional[Union[torch.Tensor, nn.Parameter]] = None, training: bool = True, momentum: float = 0.1, eps: float = 1e-05, ) -> torch.Tensor: """Applies complex-valued Batch Normalization as described in (Trabelsi et al., 2018) for each channel across a batch of data. Args: x (torch.Tensor): The input complex-valued data is expected to be at least 2d, with shape `(B, C, *D)`, where `B` is the batch dimension, `C` the channels/features, and *D the remaining dimensions (if present). running_mean (Union[torch.Tensor, nn.Parameter], optional): The tensor with running mean statistics having shape `(2, C)`. running_cov (Union[torch.Tensor, nn.Parameter], optional): The tensor with running real-imaginary covariance statistics having shape `(2, 2, C)`. weight (torch.Tensor, optional): Additional weight tensor which is applied post normalization, and has the shape `(2, 2, C)`. bias (torch.Tensor, optional): Additional bias tensor which is applied post normalization, and has the shape `(2, C)`. training (bool, optional): Whether to use the running mean and variance. Defaults to `True`. momentum (float, optional): Momentum for the running mean and variance. Defaults to `0.1`. eps (float, optional): Epsilon for the running mean and variance. Defaults to `1e-05`. Returns: (torch.Tensor): Normalized input as complex tensor of shape `(B, C, *D)`. """ # Check arguments. assert (running_mean is None and running_cov is None) or (running_mean is not None and running_cov is not None) assert (weight is None and bias is None) or (weight is not None and bias is not None) x = torch.view_as_real(x) _, C, *_, I = x.shape assert I == 2 # Whiten and apply affine transformation. x_norm = whiten_data( x, training, running_mean, running_cov, momentum, eps, ) if weight is not None and bias is not None: # Check if weight and bias tensors are of correct shape. assert weight.shape == (2, 2, C) assert bias.shape == (2, C) # Unsqueeze weight and bias for each dimension except the channel dimension. 
shape = 1, C, *([1] * (x.dim() - 3)) weight = weight.reshape(2, 2, *shape) # Apply additional affine transformation post normalization. weight_idx = list(range(weight.dim())) # TODO weight multiplication should be changed to complex product. weight = weight.permute(*weight_idx[2:], *weight_idx[:2]) x_norm = weight.matmul(x_norm[..., None]).squeeze(-1) + bias.reshape(*shape, 2) return torch.view_as_complex(x_norm) def clifford_batch_norm( x: torch.Tensor, n_blades: int, running_mean: Optional[torch.Tensor] = None, running_cov: Optional[torch.Tensor] = None, weight: Optional[Union[torch.Tensor, nn.Parameter]] = None, bias: Optional[Union[torch.Tensor, nn.Parameter]] = None, training: bool = True, momentum: float = 0.1, eps: float = 1e-05, ) -> torch.Tensor: """Clifford batch normalization for each channel across a batch of data. Args: x (torch.Tensor): Input tensor of shape `(B, C, *D, I)` where I is the blade of the algebra. n_blades (int): Number of blades of the Clifford algebra. running_mean (torch.Tensor, optional): The tensor with running mean statistics having shape `(I, C)`. running_cov (torch.Tensor, optional): The tensor with running covariance statistics having shape `(I, I, C)`. weight (Union[torch.Tensor, nn.Parameter], optional): Additional weight tensor which is applied post normalization, and has the shape `(I, I, C)`. bias (Union[torch.Tensor, nn.Parameter], optional): Additional bias tensor which is applied post normalization, and has the shape `(I, C)`. training (bool, optional): Whether to use the running mean and variance. Defaults to True. Defaults to True. momentum (float, optional): Momentum for the running mean and variance. Defaults to 0.1. eps (float, optional): Epsilon for the running mean and variance. Defaults to 1e-05. Returns: (torch.Tensor): Normalized input of shape `(B, C, *D, I)` """ # Check arguments. assert (running_mean is None and running_cov is None) or (running_mean is not None and running_cov is not None) assert (weight is None and bias is None) or (weight is not None and bias is not None) # Whiten and apply affine transformation _, C, *_, I = x.shape assert I == n_blades x_norm = whiten_data( x, training=training, running_mean=running_mean, running_cov=running_cov, momentum=momentum, eps=eps, ) if weight is not None and bias is not None: # Check if weight and bias tensors are of correct shape. assert weight.shape == (I, I, C) assert bias.shape == (I, C) # Unsqueeze weight and bias for each dimension except the channel dimension. shape = 1, C, *([1] * (x.dim() - 3)) weight = weight.reshape(I, I, *shape) # Apply additional affine transformation post normalization. weight_idx = list(range(weight.dim())) # TODO: weight multiplication should be changed to geometric product. weight = weight.permute(*weight_idx[2:], *weight_idx[:2]) x_norm = weight.matmul(x_norm[..., None]).squeeze(-1) + bias.reshape(*shape, I) return x_norm
cliffordlayers/cliffordlayers/nn/functional/batchnorm.py/0
{ "file_path": "cliffordlayers/cliffordlayers/nn/functional/batchnorm.py", "repo_id": "cliffordlayers", "token_count": 3581 }
476
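A short usage sketch for the functions defined in the batchnorm.py file above, assuming only what their signatures show: `complex_batch_norm` takes a complex tensor of shape `(B, C, *D)` and `clifford_batch_norm` takes a real tensor of shape `(B, C, *D, I)`. With no running statistics or affine parameters passed, both reduce to per-channel whitening.

```python
import torch

# complex_batch_norm: batch of 8 samples, 4 channels, 16 positions.
x = torch.randn(8, 4, 16, dtype=torch.complex64)
y = complex_batch_norm(x)
assert y.shape == x.shape
# Per-channel means of the real/imaginary parts should be approximately zero after whitening.
print(torch.view_as_real(y).mean(dim=(0, 2)))

# clifford_batch_norm: same layout but with an explicit blade dimension I = 4.
x_cl = torch.randn(8, 4, 16, 4)
y_cl = clifford_batch_norm(x_cl, n_blades=4)
assert y_cl.shape == x_cl.shape
```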
site_name: CliffordLayers site_description: Documentation for CliffordLayers repo_name: microsoft/cliffordlayers edit_uri: edit/main/docs repo_url: https://github.com/microsoft/cliffordlayers markdown_extensions: - attr_list - tables - admonition - md_in_html - pymdownx.details - pymdownx.superfences - pymdownx.tabbed: alternate_style: true - pymdownx.highlight: anchor_linenums: true - pymdownx.inlinehilite - pymdownx.snippets - pymdownx.superfences - pymdownx.critic - pymdownx.caret - pymdownx.keys - pymdownx.mark - pymdownx.tilde - pymdownx.arithmatex: generic: true - toc: permalink: "¤" - pymdownx.emoji: emoji_index: !!python/name:materialx.emoji.twemoji emoji_generator: !!python/name:materialx.emoji.to_svg theme: name: material palette: scheme: clifford features: - content.code.annotate custom_dir: docs/overrides font: text: Merriweather Sans code: Noto Sans Mono logo: assets/images/logo.png extra_css: - css/extra.css extra_javascript: - https://unpkg.com/[email protected]/dist/tablesort.min.js - javascripts/tablesort.js - javascripts/mathjax.js - https://polyfill.io/v3/polyfill.min.js?features=es6 - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js plugins: - search - mkdocstrings: default_handler: python enable_inventory: true handlers: python: import: - https://docs.python.org/3/objects.inv - https://pytorch.org/docs/stable/objects.inv nav: - Home: "index.md" - Installation: "install.md" - Research: "research.md" - Code reference: - Modules: "reference/modules.md" - Functional: "reference/functional.md" - Internals: "reference/misc.md" - Models: "reference/models.md"
cliffordlayers/mkdocs.yml/0
{ "file_path": "cliffordlayers/mkdocs.yml", "repo_id": "cliffordlayers", "token_count": 775 }
477
#!/usr/bin/env/python """ Usage: get_data.py --dataset zinc|qm9|cep Options: -h --help Show this screen. --dataset NAME Dataset name: zinc, qm9, cep """ import sys, os sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) from rdkit import Chem from rdkit.Chem import rdmolops from rdkit.Chem import QED import glob import csv, json import numpy as np from utils import bond_dict, dataset_info, need_kekulize, to_graph,graph_to_adj_mat import utils import pickle import random from docopt import docopt from get_qm9 import preprocess dataset = "zinc" def train_valid_split(download_path): # load validation dataset with open("valid_idx_zinc.json", 'r') as f: valid_idx = json.load(f) print('reading data...') raw_data = {'train': [], 'valid': []} # save the train, valid dataset. with open(download_path, 'r') as f: all_data = list(csv.DictReader(f)) file_count=0 for i, data_item in enumerate(all_data): smiles = data_item['smiles'].strip() QED = float(data_item['qed']) if i not in valid_idx: raw_data['train'].append({'smiles': smiles, 'QED': QED}) else: raw_data['valid'].append({'smiles': smiles, 'QED': QED}) file_count += 1 if file_count % 2000 ==0: print('finished reading: %d' % file_count, end='\r') return raw_data if __name__ == "__main__": download_path = '250k_rndm_zinc_drugs_clean_3.csv' if not os.path.exists(download_path): print('downloading data to %s ...' % download_path) source = 'https://raw.githubusercontent.com/aspuru-guzik-group/chemical_vae/master/models/zinc_properties/250k_rndm_zinc_drugs_clean_3.csv' os.system('wget -O %s %s' % (download_path, source)) print('finished downloading') raw_data = train_valid_split(download_path) preprocess(raw_data, dataset)
constrained-graph-variational-autoencoder/data/get_zinc.py/0
{ "file_path": "constrained-graph-variational-autoencoder/data/get_zinc.py", "repo_id": "constrained-graph-variational-autoencoder", "token_count": 851 }
478
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from typing import Dict, List import spacy from spacy.language import Language class SpacyExtractor: """class SpacyExtractor encapsulates logic to pipe Records with an id and text body through a spacy model and return entities separated by Entity Type """ def __init__( self, nlp: Language, input_id_col: str = "id", input_text_col: str = "text" ): """Initialize the SpacyExtractor pipeline. nlp (spacy.language.Language): pre-loaded spacy language model input_text_col (str): property on each document to run the model on input_id_col (str): property on each document to correlate with request RETURNS (EntityRecognizer): The newly constructed object. """ self.nlp = nlp self.input_id_col = input_id_col self.input_text_col = input_text_col def _name_to_id(self, text: str): """Utility function to do a messy normalization of an entity name text (str): text to create "id" from """ return "-".join([s.lower() for s in text.split()]) def extract_entities(self, records: List[Dict[str, str]]): """Apply the pre-trained model to a batch of records records (list): The list of "document" dictionaries each with an `id` and `text` property RETURNS (list): List of responses containing the id of the correlating document and a list of entities. """ ids = (doc[self.input_id_col] for doc in records) texts = (doc[self.input_text_col] for doc in records) res = [] for doc_id, spacy_doc in zip(ids, self.nlp.pipe(texts)): entities = {} for ent in spacy_doc.ents: ent_id = ent.kb_id if not ent_id: ent_id = ent.ent_id if not ent_id: ent_id = self._name_to_id(ent.text) if ent_id not in entities: if ent.text.lower() == ent.text: ent_name = ent.text.capitalize() else: ent_name = ent.text entities[ent_id] = { "name": ent_name, "label": ent.label_, "matches": [], } entities[ent_id]["matches"].append( {"start": ent.start_char, "end": ent.end_char, "text": ent.text} ) res.append({"id": doc_id, "entities": list(entities.values())}) return res
cookiecutter-spacy-fastapi/{{cookiecutter.project_slug}}/app/spacy_extractor.py/0
{ "file_path": "cookiecutter-spacy-fastapi/{{cookiecutter.project_slug}}/app/spacy_extractor.py", "repo_id": "cookiecutter-spacy-fastapi", "token_count": 1274 }
479
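One possible way to drive the extractor above end to end is sketched below. The spaCy model name is simply the standard small English pipeline (it must be downloaded separately), and the sample records are made up; the `id`/`text` keys match the constructor defaults.

```python
import spacy

# Requires: python -m spacy download en_core_web_sm
nlp = spacy.load("en_core_web_sm")
extractor = SpacyExtractor(nlp)

records = [
    {"id": "doc-1", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
    {"id": "doc-2", "text": "Seattle is rainy in winter."},
]

for result in extractor.extract_entities(records):
    print(result["id"])
    for entity in result["entities"]:
        print(" ", entity["label"], entity["name"], entity["matches"])
```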
{ "extends": "solium:recommended", "plugins": [ "security" ], "rules": { "quotes": [ "error", "double" ], "indentation": [ "error", 4 ], "linebreak-style": [ "error", "unix" ] } }
0xDeCA10B/demo/.soliumrc.json/0
{ "file_path": "0xDeCA10B/demo/.soliumrc.json", "repo_id": "0xDeCA10B", "token_count": 139 }
0
import blue from '@material-ui/core/colors/blue' import CssBaseline from '@material-ui/core/CssBaseline' import { createMuiTheme } from '@material-ui/core/styles' import { ThemeProvider } from '@material-ui/styles' import { SnackbarProvider } from 'notistack' import React, { Component } from 'react' import { BrowserRouter as Router, Route } from 'react-router-dom' import About from './components/About' import AddModel from './components/addModel' import AppBar from './components/appBar' import AddDeployedModel from './components/addDeployedModel' import Model from './components/model' import ModelList from './containers/modelList' import Footer from './Footer' const theme = createMuiTheme({ palette: { primary: { // Use a lighter color for better contrast and accessibility. main: blue[300] }, type: 'dark', }, typography: { useNextVariants: true, }, }) class App extends Component { render() { const mainDiv = { marginTop: '50px' } const page = { position: 'relative', minHeight: '100vh', } const contentWrap = { // Pad enough for the footer height. paddingBottom: '18rem', } return ( <Router> <ThemeProvider theme={theme}> <SnackbarProvider maxSnack={5}> <CssBaseline /> <div className="App" style={page}> <div className="content-wrap" style={contentWrap}> <div className="App-header"> <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500" /> <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons" /> <AppBar /> </div> <div style={mainDiv}> <Route exact path="/" component={ModelList} /> <Route path="/about" component={About} /> <Route path="/add" component={AddModel} /> <Route path="/addDeployedModel" component={AddDeployedModel} /> <Route path="/model" component={Model} /> </div> </div> <Footer /> </div> </SnackbarProvider> </ThemeProvider> </Router> ) } } export default App
0xDeCA10B/demo/client/src/App.js/0
{ "file_path": "0xDeCA10B/demo/client/src/App.js", "repo_id": "0xDeCA10B", "token_count": 837 }
1
pragma solidity ^0.6; pragma experimental ABIEncoderV2; import "../../../lib/Math.sol"; import {Classifier64} from "./Classifier.sol"; /** * A nearest centroid classifier that uses Euclidean distance to predict the closest centroid based on a sparse data sample. * Data must be sorted indices of features with each feature occurring at most once. * * https://en.wikipedia.org/wiki/Nearest_centroid_classifier */ contract SparseNearestCentroidClassifier is Classifier64 { /** A class has been added. */ event AddClass( /** The name of the class. */ string name, /** The index for the class in the members of this classifier. */ uint index ); uint256 constant public UINT256_MAX = ~uint256(0); uint256 constant public dataCountLimit = 2 ** (256 - 64 - 1); /** * A value in a centroid. * The value is the number stored (multiplied by `toFloat`). * `numSamples` is the denominator in `value`, it helps to avoid updating the value for very sparse updates to the model. * `value` should be converted before use: `this.value` * `this.numSamples` / `classInfo.numSamples`. */ struct CentroidValue { uint64 value; uint64 numSamples; } /** * Information for a class. */ struct ClassInfo { /** * The number of samples in the class. */ uint64 numSamples; /** * The squared 2-norm of the centroid. Multiplied by `(toFloat * toFloat)`. */ uint squaredMagnitude; /** * The average of all data points in the class. */ mapping(uint32 => CentroidValue) centroid; } /** * Information for each supported classification. */ ClassInfo[] public classInfos; constructor( string[] memory _classifications, uint64[][][] memory _centroids, uint64[] memory dataCounts) Classifier64(_classifications) public { require(_centroids.length == _classifications.length, "The number of centroids and classifications must be the same."); require(_classifications.length > 0, "At least one class is required."); require(_classifications.length < 2 ** 64, "Too many classes given."); for (uint i = 0; i < _centroids.length; ++i) { uint squaredMagnitude = 0; classInfos.push(ClassInfo(dataCounts[i], squaredMagnitude)); ClassInfo storage storedInfo = classInfos[i]; for (uint j = 0; j < _centroids[i].length; ++j) { storedInfo.centroid[uint32(_centroids[i][j][0])] = CentroidValue(_centroids[i][j][1], dataCounts[i]); // Should be safe multiplication and addition because vector entries should be small. squaredMagnitude += uint(_centroids[i][j][1]) * _centroids[i][j][1]; } storedInfo.squaredMagnitude = squaredMagnitude; } } /** * Extend the number of dimensions of a centroid. * Made to be called just after the contract is created and never again. * @param extension The values to append to a centroid vector. * @param classification The class to add the extension to. */ function extendCentroid(uint64[][] memory extension, uint64 classification) public onlyOwner { require(classification < classInfos.length, "This classification has not been added yet."); ClassInfo storage classInfo = classInfos[classification]; mapping(uint32 => CentroidValue) storage centroid = classInfo.centroid; uint squaredMagnitude = classInfo.squaredMagnitude; for (uint i = 0; i < extension.length; ++i) { centroid[uint32(extension[i][0])] = CentroidValue(extension[i][1], classInfo.numSamples); // Should be safe multiplication and addition because vector entries should be small. 
squaredMagnitude += uint(extension[i][1]) * extension[i][1]; } classInfo.squaredMagnitude = squaredMagnitude; } function addClass(uint64[][] memory centroid, string memory classification, uint64 dataCount) public onlyOwner { require(classifications.length + 1 < 2 ** 64, "There are too many classes already."); require(dataCount < dataCountLimit, "Data count is too large."); uint squaredMagnitude = 0; classInfos.push(ClassInfo(dataCount, squaredMagnitude)); ClassInfo storage storedInfo = classInfos[classInfos.length - 1]; classifications.push(classification); for (uint i = 0; i < centroid.length; ++i) { storedInfo.centroid[uint32(centroid[i][0])] = CentroidValue(centroid[i][1], dataCount); // Should be safe multiplication and addition because vector entries should be small. squaredMagnitude += uint(centroid[i][1]) * centroid[i][1]; } storedInfo.squaredMagnitude = squaredMagnitude; emit AddClass(classification, classifications.length - 1); } function norm(int64[] memory /* data */) public override pure returns (uint) { revert("Normalization is not required."); } function predict(int64[] memory data) public override view returns (uint64 bestClass) { // Sparse representation: each number in data is a feature index. // Assume values in data are sorted in increasing order. uint minDistance = UINT256_MAX; bestClass = 0; for (uint64 currentClass = 0; currentClass < classInfos.length; ++currentClass) { mapping(uint32 => CentroidValue) storage centroid = classInfos[currentClass].centroid; // Default distance for empty data is `classInfos[currentClass].squaredMagnitude`. // Well use that as a base and update it. // distance = classInfos[currentClass].squaredMagnitude // For each feature j that is present in `data`: // distance = distance - centroid[j]^2 + (centroid[j] - toFloat)^2 // = distance - centroid[j]^2 + centroid[j]^2 - 2 * centroid[j] * toFloat + toFloat^2 // = distance - 2 * centroid[j] * toFloat + toFloat^2 // = distance + toFloat * (-2 * centroid[j] + toFloat) int distanceUpdate = 0; for (uint dataIndex = 0; dataIndex < data.length; ++dataIndex) { CentroidValue memory centroidValue = centroid[uint32(data[dataIndex])]; uint value = centroidValue.value; if (centroidValue.numSamples != classInfos[currentClass].numSamples) { // The value has not been updated yet so it needs to be scaled for the correct number of samples. value = value * centroidValue.numSamples / classInfos[currentClass].numSamples; } // Should be safe since data is not very long. distanceUpdate += int(toFloat) - 2 * int(value); } uint distance = uint(int(classInfos[currentClass].squaredMagnitude) + distanceUpdate * toFloat); if (distance < minDistance) { minDistance = distance; bestClass = currentClass; } } } function update(int64[] memory data, uint64 classification) public override onlyOwner { require(classification < classInfos.length, "This classification has not been added yet."); ClassInfo storage classInfo = classInfos[classification]; mapping(uint32 => CentroidValue) storage centroid = classInfo.centroid; uint n = classInfo.numSamples; uint64 newN; uint squaredMagnitude = classInfo.squaredMagnitude; // Keep n small enough for multiplication. if (n < dataCountLimit) { newN = classInfo.numSamples + 1; classInfo.numSamples = newN; } else { newN = classInfo.numSamples; } uint squaredMagnitudeUpdate = 0; // Update centroid using moving average calculation. 
for (uint dataIndex = 0; dataIndex < data.length; ++dataIndex) { uint32 featureIndex = uint32(data[dataIndex]); uint64 prevNumSamples = centroid[featureIndex].numSamples; // The value at the centroid might not be correct so it needs to be scaled. uint prevValue = centroid[featureIndex].value * prevNumSamples / n; // Now prevValue is correct up to before this update. // Update `squaredMagnitude`. // First, remove the incorrect value that was there. squaredMagnitude = squaredMagnitude.sub(prevValue * prevValue); // Compute the new value using the moving average calculation. uint64 v = uint64((n * prevValue + toFloat) / newN); centroid[featureIndex].value = v; centroid[featureIndex].numSamples = newN; // Add the correct value. squaredMagnitudeUpdate = squaredMagnitudeUpdate.add(uint(v) * v); } if (n != newN) { // Optimize updating squaredMagnitude. // updated squaredMagnitude = Sum_each value { ((value * n + update) / newN) ^2 } // if update = 0 // updated squaredMagnitude = Sum_each value { ((value * n) / newN) ^2 } // = Sum_each value { (n / newN)^2 * value ^2 } // = (n / newN)^2 * Sum_each value { value ^2 } // = (n / newN)^2 * previous squared magnitude // Should be safe since n is actually uint64 and squaredMagnitude should be small. squaredMagnitude = squaredMagnitude.mul(n * n); squaredMagnitude = squaredMagnitude.div(newN * newN); } classInfo.squaredMagnitude = squaredMagnitude.add(squaredMagnitudeUpdate); } // Useful methods to view the underlying data: function getNumSamples(uint classIndex) public view returns (uint64) { return classInfos[classIndex].numSamples; } function getCentroidValue(uint classIndex, uint32 featureIndex) public view returns (uint64) { uint64 valueNumSamples = classInfos[classIndex].centroid[featureIndex].numSamples; uint64 correctNumSamples = classInfos[classIndex].numSamples; if (valueNumSamples == correctNumSamples) { return classInfos[classIndex].centroid[featureIndex].value; } else { return uint64(uint(classInfos[classIndex].centroid[featureIndex].value) * valueNumSamples / correctNumSamples); } } function getSquaredMagnitude(uint classIndex) public view returns (uint) { return classInfos[classIndex].squaredMagnitude; } }
0xDeCA10B/demo/client/src/contracts/classification/SparseNearestCentroidClassifier.sol/0
{ "file_path": "0xDeCA10B/demo/client/src/contracts/classification/SparseNearestCentroidClassifier.sol", "repo_id": "0xDeCA10B", "token_count": 4134 }
2
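The `predict` function above relies on the identity spelled out in its comments: for sparse binary features, the squared Euclidean distance to a centroid equals the centroid's squared magnitude plus `toFloat * (toFloat - 2 * centroid[j])` summed over the features present in the sample. Below is a small numeric check of that algebra in Python with plain floats (i.e. ignoring the contract's fixed-point scaling by `toFloat`); the toy values are made up.

```python
import numpy as np

centroid = np.array([0.2, 0.0, 0.5, 0.1])   # dense view of a centroid
present = [0, 2]                             # sparse sample: features 0 and 2 are set to 1

# Direct squared Euclidean distance between the sample and the centroid.
sample = np.zeros_like(centroid)
sample[present] = 1.0
direct = np.sum((sample - centroid) ** 2)

# Sparse update used by the contract: start from ||centroid||^2 and only touch present features.
sparse = np.sum(centroid ** 2) + sum(1.0 - 2.0 * centroid[j] for j in present)

assert np.isclose(direct, sparse)
print(direct, sparse)  # both 0.90 for these values
```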
body { font-family: sans-serif; }
0xDeCA10B/demo/client/src/index.css/0
{ "file_path": "0xDeCA10B/demo/client/src/index.css", "repo_id": "0xDeCA10B", "token_count": 16 }
3
const fs = require('fs') const CollaborativeTrainer64 = artifacts.require('./CollaborativeTrainer64') const DataHandler64 = artifacts.require('./data/DataHandler64') const Stakeable64 = artifacts.require('./incentive/Stakeable64') const { deployModel } = require('../../src/ml-models/deploy-model-node') const { convertData } = require('../../src/float-utils-node') /** * This test was mainly created to report gas usage. */ contract('CheckGasUsage', function (accounts) { const toFloat = 1E9 async function normalize(classifier, data) { data = convertData(data, web3, toFloat) return classifier.norm(data).then(norm => { return data.map(x => x.mul(web3.utils.toBN(toFloat)).div(norm)) }) } function parseBN(num) { if (web3.utils.isBN(num)) { return num.toNumber() } else { assert.typeOf(num, 'number') return num } } async function initialize(modelPath) { let gasUsed = 0 // Low default times for testing. const refundTimeS = ownerClaimWaitTimeS = anyAddressClaimWaitTimeS = 0 // Weight for deposit cost in wei. const costWeight = 1E9 console.log(" Deploying DataHandler.") const dataHandler = await DataHandler64.new() gasUsed += (await web3.eth.getTransactionReceipt(dataHandler.transactionHash)).gasUsed console.log(` Deployed data handler to ${dataHandler.address}. Total gasUsed: ${gasUsed}.`) const classifierInfo = await deployModel(modelPath, web3, { toFloat }) const classifier = classifierInfo.classifierContract gasUsed += classifierInfo.gasUsed console.log(" Deploying Incentive Mechanism.") const incentiveMechanism = await Stakeable64.new( refundTimeS, ownerClaimWaitTimeS, anyAddressClaimWaitTimeS, costWeight ) gasUsed += (await web3.eth.getTransactionReceipt(incentiveMechanism.transactionHash)).gasUsed console.log(` Deployed incentive mechanism to ${incentiveMechanism.address}. Total gasUsed: ${gasUsed}.`) const mainInterface = await CollaborativeTrainer64.new( "name", "description", "encoder", dataHandler.address, incentiveMechanism.address, classifier.address, ) gasUsed += (await web3.eth.getTransactionReceipt(mainInterface.transactionHash)).gasUsed console.log(` Deployed main interface to ${mainInterface.address}. Total gasUsed: ${gasUsed}.`) return Promise.all([ dataHandler.transferOwnership(mainInterface.address), incentiveMechanism.transferOwnership(mainInterface.address), classifier.transferOwnership(mainInterface.address), ]).then(responses => { for (const r of responses) { gasUsed += r.receipt.gasUsed } console.log(` Transfered ownership to main interface. 
Total gasUsed: ${gasUsed}.`) return { classifier, mainInterface, gasUsed, } }) } it("...should log gasUsed", async () => { const usdPerEth = 373 const gasPrice = 4E-9 const usdPerGas = usdPerEth * gasPrice const models = [ // { // path: `${__dirname}/../../../../simulation/saved_runs/1580856910-fitness-nb-model.json`, // data: [1, 1, 1, 1, 1, 1, 1, 0, 0], // dataset: 'fitness', modelName: "Naive Bayes", // }, // { // path: `${__dirname}/../../../../simulation/saved_runs/1580845144-fitness-ncc-model.json`, // data: [1, 1, 1, 1, 1, 1, 1, 0, 0], // normalize: true, // dataset: 'fitness', modelName: "Dense Nearest Centroid", // }, // { // path: `${__dirname}/../../../../simulation/saved_runs/1580854505-fitness-dense-perceptron-model.json`, // data: [1, 1, 1, 1, 1, 1, 1, 0, 0], // normalize: true, // dataset: 'fitness', modelName: "Dense Perceptron", // }, // { // path: `${__dirname}/../../../../simulation/saved_runs/1580940061-news-nb-model.json`, // data: [1, 2, 3, 14, 25, 36, 57, 88, 299, 310, 411, 512, 613, 714, 815], // dataset: 'Fake News', modelName: "Naive Bayes", // }, // { // path: `${__dirname}/../../../../simulation/saved_runs/1580940189-news-ncc-model.json`, // data: [1, 2, 3, 14, 25, 36, 57, 88, 299, 310, 411, 512, 613, 714, 815], // dataset: 'Fake News', modelName: "Sparse Nearest Centroid", // }, // { // path: `${__dirname}/../../../../simulation/saved_runs/1580940494-news-perceptron-model.json`, // data: [1, 2, 3, 14, 25, 36, 57, 88, 299, 310, 411, 512, 613, 714, 815], // dataset: 'Fake News', modelName: "Sparse Perceptron", // }, // { // path: `${__dirname}/../../../../simulation/saved_runs/1580943847-imdb-nb-model.json`, // data: [1, 2, 3, 14, 15, 26, 37, 48, 59, 110, 111, 112, 213, 314, 515, 616, 717, 818, 919, 920], // dataset: 'IMDB Reviews', modelName: "Naive Bayes", // }, // { // path: `${__dirname}/../../../../simulation/saved_runs/1580945025-imdb-ncc-model.json`, // data: [1, 2, 3, 14, 15, 26, 37, 48, 59, 110, 111, 112, 213, 314, 515, 616, 717, 818, 919, 920], // dataset: 'IMDB Reviews', modelName: "Sparse Nearest Centroid", // }, // { // path: `${__dirname}/../../../../simulation/saved_runs/1580945565-imdb-perceptron-model.json`, // data: [1, 2, 3, 14, 15, 26, 37, 48, 59, 110, 111, 112, 213, 314, 515, 616, 717, 818, 919, 920], // dataset: 'IMDB Reviews', modelName: "Sparse Perceptron", // }, ] const gasUsages = [] const tableData = {} for (const model of models) { if (!fs.existsSync(model.path)) { console.debug(`Skipping model path that does not exist: ${model.path}`) continue } console.log(`Checking gas usage for ${model.path}`) const gasUsage = { model: model.path, } if (model.dataset) { if (tableData[model.dataset] === undefined) { tableData[model.dataset] = {} } tableData[model.dataset][model.modelName] = gasUsage } gasUsages.push(gasUsage) const mainInterfaceInfo = await initialize(model.path) const { classifier, mainInterface } = mainInterfaceInfo gasUsage['deploy'] = mainInterfaceInfo.gasUsed const data = model.normalize ? (await normalize(classifier, model.data)) : model.data // Add with predicted class so that it can be refunded. 
const predictedClassification = parseBN(await classifier.predict(data)) console.log(` predictedClassification: ${predictedClassification}`) let r = await mainInterface.addData(data, predictedClassification, { from: accounts[0], value: 1E17 }) let e = r.logs.filter(e => e.event == 'AddData')[0] let addedTime = e.args.t gasUsage['addData'] = r.receipt.gasUsed console.log(`Adding data gas used: ${r.receipt.gasUsed}`) // Refund r = await mainInterface.refund(data, predictedClassification, addedTime) gasUsage['refund'] = r.receipt.gasUsed console.log(`Refund gas used: ${r.receipt.gasUsed}`) // Report // Someone else adds bad data. console.debug(" Adding currently incorrect data using another account...") r = await mainInterface.addData(data, 1 - predictedClassification, { from: accounts[1], value: 1E17 }) console.log(`Adding data (was incorrect) gas used: ${r.receipt.gasUsed}`) gasUsage['addIncorrectData'] = r.receipt.gasUsed e = r.logs.filter(e => e.event == 'AddData')[0] addedTime = e.args.t r = await mainInterface.report(data, 1 - predictedClassification, addedTime, accounts[1]) gasUsage['report'] = r.receipt.gasUsed console.log(`Report gas used: ${r.receipt.gasUsed}`) console.log(`gasUsage: ${JSON.stringify(gasUsage, null, 4)}`) fs.writeFileSync('gasUsages.json~', JSON.stringify(gasUsages, null, 4)) } // Make tables for LaTeX. for (const [dataset, models] of Object.entries(tableData)) { console.log(`Table for ${dataset}:`) let titleRow = "Action" let deploymentRow = "Deployment" let updateRow = "Update" let refundRow = "Refund" let rewardRow = "Reward" const minimums = { deploy: Math.min(...Object.values(models).map(g => g.deploy)), addIncorrectData: Math.min(...Object.values(models).map(g => g.addIncorrectData)), refund: Math.min(...Object.values(models).map(g => g.refund)), report: Math.min(...Object.values(models).map(g => g.report)), } for (const [modelName, gasCosts] of Object.entries(models)) { titleRow += ` & ${modelName}` if (minimums.deploy === gasCosts.deploy) { deploymentRow += ` & \\textbf{${gasCosts.deploy.toLocaleString()}} (${(gasCosts.deploy * usdPerGas).toFixed(2)} USD)` } else { deploymentRow += ` & ${gasCosts.deploy.toLocaleString()}` } if (minimums.addIncorrectData === gasCosts.addIncorrectData) { updateRow += ` & \\textbf{${gasCosts.addIncorrectData.toLocaleString()}} (${(gasCosts.addIncorrectData * usdPerGas).toFixed(2)} USD)` } else { updateRow += ` & ${gasCosts.addIncorrectData.toLocaleString()}` } if (minimums.refund === gasCosts.refund) { refundRow += ` & \\textbf{${gasCosts.refund.toLocaleString()}} (${(gasCosts.refund * usdPerGas).toFixed(2)} USD)` } else { refundRow += ` & ${gasCosts.refund.toLocaleString()}` } if (minimums.report === gasCosts.report) { rewardRow += ` & \\textbf{${gasCosts.report.toLocaleString()}} (${(gasCosts.report * usdPerGas).toFixed(2)} USD)` } else { rewardRow += ` & ${gasCosts.report.toLocaleString()}` } } titleRow += String.raw` \\` deploymentRow += String.raw` \\` updateRow += String.raw` \\` refundRow += String.raw` \\` rewardRow += String.raw` \\` console.log(`${titleRow}\n${deploymentRow}\n${updateRow}\n${refundRow}\n${rewardRow}\n`) } }) })
0xDeCA10B/demo/client/test/contracts/check-gas-costs.js/0
{ "file_path": "0xDeCA10B/demo/client/test/contracts/check-gas-costs.js", "repo_id": "0xDeCA10B", "token_count": 3820 }
4
const express = require('express') const fs = require('fs') const initSqlJs = require('sql.js') const app = express() const port = process.env.PORT || 5387 const jsonParser = express.json() const dbPath = 'db.sqlite' initSqlJs().then(SQL => { let db if (fs.existsSync(dbPath)) { const fileBuffer = fs.readFileSync(dbPath) console.log(`Loading DB from "${dbPath}".`) db = new SQL.Database(fileBuffer) } else { console.log("Creating a new DB.") db = new SQL.Database() const sqlstr = "CREATE TABLE model (id INTEGER PRIMARY KEY, name TEXT, address TEXT, description TEXT, model_type TEXT, encoder TEXT, accuracy NUMBER);" + "CREATE TABLE data (transaction_hash TEXT PRIMARY KEY, text TEXT);" + "CREATE INDEX index_address ON model(address);" + "CREATE TABLE accuracy (transaction_hash TEXT, block_number INTEGER, model_id INTEGER, accuracy NUMBER, timestamp DATETIME DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (model_id) REFERENCES model (id));" db.run(sqlstr) } // MODEL MANAGEMENT function isBodyValid(body) { return body && body.name && body.address && typeof body.address === 'string' && body.address.startsWith('0x') } function persistModel(model) { db.run('INSERT INTO model VALUES (NULL, ?, ?, ?, ?, ?, ?);', [ model.name, model.address, model.description, model.modelType, model.encoder, model.accuracy, ]) fs.writeFileSync(dbPath, Buffer.from(db.export())) } // Health app.get('/api/health', (req, res) => { res.send({ healthy: true }) }) // Get all models. app.get('/api/models', (req, res) => { const { afterAddress, limit } = req.query const getStmt = db.prepare('SELECT * FROM model WHERE address > $afterAddress LIMIT $limit;', { $afterAddress: afterAddress || '', $limit: limit == null ? 10 : limit, }) const models = [] while (getStmt.step()) { const model = getStmt.get() models.push({ id: model[0], name: model[1], address: model[2], description: model[3], modelType: model[4], encoder: model[5], accuracy: model[6] }) } getStmt.free() let lastAddress = '' if (models.length > 0) { lastAddress = models[models.length - 1].address } const remainingCountStmt = db.prepare('SELECT COUNT(id) FROM model WHERE address > $afterAddress;', { $afterAddress: lastAddress, }) remainingCountStmt.step() const remaining = remainingCountStmt.get()[0] remainingCountStmt.free() res.send({ models, remaining }) }) // Get model with specific ID. app.get('/api/model', (req, res) => { const { modelId, address } = req.query // Prefer searching by modelId since an address can exist on two different blockchains. // There's probably a more pure SQL way to do this but it was unclear how exactly. let model if (modelId != null) { const getModelStmt = db.prepare('SELECT * FROM model WHERE id == $modelId LIMIT 1') model = getModelStmt.getAsObject({ $modelId: modelId }) getModelStmt.free() } else if (address != null) { const getModelStmt = db.prepare('SELECT * FROM model WHERE address == $address LIMIT 1') model = getModelStmt.getAsObject({ $address: address }) getModelStmt.free() } if (model && model.id) { model.modelType = model.model_type delete model.model_type res.send({ model }) } else { return res.status(404).send({ message: "Not found." }) } }) // Insert a new model. app.post('/api/models', jsonParser, (req, res) => { const body = req.body if (!isBodyValid(body)) { return res.status(400).send({ message: "The body is invalid." 
}) } try { persistModel(body) } catch (err) { return res.status(400).send({ message: err.message || err }) } return res.sendStatus(200) }) // DATA MANAGEMENT function persistData(data) { db.run('INSERT INTO data VALUES (?, ?);', [ data.transactionHash, data.originalData.text, ]) // eslint-disable-next-line @typescript-eslint/no-empty-function fs.writeFile(dbPath, Buffer.from(db.export()), () => { }) } // Insert a training sample. app.post('/api/data', jsonParser, (req, res) => { const body = req.body persistData(body) return res.sendStatus(200) }) // Get original training data. app.get('/api/data/:transactionHash', (req, res) => { const getTextStmt = db.prepare('SELECT text FROM data WHERE transaction_hash == $transactionHash LIMIT 1;') const result = getTextStmt.getAsObject({ $transactionHash: req.params.transactionHash }) getTextStmt.free() const { text } = result res.send({ originalData: { text } }) }) app.listen(port, () => console.log(`Listening on port ${port}`)) // ACCURACY RECORD MANAGEMENT function presistAccuracyRecord(accuracy) { db.run('INSERT INTO accuracy VALUES (?, ?, ?, ?, CURRENT_TIMESTAMP);', [ accuracy.transactionHash, accuracy.blockNumber, accuracy.modelId, accuracy.accuracy, ]) // eslint-disable-next-line @typescript-eslint/no-empty-function fs.writeFile(dbPath, Buffer.from(db.export()), () => {}) } // Add a new accuracy record for a model app.post('/api/accuracy', jsonParser, (req, res) => { const body = req.body presistAccuracyRecord(body) return res.sendStatus(200) }) // Get the accuracy history app.get('/api/accuracy/model', (req, res) => { const { modelId } = req.query if (modelId != null) { const getStmt = db.prepare('SELECT * FROM accuracy where model_id == $modelId ORDER BY timestamp;', { $modelId: modelId }) const accuracyHistory = [] while (getStmt.step()) { const accuracy = getStmt.get() accuracyHistory.push({ transactionHash: accuracy[0], blockNumber: accuracy[1], modelId: accuracy[2], accuracy: accuracy[3], timestamp: accuracy[4], }) } getStmt.free() if (accuracyHistory.length) { res.send({ accuracyHistory }) } else { res.status(404).send({ message: "No results found: Please try a different modelId." }) } } else { return res.status(400).send({ message: "`modelId` was not given in the request." }) } }) })
0xDeCA10B/demo/server.js/0
{ "file_path": "0xDeCA10B/demo/server.js", "repo_id": "0xDeCA10B", "token_count": 2304 }
5
from collections import Counter from logging import Logger import math from injector import inject, Module, singleton from decai.simulation.contract.balances import Balances from decai.simulation.contract.data.data_handler import StoredData from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism from decai.simulation.contract.objects import Address, RejectException, TimeMock @singleton class Stakeable(IncentiveMechanism): """ The Deposit, Take, Reward IM. A deposit is required to add data. Later that deposit can be reclaimed if the model still agrees with the contribution. """ @inject def __init__(self, # Injected balances: Balances, logger: Logger, time_method: TimeMock, # Parameters refund_time_s=60 * 60 * 24 * 1, any_address_claim_wait_time_s=60 * 60 * 24 * 9, cost_weight=1, ): super().__init__(refund_time_s=refund_time_s, any_address_claim_wait_time_s=any_address_claim_wait_time_s) self._balances = balances self._logger = logger self._time = time_method # Make sure there is at least a week for the refund. min_refund_window_s = 60 * 60 * 24 * 7 assert self.any_address_claim_wait_time_s > self.refund_time_s + min_refund_window_s, "Claim time is not enough." self.cost_weight = cost_weight self.num_good_data_per_user = Counter() self.total_num_good_data = 0 self._last_update_time_s = int(self._time()) def distribute_payment_for_prediction(self, sender, value): if value > 0: for agent_address, num_good in self.num_good_data_per_user.items(): # Round down like Solidity would. # Also helps avoid errors for possible rounding so # total value distributed < value. self._balances.send(sender, agent_address, int(value * num_good / self.total_num_good_data)) def get_next_add_data_cost(self, data, classification) -> float: """ :param data: A single sample of training data for the model. :param classification: The label for `data`. :return: The current cost to update a model with a specific sample of training data. """ current_time_s = int(self._time()) # TODO Limit how many times a data point can be added if the model already classifies right for it? # TODO Add cost to flip all data? # TODO Add discount if already submitted good data? # Convert to integers like in Solidity. time_since_last_update_s = int((current_time_s - self._last_update_time_s)) if time_since_last_update_s <= 0: raise RejectException("Not enough time has passed since the last update.") # We really want to think about the time in hours # (divide by 3600 but this is in the square root of the denominator so we multiply by sqrt(3600)). # Equivalent to: cost = self.cost_weight / int(math.sqrt(time_since_last_update_s * 3600)) result = self.cost_weight * 60 / int(math.sqrt(time_since_last_update_s)) result = int(result) # Make sure there is a minimum cost to adding data. if result < 1: result = 1 return result def handle_add_data(self, contributor_address: Address, msg_value: float, data, classification) -> (float, bool): cost = self.get_next_add_data_cost(data, classification) update_model = True if cost > msg_value: raise RejectException(f"Did not pay enough. Sent {msg_value} < {cost}") self._last_update_time_s = self._time() return (cost, update_model) def handle_refund(self, submitter: str, stored_data: StoredData, claimable_amount: float, claimed_by_submitter: bool, prediction) -> float: result = claimable_amount # Do not need to check submitter == stored_data.sender because DataHandler already did it. 
if claimed_by_submitter: raise RejectException("Deposit already claimed by submitter.") if result <= 0: raise RejectException("There is no reward left to claim.") current_time_s = int(self._time()) if current_time_s - stored_data.time <= self.refund_time_s: raise RejectException("Not enough time has passed.") if callable(prediction): prediction = prediction() if prediction != stored_data.classification: raise RejectException("The model doesn't agree with your contribution.") self.num_good_data_per_user[submitter] += 1 self.total_num_good_data += 1 return result def handle_report(self, reporter: str, stored_data: StoredData, claimed_by_reporter: bool, prediction) -> float: if stored_data.claimable_amount <= 0: raise RejectException("There is no reward left to claim.") current_time_s = int(self._time()) if current_time_s - stored_data.time >= self.any_address_claim_wait_time_s: # Enough time has passed, give the entire remaining deposit to the reporter. self._logger.debug("Giving all remaining deposit to \"%s\".", reporter) result = stored_data.claimable_amount return result # Don't allow someone to claim back their own deposit if their data was wrong. # They can still claim it from another address but they will have had to have sent good data from that address. if reporter == stored_data.sender: raise RejectException("Cannot take your own deposit. Ask for a refund instead.") if claimed_by_reporter: raise RejectException("Deposit already claimed by reporter.") if current_time_s - stored_data.time <= self.refund_time_s: raise RejectException("Not enough time has passed.") if callable(prediction): prediction = prediction() if prediction == stored_data.classification: raise RejectException("The model should not agree with the contribution.") num_good = self.num_good_data_per_user[reporter] if num_good <= 0: raise RejectException(f"No good data was verified by reporter '{reporter}'.") result = stored_data.initial_deposit * num_good / self.total_num_good_data # Handle possible rounding errors or if there is too little to divide to reporters. if result <= 0 or result > stored_data.claimable_amount: result = stored_data.claimable_amount return result class StakeableImModule(Module): def configure(self, binder): binder.bind(IncentiveMechanism, to=Stakeable)
0xDeCA10B/simulation/decai/simulation/contract/incentive/stakeable.py/0
{ "file_path": "0xDeCA10B/simulation/decai/simulation/contract/incentive/stakeable.py", "repo_id": "0xDeCA10B", "token_count": 2685 }
6
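The deposit required by `get_next_add_data_cost` above decays with the square root of the time since the last model update, using Solidity-style integer arithmetic and a minimum cost of 1. The snippet below reimplements just that formula (rather than instantiating the injected class) to show the curve for a few elapsed times; the numbers assume `cost_weight = 1`.

```python
import math


def next_add_data_cost(cost_weight: int, seconds_since_last_update: int) -> int:
    # Mirrors the arithmetic in Stakeable.get_next_add_data_cost (integer-style, like Solidity).
    cost = int(cost_weight * 60 / int(math.sqrt(seconds_since_last_update)))
    return max(cost, 1)


for dt in (60, 3600, 24 * 3600):
    print(dt, next_add_data_cost(cost_weight=1, seconds_since_last_update=dt))
# 60 s   -> 60 // 7   = 8
# 1 h    -> 60 // 60  = 1
# 1 day  -> 60 // 293 = 0, clamped to the minimum of 1
```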
import ast import logging import os import re import time from collections import Counter from dataclasses import dataclass, field from logging import Logger from pathlib import Path from typing import List, Set, Tuple import numpy as np from injector import ClassAssistedBuilder, inject, Module, provider, singleton from sklearn.utils import shuffle from tqdm import tqdm from .data_loader import DataLoader @inject @dataclass class FitnessDataLoader(DataLoader): """ Load sport activity data from Endomondo. Requires endomondoHR_proper.json from https://sites.google.com/eng.ucsd.edu/fitrec-project/home to be stored at simulation/training_data/fitness/endomondoHR_proper.json. From the first 5K samples, the 2842 'bike' and 2158 'run' occurrences. Some info from the fire 10K samples: genders: 'male', 'unknown', 'female' sports: 'bike', 'bike (transport)', 'run', 'kayaking', 'indoor cycling', 'mountain bike', 'orienteering', 'core stability training', 'walk', 'cross-country skiing', 'fitness walking', 'roller skiing' """ _logger: Logger _seed: int = field(default=2, init=False) _train_split: float = field(default=0.7, init=False) _classes: Set[str] = field(default_factory=lambda: {'bike', 'run'}, init=False) def classifications(self) -> List[str]: return ["BIKING", "RUNNING"] def load_data(self, train_size: int = None, test_size: int = None) -> (Tuple, Tuple): self._logger.info("Loading Endomondo fitness data.") # Look for cached data. file_identifier = f'fitness-data-{train_size}-{test_size}.npy' base_path = Path(os.path.dirname(__file__)) / 'cached_data' os.makedirs(base_path, exist_ok=True) cache_paths = { 'x_train': base_path / f'x_train-{file_identifier}', 'y_train': base_path / f'y_train-{file_identifier}', 'x_test': base_path / f'x_test-{file_identifier}', 'y_test': base_path / f'y_test-{file_identifier}' } # Use if modified in the last day. if all([p.exists() for p in cache_paths.values()]) and \ all([time.time() - p.stat().st_mtime < 60 * 60 * 24 for p in cache_paths.values()]): self._logger.info("Loaded cached Endomondo fitness data from %s.", cache_paths) return (np.load(cache_paths['x_train']), np.load(cache_paths['y_train'])), \ (np.load(cache_paths['x_test']), np.load(cache_paths['y_test'])) data = [] labels = [] data_folder_path = Path(__file__, '../../../../training_data/fitness').resolve() user_id_to_set = {} sport_to_label = { 'bike': 0, 'run': 1 } gender_to_index = {} if train_size is not None and test_size is not None: max_num_samples = train_size + test_size else: max_num_samples = 10_000 classes = '|'.join(self._classes) classes_pattern = re.compile(f' \'sport\': \'({classes})\', ') data_path = data_folder_path / 'endomondoHR_proper.json' assert data_path.exists(), f"See the documentation for how to download the dataset. It must be stored at {data_path}" with open(data_path) as f, \ tqdm(f, desc="Loading data", unit_scale=True, mininterval=2, unit=" samples", total=max_num_samples, ) as pbar: for line in f: # TODO Keep users in train set mutually exclusive from users in test set. # Check line before more expensive parsing. 
if not classes_pattern.search(line): continue record = ast.literal_eval(line) sport = record['sport'] if sport not in self._classes: continue if 'speed' not in record: continue label = sport_to_label[sport] labels.append(label) heart_rates = record['heart_rate'] gender = gender_to_index.setdefault(record['gender'], len(gender_to_index)) speeds = record['speed'] # Other fields: # record['longitude'] # record['altitude'] # record['latitude'] # record['id'] # record['timestamp'] # record['userId'] data.append({ # Values to keep as they are: 'rawValues': [ np.mean(heart_rates) / np.min(heart_rates), np.median(heart_rates) / np.min(heart_rates), np.max(speeds), np.min(speeds), np.mean(speeds), np.median(speeds), ], # Values that need to be converted: 'gender': gender, }) pbar.update() if len(data) >= max_num_samples: break if train_size is None: if test_size is None: train_size = int(self._train_split * len(data)) else: train_size = len(data) - test_size if test_size is None: test_size = len(data) - train_size # Thresholds for making sure features can be discretized for Naive Bayes. # Just use training data to make thresholds. thresholds = np.empty(len(data[0]['rawValues']), dtype=np.int32) for i in range(len(data[0]['rawValues'])): thresholds[i] = np.median([d['rawValues'][i] for d in data[:train_size]]) def _featurize(datum): raw_values = np.array(thresholds < datum['rawValues'], dtype=np.int8) gender_one_hot = np.zeros(len(gender_to_index), dtype=np.int8) gender_one_hot[datum['gender']] = 1 return np.concatenate([raw_values, gender_one_hot]) if self._logger.isEnabledFor(logging.DEBUG): self._logger.debug("Labels: %s", Counter(labels)) data, labels = shuffle(data, labels, random_state=self._seed) x_train = np.array([_featurize(d) for d in data[:train_size]]) y_train = np.array(labels[:train_size]) x_test = np.array([_featurize(d) for d in data[-test_size:]]) y_test = np.array(labels[-test_size:]) np.save(cache_paths['x_train'], x_train, allow_pickle=False) np.save(cache_paths['y_train'], y_train, allow_pickle=False) np.save(cache_paths['x_test'], x_test, allow_pickle=False) np.save(cache_paths['y_test'], y_test, allow_pickle=False) self._logger.info("Done loading Endomondo fitness data.") return (x_train, y_train), (x_test, y_test) @dataclass class FitnessDataModule(Module): @provider @singleton def provide_data_loader(self, builder: ClassAssistedBuilder[FitnessDataLoader]) -> DataLoader: return builder.build()
0xDeCA10B/simulation/decai/simulation/data/fitness_data_loader.py/0
{ "file_path": "0xDeCA10B/simulation/decai/simulation/data/fitness_data_loader.py", "repo_id": "0xDeCA10B", "token_count": 3430 }
7
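The `_featurize` closure above turns each activity into binary features: the raw heart-rate/speed statistics are compared against per-feature medians computed on the training split, and the gender index is one-hot encoded. A stripped-down sketch of that step with illustrative, made-up numbers:

```python
import numpy as np

# Medians of each raw statistic over the training split (illustrative values only).
thresholds = np.array([1.10, 1.05, 30.0, 2.0, 15.0, 14.0])
num_genders = 3


def featurize(raw_values, gender_index):
    binary = np.array(thresholds < raw_values, dtype=np.int8)
    gender_one_hot = np.zeros(num_genders, dtype=np.int8)
    gender_one_hot[gender_index] = 1
    return np.concatenate([binary, gender_one_hot])


sample = featurize(np.array([1.2, 1.0, 35.0, 1.5, 18.0, 17.0]), gender_index=0)
print(sample)  # [1 0 1 0 1 1 1 0 0]
```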
import os import sys from typing import Optional from injector import Injector from decai.simulation.contract.classification.perceptron import PerceptronModule from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule from decai.simulation.contract.incentive.stakeable import StakeableImModule from decai.simulation.data.imdb_data_loader import ImdbDataModule from decai.simulation.logging_module import LoggingModule from decai.simulation.simulate import Agent, Simulator # For `bokeh serve`. sys.path.append(os.path.join(os.path.dirname(__file__), '../..')) num_words = 1000 train_size: Optional[int] = None if train_size is None: init_train_data_portion = 0.08 else: init_train_data_portion = 100 / train_size def main(): # Set up the agents that will act in the simulation. agents = [ # Good Agent(address="Good", start_balance=10_000, mean_deposit=50, stdev_deposit=10, mean_update_wait_s=10 * 60, prob_mistake=0.0001, ), # Malicious: A determined agent with the goal of disrupting others. Agent(address="Bad", start_balance=10_000, mean_deposit=100, stdev_deposit=3, mean_update_wait_s=1 * 60 * 60, good=False, ), # One that just calls the model and pays to use the model. Agent(address="Caller", start_balance=30_000, mean_deposit=0, stdev_deposit=0, mean_update_wait_s=2 * 60 * 60, calls_model=True, pay_to_call=50 ), ] # No caller (assume free to call). agents = agents[:-1] # Set up the data, model, and incentive mechanism. inj = Injector([ DefaultCollaborativeTrainerModule, ImdbDataModule(num_words=num_words), LoggingModule, PerceptronModule, StakeableImModule, ]) s = inj.get(Simulator) # Accuracy on hidden test set after training with all training data: baseline_accuracies = { 100: 0.6210, 200: 0.6173, 1000: 0.7945, 10000: 0.84692, 20000: 0.8484, } # Start the simulation. s.simulate(agents, baseline_accuracy=baseline_accuracies[num_words], init_train_data_portion=init_train_data_portion, train_size=train_size, ) # Run with `bokeh serve PATH`. if __name__.startswith('bk_script_'): main()
0xDeCA10B/simulation/decai/simulation/simulate_imdb_perceptron.py/0
{ "file_path": "0xDeCA10B/simulation/decai/simulation/simulate_imdb_perceptron.py", "repo_id": "0xDeCA10B", "token_count": 1182 }
8
#!/bin/bash set -e if [[ "$1" = "serve" ]]; then shift 1 torchserve --start --ts-config /home/model-server/config.properties else eval "$@" fi # prevent docker exit tail -f /dev/null
AI-System/Labs/BasicLabs/Lab5/dockerd-entrypoint.sh/0
{ "file_path": "AI-System/Labs/BasicLabs/Lab5/dockerd-entrypoint.sh", "repo_id": "AI-System", "token_count": 79 }
9
<!--Copyright © Microsoft Corporation. All rights reserved. Licensed under the [License](https://github.com/microsoft/AI-System/blob/main/LICENSE)-->

# Reinforcement Learning Systems: Preface

# Introduction

Reinforcement learning has advanced steadily in recent years, and its applications keep emerging in both academia and industry, leaving a deep impression. For example, in 2016 AlphaGo [1] practiced by playing tens of thousands of games against itself and defeated the top professional player Lee Sedol 4:1 in a five-game match. The real progress of deep reinforcement learning is owed to neural networks, deep learning, and the growth in compute. Looking at papers from top conferences in recent years, the theory of reinforcement learning keeps advancing and its application domains have expanded explosively.

However, unlike deep learning, reinforcement learning frameworks remain in a state where many competing designs coexist. This is because reinforcement learning still faces many systems-level challenges. In this chapter we describe how reinforcement learning differs from (supervised) machine learning, and the requirements and challenges this creates for frameworks and systems.

This chapter is organized around the following topics:

1) Basic concepts of reinforcement learning (including how it differs from traditional machine learning and automated machine learning).

2) Current distributed reinforcement learning algorithms.

3) The requirements and challenges reinforcement learning poses for systems.

4) Current distributed reinforcement learning frameworks and applications.

# Contents Overview

This chapter contains the following sections:

- [Basic concepts of reinforcement learning](10.1-强化学习的基本概念.md)
- [Distributed reinforcement learning algorithms](10.2.1-分布式强化学习算法.md)
- [Requirements and challenges reinforcement learning poses for systems](10.2.2-分布式强化学习对框架的需求和挑战.md)
- [Distributed reinforcement learning frameworks and applications](10.2.3-分布式强化学习框架和应用.md)

## References

- [1] Silver D, Huang A, Maddison C J, et al. Mastering the game of Go with deep neural networks and tree search[J]. Nature, 2016, 529(7587): 484-489.
AI-System/Textbook/第10章-强化学习系统/10-强化学习系统-前言.md/0
{ "file_path": "AI-System/Textbook/第10章-强化学习系统/10-强化学习系统-前言.md", "repo_id": "AI-System", "token_count": 1219 }
10
<!--Copyright © Microsoft Corporation. All rights reserved. Licensed under the [License](https://github.com/microsoft/AI-System/blob/main/LICENSE)-->

# Model Compression and Acceleration

# Introduction

In deep learning, model compression usually means representing an existing model with fewer bits of data through specific techniques, analogous to data compression or video coding in classic computer science. When high-accuracy academic models are deployed to industrial applications, they often face low throughput, high latency, and high power consumption. Model compression removes redundancy from a model and thereby reduces its storage and compute requirements, so that inference or training can be accelerated. With appropriate compression the original accuracy can usually be preserved, although the achievable compression ratio differs across scenarios and tasks.

The widely used compression methods in recent years mainly include **data quantization**, **model sparsification**, **knowledge distillation**, **lightweight network design**, and **tensor decomposition**. Section 11.1 briefly introduces these techniques. Among them, model sparsification is the most widely applied; it directly reduces the number of parameters in a model. Section 11.2 describes sparsification-based model compression in detail. A compressed model is not necessarily a good fit for the original general-purpose processor and often needs support from dedicated acceleration libraries or hardware. Section 11.3 introduces the hardware acceleration options suited to different compression algorithms.

# Contents

This chapter contains the following sections:

- [11.1 Introduction to model compression](11.1-模型压缩简介.md)
- [11.2 Sparsification-based model compression](11.2-基于稀疏化的模型压缩.md)
- [11.3 Model compression and hardware acceleration](11.3-模型压缩与硬件加速.md)
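To make the sparsification idea concrete, the following minimal sketch zeroes out small-magnitude weights of a single layer; it is an illustrative example only, and the layer size and pruning ratio are arbitrary choices rather than values from this chapter.

```python
import torch
import torch.nn as nn

# A toy linear layer; in practice this would be a layer of a trained model.
layer = nn.Linear(256, 256)

# Magnitude-based pruning: zero out the smallest 90% of weights.
threshold = layer.weight.abs().flatten().quantile(0.9)
mask = (layer.weight.abs() >= threshold).float()
with torch.no_grad():
    layer.weight.mul_(mask)

print(f"sparsity: {100.0 * (layer.weight == 0).float().mean():.1f}%")
```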
AI-System/Textbook/第11章-模型压缩与加速/11-前言.md/0
{ "file_path": "AI-System/Textbook/第11章-模型压缩与加速/11-前言.md", "repo_id": "AI-System", "token_count": 1268 }
11
<!--Copyright © Microsoft Corporation. All rights reserved. 适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可--> # 1.4 深度学习样例背后的系统问题 算法工程师通过 Python 和深度学习框架书写人工智能程序,而人工智能程序底层的系统问题被当前高层的抽象隐藏,到底在每个代码片段底层执行发生了什么?有哪些有意思的系统设计问题?我们将从一个实例启发读者,并和后面各个章节构建起桥梁与联系。 - [1.4 深度学习样例背后的系统问题](#14-深度学习样例背后的系统问题) - [1.4.1 一个深度学习样例与其中的系统问题](#141-一个深度学习样例与其中的系统问题) - [1.4.2 模型算子实现中的系统问题](#142-模型算子实现中的系统问题) - [1.4.3 框架执行深度学习模型的生命周期](#143-框架执行深度学习模型的生命周期) - [1.4.4 更广泛的人工智能系统生态](#144-更广泛的人工智能系统生态) - [1.4.5 深度学习框架及工具入门实验](#145-深度学习框架及工具入门实验) - [1.4.5.1 实验目的](#1451-实验目的) - [1.4.5.2 实验环境](#1452-实验环境) - [1.4.5.3 实验原理](#1453-实验原理) - [1.4.5.4 实验内容](#1454--实验内容) - [1.4.5.5 实验计划](#1455--实验计划) - [小结与讨论](#小结与讨论) - [参考文献](#参考文献) ## 1.4.1 一个深度学习样例与其中的系统问题 如下图 1.5.1 所示,我们可以看到一个深度学习模型可以接受输入(例如,图中实例为手写数字图片),产生输出(例如图中实例为数字分类),这个过程叫前向传播(Forward Propagation)。那么如何得到一个针对当前已有的输入输出数据上,预测效果最好的模型呢?一般需要通过训练的过程,而训练过程本身可以抽象为一个优化问题的求解过程,优化目标一般被称作损失函数: $$\theta = argmin_{\theta}\sum[Loss(f_{\theta}(x), y)]$$ 其中的函数 $f_{\theta}$ 代表深度学习模型(例如,后面提到的换机神经网络模型 LeNet),$Loss$ 代表损失函数(例如,让预测值和目标值之前整体误差最小),$x$ 代表数据中的输入也就是图像,$y$ 代表数据中的标签值(本例中为数字类别),也就是输出。训练的过程就是找到最小化 $Loss$ 所对应的 $\theta$ 取值,$\theta$ 也称作深度学习模型的权重(Weight)。在训练过程中一般通过梯度下降等算法进行求解: $$\theta = \theta - \alpha \delta_{\theta}Loss(\theta)$$ 其中 $\alpha$ 也叫学习率(Learning Rate)。 当模型训练完成,准确度或者误差在指定测试数据集上满足用户需求,就可以通过模型 $\hat{y} = f_\theta(x)$ 进行推理预测。 <center><img src="./img/4/4-1-2.png" /></center> <center>图 1.5.1 深度学习训练过程</center> 如下图 1.5.2 所示,左上角的图示中展示的是输入为手写数字图像,输出为分类向量,中间的矩形为各层输出的特征图(Feature Map),我们将其映射为具体的实现代码,其结构通过图右侧对应出来。我们可以看到深度学习模型就是通过各个层将输入图像通过多个层的算子处理为类别输出概率向量。用户一般经过两个阶段进行构建: (1)定义网络结构,例如图中和下面代码实例中构建的 LeNet 网络,其中包含有二维卷积(Conv2D),最大池化(MaxPool2D),全连接(Linear)层。 (2)开始训练,训练算法是一个迭代的过程,每次迭代遍历一个批尺寸(Batch Size)数据,将数据移动到计算设备,前向传播计算,计算损失(Loss)。 <center><img src="./img/4/4-1-1.png" /></center> <center>图 1.5.2 PyTorch训练LeNet实例</center> 下面的实例是 PyTorch 在 MNIST 数据集上训练一个卷积神经网络 [LeNet](http://yann.lecun.com/exdb/lenet/)[<sup>[1]</sup>](#lenet) 的代码实例。具体解释请读者参考代码中的注释,代码入口为 "def main():" 函数。 ``` ... # 读者可以参考"第 3 章深度学习框架基础"理解深度学习框架的底层原理和设计 import torch ... # 如果模型层数多,权重多到无法在单 GPU 显存放置,我们需要通过模型并行方式进行训练,读者可以参考"第 6 章分布式训练算法与系统"进行了解 class LeNet(nn.Module): def __init__(self): super(LeNet, self).__init__() # 请参考 1.4.2 小节,通过循环实现卷积理解卷积的执行逻辑并思考其中的潜在系统问题 self.conv1 = nn.Conv2d(3, 6, 5) # 我们能否调整超参数 6 为 64?如何高效的搜索最有的配置?这些内容我们将在"第 9 章自动化机器学习系统"展开介绍 self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16*5*5, 120) self.fc2 = nn.Linear(120, 84) self.fc2 = nn.Linear(84, 10) def forward(self, x): out = F.relu(self.conv1(x)) out = F.max_pool2d(out, 2) out = F.relu(self.conv2(out)) out = F.max_pool2d(out, 2) out = out.view(out.size(0), -1) out = F.relu(self.fc1(out)) out = F.relu(self.fc2(out)) out = self.fc3(out) return out def train(args, model, device, train_loader, optimizer, epoch): # 框架是如何进行模型训练?我们将在"第 3 章深度学习框架基础"进行介绍 model.train() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() ... def test(model, device, test_loader): model.eval() ... with torch.no_grad(): for data, target in test_loader: data, target = data.to(device), target.to(device) # 推理系统如何设计以及如何进行模型推理?我们将在"第 8 章深度学习推理系统"进行介绍 output = model(data) ... def main(): ... 
# 当前语句决定了使用哪种加速器,读者可以通过"第4章矩阵运算与计算机体系结构"了解不同加速器的体系结构及底层原理。 device = torch.device("cuda" if use_cuda else "cpu") # 如果batch size过大,造成单 GPU 内存无法容纳模型及中间激活的张量,读者可以参考"第 6 章分布式训练算法与系统"进行了解如何通过分布式训练打破单卡资源限制 train_kwargs = {'batch_size': args.batch_size} test_kwargs = {'batch_size': args.test_batch_size} ... """ 如何高效的进行数据读取?这些内容我们将在"第7章异构计算集群调度与资源管理系统"进行介绍。 如果我们训练的数据集和模型是为了解决'预测系统优化配置'问题,我们想训练的模型是优化系统配置,那么读者可以参考"第 13 章人工智能优化计算机系统",思考如何将人工智能应用到系统优化,也就是 AI for System。 如果我们的数据集没有提前准备好,需要实时和环境交互获取,那么读者可以参考"第 10 章强化学习系统"进行理解。 """ dataset1 = datasets.MNIST('../data', train=True, download=True, transform=transform) dataset2 = datasets.MNIST('../data', train=False, transform=transform) train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs) test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs) model = LeNet().to(device) optimizer = optim.Adadelta(model.parameters(), lr=args.lr) ... for epoch in range(1, args.epochs + 1): train(args, model, device, train_loader, optimizer, epoch) # 模型如果训练完成需要部署,我们如何压缩和量化后再部署?读者可以参考"第 11 章模型压缩与加速"进行了解 test(model, device, test_loader) ... # 如果用户提交多个这样的训练作业,系统如何调度和管理资源?读者可以参考"第 7 章异构计算集群调度与资源管理系统"进行了解 if __name__ == '__main__': main() ``` ## 1.4.2 模型算子实现中的系统问题 我们在深度学习中所描述的层(Layer),一般在深度学习编译器中也称作操作符(Operator)或算子。底层算子的具体实现时先将其映射或转换为对应的矩阵运算(例如,通用矩阵乘 GEMM),再由其对应的矩阵运算翻译为对应的循环程序(当前实例中,为方便理解,我们简化问题,后面的实例中忽略 stride 等其他超参数对循环的影响)。 图 1.5.3 的卷积层实例中,每次选取输入数据一层的一个窗口的矩阵(例如,卷积核一样的宽高尺寸),然后和对应的卷积核(例如 Filter-1 中的 $5 \times 5$ 卷积核代表高 5 维宽 5 维的矩阵)进行[矩阵内积(Dot Product)](https://en.wikipedia.org/wiki/Dot_product)运算,最后将所有的计算结果与偏置项 $b$ 相加后输出。 然后依次沿着行(Row)进行滑动,移动一定的步长,再进行下次矩阵内积计算,直到边界后再沿着一定步长跳到下一列重复刚才的滑动窗口运算。这些结果最终组合成输出矩阵也被称作特征图(Feature Map)。 例如,图中所示的维度为输入张量形状(Tensor Shape)为 $3 \times 32 \times 32$(3 代表通道,32 代表张量高度,32 代表张量宽度),经过 $2 \times 3 \times 5 \times 5$ 的卷积(2 代表输出通道数,3 代表输入通道数,5 代表卷积核高度,5 代表卷积核宽度)后,输出张量形状(Tensor Shape)为$2 \times 28 \times 28$(2 代表通道,28 代表高,28 代表宽)。 <center><img src="./img/4/conv3d.png" /></center> <center>图 1.5.3 Conv2D 计算过程实例 (<a href="https://github.com/microsoft/ai-edu">图片引用 ai-edu</a>)</center> 图中所示的卷积的计算可以表达为多层嵌套循环,我们以下面伪代码为例进行分析。 ``` # Conv2D 将被转换为如下的7层循环进行计算: # 批尺寸维度 batch_size for n in range(batch_size): # 输出张量通道维度 output_channel for oc in range(output_channel): # 输入张量通道维度 input_channel for ic in range(input_channel): # 输出张量高度维度 out_height for h in range(out_height): # 输出张量宽度维度 out_width for w in range(out_width): # 卷积核高度维度 filter_height for fh in range(filter_height): # 卷积核宽度维度 filter_width for fw in range(filter_width): # 乘加(Multiply Add)运算 output[h, w, oc] += input[h + fw, w + fh, ic] * kernel[fw, fh, c, oc] # 备注:为简化阐述计算过程,我们简化没有呈现维度(Dimension)的形状推导(Shape Inference)计算逻辑。 ``` 在这其中有很多有趣的问题问题读者可以思考与分析: - 算法变换:从算法来说,当前7层循环可以转换为更加易于优化和高效的矩阵计算(例如,[cuDNN](https://docs.nvidia.com/deeplearning/cudnn/)[<sup>[2]</sup>](#cudnn) 库中的卷积就提供了[多种实现卷积的算法](https://docs.nvidia.com/deeplearning/cudnn/api/index.html#cudnnConvolutionForward))方式。这些算法被封装在库中,有些框架会在运行时启发式搜索选择不同算法策略。 深度学习加速库(例如 cuDNN 等)通常通过应用 [im2col](https://hal.inria.fr/inria-00112631/document) 函数将卷积转换为通用矩阵乘法(General Matrix Multiplication)缩写 [GEMM](https://en.wikipedia.org/wiki/GEMM),如图 1.4.4 所示。cuDNN 也支持利用其他算法实现卷积,例如 FFT, WINOGRAD 等。通用矩阵乘是计算机视觉和自然语言处理模型中的主要的算子实现算法(例如,卷积,全连接,平均池化,注意力等算子均可以转换为 GEMM),同时底层 GPU 和其他专有人工智能芯片 ASIC也针对矩阵乘的计算特点提供底层硬件计算单元的支持(例如 NVIDIA GPU 张量核([Tensor Core](https://www.nvidia.com/en-us/data-center/tensor-cores/)),Google TPU 脉动阵列的矩阵乘单元(Matrix Multiply Unit)等),这样的转换就可以让大多数常见算子能利用专有底层硬件和软件的优化。 <center> <img src="./img/4/1-4-4-imtocol.png" /></center> 图 1.4.4 卷积通过 im2col 
转换为通用矩阵乘(图中我们使用一个卷积核和一个输入图片通道为例简要说明)(<a href="https://arxiv.org/pdf/2005.13076.pdf">图片引用 PHAST[<sup>[3]</sup>](#im2col)</a>) - 局部性:循环执行的主要计算语句是否有局部性可以利用?空间局部性(缓存线内相邻的空间是否会被连续访问)以及时间局部性(同一块内存多久后还会被继续访问),这样我们可以通过预估后,尽可能的通过编译调度循环执行,这些内容将在第 5 章着重介绍。在 1.5 中我们也将打印矩阵乘的局部性,读者可以参考理解。 - 利用片上内存:利用局部性的同时,程序可以减少下一级存储的读写,但是其中参与计算的输入,权重和输出张量能否完全放入 GPU 缓存(L1,L2)或片上共享内存(Shared Memory)?如果不能放入则需要通过循环块(Tile)编译优化进行切片,这些内容将在第 5 章着重介绍。 - 近似计算:如果有些权重为 0 是否可以不进行计算和存储?读者可以参考第 11 章稀疏性(Sparsity)部分进行了解。 - 内存管理与扩展(Scale Out):读者可以[预估](https://www.microsoft.com/en-us/research/uploads/prod/2020/09/dnnmem.pdf)各个层的输出(Output)张量,输入(Input)张量,和内核(Kernel)张量大小,进而评估是否需要多卡,虚拟内存管理策略设计,以及变量存活分析结合模型特点动态释放内存等。读者可以参考第 5,8 章相关内存优化与管理内容。 - 运行时任务调度:那么当算子与算子在运行时按一定调度次序执行,框架如何进行运行时资源管理与任务调度。 ## 1.4.3 框架执行深度学习模型的生命周期 “Inside every large program is a small program struggling to get out.”--Tony Hoare。 我们从下面实例会看到,目前算法工程师只需要书写核心算法,也就是Small Program - 核心算法与高层设计,而不需要关注底层的细节代码(底层 large program)或指令,通过层层抽象,提升了开发效率,但是对系统研发却隐藏了细节,造成我们遇到性能问题难以理解,需要我们进一步探究。 在之前的实例中,我们基本知晓 Python 如何书写深度学习训练程序,以及深度学习框架代码中的一个算子(例如,卷积)是如何翻译成底层 for 循环进行计算的。但是这类 for 循环计算通常可以被设备厂商提供的运行时算子库抽象,不需要用户继续书写 for 循环了,例如, cuDNN 提供卷积的实现和 API。 如图 1.4.5 所示,假设我们已经抽象到了 cuDNN 这层书写深度学习程序,相比直接写算子实现,似乎我们已经提升了很多开发效率: - 我们为什么还需要深度学习框架(例如,TensorFlow,PyTorch)? - 那么框架作为至关重要的深度学习系统究竟在其中扮演什么角色和做了其他什么工作呢? - 用户的 Python 代码是如何一步步翻译到底层的具体实现呢? 我们以一个实例为例介绍: <center> <img src="./img/4/4-1-4-frompythontolower.png"/></center> <center>图 1.4.5 深度学习程序的层次化调用关系</center> 首先,我们先对比一下,如果没有深度学习框架,而只将算子 for 循环抽象提供算子库(例如,cuDNN)的调用,读者将只能通过设备提供的底层 API 编写作业。例如,通过 CUDA + cuDNN 库书写卷积神经网络([cuDNN书写的卷积神经网络LeNet实例](https://github.com/tbennun/cudnn-training))[<sup>[4]</sup>](#cudnnlenet)。 我们通过实现 LeNet 实例,对比说明 cuDNN + CUDA 这层抽象还不足以让算法工程师非常高效的设计模型和书写算法。如下两个实例所示,同样实现 LeNet,使用高层框架只需要 9 行,而通过 cuDNN 需要上千行代码,而且还需要精心的管理内存分配释放,拼接模型计算图,效率十分低下。 ***(1) 通过cuDNN + CUDA API编程实现LeNet,需要~1000行实现模型结构和内存管理等逻辑*** [参考实例 cudnn-training](https://github.com/tbennun/cudnn-training/blob/master/lenet.cu) ```C++ // 内存分配,用户需要精确算出需要分配的张量大小,如果用深度学习框架此步骤会省略 ... cudaMalloc(&d_data, sizeof(float) * context.m_batchSize * channels * height * width); cudaMalloc(&d_labels, sizeof(float) * context.m_batchSize * 1 * 1 * 1); cudaMalloc(&d_conv1, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width); ... // 前向传播第一个卷积算子(仍需要写其他算子) ... cudnnConvolutionForward(cudnnHandle, &alpha, dataTensor, data, conv1filterDesc, pconv1, conv1Desc, conv1algo, workspace, m_workspaceSize, &beta, conv1Tensor, conv1); ... // 反向传播第一个卷积算子(仍需要写其他算子),如果用深度学习框架此步骤会省略,框架会通过自动求导方式补全反向传播计算逻辑 cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv1Tensor, dpool1, &beta, conv1BiasTensor, gconv1bias); cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, dataTensor, data, conv1Tensor, dpool1, conv1Desc, conv1bwfalgo, workspace, m_workspaceSize, &beta, conv1filterDesc, gconv1)); // 第一个卷积权重梯度更新(仍需要写其他算子),如果用深度学习框架此步骤只需要一行用户代码调用完成底层全模型的梯度更新 cublasSaxpy(cublasHandle, static_cast<int>(conv1.pconv.size()), &alpha, gconv1, 1, pconv1, 1); cublasSaxpy(cublasHandle, static_cast<int>(conv1.pbias.size()), &alpha, gconv1bias, 1, pconv1bias, 1); // 内存释放,如果用深度学习框架此步骤会省略,自动完成内存垃圾回收 ... cudaFree(d_data); cudaFree(d_labels); cudaFree(d_conv1); ... 
``` ***(2) 通过 Keras 书写 LeNet (TensorFlow Backend)[<sup>[5]</sup>](#keraslenet),只需要 9 行构建模型结构,算上训练逻辑只需要几十行代码*** [参考文档 LeNet-5-with-Keras](https://github.com/TaavishThaman/LeNet-5-with-Keras/blob/master/lenet_5.py) ```python model = keras.Sequential() model.add(layers.Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(32, 32, 1))) model.add(layers.AveragePooling2D()) model.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu')) model.add(layers.AveragePooling2D()) model.add(layers.Flatten()) model.add(layers.Dense(units=120, activation='relu')) model.add(layers.Dense(units=84, activation='relu')) model.add(layers.Dense(units=10, activation = 'softmax')) ``` 从上面对比的实例我们看到,深度学习框架对算法工程师开发深度学习模型,训练模型非常重要,能大幅减少编码量和减少程序缺陷的发生(例如,内存管理缺陷),提升开发效率,让算法工程师解放出来,专注于算法设计研究本身。总结起来,深度学习框架一般会提供以下功能: 1. 以 Python API 供开发者编写复杂的模型计算图(Computation Graph)结构,调用基本算子实现(例如,卷积的 cuDNN 实现),大幅降低开发代码量。 2. 自动化内存管理,不暴露指针和内存管理给用户。 3. 自动微分(Automatic Differentiation)的功能,并能自动构建反向传播计算图,与前向传播图拼接成统一计算图。 4. 调用或生成运行期优化代码(静态优化) 5. 调度算子在指定设备的执行,并在运行期应用并行算子,提升设备利用率等优化(动态优化)。 从上面我们已经了解到深度学习框架已经我们解决了很多底层系统的资源管理与任务调度问题,隐藏了很多细节,但是这些细节和底层实现又是系统工程师比较关注的,这些细节影响程序性能等非功能性属性。接下来我们以一个深度学习作业如何被框架一步步底层执行的流程为例,为大家揭开框架底层隐藏的实现。 TensorFlow 是应用非常广泛的深度学习框架,相比 PyTorch 的[命令式执行(Imperative Execution)](https://en.wikipedia.org/wiki/Imperative_programming)方式(运行到算子代码即触发执行,易于调试),TensorFlow 采用[符号执行(Symbolic Execution)](https://en.wikipedia.org/wiki/Symbolic_execution)方式 (程序调用 session.run() 才真正触发执行,并且框架能获取完整计算图进行优化)。二者详细区别我们将在后面框架章节进行介绍。我们在下面的图示和实例中以 TensorFlow 的一个简单程序为例,展示一个深度学习模型是如何被深度学习框架静态(Static)编译与运行时动态(Dynamic)管理的。 如图 1.4.6 到图 1.4.9 所示,我们通过划分不同阶段,解释一个 TensorFlow 程序完成一个精简示例模型 x * y + z 的训练全流程。 (1)前端程序转换为数据流图:如图 1-4-6 所示,这个阶段框架会将用户使用 Python 书写的深度学习模型,通过预先定义的接口,翻译为中间表达(Intermediate Representation),并且构建算子直接的依赖关系,形成前向数据流图(Data-Flow Graph)。 <center> <img src="./img/4/4-1-5-pythontoforward.png" /></center> <center>图 1.4.6 Python + TensorFlow 程序解析为中间表达和前向传播数据流图</center> (2)反向求导:如图 1-4-7 所示,这个阶段框架会分析形成前向数据流图,通过算子之前定义的反向传播函数,构建反向传播数据流图,并和前向传播数据流图一起形成整体的数据流图。 <center> <img src="./img/4/4-1-6-backwardgraph.png" /></center> <center>图 1.4.7 反向求导,自动微分(Automatic Differentiation)</center> (3)产生运行期代码:如图 1.4.8 所示,这个阶段框架会分析整体的数据流图,并根据运行时部署所在的设备(CPU,GPU 等),将算子中间表达替换为算子针对特定设备的运行期的代码,例如图中的 CPU 的 C++ 算子实现或者针对 NVIDIA GPU 的 CUDA 算子实现。 <center> <img src="./img/4/4-1-7-genruntime.png" /></center> <center>图 1.4.8 产生运行期代码</center> (4)调度并运行代码:如图 1.4.9 所示,这个阶段框架会将算子及其运行期的代码实现抽象为“任务”,依次根据“任务”依赖关系,调度到计算设备上进行执行。对一些不方便静态做优化的选项,可以通过运行期调度达到,例如,并发(Concurrent)计算与 I/O,如有空闲资源并行执行没有依赖的算子等。目前框架例如,[PyTorch](https://proceedings.neurips.cc/paper/2019/file/bdbca288fee7f92f2bfa9f7012727740-Paper.pdf),[TensorFlow](https://github.com/tensorflow/runtime/blob/master/documents/cuda-proposal.md#core-design-principles-and-decisions) 一般选择单 CUDA Stream 在 NVIDIA GPU 侧进行算子内核调度,数据加载会选择再设置其他 Stream。例如,PyTorch 出于[以下考量](https://proceedings.neurips.cc/paper/2019/file/bdbca288fee7f92f2bfa9f7012727740-Paper.pdf)[<sup>[6]</sup>](#pytorch):“以一种让他们合作共享 GPU 的方式编写 CUDA 内核较为困难,因为精确的调度是硬件控制。在实践中,内核编写者通常组合多个任务形成单片内核。数据加载和分布式计算程序是单 Stream 设计的例外,它们小心地插入额外的同步以避免与内存分配器的不良交互。” <center> <img src="./img/4/4-1-8-execution.png" /></center> <center>图 1.4.9 调度并运行内核任务</center> 综上所示,我们通过上面两个小节可以发现,如果没有框架和算子库的支持,算法工程师进行简单的深度学习模型设计与开发都会举步维艰,所以我们看到深度学习算法本身飞速发展的同时,也要看到底层系统对提升整个算法研发的生产力起到了不可或缺的作用。 ## 1.4.4 更广泛的人工智能系统生态 除了以上小节框架本身提供的功能进行单模型训练,当前还在以下几个方面存在更广泛的人工智能系统生态。 - 更大的超参数组合与模型结构的搜索空间 - 之前我们看到的实例本身是单个模型的样例,但是深度学习模型可以通过变换其中的超参数和模型结构获取和训练更好的结果,这种探索式的多任务的学习过程也叫做自动化机器学习,读者可以参考第 9 章-自动化机器学习系统了解相关领域内容与挑战。 - 共享的资源与多租的环境 - 
如果我们现在的 GPU 等训练资源都是被公司或组织机构集中管理,用户需要共享使用资源进而提升资源整体利用率,那么在这种环境下系统如何提供给算法工程师接近单机的使用环境体验,进而让算法工程师更加简便,高效的使用资源?读者可以参考第 7 章-异构计算集群调度与资源管理系统,了解平台如何应对当前的挑战。 - 假设数据无法离线提前准备好? - 如果数据没有提前准备好,需要系统提供更加多样的训练方式,深度学习系统需要不断与环境或者模拟器交互,获取实时数据,通过强化学习方式进行训练,读者可以参考第 10 章-强化学习系统进行了解,强化学习系统如何在更复杂与多样的场景下进行模型训练以及数据获取。 - 数据和人工智能模型的安全与隐私如何保障? - 当前深度学习为数据驱动的方法,同时部署时会产生交付的模型文件。模型泄露,篡改以及本身的缺陷会造成潜在的安全风险。如何保障深度学习整体的安全与隐私相比传统安全领域遇到了新的挑战,读者可以参考第 12 章-人工智能安全与隐私进行了解。 - 之前我们大部分了解的是针对人工智能负载做系统设计也称作 System for AI,反过来我们也可以思考如何通过人工智能这种数据驱动的方法反过来指导系统设计与优化,也就是 AI for System,读者可以参考第13章-人工智能优化计算机系统进行了解。 ## 1.4.5 深度学习框架及工具入门实验 Remzi Arpaci-Dusseau(University of Wisconsin—Madison)曾在 2019 年 FAST(USENIX Conference on File and Storage Technologies)大会上演讲“[Measure, Then Build](https://www.usenix.org/conference/atc19/presentation/keynote#:~:text=This%20talk%20will%20discuss%20an,implementing%20new%20systems%20to%20solve)”[<sup>[7]</sup>](#measurebuild)和 2022 年 FAST 大会上演讲“[FAST '22 - 25 Years of Storage Research and Education: A Retrospective](https://www.youtube.com/watch?v=u9RECEzxk6I)”[<sup>[8]</sup>](#storageresearch),其中提到系统研与学习工作中度量之后构建 ***[“Measure, Then Build”](https://www.usenix.org/conference/atc19/presentation/keynote#:~:text=This%20talk%20will%20discuss%20an,implementing%20new%20systems%20to%20solve)*** 的系统工作研究方法。 核心想法是:通过度量(Measurement)去学习和找到实际的问题。 螺旋式学习路线: 1. 度量(Measure) 2. 理解(Understand) 2.1. 学习到新的机会 2.2. 启发新的想法 3. 构建(Build) 4. 重复1~3的过程 其提出:研究是一种学习练习 “Research is a learning exercise” 的思想,不断思考什么是可度量的,以及如何从中可以学到什么。 那么我们也开始从度量深度学习作业的执行进而感知和学习深度学习系统,为后续更深入的学习形成具象的认知。 读者可以通过本小节的实验,初步感受与观测人工智能系统的运行。 通过在深度学习框架上调试和运行样例程序,观察不同配置下的运行结果,了解深度学习系统的工作流程。通过实验读者将了解:(1)深度学习框架及工作流程(Deep Learning Workload)。(2)在不同硬件和批尺寸(Batch Size)条件下,张量运算产生的开销。 具体实现细节请大家参考实验 [AI-System Lab1 框架及工具入门示例](https://github.com/microsoft/AI-System/tree/main/Labs/BasicLabs/Lab1)。 实验(Experiment)与遥测(Telemetry)是系统工作必不可少的环节,同时系统研究与工作离不开动手实践。希望读者通过上面实例端到端跑通样例并对相关工具和系统有初步的实践体验。 ### 1.4.5.1 实验目的 1. 了解深度学习框架及工作流程(Deep Learning Workload) 2. 了解在不同硬件和批尺寸(Batch Size)条件下,张量运算产生的开销 ### 1.4.5.2 实验环境 * PyTorch==1.5.0 * TensorFlow>=1.15.0 * 【可选环境】 单机Nvidia GPU with CUDA 10.0 ### 1.4.5.3 实验原理 通过在深度学习框架上调试和运行样例程序,观察不同配置下的运行结果,了解深度学习系统的工作流程。 ### 1.4.5.4 实验内容 ***实验流程图*** <center> <img src="./img/4/Lab1-flow.png"/></center> <center>图 1.4.10 实验流程图</center> ***具体步骤*** 1. 安装依赖包。PyTorch==1.5, TensorFlow>=1.15.0 2. 下载并运行 PyTorch 仓库中提供的 MNIST 样例程序。 3. 修改样例代码,保存网络信息,并使用 TensorBoard 画出神经网络数据流图。 4. 继续修改样例代码,记录并保存训练时正确率和损失值,使用 TensorBoard 画出损失和正确率趋势图。 5. 添加神经网络分析功能(profiler),并截取使用率前十名的操作。 6. 更改批次大小为1,16,64,再执行分析程序,并比较结果。 7. 【可选实验】改变硬件配置(e.g.: 使用/ 不使用 GPU),重新执行分析程序,并比较结果。 ### 1.4.5.5 实验计划 ***实验环境*** |||| |--------|--------------|--------------------------| |硬件环境|CPU(vCPU数目)|&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; | ||GPU(型号,数目)|| |软件环境|OS版本|| ||深度学习框架<br>python包名称及版本|| ||CUDA版本|| |||| ***实验结果*** 1. 模型可视化结果截图 ||| |---------------|---------------------------| |<br/>&nbsp;<br/>神经网络数据流图<br/>&nbsp;<br/>&nbsp;|&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; | |<br/>&nbsp;<br/>损失和正确率趋势图<br/>&nbsp;<br/>&nbsp;|| |<br/>&nbsp;<br/>网络分析,使用率前十名的操作<br/>&nbsp;<br/>&nbsp;|| |||| 2. 
网络分析,不同批大小结果比较 ||| |------|--------------| |批大小 &nbsp;| &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; 结果比较 &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; | |<br/>&nbsp;<br/>1<br/>&nbsp;<br/>&nbsp;|| |<br/>&nbsp;<br/>16<br/>&nbsp;<br/>&nbsp;|| |<br/>&nbsp;<br/>64<br/>&nbsp;<br/>&nbsp;|| ||| ***参考代码*** 1. MNIST样例程序: 代码位置:AI-System/Labs/BasicLabs/Lab1/mnist_basic.py 运行命令:`python mnist_basic.py` 2. 可视化模型结构、正确率、损失值 代码位置:AI-System/Labs/BasicLabs/Lab1/mnist_tensorboard.py 运行命令:`python mnist_tensorboard.py` 3. 网络性能分析 代码位置:AI-System/Labs/BasicLabs/Lab1/mnist_profiler.py ***参考资料*** * 样例代码:[PyTorch-MNIST Code](https://github.com/pytorch/examples/blob/master/mnist/main.py) * 模型可视化: * [PyTorch Tensorboard Tutorial](https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html) * [PyTorch TensorBoard Doc](https://pytorch.org/docs/stable/tensorboard.html) * [pytorch-tensorboard-tutorial-for-a-beginner](https://medium.com/@rktkek456/pytorch-tensorboard-tutorial-for-a-beginner-b037ee66574a) * Profiler:[how-to-profiling-layer-by-layer-in-pytroch](https://stackoverflow.com/questions/53736966/how-to-profiling-layer-by-layer-in-pytroch) ## 小结与讨论 本章我们主要通过一些实例启发读者建立本书各个章节之间的联系,由于系统的多层抽象造成我们实践人工智能的过程中已经无法感知底层系统的运行机制。希望读者结合后面章节的学习后,能够看到深度学习系统底层的作用和复杂性,从而指导上层人工智能作业和代码更加高效的书写。 请读者读完后面章节后再回看当前章节,并重新思考当前书写的人工智能 Python 程序底层发生了什么? ## 参考文献 <div id="lenet"></div> 1. [Y. Lecun, L. Bottou, Y. Bengio and P. Haffner, "Gradient-based learning applied to document recognition," in Proceedings of the IEEE, vol. 86, no. 11, pp. 2278-2324, Nov. 1998, doi: 10.1109/5.726791.]() <div id="cudnn"></div> 2. [Chetlur, Sharan, et al. "cudnn: Efficient primitives for deep learning." arXiv preprint arXiv:1410.0759 (2014).](https://arxiv.org/abs/1410.0759) <div id="im2col"></div> 3. [Gómez-Hernández, Eduardo José, et al. "Using PHAST to port Caffe library: First experiences and lessons learned." arXiv preprint arXiv:2005.13076 (2020).](https://arxiv.org/abs/2005.13076) <div id="cudnnlenet"></div> 4. [https://github.com/tbennun/cudnn-training](https://github.com/tbennun/cudnn-training) <div id="keraslenet"></div> 5. [https://github.com/TaavishThaman/LeNet-5-with-Keras](https://github.com/TaavishThaman/LeNet-5-with-Keras) <div id="pytorch"></div> 6. [Paszke, Adam, et al. "Pytorch: An imperative style, high-performance deep learning library." Advances in neural information processing systems 32 (2019).](https://arxiv.org/abs/1912.01703) <div id="measurebuild"></div> 7. [Remzi Arpaci-Dusseau, University of Wisconsin—Madison. Measure, Then Build. ATC 2019](https://www.usenix.org/conference/atc19/presentation/keynote#:~:text=This%20talk%20will%20discuss%20an,implementing%20new%20systems%20to%20solve) <div id="storageresearch"></div> 8. [Remzi Arpaci-Dusseau, University of Wisconsin—Madison. 25 Years of Storage Research and Education: A Retrospective. FAST 2022](https://www.usenix.org/conference/fast22/presentation/keynote)
AI-System/Textbook/第1章-人工智能系统概述/1.4-深度学习样例背后的系统问题.md/0
{ "file_path": "AI-System/Textbook/第1章-人工智能系统概述/1.4-深度学习样例背后的系统问题.md", "repo_id": "AI-System", "token_count": 22614 }
12
# MLAKSDeploy Pipeline trigger: batch: true branches: include: - master paths: include: - submodules/DeployMLModelKubernetes/* pr: branches: include: - master paths: include: - submodules/DeployMLModelKubernetes/* schedules: - cron: "0 * * * *" displayName: Daily midnight build branches: include: - master always: true variables: deployment_name: 'python-ml-rts' azureresourcegroup: 'python-ml-rts-test' workspacename: 'pythonmlrtsws' azureregion: 'westus' expires : "2019-08-01" python_path: "$(System.DefaultWorkingDirectory)/submodules/DeployMLModelKubernetes/{{cookiecutter.project_name}}/" jobs: - job: MLAKSDeployAMLJob timeoutInMinutes: 300 cancelTimeoutInMinutes: 2 pool: vmImage: 'Ubuntu-16.04' steps: - template: steps/deploy_rts.yml parameters: azureSubscription: $(azureSubscription) azureresourcegroup: $(azureresourcegroup) location: "submodules/DeployMLModelKubernetes/{{cookiecutter.project_name}}" alias : $(alias) project : $(project) expires : "2019-08-01" conda: "MLAKSDeployAML" azure_subscription: $(azure_subscription) workspacename: 'pythonmlrtsws' azureregion: 'westus' aksimagename: "myimage" aks_name: "pythonmlaks" aks_location: "westus" aks_service_name: "pythonmlaksservice" python_path: $(python_path) - task: AzureCLI@1 displayName: 'Backup Cleanup' condition: or(canceled(),failed()) inputs: azureSubscription: $(azureSubscription) scriptLocation: inlineScript inlineScript: | source /usr/share/miniconda/etc/profile.d/conda.sh conda activate MLAKSDeployAML echo Execute Resource Group Delete existResponse=$(az group exists -n $(azureresourcegroup)) if [ "$existResponse" == "true" ]; then echo Deleting project resource group az group delete --name $(azureresourcegroup) --yes else echo Project resource group did not exist fi echo Done Cleanup
AI/.ci/python-ml-realtimeserving.yml/0
{ "file_path": "AI/.ci/python-ml-realtimeserving.yml", "repo_id": "AI", "token_count": 859 }
13
# Python package # Create and test a Python package on multiple Python versions. # Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more: # https://docs.microsoft.com/azure/devops/pipelines/languages/python trigger: - master pool: vmImage: 'ubuntu-latest' strategy: matrix: Python37: python.version: '3.7' steps: - task: UsePythonVersion@0 inputs: versionSpec: '$(python.version)' displayName: 'Use Python $(python.version)' - script: | python -m pip install --upgrade pip pip install setuptools wheel twine displayName: 'Install dependencies' - task: TwineAuthenticate@1 inputs: artifactFeed: <> - script: | python setup.py sdist bdist_wheel twine upload -r <> --config-file $(PYPIRC_PATH) dist/*
AI/.ci/stage/publish_ado_pypi.yml/0
{ "file_path": "AI/.ci/stage/publish_ado_pypi.yml", "repo_id": "AI", "token_count": 271 }
14
parameters: azureSubscription: '' azure_subscription: '' location: submodules/DeployMLModelPipelines azureresourcegroup: dcibhpdl workspacename: dcibhpwsdl azureregion: westus2 aksimagename: dcibhpaksdl aks_name: dcibhpaksdl aks_service_name: dcibhpaksdlapi conda: amlmm doCleanup: true python_path: "$(System.DefaultWorkingDirectory)/submodules/DeployMLModelPipelines" flighting_release: false flighting_preview: false flighting_master: false steps: - template: config_conda.yml parameters: conda_location: ${{parameters.location}} azureSubscription: ${{parameters.azureSubscription}} conda: ${{parameters.conda}} flighting_release: ${{parameters.flighting_release}} flighting_preview: ${{parameters.flighting_preview}} flighting_master: ${{parameters.flighting_master}} - template: azpapermill.yml parameters: notebook: 00_AMLConfiguration.ipynb location: ${{parameters.location}} azureSubscription: ${{parameters.azureSubscription}} conda: ${{parameters.conda}} azure_subscription: ${{parameters.azure_subscription}} azureresourcegroup: ${{parameters.azureresourcegroup}} workspacename: ${{parameters.workspacename}} azureregion: ${{parameters.azureregion}} - template: azpapermill.yml parameters: notebook: 01_DataPrep.ipynb location: ${{parameters.location}} azureSubscription: ${{parameters.azureSubscription}} conda: ${{parameters.conda}} azure_subscription: ${{parameters.azure_subscription}} azureresourcegroup: ${{parameters.azureresourcegroup}} workspacename: ${{parameters.workspacename}} azureregion: ${{parameters.azureregion}} - template: azpapermill.yml parameters: notebook: 02_create_pipeline.ipynb location: ${{parameters.location}} azureSubscription: ${{parameters.azureSubscription}} conda: ${{parameters.conda}} - template: cleanuptask.yml parameters: azureSubscription: ${{parameters.azureSubscription}} conda: deployment_aml azureresourcegroup: ${{parameters.azureresourcegroup}} doCleanup: ${{parameters.doCleanup}}
AI/.ci/steps/MLBatchDeployAMLJob.yml/0
{ "file_path": "AI/.ci/steps/MLBatchDeployAMLJob.yml", "repo_id": "AI", "token_count": 758 }
15
parameters: azureSubscription: 'AICAT-VB-E2E (989b90f7-da4f-41f9-84c9-44848802052d)' azureresourcegroup: "-" location : "eastus2" alias : $(Build.RequestedForId) project : "e2etestharness" expires : "2019-08-01" steps: - task: AzureCLI@1 displayName: "Create Resource Group" inputs: azureSubscription: ${{parameters.azureSubscription}} scriptLocation: inlineScript inlineScript: | echo Execute Resource Group Creation az group create -l ${{parameters.location}} -n ${{parameters.azureresourcegroup}} --tags alias='${{parameters.alias}}' project='${{parameters.project}}' expires='${{parameters.expires}}'
AI/.ci/steps/createResourceGroupTemplate.yml/0
{ "file_path": "AI/.ci/steps/createResourceGroupTemplate.yml", "repo_id": "AI", "token_count": 238 }
16
parameters: conda_env: # this param must be set clean_conda: true steps: - script: | conda env remove -n ${{parameters.conda_env}} -y workingDirectory: tests displayName: 'Conda remove' continueOnError: true condition: always() # this step will always run, even if the pipeline is canceled enabled: ${{parameters.clean_conda}}
AI/.ci/steps/reco_conda_clean_linux.yml/0
{ "file_path": "AI/.ci/steps/reco_conda_clean_linux.yml", "repo_id": "AI", "token_count": 115 }
17
variables: TridentWorkloadTypeShort: airmlrts DeployLocation: eastus ProjectLocation: "" PythonPath: . Template: RMLAKSDeployAMLJob.yml
AI/.ci/vars/rml_realtime_scoring.yml/0
{ "file_path": "AI/.ci/vars/rml_realtime_scoring.yml", "repo_id": "AI", "token_count": 50 }
18
gpu_no=8 # model type model_type="dpr" seq_length=256 triplet="--triplet --optimizer lamb" # set this to empty for non triplet model # hyper parameters batch_size=16 gradient_accumulation_steps=1 learning_rate=1e-5 warmup_steps=1000 # input/output directories base_data_dir="../data/QA_NQ_data/" job_name="ann_NQ_test" model_dir="${base_data_dir}${job_name}/" model_ann_data_dir="${model_dir}ann_data/" pretrained_checkpoint_dir="../../../DPR/checkpoint/retriever/multiset/bert-base-encoder.cp" train_cmd="\ sudo python -m torch.distributed.launch --nproc_per_node=$gpu_no ../drivers/run_ann_dpr.py --model_type $model_type \ --model_name_or_path $pretrained_checkpoint_dir --task_name MSMarco $triplet --data_dir $base_data_dir \ --ann_dir $model_ann_data_dir --max_seq_length $seq_length --per_gpu_train_batch_size=$batch_size \ --gradient_accumulation_steps $gradient_accumulation_steps --learning_rate $learning_rate --output_dir $model_dir \ --warmup_steps $warmup_steps --logging_steps 100 --save_steps 1000 --log_dir "~/tensorboard/${DLWS_JOB_ID}/logs/${job_name}" \ " echo $train_cmd eval $train_cmd echo "copy current script to model directory" sudo cp $0 $model_dir
ANCE/commands/run_train_dpr.sh/0
{ "file_path": "ANCE/commands/run_train_dpr.sh", "repo_id": "ANCE", "token_count": 438 }
19
#from transformers.utils import logging import torch import torch.nn as nn import torch.nn.functional as F import math from .modules import ( LayerNorm, get_activation_fn, MultiheadAttention, ) from .modules import quant_noise as apply_quant_noise_ from .transformer_sentence_encoder import TransformerSentenceEncoder,TransformerDecoder,EncoderOut import os from transformers.modeling_utils import PreTrainedModel #logger = logging.get_logger(__name__) import logging logger = logging.getLogger(__name__) from model.SEED_Encoder import SEEDEncoderConfig class SEEDEncoderPretrainedModel(PreTrainedModel): config_class = SEEDEncoderConfig base_model_prefix = "seed_encoder" def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, MultiheadAttention): module.q_proj.weight.data.normal_(mean=0.0, std=self.config.initializer_range) module.k_proj.weight.data.normal_(mean=0.0, std=self.config.initializer_range) module.v_proj.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class RobertaEncoder(nn.Module): """RoBERTa encoder.""" def __init__(self, args): super().__init__() self.args = args if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) self.sentence_encoder = TransformerSentenceEncoder( padding_idx=args.pad_token_id, vocab_size=args.vocab_size, num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, layerdrop=args.encoder_layerdrop, max_seq_len=args.max_positions, num_segments=0, encoder_normalize_before=True, apply_bert_init=True, activation_fn=args.activation_fn, q_noise=args.quant_noise_pq, qn_block_size=args.quant_noise_pq_block_size, ) #args.untie_weights_roberta = getattr(args, 'untie_weights_roberta', False) def forward(self, src_tokens, return_all_hiddens=False, **unused): inner_states, _ = self.sentence_encoder( src_tokens, last_state_only=not return_all_hiddens, ) x = inner_states[-1].transpose(0, 1) # T x B x C -> B x T x C # x_origin=x # if not features_only: # x = self.output_layer(x, masked_tokens=masked_tokens) return x, {'inner_states': inner_states if return_all_hiddens else None} class SEEDEncoderModel(SEEDEncoderPretrainedModel): def __init__(self, config): super().__init__(config) self.encoder=RobertaEncoder(config) self.init_weights() def forward(self, src_tokens, prev_tokens, return_all_hiddens=False, **kwargs): x_encoder, extra = self.encoder(src_tokens, return_all_hiddens, **kwargs) return x_encoder ,extra def get_input_embeddings(self): return self.encoder.sentence_encoder.embed_tokens def set_input_embeddings(self, value): self.encoder.sentence_encoder.embed_tokens = value class SEEDEncoderForMaskedLM(SEEDEncoderPretrainedModel): """docstring for ClassName""" # _keys_to_ignore_on_save = 
[r"lm_head.decoder.weight", r"lm_head.decoder.bias"] # _keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"] def __init__(self, config): super().__init__(config) self.seed_encoder = SEEDEncoderModel(config) self.decoder=TransformerDecoder(config,self.encoder.sentence_encoder.embed_tokens,no_encoder_attn=config.no_cross_attention) self.lm_head = RobertaLMHead( embed_dim=config.encoder_embed_dim, output_dim=config.vocab_size, activation_fn=config.activation_fn, weight=self.seed_encoder.encoder.sentence_encoder.embed_tokens.weight ) self.train_ratio=config.train_ratio self.decoder_atten_window=config.decoder_atten_window self.init_weights() def forward( src_tokens,prev_tokens, masked_tokens=None,**kwargs): x_encoder,_=self.seed_encoder(src_tokens) h=x_encoder[:,0:1,:] h=h.transpose(0,1) h=EncoderOut( encoder_out=h, # T x B x C encoder_padding_mask=None, # B x T encoder_embedding=None, # B x T x C encoder_states=None, # List[T x B x C] src_tokens=None, src_lengths=None, ) decoder_output=self.decoder(prev_tokens, encoder_out=h,local_attn_mask=self.decoder_atten_window)[0] features=self.lm_head(x_encoder, masked_tokens) return features, decoder_output def get_output_embeddings(self): return self.lm_head.weight def set_output_embeddings(self, new_embeddings): self.lm_head.weight = new_embeddings class SEEDEncoderForSequenceClassification(SEEDEncoderPretrainedModel): """docstring for ClassName""" def __init__(self, config): super().__init__(config) self.seed_encoder = SEEDEncoderModel(config) self.classification_heads=RobertaClassificationHead( config.encoder_embed_dim, config.encoder_embed_dim, config.num_labels, config.pooler_activation_fn, config.pooler_dropout, config.quant_noise_pq, config.quant_noise_pq_block_size,) self.init_weights() def forward(src_tokens,return_all_hiddens=False,**kwargs): x_encoder, extra = self.seed_encoder.encoder(src_tokens, return_all_hiddens, **kwargs) x = self.classification_heads(x_encoder,**kwargs) return x class RobertaLMHead(nn.Module): """Head for masked language modeling.""" def __init__(self, embed_dim, output_dim, activation_fn, weight=None): super().__init__() self.dense = nn.Linear(embed_dim, embed_dim) self.activation_fn = get_activation_fn(activation_fn) self.layer_norm = LayerNorm(embed_dim) if weight is None: weight = nn.Linear(embed_dim, output_dim, bias=False).weight self.weight = weight self.bias = nn.Parameter(torch.zeros(output_dim)) def forward(self, features, masked_tokens=None, **kwargs): # Only project the masked tokens while training, # saves both memory and computation if masked_tokens is not None: features = features[masked_tokens, :] x = self.dense(features) x = self.activation_fn(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = F.linear(x, self.weight) + self.bias return x class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, q_noise=0, qn_block_size=8): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = apply_quant_noise_( nn.Linear(inner_dim, num_classes), q_noise, qn_block_size ) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x
ANCE/model/SEED_Encoder/modeling_seed_encoder.py/0
{ "file_path": "ANCE/model/SEED_Encoder/modeling_seed_encoder.py", "repo_id": "ANCE", "token_count": 3958 }
20
""" Code for self-training with weak supervision. Author: Giannis Karamanolakis ([email protected]) """ import math import random import numpy as np from numpy.random import seed import tensorflow as tf import tensorflow.keras as K from tensorflow.keras.layers import Embedding, Input, Dropout, Dense, Lambda class RAN: """ Rule Attention Network * Input: text embedding x, array of rule predictions * Output: aggregate label """ def __init__(self, args, num_rules, logger=None, name='ran'): self.args = args self.name = name self.logger = logger self.manual_seed = args.seed tf.random.set_seed(self.manual_seed) self.datapath = args.datapath self.model_dir = args.logdir self.sup_batch_size = args.train_batch_size self.unsup_batch_size = args.unsup_batch_size self.sup_epochs = args.num_epochs self.unsup_epochs = args.num_unsup_epochs self.num_labels = args.num_labels self.num_rules = num_rules # max_rule_seq_length: used for efficiency (Note: no rules are discarded.) self.max_rule_seq_length = min(self.num_rules, args.max_rule_seq_length) # Using Student as an extra rule self.num_rules += 1 self.max_rule_seq_length += 1 self.student_rule_id = self.num_rules self.hard_student_rule = args.hard_student_rule self.preprocess = None self.trained = False self.xdim = -1 self.ignore_student = False self.gpus = 1 def init(self, rule_pred): # Initialize RAN as majority voting (all sources have equal weights) self.majority_model = MajorityVoter(num_labels=self.num_labels) return def postprocess_rule_preds(self, rule_pred, student_pred=None): """ Converts rule predictions to appropriate format :param rule_pred: a 2D array of rule preds: num_examples x num_rules :return: rule_one_hot: a 2D mask matrix: 1 if rule applies otherwise 0 rule_pred: a 3D rule prediction matrix (N x num_rules x num_classes): converting class indices to one-hot vectors # if a rule predicts -1, then pred = [0,...,0] student_pred: the soft predictions of a student network """ max_rule_seq_length = self.max_rule_seq_length - 1 # -1: Using student as extra rule N = rule_pred.shape[0] rule_mask = (rule_pred != -1).astype(int) fired_rule_ids = [(np.nonzero(x)[0] + 1).tolist() for x in rule_mask] non_zero_rule_pred = [] for i, fired_rules in enumerate(fired_rule_ids): preds_i = [rule_pred[i, j-1] for j in fired_rules] preds_i = preds_i + [self.num_labels] * (max_rule_seq_length - len(preds_i)) if len(preds_i) > max_rule_seq_length: self.logger.info("WARNING: Num firing rules = {} > max_rule_seq_length = {}".format(len(preds_i), max_rule_seq_length)) preds_i = preds_i[:max_rule_seq_length] non_zero_rule_pred.append(preds_i) one_hot_rule_pred = tf.one_hot(non_zero_rule_pred, self.num_labels + 1).numpy() one_hot_rule_pred = one_hot_rule_pred[:, :, :-1] fired_rule_ids = [x + [0] * (max_rule_seq_length - len(x)) for x in fired_rule_ids] fired_rule_ids = np.array(fired_rule_ids) if student_pred is not None: mask_one = np.ones((N, 1)) if student_pred.ndim > 2: student_pred = np.squeeze(student_pred, axis=None) if self.hard_student_rule: # Convert Student's soft probabilities to hard labels student_pred = to_one_hot(np.argmax(student_pred, axis=1), self.num_labels) student_pred = student_pred[..., np.newaxis, :] # Add axis=1 one_hot_rule_pred = np.concatenate([student_pred, one_hot_rule_pred], axis=1) rule_mask = np.concatenate([mask_one, rule_mask], axis=1) if not self.ignore_student: student_rule_id = np.ones((N, 1)) * self.student_rule_id else: student_rule_id = np.zeros((N, 1)) fired_rule_ids = np.concatenate([student_rule_id, fired_rule_ids], 
axis=1) return rule_mask, fired_rule_ids, one_hot_rule_pred def train(self, x_train, rule_pred_train, y_train, x_dev=None, rule_pred_dev=None, y_dev=None, student_pred_train=None, student_pred_dev=None, x_unsup=None, rule_pred_unsup=None, student_pred_unsup=None): assert x_unsup is not None, "For SSL RAN you need to also provide unlabeled data... " if x_train is not None: x_train = np.array(x_train) y_train = np.array(y_train) rule_one_hot_train, fired_rule_ids_train, rule_pred_train = self.postprocess_rule_preds(rule_pred_train, student_pred_train) self.logger.info("X Train Shape " + str(x_train.shape) + ' ' + str(rule_pred_train.shape) + ' ' + str(y_train.shape)) else: rule_one_hot_train, fired_rule_ids_train, rule_pred_train = None, None, None if x_dev is not None: x_dev = np.array(x_dev) y_dev = np.array(y_dev) rule_one_hot_dev, fired_rule_ids_dev, rule_pred_dev = self.postprocess_rule_preds(rule_pred_dev, student_pred_dev) self.logger.info("X Dev Shape " + str(x_dev.shape) + ' ' + str(rule_pred_dev.shape) + ' ' + str(y_dev.shape)) else: rule_one_hot_dev, fired_rule_ids_dev, rule_pred_dev = None, None, None x_unsup = np.array(x_unsup) rule_one_hot_unsup, fired_rule_ids_unsup, rule_pred_unsup = self.postprocess_rule_preds(rule_pred_unsup, student_pred_unsup) self.logger.info("X Unsup Shape " + str(x_unsup.shape) + ' ' + str(rule_pred_unsup.shape)) if not self.trained or (x_train is not None and self.xdim != x_train.shape[1]): if self.trained and self.xdim != x_train.shape[1]: self.logger.info("WARNING: Changing dimensionality of x from {} to {}".format(self.xdim, x_train.shape[1])) self.xdim = x_train.shape[1] if x_train is not None else x_unsup.shape[1] self.model = construct_rule_network(self.xdim, num_rules=self.num_rules, num_labels=self.num_labels, max_rule_seq_length=self.max_rule_seq_length, seed=self.manual_seed) self.logger.info("\n\n\t\t*** Training RAN ***") loss_fn = MinEntropyLoss(batch_size=self.unsup_batch_size * self.gpus) # SSLLoss() self.model.compile(optimizer=tf.keras.optimizers.Adam(), loss=loss_fn, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")]) self.model.fit( x=[x_unsup, fired_rule_ids_unsup, rule_pred_unsup], y=np.array([-1] * x_unsup.shape[0]), batch_size=self.unsup_batch_size * self.gpus, shuffle=True, epochs=self.sup_epochs, callbacks=[ create_learning_rate_scheduler(max_learn_rate=1e-2, end_learn_rate=1e-5, warmup_epoch_count=20, total_epoch_count=self.sup_epochs), K.callbacks.EarlyStopping(patience=20, restore_best_weights=True) ], validation_data=([x_dev, fired_rule_ids_dev, rule_pred_dev], y_dev)) loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) self.model.compile(optimizer=tf.keras.optimizers.Adam(), loss=loss_fn, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")]) self.model.fit( x=[x_train, fired_rule_ids_train, rule_pred_train], y=y_train, batch_size=self.sup_batch_size * self.gpus, shuffle=True, epochs=self.sup_epochs, callbacks=[ create_learning_rate_scheduler(max_learn_rate=1e-2, end_learn_rate=1e-5, warmup_epoch_count=20, total_epoch_count=self.sup_epochs), K.callbacks.EarlyStopping(patience=20, restore_best_weights=True) ], validation_data=([x_dev, fired_rule_ids_dev, rule_pred_dev], y_dev)) self.trained = True dev_loss = self.model.evaluate([x_dev, fired_rule_ids_dev, rule_pred_dev], y_dev) res = {} res['dev_loss'] = dev_loss return res def predict(self, rule_pred, student_features, student_pred=None): if not self.trained: return self.predict_majority(rule_pred) else: return 
self.predict_ran(student_features, rule_pred, student_pred=student_pred) def predict_majority(self, rule_pred): agg_labels = self.majority_model.predict(rule_pred) agg_proba = self.majority_model.predict_proba(rule_pred) return { 'preds': agg_labels, 'proba': agg_proba, "att_scores": None, "rule_mask": None } def predict_ran(self, x, rule_pred, student_pred=None, batch_size=128, prefix=""): x = np.array(x) if student_pred is None: random_pred = (rule_pred != -1).sum(axis=1) == 0 else: random_pred = np.array([False] * rule_pred.shape[0]) rule_mask, fired_rule_ids, rule_pred_one_hot = self.postprocess_rule_preds(rule_pred, student_pred) self.logger.info("RAN - Predicting labels for {} texts".format(x.shape[0])) y_pred = self.model.predict( [x, fired_rule_ids, rule_pred_one_hot], batch_size=batch_size ) self.logger.info("DONE, Getting attention scores...".format(x.shape[0])) desiredOutputs = [self.model.get_layer("attention").output] newModel = tf.keras.Model(self.model.inputs, desiredOutputs) att_scores = newModel.predict( [x, fired_rule_ids, rule_pred_one_hot], batch_size=batch_size) preds = np.argmax(y_pred, axis=-1).flatten() dist_to_random = 0.0 confidence_thres = 1 / self.num_labels + dist_to_random max_proba = np.max(y_pred, axis=-1).flatten() ignore_pred = max_proba < confidence_thres random_pred[ignore_pred] = True soft_proba = y_pred preds[random_pred] = -1 return { 'preds': preds, 'proba': soft_proba, "att_scores": att_scores, "rule_mask": rule_mask, } def load(self, savefile): self.logger.info("loading rule attention network from {}".format(savefile)) self.model.load_weights(savefile) def save(self, savefile): self.logger.info("Saving rule attention network at {}".format(savefile)) self.model.save_weights(savefile) return def construct_rule_network(student_emb_dim, num_rules, num_labels, dense_dropout=0.3, max_rule_seq_length=10, seed=42): # Rule Attention Network # encoder = TFBertModel.from_pretrained(model_type) student_embeddings = Input(shape=(student_emb_dim,), name="student_embeddings") rule_ids = Input(shape=(max_rule_seq_length,), dtype=tf.int32, name="rule_ids") rule_preds_onehot = Input(shape=(max_rule_seq_length, num_labels,), name="rule_preds") # x_hidden: batch_size x 128 x_hidden = Dropout(dense_dropout)(student_embeddings) x_hidden = Dense(units=128, activation="relu", name="dense")(x_hidden) x_hidden = Dropout(dense_dropout)(x_hidden) # rule_embeddings_hidden: batch_size x 128 x max_rule_seq_length rule_embeddings = Embedding(num_rules+1, 128, # embeddings_initializer='uniform', embeddings_initializer=tf.keras.initializers.GlorotUniform(seed=seed), embeddings_regularizer=None, activity_regularizer=None, embeddings_constraint=None, mask_zero=True, input_length=max_rule_seq_length, name="rule_embed")(rule_ids) # Rule bias parameters rule_biases = Embedding(num_rules+1, 1, embeddings_initializer='uniform', embeddings_regularizer=None, activity_regularizer=None, embeddings_constraint=None, mask_zero=True, input_length=max_rule_seq_length, name="rule_bias")(rule_ids) # Compute attention scores att_scores = tf.keras.layers.Dot(axes=[1, 2])([x_hidden, rule_embeddings]) att_scores = tf.keras.layers.Add()([att_scores, tf.keras.backend.squeeze(rule_biases, axis=-1)]) att_sigmoid_proba = Lambda(lambda x: tf.keras.activations.sigmoid(x), name='attention')(att_scores) outputs = tf.keras.layers.Dot(axes=[1, 1], name='raw_outputs')([att_sigmoid_proba, rule_preds_onehot]) outputs = Lambda(lambda x: normalize_with_random_rule(x[0], x[1], x[2]), 
name='outputs_with_uniform')((outputs, att_sigmoid_proba, rule_preds_onehot)) # Normalize Outputs outputs = Lambda(lambda x: l1_normalize(x, num_labels), name='normalized_outputs')(outputs) # Build Model model = tf.keras.Model(inputs=[student_embeddings, rule_ids, rule_preds_onehot], outputs=outputs) print(model.summary()) return model def MinEntropyLoss(batch_size): def loss(y_true, y_prob): per_example_loss = -y_prob * tf.math.log(y_prob) return tf.nn.compute_average_loss(per_example_loss, global_batch_size=batch_size) return loss class MajorityVoter: """ Predicts probabilities using the majority vote of the weak sources Code adapted from the Snorkel source: https://github.com/snorkel-team/snorkel/blob/b3b0669f716a7b3ed6cd573b57f3f8e12bcd495a/snorkel/labeling/model/baselines.py """ def __init__(self, num_labels): self.num_labels = num_labels def predict(self, rule_pred): Y_probs = self.predict_proba(rule_pred) Y_p = self.probs_to_preds(Y_probs) return Y_p def predict_proba(self, rule_pred): n, m = rule_pred.shape pred = np.zeros((n, self.num_labels)) for i in range(n): counts = np.zeros(self.num_labels) for j in range(m): if rule_pred[i, j] != -1: counts[rule_pred[i, j]] += 1 pred[i, :] = np.where(counts == max(counts), 1, 0) pred /= pred.sum(axis=1).reshape(-1, 1) return pred def probs_to_preds(self, probs): num_datapoints, num_classes = probs.shape Y_pred = np.empty(num_datapoints) diffs = np.abs(probs - probs.max(axis=1).reshape(-1, 1)) for i in range(num_datapoints): max_idxs = np.where(diffs[i, :] < 1e-5)[0] if len(max_idxs) == 1: Y_pred[i] = max_idxs[0] else: Y_pred[i] = -1 return Y_pred.astype(np.int) def to_one_hot(x, num_classes): targets = np.array([x]).reshape(-1) return np.eye(num_classes)[targets] def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch < warmup_epoch_count: res = (max_learn_rate / warmup_epoch_count) * (epoch + 1) else: res = max_learn_rate * math.exp( math.log(end_learn_rate / max_learn_rate) * (epoch - warmup_epoch_count + 1) / ( total_epoch_count - warmup_epoch_count + 1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def l1_normalize(x, num_labels): x = x + 1e-05 # avoid stability issues l1_norm = tf.keras.backend.stop_gradient(tf.keras.backend.sum(x, axis=-1)) l1_norm = tf.keras.backend.repeat_elements(tf.keras.backend.expand_dims(l1_norm), num_labels, axis=-1) return x / l1_norm def normalize_with_random_rule(output, att_sigmoid_proba, rule_preds_onehot): num_labels = rule_preds_onehot.shape[-1] sum_prob = tf.keras.backend.stop_gradient(tf.keras.backend.sum(rule_preds_onehot, axis=-1)) rule_mask = tf.keras.backend.cast(sum_prob > 0, 'float32') num_rules = tf.keras.backend.cast(tf.keras.backend.sum(sum_prob, axis=-1), 'float32') masked_att_proba = att_sigmoid_proba * rule_mask sum_masked_att_proba = tf.keras.backend.sum(masked_att_proba, axis=-1) uniform_rule_att_proba = num_rules - sum_masked_att_proba uniform_vec = tf.ones((tf.shape(uniform_rule_att_proba)[0], num_labels)) / num_labels uniform_pred = tf.math.multiply( tf.keras.backend.repeat_elements(tf.keras.backend.expand_dims(uniform_rule_att_proba), num_labels, axis=-1), uniform_vec) output_with_uniform_rule = output+uniform_pred return output_with_uniform_rule
ASTRA/astra/RuleAttentionNetwork.py/0
{ "file_path": "ASTRA/astra/RuleAttentionNetwork.py", "repo_id": "ASTRA", "token_count": 8398 }
21
#!/bin/bash # NAACL 2021 Experiments baselogdir="$(date +'%h%d_%H-%M')_NAACL_experiments" # TREC for seed in 0 20 7 1993 128 42; do logdir="${baselogdir}/seed${seed}" python ../astra/main.py --dataset trec --logdir ${logdir} --seed $seed --learning_rate 0.0001 --finetuning_rate 0.0001 --datapath ../data done # SMS for seed in 0 20 7 1993 128 42; do logdir="${baselogdir}/seed${seed}" python ../astra/main.py --dataset sms --logdir ${logdir} --seed $seed --learning_rate 0.0001 --datapath ../data done # YOUTUBE for seed in 0 20 7 1993 128 42; do logdir="${baselogdir}/seed${seed}" python ../astra/main.py --dataset youtube --logdir ${logdir} --seed $seed --learning_rate 0.0001 --hard_student_rule --datapath ../data done # CENSUS for seed in 0 20 7 1993 128 42; do logdir="${baselogdir}/seed${seed}" python ../astra/main.py --dataset census --logdir ${logdir} --seed $seed --learning_rate 0.001 --hard_student_rule --datapath ../data done # MIT-R for seed in 0 20 7 1993 128 42; do logdir="${baselogdir}/seed${seed}" python ../astra/main.py --dataset mitr --logdir ${logdir} --seed $seed --learning_rate 0.001 --finetuning_rate 0.0001 --hard_student_rule --num_iter 3 --soft_labels --datapath ../data done
ASTRA/scripts/run_experiments.sh/0
{ "file_path": "ASTRA/scripts/run_experiments.sh", "repo_id": "ASTRA", "token_count": 477 }
22
include LICENSE
AdaMix/MANIFEST.in/0
{ "file_path": "AdaMix/MANIFEST.in", "repo_id": "AdaMix", "token_count": 4 }
23
#!/bin/bash cd eval echo "installing evaluation dependencies" echo "downloading e2e-metrics..." git clone https://github.com/tuetschek/e2e-metrics e2e pip install -r e2e/requirements.txt echo "downloading GenerationEval for webnlg and dart..." git clone https://github.com/WebNLG/GenerationEval.git cd GenerationEval # INSTALL PYTHON DEPENDENCIES pip install -r requirements.txt # INSTALL BLEURT pip install --upgrade pip git clone https://github.com/google-research/bleurt.git cd bleurt pip install . wget https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip unzip bleurt-base-128.zip rm bleurt-base-128.zip cd ../ mv bleurt metrics # INSTALL METEOR wget https://www.cs.cmu.edu/~alavie/METEOR/download/meteor-1.5.tar.gz tar -xvf meteor-1.5.tar.gz mv meteor-1.5 metrics rm meteor-1.5.tar.gz rm -r data/en rm -r data/ru cd .. mv eval.py GenerationEval/ echo "script complete!"
AdaMix/NLG/eval/download_evalscript.sh/0
{ "file_path": "AdaMix/NLG/eval/download_evalscript.sh", "repo_id": "AdaMix", "token_count": 344 }
24
. ./venv/bin/activate seed=110 python -m torch.distributed.launch --nproc_per_node=1 src/gpt2_ft.py \ --train_data ./data/e2e/train.jsonl \ --valid_data ./data/e2e/valid.jsonl \ --train_batch_size 8 \ --grad_acc 1 \ --valid_batch_size 4 \ --seq_len 512 \ --model_card gpt2.md \ --init_checkpoint ./pretrained_checkpoints/gpt2-medium-pytorch_model.bin \ --platform local \ --clip 0.0 \ --lr 0.0002 \ --weight_decay 0.01 \ --correct_bias \ --adam_beta2 0.999 \ --scheduler linear \ --warmup_step 500 \ --max_epoch 5 \ --save_interval 1000 \ --lora_dim 4 \ --lora_alpha 32 \ --lora_dropout 0.1 \ --label_smooth 0.1 \ --work_dir ./trained_models/GPT2_M/e2e/$seed/lora_only \ --random_seed $seed \ --lora_only 1 bash run_eval_e2e_lora_only.sh --seed $seed
AdaMix/NLG/run_train_e2e_lora_only.sh/0
{ "file_path": "AdaMix/NLG/run_train_e2e_lora_only.sh", "repo_id": "AdaMix", "token_count": 325 }
25
# ------------------------------------------------------------------------------------------ # Copyright (c). All rights reserved. # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. # ------------------------------------------------------------------------------------------ import argparse import time import math import os, sys import itertools import numpy as np import torch import torch.nn as nn import torch.optim as optim import torch.distributed as dist def add_gpu_params(parser: argparse.ArgumentParser): parser.add_argument("--platform", default='k8s', type=str, help='platform cloud') parser.add_argument("--local_rank", default=0, type=int, help='local rank') parser.add_argument("--rank", default=0, type=int, help='rank') parser.add_argument("--device", default=0, type=int, help='device') parser.add_argument("--world_size", default=0, type=int, help='world size') parser.add_argument("--random_seed", default=10, type=int, help='random seed') def distributed_opt(args, model, opt, grad_acc=1): if args.platform == 'azure': args.hvd.broadcast_parameters(model.state_dict(), root_rank=0) opt = args.hvd.DistributedOptimizer( opt, named_parameters=model.named_parameters(), backward_passes_per_step=grad_acc ) elif args.platform == 'philly' or args.platform == 'k8s' or args.platform == 'local': model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, broadcast_buffers=False ) return model, opt def distributed_gather(args, tensor): g_y = [torch.zeros_like(tensor) for _ in range(args.world_size)] torch.distributed.all_gather(g_y, tensor, async_op=False) return torch.stack(g_y) def distributed_sync(args): if args.platform == 'azure': args.hvd.allreduce(torch.tensor(0), name='barrier') else: args.dist.barrier() def parse_gpu(args): torch.manual_seed(args.random_seed) if args.platform == 'local': dist.init_process_group(backend='nccl') local_rank = torch.distributed.get_rank() torch.cuda.set_device(local_rank) device = torch.device('cuda', local_rank) args.rank = local_rank args.device = device args.world_size = torch.distributed.get_world_size() args.dist = dist elif args.platform == 'azure': import horovod.torch as hvd hvd.init() print('azure hvd rank', hvd.rank(), 'local rank', hvd.local_rank()) local_rank = hvd.local_rank() torch.cuda.set_device(local_rank) device = torch.device('cuda', local_rank) rank = hvd.rank() world_size = hvd.size() args.local_rank = local_rank args.rank = rank args.device = device args.world_size = world_size args.hvd = hvd elif args.platform == 'philly': local_rank = args.local_rank torch.cuda.set_device(local_rank) dist.init_process_group(backend='nccl') rank = dist.get_rank() world_size = torch.distributed.get_world_size() device = torch.device('cuda', local_rank) args.rank = rank args.device = device args.world_size = world_size args.dist = dist elif args.platform == 'k8s': master_uri = f"tcp://{os.environ['MASTER_ADDR']}:{os.environ['MASTER_PORT']}" local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) args.local_rank = local_rank world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) world_rank = int(os.environ['OMPI_COMM_WORLD_RANK']) rank = world_rank torch.cuda.set_device(local_rank) dist.init_process_group( backend='nccl', init_method=master_uri, world_size=world_size, rank=world_rank, ) device = torch.device("cuda", local_rank) args.rank = rank args.device = device args.world_size = world_size args.dist = dist print( 
        'myrank:', args.rank,
        'local_rank:', args.local_rank,
        'device_count:', torch.cuda.device_count(),
        'world_size:', args.world_size
    )


def cleanup(args):
    if args.platform == 'k8s' or args.platform == 'philly':
        args.dist.destroy_process_group()
AdaMix/NLG/src/gpu.py/0
{ "file_path": "AdaMix/NLG/src/gpu.py", "repo_id": "AdaMix", "token_count": 1965 }
26
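A minimal usage sketch for the gpu.py helpers in the record above, assuming the file is importable as `gpu` and the script is launched through torchrun on a CUDA machine; the toy model, learning rate, and launch command are illustrative assumptions, not part of the original file.

# Illustrative only: wires add_gpu_params / parse_gpu / distributed_opt together.
# Assumed launch: torchrun --nproc_per_node=1 train.py --platform local
import argparse

import torch.nn as nn
import torch.optim as optim

from gpu import add_gpu_params, parse_gpu, distributed_opt, distributed_sync, cleanup

parser = argparse.ArgumentParser()
add_gpu_params(parser)                          # adds --platform, --local_rank, --random_seed, ...
args = parser.parse_args()

parse_gpu(args)                                 # fills in args.device, args.rank, args.world_size

model = nn.Linear(10, 2).to(args.device)        # toy stand-in for a real network
opt = optim.SGD(model.parameters(), lr=0.1)
model, opt = distributed_opt(args, model, opt)  # wraps the model in DistributedDataParallel

distributed_sync(args)                          # barrier so all ranks start together
cleanup(args)                                   # destroys the process group at the end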
.. Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Callbacks ----------------------------------------------------------------------------------------------------------------------- Callbacks are objects that can customize the behavior of the training loop in the PyTorch :class:`~transformers.Trainer` (this feature is not yet implemented in TensorFlow) that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms...) and take decisions (like early stopping). Callbacks are "read only" pieces of code, apart from the :class:`~transformers.TrainerControl` object they return, they cannot change anything in the training loop. For customizations that require changes in the training loop, you should subclass :class:`~transformers.Trainer` and override the methods you need (see :doc:`trainer` for examples). By default a :class:`~transformers.Trainer` will use the following callbacks: - :class:`~transformers.DefaultFlowCallback` which handles the default behavior for logging, saving and evaluation. - :class:`~transformers.PrinterCallback` or :class:`~transformers.ProgressCallback` to display progress and print the logs (the first one is used if you deactivate tqdm through the :class:`~transformers.TrainingArguments`, otherwise it's the second one). - :class:`~transformers.integrations.TensorBoardCallback` if tensorboard is accessible (either through PyTorch >= 1.4 or tensorboardX). - :class:`~transformers.integrations.WandbCallback` if `wandb <https://www.wandb.com/>`__ is installed. - :class:`~transformers.integrations.CometCallback` if `comet_ml <https://www.comet.ml/site/>`__ is installed. - :class:`~transformers.integrations.MLflowCallback` if `mlflow <https://www.mlflow.org/>`__ is installed. - :class:`~transformers.integrations.AzureMLCallback` if `azureml-sdk <https://pypi.org/project/azureml-sdk/>`__ is installed. The main class that implements callbacks is :class:`~transformers.TrainerCallback`. It gets the :class:`~transformers.TrainingArguments` used to instantiate the :class:`~transformers.Trainer`, can access that Trainer's internal state via :class:`~transformers.TrainerState`, and can take some actions on the training loop via :class:`~transformers.TrainerControl`. Available Callbacks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is the list of the available :class:`~transformers.TrainerCallback` in the library: .. autoclass:: transformers.integrations.CometCallback :members: setup .. autoclass:: transformers.DefaultFlowCallback .. autoclass:: transformers.PrinterCallback .. autoclass:: transformers.ProgressCallback .. autoclass:: transformers.EarlyStoppingCallback .. autoclass:: transformers.integrations.TensorBoardCallback .. autoclass:: transformers.integrations.WandbCallback :members: setup .. autoclass:: transformers.integrations.MLflowCallback :members: setup .. 
autoclass:: transformers.integrations.AzureMLCallback

TrainerCallback
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TrainerCallback
    :members:

TrainerState
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TrainerState
    :members:

TrainerControl
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TrainerControl
    :members:
AdaMix/docs/source/main_classes/callback.rst/0
{ "file_path": "AdaMix/docs/source/main_classes/callback.rst", "repo_id": "AdaMix", "token_count": 1077 }
27
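The callback API documented in the record above is easiest to see with a tiny example; the callback below is a hypothetical illustration, not a class in the library.

from transformers import TrainerCallback


class PrintEpochCallback(TrainerCallback):
    """Prints a short message at the end of every epoch."""

    def on_epoch_end(self, args, state, control, **kwargs):
        print(f"finished epoch {int(state.epoch)} / {int(args.num_train_epochs)}")
        return control

# It would be registered via Trainer(..., callbacks=[PrintEpochCallback()]).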
.. Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. BERT ----------------------------------------------------------------------------------------------------------------------- Overview ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The BERT model was proposed in `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding <https://arxiv.org/abs/1810.04805>`__ by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer pretrained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia. The abstract from the paper is the following: *We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.* *BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).* Tips: - BERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. - BERT was trained with the masked language modeling (MLM) and next sentence prediction (NSP) objectives. It is efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. The original code can be found `here <https://github.com/google-research/bert>`__. BertConfig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertConfig :members: BertTokenizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertTokenizer :members: build_inputs_with_special_tokens, get_special_tokens_mask, create_token_type_ids_from_sequences, save_vocabulary BertTokenizerFast ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
autoclass:: transformers.BertTokenizerFast :members: Bert specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.models.bert.modeling_bert.BertForPreTrainingOutput :members: .. autoclass:: transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput :members: BertModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertModel :members: forward BertForPreTraining ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertForPreTraining :members: forward BertModelLMHeadModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertLMHeadModel :members: forward BertForMaskedLM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertForMaskedLM :members: forward BertForNextSentencePrediction ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertForNextSentencePrediction :members: forward BertForSequenceClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertForSequenceClassification :members: forward BertForMultipleChoice ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertForMultipleChoice :members: forward BertForTokenClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertForTokenClassification :members: forward BertForQuestionAnswering ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.BertForQuestionAnswering :members: forward TFBertModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFBertModel :members: call TFBertForPreTraining ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFBertForPreTraining :members: call TFBertModelLMHeadModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFBertLMHeadModel :members: call TFBertForMaskedLM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFBertForMaskedLM :members: call TFBertForNextSentencePrediction ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFBertForNextSentencePrediction :members: call TFBertForSequenceClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
autoclass:: transformers.TFBertForSequenceClassification :members: call TFBertForMultipleChoice ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFBertForMultipleChoice :members: call TFBertForTokenClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFBertForTokenClassification :members: call TFBertForQuestionAnswering ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFBertForQuestionAnswering :members: call FlaxBertModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.FlaxBertModel :members: __call__ FlaxBertForMaskedLM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.FlaxBertForMaskedLM :members: __call__
AdaMix/docs/source/model_doc/bert.rst/0
{ "file_path": "AdaMix/docs/source/model_doc/bert.rst", "repo_id": "AdaMix", "token_count": 1784 }
28
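A short inference sketch for the BERT classes documented above; `bert-base-uncased` is the checkpoint the page itself refers to, while the sentence and label count are arbitrary, and the classification head is randomly initialised, so the prediction is meaningless until fine-tuning.

import torch
from transformers import BertTokenizer, BertForSequenceClassification

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model.eval()

inputs = tokenizer("BERT pads on the right.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits      # shape (1, num_labels)
print(logits.argmax(dim=-1).item())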
.. Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. FlauBERT ----------------------------------------------------------------------------------------------------------------------- Overview ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The FlauBERT model was proposed in the paper `FlauBERT: Unsupervised Language Model Pre-training for French <https://arxiv.org/abs/1912.05372>`__ by Hang Le et al. It's a transformer model pretrained using a masked language modeling (MLM) objective (like BERT). The abstract from the paper is the following: *Language models have become a key step to achieve state-of-the art results in many different Natural Language Processing (NLP) tasks. Leveraging the huge amount of unlabeled texts nowadays available, they provide an efficient way to pre-train continuous word representations that can be fine-tuned for a downstream task, along with their contextualization at the sentence level. This has been widely demonstrated for English using contextualized representations (Dai and Le, 2015; Peters et al., 2018; Howard and Ruder, 2018; Radford et al., 2018; Devlin et al., 2019; Yang et al., 2019b). In this paper, we introduce and share FlauBERT, a model learned on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) Jean Zay supercomputer. We apply our French language models to diverse NLP tasks (text classification, paraphrasing, natural language inference, parsing, word sense disambiguation) and show that most of the time they outperform other pretraining approaches. Different versions of FlauBERT as well as a unified evaluation protocol for the downstream tasks, called FLUE (French Language Understanding Evaluation), are shared to the research community for further reproducible experiments in French NLP.* The original code can be found `here <https://github.com/getalp/Flaubert>`__. FlaubertConfig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.FlaubertConfig :members: FlaubertTokenizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.FlaubertTokenizer :members: FlaubertModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.FlaubertModel :members: forward FlaubertWithLMHeadModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.FlaubertWithLMHeadModel :members: forward FlaubertForSequenceClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
autoclass:: transformers.FlaubertForSequenceClassification :members: forward FlaubertForMultipleChoice ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.FlaubertForMultipleChoice :members: forward FlaubertForTokenClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.FlaubertForTokenClassification :members: forward FlaubertForQuestionAnsweringSimple ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.FlaubertForQuestionAnsweringSimple :members: forward FlaubertForQuestionAnswering ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.FlaubertForQuestionAnswering :members: forward TFFlaubertModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFFlaubertModel :members: call TFFlaubertWithLMHeadModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFFlaubertWithLMHeadModel :members: call TFFlaubertForSequenceClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFFlaubertForSequenceClassification :members: call TFFlaubertForMultipleChoice ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFFlaubertForMultipleChoice :members: call TFFlaubertForTokenClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFFlaubertForTokenClassification :members: call TFFlaubertForQuestionAnsweringSimple ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFFlaubertForQuestionAnsweringSimple :members: call
AdaMix/docs/source/model_doc/flaubert.rst/0
{ "file_path": "AdaMix/docs/source/model_doc/flaubert.rst", "repo_id": "AdaMix", "token_count": 1279 }
29
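A comparable sketch for FlauBERT; the checkpoint id `flaubert/flaubert_base_cased` is an assumption and the French sentence is arbitrary.

import torch
from transformers import FlaubertTokenizer, FlaubertModel

checkpoint = "flaubert/flaubert_base_cased"      # assumed checkpoint name
tokenizer = FlaubertTokenizer.from_pretrained(checkpoint)
model = FlaubertModel.from_pretrained(checkpoint)

inputs = tokenizer("Le chat dort sur le canapé.", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state   # (1, seq_len, hidden_size)
print(hidden.shape)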
.. Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. MT5 ----------------------------------------------------------------------------------------------------------------------- Overview ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The mT5 model was presented in `mT5: A massively multilingual pre-trained text-to-text transformer <https://arxiv.org/abs/2010.11934>`_ by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. The abstract from the paper is the following: *The recent "Text-to-Text Transfer Transformer" (T5) leveraged a unified text-to-text format and scale to attain state-of-the-art results on a wide variety of English-language NLP tasks. In this paper, we introduce mT5, a multilingual variant of T5 that was pre-trained on a new Common Crawl-based dataset covering 101 languages. We describe the design and modified training of mT5 and demonstrate its state-of-the-art performance on many multilingual benchmarks. All of the code and model checkpoints* The original code can be found `here <https://github.com/google-research/multilingual-t5>`__. MT5Config ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.MT5Config :members: MT5Tokenizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.MT5Tokenizer See :class:`~transformers.T5Tokenizer` for all details. MT5TokenizerFast ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.MT5TokenizerFast See :class:`~transformers.T5TokenizerFast` for all details. MT5Model ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.MT5Model :members: MT5ForConditionalGeneration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.MT5ForConditionalGeneration :members: MT5EncoderModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.MT5EncoderModel :members: TFMT5Model ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFMT5Model :members: TFMT5ForConditionalGeneration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFMT5ForConditionalGeneration :members: TFMT5EncoderModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFMT5EncoderModel :members:
AdaMix/docs/source/model_doc/mt5.rst/0
{ "file_path": "AdaMix/docs/source/model_doc/mt5.rst", "repo_id": "AdaMix", "token_count": 834 }
30
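A generation sketch for the mT5 classes above; `google/mt5-small` is assumed to be the smallest released checkpoint, and because the raw model is only span-corruption pretrained the call merely illustrates the API rather than producing a useful summary.

from transformers import MT5ForConditionalGeneration, MT5Tokenizer

tokenizer = MT5Tokenizer.from_pretrained("google/mt5-small")
model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")

inputs = tokenizer("summarize: mT5 covers 101 languages.", return_tensors="pt")
output_ids = model.generate(**inputs, max_length=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))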
.. Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. XLM-RoBERTa ----------------------------------------------------------------------------------------------------------------------- Overview ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The XLM-RoBERTa model was proposed in `Unsupervised Cross-lingual Representation Learning at Scale <https://arxiv.org/abs/1911.02116>`__ by Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. It is based on Facebook's RoBERTa model released in 2019. It is a large multi-lingual language model, trained on 2.5TB of filtered CommonCrawl data. The abstract from the paper is the following: *This paper shows that pretraining multilingual language models at scale leads to significant performance gains for a wide range of cross-lingual transfer tasks. We train a Transformer-based masked language model on one hundred languages, using more than two terabytes of filtered CommonCrawl data. Our model, dubbed XLM-R, significantly outperforms multilingual BERT (mBERT) on a variety of cross-lingual benchmarks, including +13.8% average accuracy on XNLI, +12.3% average F1 score on MLQA, and +2.1% average F1 score on NER. XLM-R performs particularly well on low-resource languages, improving 11.8% in XNLI accuracy for Swahili and 9.2% for Urdu over the previous XLM model. We also present a detailed empirical evaluation of the key factors that are required to achieve these gains, including the trade-offs between (1) positive transfer and capacity dilution and (2) the performance of high and low resource languages at scale. Finally, we show, for the first time, the possibility of multilingual modeling without sacrificing per-language performance; XLM-Ris very competitive with strong monolingual models on the GLUE and XNLI benchmarks. We will make XLM-R code, data, and models publicly available.* Tips: - XLM-RoBERTa is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does not require :obj:`lang` tensors to understand which language is used, and should be able to determine the correct language from the input ids. - This implementation is the same as RoBERTa. Refer to the :doc:`documentation of RoBERTa <roberta>` for usage examples as well as the information relative to the inputs and outputs. The original code can be found `here <https://github.com/pytorch/fairseq/tree/master/examples/xlmr>`__. XLMRobertaConfig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.XLMRobertaConfig :members: XLMRobertaTokenizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
autoclass:: transformers.XLMRobertaTokenizer :members: build_inputs_with_special_tokens, get_special_tokens_mask, create_token_type_ids_from_sequences, save_vocabulary XLMRobertaTokenizerFast ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.XLMRobertaTokenizerFast :members: XLMRobertaModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.XLMRobertaModel :members: forward XLMRobertaForCausalLM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.XLMRobertaForCausalLM :members: forward XLMRobertaForMaskedLM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.XLMRobertaForMaskedLM :members: forward XLMRobertaForSequenceClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.XLMRobertaForSequenceClassification :members: forward XLMRobertaForMultipleChoice ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.XLMRobertaForMultipleChoice :members: forward XLMRobertaForTokenClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.XLMRobertaForTokenClassification :members: forward XLMRobertaForQuestionAnswering ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.XLMRobertaForQuestionAnswering :members: forward TFXLMRobertaModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFXLMRobertaModel :members: call TFXLMRobertaForMaskedLM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFXLMRobertaForMaskedLM :members: call TFXLMRobertaForSequenceClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFXLMRobertaForSequenceClassification :members: call TFXLMRobertaForMultipleChoice ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFXLMRobertaForMultipleChoice :members: call TFXLMRobertaForTokenClassification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFXLMRobertaForTokenClassification :members: call TFXLMRobertaForQuestionAnswering ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: transformers.TFXLMRobertaForQuestionAnswering :members: call
AdaMix/docs/source/model_doc/xlmroberta.rst/0
{ "file_path": "AdaMix/docs/source/model_doc/xlmroberta.rst", "repo_id": "AdaMix", "token_count": 1589 }
31
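A masked-token sketch for the XLM-R classes above; `xlm-roberta-base` is the checkpoint the upstream docs use and the sentence is arbitrary.

import torch
from transformers import XLMRobertaTokenizer, XLMRobertaForMaskedLM

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaForMaskedLM.from_pretrained("xlm-roberta-base")

text = f"The capital of France is {tokenizer.mask_token}."
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
print(tokenizer.decode(logits[0, mask_pos].argmax(dim=-1)))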
.. Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Training and fine-tuning ======================================================================================================================= Model classes in 🤗 Transformers are designed to be compatible with native PyTorch and TensorFlow 2 and can be used seamlessly with either. In this quickstart, we will show how to fine-tune (or train from scratch) a model using the standard training tools available in either framework. We will also show how to use our included :func:`~transformers.Trainer` class which handles much of the complexity of training for you. This guide assume that you are already familiar with loading and use our models for inference; otherwise, see the :doc:`task summary <task_summary>`. We also assume that you are familiar with training deep neural networks in either PyTorch or TF2, and focus specifically on the nuances and tools for training models in 🤗 Transformers. Sections: - :ref:`pytorch` - :ref:`tensorflow` - :ref:`trainer` - :ref:`additional-resources` .. _pytorch: Fine-tuning in native PyTorch ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Model classes in 🤗 Transformers that don't begin with ``TF`` are `PyTorch Modules <https://pytorch.org/docs/master/generated/torch.nn.Module.html>`_, meaning that you can use them just as you would any model in PyTorch for both inference and optimization. Let's consider the common task of fine-tuning a masked language model like BERT on a sequence classification dataset. When we instantiate a model with :func:`~transformers.PreTrainedModel.from_pretrained`, the model configuration and pre-trained weights of the specified model are used to initialize the model. The library also includes a number of task-specific final layers or 'heads' whose weights are instantiated randomly when not present in the specified pre-trained model. For example, instantiating a model with ``BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)`` will create a BERT model instance with encoder weights copied from the ``bert-base-uncased`` model and a randomly initialized sequence classification head on top of the encoder with an output size of 2. Models are initialized in ``eval`` mode by default. We can call ``model.train()`` to put it in train mode. .. code-block:: python from transformers import BertForSequenceClassification model = BertForSequenceClassification.from_pretrained('bert-base-uncased') model.train() This is useful because it allows us to make use of the pre-trained BERT encoder and easily train it on whatever sequence classification dataset we choose. We can use any PyTorch optimizer, but our library also provides the :func:`~transformers.AdamW` optimizer which implements gradient bias correction as well as weight decay. .. 
code-block:: python from transformers import AdamW optimizer = AdamW(model.parameters(), lr=1e-5) The optimizer allows us to apply different hyperpameters for specific parameter groups. For example, we can apply weight decay to all parameters other than bias and layer normalization terms: .. code-block:: python no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5) Now we can set up a simple dummy training batch using :func:`~transformers.PreTrainedTokenizer.__call__`. This returns a :func:`~transformers.BatchEncoding` instance which prepares everything we might need to pass to the model. .. code-block:: python from transformers import BertTokenizer tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') text_batch = ["I love Pixar.", "I don't care for Pixar."] encoding = tokenizer(text_batch, return_tensors='pt', padding=True, truncation=True) input_ids = encoding['input_ids'] attention_mask = encoding['attention_mask'] When we call a classification model with the ``labels`` argument, the first returned element is the Cross Entropy loss between the predictions and the passed labels. Having already set up our optimizer, we can then do a backwards pass and update the weights: .. code-block:: python labels = torch.tensor([1,0]).unsqueeze(0) outputs = model(input_ids, attention_mask=attention_mask, labels=labels) loss = outputs.loss loss.backward() optimizer.step() Alternatively, you can just get the logits and calculate the loss yourself. The following is equivalent to the previous example: .. code-block:: python from torch.nn import functional as F labels = torch.tensor([1,0]) outputs = model(input_ids, attention_mask=attention_mask) loss = F.cross_entropy(outputs.logits, labels) loss.backward() optimizer.step() Of course, you can train on GPU by calling ``to('cuda')`` on the model and inputs as usual. We also provide a few learning rate scheduling tools. With the following, we can set up a scheduler which warms up for ``num_warmup_steps`` and then linearly decays to 0 by the end of training. .. code-block:: python from transformers import get_linear_schedule_with_warmup scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_train_steps) Then all we have to do is call ``scheduler.step()`` after ``optimizer.step()``. .. code-block:: python loss.backward() optimizer.step() scheduler.step() We highly recommend using :func:`~transformers.Trainer`, discussed below, which conveniently handles the moving parts of training 🤗 Transformers models with features like mixed precision and easy tensorboard logging. Freezing the encoder ----------------------------------------------------------------------------------------------------------------------- In some cases, you might be interested in keeping the weights of the pre-trained encoder frozen and optimizing only the weights of the head layers. To do so, simply set the ``requires_grad`` attribute to ``False`` on the encoder parameters, which can be accessed with the ``base_model`` submodule on any task-specific model in the library: .. code-block:: python for param in model.base_model.parameters(): param.requires_grad = False .. 
_tensorflow: Fine-tuning in native TensorFlow 2 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Models can also be trained natively in TensorFlow 2. Just as with PyTorch, TensorFlow models can be instantiated with :func:`~transformers.PreTrainedModel.from_pretrained` to load the weights of the encoder from a pretrained model. .. code-block:: python from transformers import TFBertForSequenceClassification model = TFBertForSequenceClassification.from_pretrained('bert-base-uncased') Let's use ``tensorflow_datasets`` to load in the `MRPC dataset <https://www.tensorflow.org/datasets/catalog/glue#gluemrpc>`_ from GLUE. We can then use our built-in :func:`~transformers.data.processors.glue.glue_convert_examples_to_features` to tokenize MRPC and convert it to a TensorFlow ``Dataset`` object. Note that tokenizers are framework-agnostic, so there is no need to prepend ``TF`` to the pretrained tokenizer name. .. code-block:: python from transformers import BertTokenizer, glue_convert_examples_to_features import tensorflow as tf import tensorflow_datasets as tfds tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') data = tfds.load('glue/mrpc') train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, max_length=128, task='mrpc') train_dataset = train_dataset.shuffle(100).batch(32).repeat(2) The model can then be compiled and trained as any Keras model: .. code-block:: python optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model.compile(optimizer=optimizer, loss=loss) model.fit(train_dataset, epochs=2, steps_per_epoch=115) With the tight interoperability between TensorFlow and PyTorch models, you can even save the model and then reload it as a PyTorch model (or vice-versa): .. code-block:: python from transformers import BertForSequenceClassification model.save_pretrained('./my_mrpc_model/') pytorch_model = BertForSequenceClassification.from_pretrained('./my_mrpc_model/', from_tf=True) .. _trainer: Trainer ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We also provide a simple but feature-complete training and evaluation interface through :func:`~transformers.Trainer` and :func:`~transformers.TFTrainer`. You can train, fine-tune, and evaluate any 🤗 Transformers model with a wide range of training options and with built-in features like logging, gradient accumulation, and mixed precision. .. 
code-block:: python ## PYTORCH CODE from transformers import BertForSequenceClassification, Trainer, TrainingArguments model = BertForSequenceClassification.from_pretrained("bert-large-uncased") training_args = TrainingArguments( output_dir='./results', # output directory num_train_epochs=3, # total # of training epochs per_device_train_batch_size=16, # batch size per device during training per_device_eval_batch_size=64, # batch size for evaluation warmup_steps=500, # number of warmup steps for learning rate scheduler weight_decay=0.01, # strength of weight decay logging_dir='./logs', # directory for storing logs ) trainer = Trainer( model=model, # the instantiated 🤗 Transformers model to be trained args=training_args, # training arguments, defined above train_dataset=train_dataset, # training dataset eval_dataset=test_dataset # evaluation dataset ) ## TENSORFLOW CODE from transformers import TFBertForSequenceClassification, TFTrainer, TFTrainingArguments model = TFBertForSequenceClassification.from_pretrained("bert-large-uncased") training_args = TFTrainingArguments( output_dir='./results', # output directory num_train_epochs=3, # total # of training epochs per_device_train_batch_size=16, # batch size per device during training per_device_eval_batch_size=64, # batch size for evaluation warmup_steps=500, # number of warmup steps for learning rate scheduler weight_decay=0.01, # strength of weight decay logging_dir='./logs', # directory for storing logs ) trainer = TFTrainer( model=model, # the instantiated 🤗 Transformers model to be trained args=training_args, # training arguments, defined above train_dataset=tfds_train_dataset, # tensorflow_datasets training dataset eval_dataset=tfds_test_dataset # tensorflow_datasets evaluation dataset ) Now simply call ``trainer.train()`` to train and ``trainer.evaluate()`` to evaluate. You can use your own module as well, but the first argument returned from ``forward`` must be the loss which you wish to optimize. :func:`~transformers.Trainer` uses a built-in default function to collate batches and prepare them to be fed into the model. If needed, you can also use the ``data_collator`` argument to pass your own collator function which takes in the data in the format provided by your dataset and returns a batch ready to be fed into the model. Note that :func:`~transformers.TFTrainer` expects the passed datasets to be dataset objects from ``tensorflow_datasets``. To calculate additional metrics in addition to the loss, you can also define your own ``compute_metrics`` function and pass it to the trainer. .. code-block:: python from sklearn.metrics import accuracy_score, precision_recall_fscore_support def compute_metrics(pred): labels = pred.label_ids preds = pred.predictions.argmax(-1) precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='binary') acc = accuracy_score(labels, preds) return { 'accuracy': acc, 'f1': f1, 'precision': precision, 'recall': recall } Finally, you can view the results, including any calculated metrics, by launching tensorboard in your specified ``logging_dir`` directory. .. _additional-resources: Additional resources ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - `A lightweight colab demo <https://colab.research.google.com/drive/1-JIJlao4dI-Ilww_NnTc0rxtp-ymgDgM?usp=sharing>`_ which uses ``Trainer`` for IMDb sentiment classification. 
- `🤗 Transformers Examples <https://github.com/huggingface/transformers/tree/master/examples>`_ including scripts for training and fine-tuning on GLUE, SQuAD, and several other tasks.
- `How to train a language model <https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/01_how_to_train.ipynb>`_, a detailed colab notebook which uses ``Trainer`` to train a masked language model from scratch on Esperanto.
- `🤗 Transformers Notebooks <notebooks.html>`_ which contain dozens of example notebooks from the community for training and using 🤗 Transformers on a variety of tasks.
AdaMix/docs/source/training.rst/0
{ "file_path": "AdaMix/docs/source/training.rst", "repo_id": "AdaMix", "token_count": 4533 }
32
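The PyTorch recipe in the training guide above condenses into one runnable loop; the two-sentence batch comes from the page itself, while the batch size and step counts are arbitrary choices for illustration.

import torch
from torch.utils.data import DataLoader, TensorDataset
from transformers import AdamW, BertForSequenceClassification, BertTokenizer, get_linear_schedule_with_warmup

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model.train()

enc = tokenizer(["I love Pixar.", "I don't care for Pixar."], return_tensors="pt", padding=True, truncation=True)
labels = torch.tensor([1, 0])
loader = DataLoader(TensorDataset(enc["input_ids"], enc["attention_mask"], labels), batch_size=2)

optimizer = AdamW(model.parameters(), lr=1e-5)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=len(loader))

for input_ids, attention_mask, batch_labels in loader:
    loss = model(input_ids, attention_mask=attention_mask, labels=batch_labels).loss
    loss.backward()
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()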
#!/usr/bin/env python # coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenAI GPT model fine-tuning script. Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset: python run_openai_gpt.py \ --model_name openai-gpt \ --do_train \ --do_eval \ --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016 - cloze_test_ALL_val.csv" \ --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016 - cloze_test_ALL_test.csv" \ --output_dir ../log \ --train_batch_size 16 \ """ import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) logger = logging.getLogger(__name__) def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def load_rocstories_dataset(dataset_path): """ Output a list of tuples(story, 1st continuation, 2nd continuation, label) """ with open(dataset_path, encoding="utf_8") as f: f = csv.reader(f) output = [] next(f) # skip the first line for line in tqdm(f): output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1)) return output def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token): """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label) To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation: input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] """ tensor_datasets = [] for dataset in encoded_datasets: n_batch = len(dataset) input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64) mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64) lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64) mc_labels = np.zeros((n_batch,), dtype=np.int64) for ( i, (story, cont1, cont2, mc_label), ) in enumerate(dataset): with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token] input_ids[i, 0, : len(with_cont1)] = with_cont1 input_ids[i, 1, : len(with_cont2)] = with_cont2 mc_token_ids[i, 0] = len(with_cont1) - 1 mc_token_ids[i, 1] = 
len(with_cont2) - 1 lm_labels[i, 0, : len(with_cont1)] = with_cont1 lm_labels[i, 1, : len(with_cont2)] = with_cont2 mc_labels[i] = mc_label all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs)) return tensor_datasets def main(): parser = argparse.ArgumentParser() parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--train_dataset", type=str, default="") parser.add_argument("--eval_dataset", type=str, default="") parser.add_argument("--seed", type=int, default=42) parser.add_argument("--num_train_epochs", type=int, default=3) parser.add_argument("--train_batch_size", type=int, default=8) parser.add_argument("--eval_batch_size", type=int, default=16) parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", type=int, default=1) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training \ steps to perform. Override num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before\ performing a backward/update pass.", ) parser.add_argument("--learning_rate", type=float, default=6.25e-5) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--lr_schedule", type=str, default="warmup_linear") parser.add_argument("--weight_decay", type=float, default=0.01) parser.add_argument("--lm_coef", type=float, default=0.9) parser.add_argument("--n_valid", type=int, default=374) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") args = parser.parse_args() print(args) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() logger.info("device: {}, n_gpu {}".format(device, n_gpu)) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset special_tokens = ["_start_", "_delimiter_", "_classify_"] tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name) tokenizer.add_tokens(special_tokens) special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens) model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name) 
model.resize_token_embeddings(len(tokenizer)) model.to(device) # Load and encode the datasets def tokenize_and_encode(obj): """ Tokenize and encode a nested object """ if isinstance(obj, str): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj)) elif isinstance(obj, int): return obj return list(tokenize_and_encode(o) for o in obj) logger.info("Encoding dataset...") train_dataset = load_rocstories_dataset(args.train_dataset) eval_dataset = load_rocstories_dataset(args.eval_dataset) datasets = (train_dataset, eval_dataset) encoded_datasets = tokenize_and_encode(datasets) # Compute the max input length for the Transformer max_length = model.config.n_positions // 2 - 2 input_length = max( len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3 for dataset in encoded_datasets for story, cont1, cont2, _ in dataset ) input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids) train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1] train_data = TensorDataset(*train_tensor_dataset) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) eval_data = TensorDataset(*eval_tensor_dataset) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) # Prepare optimizer if args.do_train: if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs param_optimizer = list(model.named_parameters()) no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) if args.do_train: nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_steps = 0 tqdm_bar = tqdm(train_dataloader, desc="Training") for step, batch in enumerate(tqdm_bar): batch = tuple(t.to(device) for t in batch) input_ids, mc_token_ids, lm_labels, mc_labels = batch losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels) loss = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() exp_average_loss = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0]) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer model_to_save = model.module if hasattr(model, "module") else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` output_model_file = 
os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) # Load a trained model and vocabulary that you have fine-tuned model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir) tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir) model.to(device) if args.do_eval: model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): batch = tuple(t.to(device) for t in batch) input_ids, mc_token_ids, lm_labels, mc_labels = batch with torch.no_grad(): _, mc_loss, _, mc_logits = model( input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels ) mc_logits = mc_logits.detach().cpu().numpy() mc_labels = mc_labels.to("cpu").numpy() tmp_eval_accuracy = accuracy(mc_logits, mc_labels) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples train_loss = tr_loss / nb_tr_steps if args.do_train else None result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": main()
AdaMix/examples/legacy/run_openai_gpt.py/0
{ "file_path": "AdaMix/examples/legacy/run_openai_gpt.py", "repo_id": "AdaMix", "token_count": 6149 }
33
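The CSV layout expected by load_rocstories_dataset() in the script above can be inferred from its column indexing; the file name, header names, and story text below are made up for illustration.

import csv

rows = [
    ["storyid", "s1", "s2", "s3", "s4", "ending1", "ending2", "answer"],   # header row is skipped by the loader
    ["0", "Tom was hungry.", "He opened the fridge.", "It was empty.", "He grabbed his keys.",
     "He drove to the store.", "He went back to sleep.", "1"],
]
with open("toy_rocstories.csv", "w", newline="", encoding="utf_8") as f:
    csv.writer(f).writerows(rows)

# load_rocstories_dataset("toy_rocstories.csv") would yield one tuple:
# (the four story sentences joined by spaces, ending1, ending2, label 0 since "answer" is 1)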
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A subclass of `Trainer` specific to Question-Answering tasks
"""

from transformers import Trainer, is_datasets_available, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


if is_datasets_available():
    import datasets

if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        try:
            output = self.prediction_loop(
                eval_dataloader,
                description="Evaluation",
                # No point gathering the predictions if there are no metrics, otherwise we defer to
                # self.args.prediction_loss_only
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        # We might have removed columns from the dataset so we put them back.
        if isinstance(eval_dataset, datasets.Dataset):
            eval_dataset.set_format(type=eval_dataset.format["type"], columns=list(eval_dataset.features.keys()))

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, test_dataset, test_examples, ignore_keys=None):
        test_dataloader = self.get_test_dataloader(test_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        try:
            output = self.prediction_loop(
                test_dataloader,
                description="Evaluation",
                # No point gathering the predictions if there are no metrics, otherwise we defer to
                # self.args.prediction_loss_only
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        # We might have removed columns from the dataset so we put them back.
        if isinstance(test_dataset, datasets.Dataset):
            test_dataset.set_format(type=test_dataset.format["type"], columns=list(test_dataset.features.keys()))

        eval_preds = self.post_process_function(test_examples, test_dataset, output.predictions)
        metrics = self.compute_metrics(eval_preds)

        return PredictionOutput(predictions=eval_preds.predictions, label_ids=eval_preds.label_ids, metrics=metrics)
AdaMix/examples/question-answering/trainer_qa.py/0
{ "file_path": "AdaMix/examples/question-answering/trainer_qa.py", "repo_id": "AdaMix", "token_count": 1796 }
34
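The QuestionAnsweringTrainer above is a standard Trainer plus two extra hooks: eval_examples (the raw, un-tokenized examples) and post_process_function (which maps raw start/end predictions back to answer texts before metrics are computed). The sketch below shows one plausible way to wire it up; it is an illustration under assumptions, not code from the repo, and the names postprocess_qa_predictions, metric, model, and the dataset variables are placeholders.

# Illustrative sketch only; `postprocess_qa_predictions` and `metric` are assumed helpers.
from transformers import TrainingArguments

def post_processing_function(examples, features, predictions):
    # Convert start/end logits into answer strings keyed by example id (project-specific).
    return postprocess_qa_predictions(examples, features, predictions)

trainer = QuestionAnsweringTrainer(
    model=model,
    args=TrainingArguments(output_dir="out"),
    train_dataset=train_dataset,          # tokenized features
    eval_dataset=eval_dataset,            # tokenized features
    eval_examples=raw_eval_examples,      # original examples, consumed by the post-processor
    post_process_function=post_processing_function,
    compute_metrics=lambda p: metric.compute(predictions=p.predictions, references=p.label_ids),
)
metrics = trainer.evaluate()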
# coding=utf-8
# Copyright 2019 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BertAbs configuration """
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)


BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    r"""Class to store the configuration of the BertAbs model.

    Arguments:
        vocab_size: int
            Number of tokens in the vocabulary.
        max_pos: int
            The maximum sequence length that this model will be used with.
        enc_layers: int
            The number of hidden layers in the Transformer encoder.
        enc_hidden_size: int
            The size of the encoder's layers.
        enc_heads: int
            The number of attention heads for each attention layer in the encoder.
        enc_ff_size: int
            The size of the encoder's feed-forward layers.
        enc_dropout: int
            The dropout probability for all fully connected layers in the embeddings,
            layers, pooler and also the attention probabilities in the encoder.
        dec_layers: int
            The number of hidden layers in the decoder.
        dec_hidden_size: int
            The size of the decoder's layers.
        dec_heads: int
            The number of attention heads for each attention layer in the decoder.
        dec_ff_size: int
            The size of the decoder's feed-forward layers.
        dec_dropout: int
            The dropout probability for all fully connected layers in the embeddings,
            layers, pooler and also the attention probabilities in the decoder.
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
AdaMix/examples/research_projects/bertabs/configuration_bertabs.py/0
{ "file_path": "AdaMix/examples/research_projects/bertabs/configuration_bertabs.py", "repo_id": "AdaMix", "token_count": 1345 }
35
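Because BertAbsConfig subclasses PretrainedConfig, it can be instantiated with keyword overrides and round-tripped through JSON like any other Transformers configuration. A minimal sketch follows; the override values are arbitrary and only illustrate the mechanics.

# Illustrative sketch; the override values below are arbitrary.
config = BertAbsConfig(dec_layers=8, dec_ff_size=1024)
assert config.enc_layers == 6           # unspecified fields keep their defaults
serialized = config.to_json_string()    # inherited from PretrainedConfig
restored = BertAbsConfig.from_dict(config.to_dict())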
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])

        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        r"""
        Return:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration
            (:class:`~transformers.BertConfig`) and inputs:
            last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
                Sequence of hidden-states at the output of the last layer of the model.
            pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
                Last layer hidden-state of the first token of the sequence (classification token) further processed
                by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next
                sentence prediction (classification) objective during pre-training.

                This output is usually *not* a good summary of the semantic content of the input, you're often better
                with averaging or pooling the sequence of hidden-states for the whole input sequence.
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each
                layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.

                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
                sequence_length, sequence_length)`.

                Attentions weights after the attention softmax, used to compute the weighted average in the
                self-attention heads.
            highway_exits (:obj:`tuple(tuple(torch.Tensor))`:
                Tuple of each early exit's results (total length: number of layers)
                Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden
                states.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (sequence_output, pooled_output,) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut
    from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification)
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in
            :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed
            (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration
            (:class:`~transformers.BertConfig`) and inputs:
            loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
                Classification (or regression if config.num_labels==1) loss.
            logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
                Classification (or regression if config.num_labels==1) scores (before SoftMax).
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each
                layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.

                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
                sequence_length, sequence_length)`.

                Attentions weights after the attention softmax, used to compute the weighted average in the
                self-attention heads.
            highway_exits (:obj:`tuple(tuple(torch.Tensor))`:
                Tuple of each early exit's results (total length: number of layers)
                Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden
                states.
        """
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
AdaMix/examples/research_projects/deebert/src/modeling_highway_bert.py/0
{ "file_path": "AdaMix/examples/research_projects/deebert/src/modeling_highway_bert.py", "repo_id": "AdaMix", "token_count": 7794 }
36
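The early-exit rule in DeeBertEncoder.forward compares the entropy of each highway classifier's logits against a per-layer threshold and raises HighwayException as soon as a prediction is confident enough. The standalone sketch below reproduces the same entropy formula on toy logits to show how the threshold gates early exiting; it is an illustration, not code from the repo.

# Standalone illustration of the entropy criterion used above (not part of the AdaMix sources).
import torch

def entropy(x):
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)
    B = torch.sum(x * exp_x, dim=1)
    return torch.log(A) - B / A  # Shannon entropy of softmax(x)

confident = torch.tensor([[8.0, -4.0]])   # peaked logits -> entropy near 0
uncertain = torch.tensor([[0.1, 0.0]])    # flat logits -> entropy near log(2) ~ 0.69
threshold = 0.5
print(entropy(confident) < threshold)     # tensor([True])  -> this layer would exit early
print(entropy(uncertain) < threshold)     # tensor([False]) -> continue to deeper layers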
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal Adapted From Facebook Inc, Detectron2 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import colorsys import io import matplotlib as mpl import matplotlib.colors as mplc import matplotlib.figure as mplfigure import numpy as np import torch from matplotlib.backends.backend_agg import FigureCanvasAgg import cv2 from utils import img_tensorize _SMALL_OBJ = 1000 class SingleImageViz: def __init__( self, img, scale=1.2, edgecolor="g", alpha=0.5, linestyle="-", saveas="test_out.jpg", rgb=True, pynb=False, id2obj=None, id2attr=None, pad=0.7, ): """ img: an RGB image of shape (H, W, 3). """ if isinstance(img, torch.Tensor): img = img.numpy().astype("np.uint8") if isinstance(img, str): img = img_tensorize(img) assert isinstance(img, np.ndarray) width, height = img.shape[1], img.shape[0] fig = mplfigure.Figure(frameon=False) dpi = fig.get_dpi() width_in = (width * scale + 1e-2) / dpi height_in = (height * scale + 1e-2) / dpi fig.set_size_inches(width_in, height_in) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") ax.set_xlim(0.0, width) ax.set_ylim(height) self.saveas = saveas self.rgb = rgb self.pynb = pynb self.img = img self.edgecolor = edgecolor self.alpha = 0.5 self.linestyle = linestyle self.font_size = int(np.sqrt(min(height, width)) * scale // 3) self.width = width self.height = height self.scale = scale self.fig = fig self.ax = ax self.pad = pad self.id2obj = id2obj self.id2attr = id2attr self.canvas = FigureCanvasAgg(fig) def add_box(self, box, color=None): if color is None: color = self.edgecolor (x0, y0, x1, y1) = box width = x1 - x0 height = y1 - y0 self.ax.add_patch( mpl.patches.Rectangle( (x0, y0), width, height, fill=False, edgecolor=color, linewidth=self.font_size // 3, alpha=self.alpha, linestyle=self.linestyle, ) ) def draw_boxes(self, boxes, obj_ids=None, obj_scores=None, attr_ids=None, attr_scores=None): if len(boxes.shape) > 2: boxes = boxes[0] if len(obj_ids.shape) > 1: obj_ids = obj_ids[0] if len(obj_scores.shape) > 1: obj_scores = obj_scores[0] if len(attr_ids.shape) > 1: attr_ids = attr_ids[0] if len(attr_scores.shape) > 1: attr_scores = attr_scores[0] if isinstance(boxes, torch.Tensor): boxes = boxes.numpy() if isinstance(boxes, list): boxes = np.array(boxes) assert isinstance(boxes, np.ndarray) areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) sorted_idxs = np.argsort(-areas).tolist() boxes = boxes[sorted_idxs] if boxes is not None else None obj_ids = obj_ids[sorted_idxs] if obj_ids is not None else None obj_scores = obj_scores[sorted_idxs] if obj_scores is not None else None attr_ids = attr_ids[sorted_idxs] if attr_ids is not None else None attr_scores = attr_scores[sorted_idxs] if attr_scores is not None else None assigned_colors = [self._random_color(maximum=1) for _ in range(len(boxes))] assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] if obj_ids is not None: labels = self._create_text_labels_attr(obj_ids, obj_scores, attr_ids, attr_scores) for i in range(len(boxes)): color = assigned_colors[i] 
self.add_box(boxes[i], color) self.draw_labels(labels[i], boxes[i], color) def draw_labels(self, label, box, color): x0, y0, x1, y1 = box text_pos = (x0, y0) instance_area = (y1 - y0) * (x1 - x0) small = _SMALL_OBJ * self.scale if instance_area < small or y1 - y0 < 40 * self.scale: if y1 >= self.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.height * self.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) font_size *= 0.75 * self.font_size self.draw_text( text=label, position=text_pos, color=lighter_color, ) def draw_text( self, text, position, color="g", ha="left", ): rotation = 0 font_size = self.font_size color = np.maximum(list(mplc.to_rgb(color)), 0.2) color[np.argmax(color)] = max(0.8, np.max(color)) bbox = { "facecolor": "black", "alpha": self.alpha, "pad": self.pad, "edgecolor": "none", } x, y = position self.ax.text( x, y, text, size=font_size * self.scale, family="sans-serif", bbox=bbox, verticalalignment="top", horizontalalignment=ha, color=color, zorder=10, rotation=rotation, ) def save(self, saveas=None): if saveas is None: saveas = self.saveas if saveas.lower().endswith(".jpg") or saveas.lower().endswith(".png"): cv2.imwrite( saveas, self._get_buffer()[:, :, ::-1], ) else: self.fig.savefig(saveas) def _create_text_labels_attr(self, classes, scores, attr_classes, attr_scores): labels = [self.id2obj[i] for i in classes] attr_labels = [self.id2attr[i] for i in attr_classes] labels = [ f"{label} {score:.2f} {attr} {attr_score:.2f}" for label, score, attr, attr_score in zip(labels, scores, attr_labels, attr_scores) ] return labels def _create_text_labels(self, classes, scores): labels = [self.id2obj[i] for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(li, s * 100) for li, s in zip(labels, scores)] return labels def _random_color(self, maximum=255): idx = np.random.randint(0, len(_COLORS)) ret = _COLORS[idx] * maximum if not self.rgb: ret = ret[::-1] return ret def _get_buffer(self): if not self.pynb: s, (width, height) = self.canvas.print_to_buffer() if (width, height) != (self.width, self.height): img = cv2.resize(self.img, (width, height)) else: img = self.img else: buf = io.BytesIO() # works for cairo backend self.canvas.print_rgba(buf) width, height = self.width, self.height s = buf.getvalue() img = self.img buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) try: import numexpr as ne # fuse them with numexpr visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)") except ImportError: alpha = alpha.astype("float32") / 255.0 visualized_image = img * (1 - alpha) + rgb * alpha return visualized_image.astype("uint8") def _change_color_brightness(self, color, brightness_factor): assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return modified_color # Color map _COLORS = ( np.array( [ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 
0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000, ] ) .astype(np.float32) .reshape(-1, 3) )
AdaMix/examples/research_projects/lxmert/visualizing_image.py/0
{ "file_path": "AdaMix/examples/research_projects/lxmert/visualizing_image.py", "repo_id": "AdaMix", "token_count": 8182 }
37
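SingleImageViz above is a thin matplotlib wrapper for drawing detector output onto one image. The usage sketch below is illustrative only: the image, boxes, ids, and scores are fabricated stand-ins, and id2obj/id2attr would normally come from the label files the LXMERT demo loads.

# Illustrative sketch; all inputs below are fabricated stand-ins.
import numpy as np

image = np.zeros((480, 640, 3), dtype=np.uint8)   # placeholder RGB frame
id2obj = {0: "dog", 1: "ball"}
id2attr = {0: "brown", 1: "red"}

viz = SingleImageViz(image, id2obj=id2obj, id2attr=id2attr, saveas="demo_out.jpg")
viz.draw_boxes(
    boxes=np.array([[40, 60, 300, 420], [350, 200, 430, 280]], dtype=np.float32),  # x0, y0, x1, y1
    obj_ids=np.array([0, 1]),
    obj_scores=np.array([0.97, 0.88]),
    attr_ids=np.array([0, 1]),
    attr_scores=np.array([0.81, 0.74]),
)
viz.save()  # writes demo_out.jpg via cv2.imwrite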