text
stringlengths 5
22M
| id
stringlengths 12
177
| metadata
dict | __index_level_0__
int64 0
1.37k
|
---|---|---|---|
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="pumpkin" format="2">
<advance width="1200"/>
<unicode hex="F0BBF"/>
<note>
pumpkin
</note>
<outline>
<contour>
<point x="208" y="1030"/>
<point x="88" y="913"/>
<point x="20" y="711"/>
<point x="20" y="477"/>
<point x="88" y="276"/>
<point x="208" y="159"/>
<point x="281" y="159" type="qcurve" smooth="yes"/>
<point x="366" y="159"/>
<point x="431" y="238" type="qcurve"/>
<point x="505" y="159"/>
<point x="695" y="159"/>
<point x="769" y="238" type="qcurve"/>
<point x="834" y="159"/>
<point x="919" y="159" type="qcurve" smooth="yes"/>
<point x="992" y="159"/>
<point x="1112" y="276"/>
<point x="1180" y="477"/>
<point x="1180" y="711"/>
<point x="1112" y="913"/>
<point x="992" y="1030"/>
<point x="919" y="1030" type="qcurve" smooth="yes"/>
<point x="834" y="1030"/>
<point x="769" y="951" type="qcurve"/>
<point x="717" y="1003"/>
<point x="657" y="1022" type="qcurve"/>
<point x="657" y="1147" type="line"/>
<point x="774" y="1147" type="line"/>
<point x="774" y="1261" type="line"/>
<point x="657" y="1261" type="line" smooth="yes"/>
<point x="611" y="1261"/>
<point x="543" y="1193"/>
<point x="543" y="1147" type="qcurve" smooth="yes"/>
<point x="543" y="1022" type="line"/>
<point x="483" y="1003"/>
<point x="431" y="951" type="qcurve"/>
<point x="366" y="1030"/>
<point x="281" y="1030" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/pumpkin.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/pumpkin.glif",
"repo_id": "cascadia-code",
"token_count": 829
}
| 1,111 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="relation-one-or-many-to-one-or-many" format="2">
<advance width="1200"/>
<unicode hex="F149E"/>
<note>
relation-one-or-many-to-one-or-many
</note>
<outline>
<contour>
<point x="1123" y="653" type="line"/>
<point x="1006" y="536" type="line"/>
<point x="1006" y="653" type="line"/>
<point x="891" y="653" type="line"/>
<point x="891" y="536" type="line"/>
<point x="657" y="536" type="line"/>
<point x="657" y="1001" type="line"/>
<point x="309" y="1001" type="line"/>
<point x="309" y="1116" type="line"/>
<point x="194" y="1116" type="line"/>
<point x="194" y="1001" type="line"/>
<point x="77" y="1116" type="line"/>
<point x="20" y="1116" type="line"/>
<point x="20" y="767" type="line"/>
<point x="77" y="767" type="line"/>
<point x="194" y="884" type="line"/>
<point x="194" y="767" type="line"/>
<point x="309" y="767" type="line"/>
<point x="309" y="884" type="line"/>
<point x="543" y="884" type="line"/>
<point x="543" y="419" type="line"/>
<point x="891" y="419" type="line"/>
<point x="891" y="304" type="line"/>
<point x="1006" y="304" type="line"/>
<point x="1006" y="419" type="line"/>
<point x="1123" y="304" type="line"/>
<point x="1180" y="304" type="line"/>
<point x="1180" y="653" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/relation-one-or-many-to-one-or-many.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/relation-one-or-many-to-one-or-many.glif",
"repo_id": "cascadia-code",
"token_count": 698
}
| 1,112 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="room-service" format="2">
<advance width="1200"/>
<unicode hex="F088D"/>
<note>
room-service
</note>
<outline>
<contour>
<point x="551" y="1116"/>
<point x="483" y="1048"/>
<point x="483" y="1001" type="qcurve" smooth="yes"/>
<point x="483" y="980"/>
<point x="491" y="961" type="qcurve"/>
<point x="374" y="936"/>
<point x="186" y="797"/>
<point x="77" y="596"/>
<point x="77" y="479" type="qcurve"/>
<point x="1123" y="479" type="line"/>
<point x="1123" y="596"/>
<point x="1014" y="797"/>
<point x="826" y="936"/>
<point x="709" y="961" type="qcurve"/>
<point x="717" y="980"/>
<point x="717" y="1001" type="qcurve" smooth="yes"/>
<point x="717" y="1048"/>
<point x="649" y="1116"/>
<point x="600" y="1116" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="1180" y="419" type="line"/>
<point x="20" y="419" type="line"/>
<point x="20" y="304" type="line"/>
<point x="1180" y="304" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/room-service.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/room-service.glif",
"repo_id": "cascadia-code",
"token_count": 566
}
| 1,113 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="routes" format="2">
<advance width="1200"/>
<unicode hex="F046A"/>
<note>
routes
</note>
<outline>
<contour>
<point x="537" y="193" type="line"/>
<point x="482" y="193"/>
<point x="407" y="118"/>
<point x="407" y="67" type="qcurve"/>
<point x="793" y="67" type="line"/>
<point x="793" y="118"/>
<point x="718" y="193"/>
<point x="663" y="193" type="qcurve"/>
<point x="663" y="580" type="line"/>
<point x="1050" y="580" type="line"/>
<point x="1180" y="710" type="line"/>
<point x="1050" y="840" type="line"/>
<point x="663" y="840" type="line"/>
<point x="663" y="967" type="line"/>
<point x="1050" y="967" type="line"/>
<point x="1180" y="1097" type="line"/>
<point x="1050" y="1227" type="line"/>
<point x="663" y="1227" type="line"/>
<point x="663" y="1290" type="line"/>
<point x="600" y="1353" type="line"/>
<point x="537" y="1290" type="line"/>
<point x="537" y="1097" type="line"/>
<point x="150" y="1097" type="line"/>
<point x="20" y="967" type="line"/>
<point x="150" y="840" type="line"/>
<point x="537" y="840" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/routes.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/routes.glif",
"repo_id": "cascadia-code",
"token_count": 621
}
| 1,114 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="send-outline" format="2">
<advance width="1200"/>
<unicode hex="F1165"/>
<note>
send-outline
</note>
<outline>
<contour>
<point x="129" y="917" type="line"/>
<point x="129" y="1039" type="line"/>
<point x="543" y="863" type="line"/>
</contour>
<contour>
<point x="129" y="503" type="line"/>
<point x="543" y="557" type="line"/>
<point x="129" y="381" type="line"/>
</contour>
<contour>
<point x="1180" y="710" type="line"/>
<point x="20" y="1207" type="line"/>
<point x="20" y="821" type="line"/>
<point x="849" y="710" type="line"/>
<point x="20" y="599" type="line"/>
<point x="20" y="213" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/send-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/send-outline.glif",
"repo_id": "cascadia-code",
"token_count": 379
}
| 1,115 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="sign-caution" format="2">
<advance width="1200"/>
<unicode hex="F04A1"/>
<note>
sign-caution
</note>
<outline>
<contour>
<point x="20" y="653" type="line"/>
<point x="251" y="653" type="line"/>
<point x="251" y="187" type="line"/>
<point x="369" y="187" type="line"/>
<point x="369" y="653" type="line"/>
<point x="831" y="653" type="line"/>
<point x="831" y="187" type="line"/>
<point x="949" y="187" type="line"/>
<point x="949" y="653" type="line"/>
<point x="1180" y="653" type="line"/>
<point x="1180" y="1233" type="line"/>
<point x="20" y="1233" type="line"/>
</contour>
<contour>
<point x="842" y="767" type="line"/>
<point x="1066" y="990" type="line"/>
<point x="1066" y="827" type="line"/>
<point x="1006" y="767" type="line"/>
</contour>
<contour>
<point x="513" y="767" type="line"/>
<point x="861" y="1116" type="line"/>
<point x="1025" y="1116" type="line"/>
<point x="676" y="767" type="line"/>
</contour>
<contour>
<point x="183" y="767" type="line"/>
<point x="532" y="1116" type="line"/>
<point x="695" y="1116" type="line"/>
<point x="347" y="767" type="line"/>
</contour>
<contour>
<point x="369" y="1116" type="line"/>
<point x="134" y="884" type="line"/>
<point x="134" y="1048" type="line"/>
<point x="205" y="1116" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/sign-caution.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/sign-caution.glif",
"repo_id": "cascadia-code",
"token_count": 744
}
| 1,116 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="signal-distance-variant" format="2">
<advance width="1200"/>
<unicode hex="F0E64"/>
<note>
signal-distance-variant
</note>
<outline>
<contour>
<point x="203" y="1137"/>
<point x="427" y="1045"/>
<point x="599" y="873"/>
<point x="693" y="647"/>
<point x="693" y="527" type="qcurve"/>
<point x="813" y="527" type="line"/>
<point x="813" y="724"/>
<point x="616" y="1062"/>
<point x="281" y="1260"/>
<point x="80" y="1260" type="qcurve"/>
<point x="80" y="1137" type="line"/>
</contour>
<contour>
<point x="180" y="893"/>
<point x="349" y="793"/>
<point x="447" y="624"/>
<point x="447" y="527" type="qcurve"/>
<point x="570" y="527" type="line"/>
<point x="570" y="624"/>
<point x="495" y="802"/>
<point x="358" y="939"/>
<point x="178" y="1014"/>
<point x="80" y="1014" type="qcurve"/>
<point x="80" y="893" type="line"/>
</contour>
<contour>
<point x="80" y="527" type="line"/>
<point x="326" y="527" type="line"/>
<point x="326" y="627"/>
<point x="183" y="770"/>
<point x="80" y="770" type="qcurve"/>
</contour>
<contour>
<point x="20" y="280" type="line"/>
<point x="997" y="280" type="line"/>
<point x="997" y="160" type="line"/>
<point x="1180" y="343" type="line"/>
<point x="997" y="527" type="line"/>
<point x="997" y="404" type="line"/>
<point x="20" y="404" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/signal-distance-variant.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/signal-distance-variant.glif",
"repo_id": "cascadia-code",
"token_count": 802
}
| 1,117 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="skip-next-circle-outline" format="2">
<advance width="1200"/>
<unicode hex="F0662"/>
<note>
skip-next-circle-outline
</note>
<outline>
<contour>
<point x="714" y="1290"/>
<point x="600" y="1290" type="qcurve" smooth="yes"/>
<point x="486" y="1290"/>
<point x="271" y="1203"/>
<point x="107" y="1039"/>
<point x="20" y="824"/>
<point x="20" y="596"/>
<point x="107" y="381"/>
<point x="271" y="217"/>
<point x="486" y="130"/>
<point x="714" y="130"/>
<point x="929" y="217"/>
<point x="1093" y="381"/>
<point x="1180" y="596"/>
<point x="1180" y="824"/>
<point x="1093" y="1039"/>
<point x="929" y="1203"/>
</contour>
<contour>
<point x="475" y="1176"/>
<point x="600" y="1176" type="qcurve" smooth="yes"/>
<point x="725" y="1176"/>
<point x="940" y="1050"/>
<point x="1066" y="835"/>
<point x="1066" y="585"/>
<point x="940" y="370"/>
<point x="725" y="244"/>
<point x="475" y="244"/>
<point x="260" y="370"/>
<point x="134" y="585"/>
<point x="134" y="835"/>
<point x="260" y="1050"/>
</contour>
<contour>
<point x="657" y="710" type="line"/>
<point x="369" y="941" type="line"/>
<point x="369" y="479" type="line"/>
</contour>
<contour>
<point x="831" y="941" type="line"/>
<point x="717" y="941" type="line"/>
<point x="717" y="479" type="line"/>
<point x="831" y="479" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/skip-next-circle-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/skip-next-circle-outline.glif",
"repo_id": "cascadia-code",
"token_count": 825
}
| 1,118 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="sort-alphabetical-descending-variant" format="2">
<advance width="1200"/>
<unicode hex="F1149"/>
<note>
sort-alphabetical-descending-variant
</note>
<outline>
<contour>
<point x="430" y="428" type="line"/>
<point x="643" y="215" type="line"/>
<point x="852" y="428" type="line"/>
</contour>
<contour>
<point x="457" y="555" type="line"/>
<point x="618" y="555" type="line"/>
<point x="415" y="1205" type="line"/>
<point x="220" y="1205" type="line"/>
<point x="20" y="555" type="line"/>
<point x="175" y="555" type="line"/>
<point x="220" y="731" type="line"/>
<point x="409" y="731" type="line"/>
</contour>
<contour>
<point x="281" y="971" type="line"/>
<point x="312" y="1099" type="line"/>
<point x="312" y="1099" type="line"/>
<point x="345" y="974" type="line"/>
<point x="387" y="835" type="line"/>
<point x="242" y="835" type="line"/>
</contour>
<contour>
<point x="1180" y="555" type="line"/>
<point x="1180" y="680" type="line"/>
<point x="873" y="680" type="line"/>
<point x="873" y="686" type="line"/>
<point x="1174" y="1117" type="line"/>
<point x="1174" y="1205" type="line"/>
<point x="706" y="1205" type="line"/>
<point x="706" y="1080" type="line"/>
<point x="986" y="1080" type="line"/>
<point x="986" y="1077" type="line"/>
<point x="676" y="637" type="line"/>
<point x="676" y="555" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/sort-alphabetical-descending-variant.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/sort-alphabetical-descending-variant.glif",
"repo_id": "cascadia-code",
"token_count": 762
}
| 1,119 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="spellcheck" format="2">
<advance width="1200"/>
<unicode hex="F04C6"/>
<note>
spellcheck
</note>
<outline>
<contour>
<point x="1180" y="697" type="line"/>
<point x="1101" y="776" type="line"/>
<point x="644" y="319" type="line"/>
<point x="437" y="528" type="line"/>
<point x="358" y="448" type="line"/>
<point x="644" y="160" type="line"/>
</contour>
<contour>
<point x="477" y="808" type="line"/>
<point x="245" y="808" type="line"/>
<point x="361" y="1120" type="line"/>
</contour>
<contour>
<point x="519" y="697" type="line"/>
<point x="585" y="528" type="line"/>
<point x="702" y="528" type="line"/>
<point x="414" y="1260" type="line"/>
<point x="308" y="1260" type="line"/>
<point x="20" y="528" type="line"/>
<point x="139" y="528" type="line"/>
<point x="202" y="697" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/spellcheck.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/spellcheck.glif",
"repo_id": "cascadia-code",
"token_count": 472
}
| 1,120 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="square-small" format="2">
<advance width="1200"/>
<unicode hex="F0A15"/>
<note>
square-small
</note>
<outline>
<contour>
<point x="1180" y="130" type="line"/>
<point x="1180" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="130" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/square-small.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/square-small.glif",
"repo_id": "cascadia-code",
"token_count": 181
}
| 1,121 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="star-half" format="2">
<advance width="1200"/>
<unicode hex="F0246"/>
<note>
star-half
</note>
<outline>
<contour>
<point x="973" y="280" type="line"/>
<point x="973" y="1420" type="line"/>
<point x="763" y="926" type="line"/>
<point x="227" y="880" type="line"/>
<point x="633" y="526" type="line"/>
<point x="511" y="0" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star-half.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star-half.glif",
"repo_id": "cascadia-code",
"token_count": 221
}
| 1,122 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="star" format="2">
<advance width="1200"/>
<unicode hex="F04CE"/>
<note>
star
</note>
<outline>
<contour>
<point x="241" y="159" type="line"/>
<point x="600" y="376" type="line"/>
<point x="959" y="159" type="line"/>
<point x="864" y="567" type="line"/>
<point x="1180" y="842" type="line"/>
<point x="763" y="877" type="line"/>
<point x="600" y="1261" type="line"/>
<point x="437" y="877" type="line"/>
<point x="20" y="842" type="line"/>
<point x="336" y="567" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star.glif",
"repo_id": "cascadia-code",
"token_count": 299
}
| 1,123 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="stop" format="2">
<advance width="1200"/>
<unicode hex="F04DB"/>
<note>
stop
</note>
<outline>
<contour>
<point x="1180" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="130" type="line"/>
<point x="1180" y="130" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/stop.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/stop.glif",
"repo_id": "cascadia-code",
"token_count": 176
}
| 1,124 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="tag-arrow-up" format="2">
<advance width="1200"/>
<unicode hex="F1731"/>
<note>
tag-arrow-up
</note>
<outline>
<contour>
<point x="1180" y="699"/>
<point x="1147" y="735" type="qcurve" smooth="yes"/>
<point x="625" y="1257" type="line" smooth="yes"/>
<point x="589" y="1290"/>
<point x="543" y="1290" type="qcurve" smooth="yes"/>
<point x="134" y="1290" type="line" smooth="yes"/>
<point x="88" y="1290"/>
<point x="20" y="1222"/>
<point x="20" y="1176" type="qcurve" smooth="yes"/>
<point x="20" y="767" type="line" smooth="yes"/>
<point x="20" y="721"/>
<point x="53" y="685" type="qcurve" smooth="yes"/>
<point x="77" y="664" type="line"/>
<point x="156" y="710"/>
<point x="347" y="710"/>
<point x="507" y="617"/>
<point x="600" y="457"/>
<point x="600" y="266"/>
<point x="554" y="187" type="qcurve"/>
<point x="575" y="163" type="line" smooth="yes"/>
<point x="611" y="130"/>
<point x="706" y="130"/>
<point x="739" y="163" type="qcurve" smooth="yes"/>
<point x="1147" y="571" type="line" smooth="yes"/>
<point x="1180" y="604"/>
</contour>
<contour>
<point x="260" y="1001"/>
<point x="222" y="1001" type="qcurve" smooth="yes"/>
<point x="186" y="1001"/>
<point x="134" y="1050"/>
<point x="134" y="1124"/>
<point x="186" y="1176"/>
<point x="260" y="1176"/>
<point x="309" y="1124"/>
<point x="309" y="1050"/>
</contour>
<contour>
<point x="77" y="419" type="line"/>
<point x="194" y="419" type="line"/>
<point x="194" y="187" type="line"/>
<point x="309" y="187" type="line"/>
<point x="309" y="419" type="line"/>
<point x="426" y="419" type="line"/>
<point x="251" y="593" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tag-arrow-up.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tag-arrow-up.glif",
"repo_id": "cascadia-code",
"token_count": 949
}
| 1,125 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="text-long" format="2">
<advance width="1200"/>
<unicode hex="F09AA"/>
<note>
text-long
</note>
<outline>
<contour>
<point x="20" y="1073" type="line"/>
<point x="1180" y="1073" type="line"/>
<point x="1180" y="1215" type="line"/>
<point x="20" y="1215" type="line"/>
</contour>
<contour>
<point x="20" y="781" type="line"/>
<point x="1180" y="781" type="line"/>
<point x="1180" y="927" type="line"/>
<point x="20" y="927" type="line"/>
</contour>
<contour>
<point x="20" y="493" type="line"/>
<point x="1180" y="493" type="line"/>
<point x="1180" y="639" type="line"/>
<point x="20" y="639" type="line"/>
</contour>
<contour>
<point x="20" y="205" type="line"/>
<point x="746" y="205" type="line"/>
<point x="746" y="347" type="line"/>
<point x="20" y="347" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/text-long.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/text-long.glif",
"repo_id": "cascadia-code",
"token_count": 480
}
| 1,126 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="transfer-down" format="2">
<advance width="1200"/>
<unicode hex="F0DA1"/>
<note>
transfer-down
</note>
<outline>
<contour>
<point x="302" y="1420" type="line"/>
<point x="302" y="1269" type="line"/>
<point x="898" y="1269" type="line"/>
<point x="898" y="1420" type="line"/>
</contour>
<contour>
<point x="302" y="1122" type="line"/>
<point x="302" y="971" type="line"/>
<point x="898" y="971" type="line"/>
<point x="898" y="1122" type="line"/>
</contour>
<contour>
<point x="302" y="820" type="line"/>
<point x="302" y="673" type="line"/>
<point x="898" y="673" type="line"/>
<point x="898" y="820" type="line"/>
</contour>
<contour>
<point x="600" y="0" type="line"/>
<point x="1122" y="522" type="line"/>
<point x="78" y="522" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/transfer-down.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/transfer-down.glif",
"repo_id": "cascadia-code",
"token_count": 460
}
| 1,127 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="transmission-tower-import" format="2">
<advance width="1200"/>
<unicode hex="F192D"/>
<note>
transmission-tower-import
</note>
<outline>
<contour>
<point x="605" y="1132" type="line"/>
<point x="921" y="1132" type="line"/>
<point x="960" y="1056" type="line"/>
<point x="1054" y="1102" type="line"/>
<point x="987" y="1236" type="line"/>
<point x="541" y="1236" type="line"/>
<point x="474" y="1102" type="line"/>
<point x="568" y="1056" type="line"/>
</contour>
<contour>
<point x="1180" y="787" type="line"/>
<point x="1113" y="920" type="line"/>
<point x="874" y="920" type="line"/>
<point x="832" y="1078" type="line"/>
<point x="696" y="1078" type="line"/>
<point x="652" y="920" type="line"/>
<point x="415" y="920" type="line"/>
<point x="348" y="787" type="line"/>
<point x="442" y="740" type="line"/>
<point x="482" y="816" type="line"/>
<point x="1047" y="816" type="line"/>
<point x="1084" y="740" type="line"/>
</contour>
<contour>
<point x="916" y="762" type="line"/>
<point x="807" y="762" type="line"/>
<point x="825" y="693" type="line"/>
<point x="763" y="599" type="line"/>
<point x="701" y="693" type="line"/>
<point x="721" y="762" type="line"/>
<point x="612" y="762" type="line"/>
<point x="459" y="184" type="line"/>
<point x="568" y="184" type="line"/>
<point x="580" y="231" type="line"/>
<point x="763" y="505" type="line"/>
<point x="946" y="231" type="line"/>
<point x="958" y="184" type="line"/>
<point x="1066" y="184" type="line"/>
</contour>
<contour>
<point x="622" y="387" type="line"/>
<point x="684" y="624" type="line"/>
<point x="731" y="552" type="line"/>
</contour>
<contour>
<point x="795" y="552" type="line"/>
<point x="842" y="624" type="line"/>
<point x="906" y="387" type="line"/>
</contour>
<contour>
<point x="232" y="288" type="line"/>
<point x="442" y="500" type="line"/>
<point x="232" y="710" type="line"/>
<point x="232" y="552" type="line"/>
<point x="20" y="552" type="line"/>
<point x="20" y="446" type="line"/>
<point x="232" y="446" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/transmission-tower-import.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/transmission-tower-import.glif",
"repo_id": "cascadia-code",
"token_count": 1153
}
| 1,128 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="vector-polygon" format="2">
<advance width="1200"/>
<unicode hex="F0560"/>
<note>
vector-polygon
</note>
<outline>
<contour>
<point x="369" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="941" type="line"/>
<point x="151" y="941" type="line"/>
<point x="227" y="479" type="line"/>
<point x="134" y="479" type="line"/>
<point x="134" y="130" type="line"/>
<point x="483" y="130" type="line"/>
<point x="483" y="242" type="line"/>
<point x="774" y="242" type="line"/>
<point x="774" y="130" type="line"/>
<point x="1123" y="130" type="line"/>
<point x="1123" y="479" type="line"/>
<point x="1017" y="479" type="line"/>
<point x="1066" y="884" type="line"/>
<point x="1180" y="884" type="line"/>
<point x="1180" y="1233" type="line"/>
<point x="831" y="1233" type="line"/>
<point x="831" y="1029" type="line"/>
<point x="763" y="941" type="line"/>
<point x="461" y="941" type="line"/>
<point x="369" y="1069" type="line"/>
</contour>
<contour>
<point x="134" y="1059" type="line"/>
<point x="134" y="1176" type="line"/>
<point x="251" y="1176" type="line"/>
<point x="251" y="1059" type="line"/>
</contour>
<contour>
<point x="949" y="1001" type="line"/>
<point x="949" y="1116" type="line"/>
<point x="1066" y="1116" type="line"/>
<point x="1066" y="1001" type="line"/>
</contour>
<contour>
<point x="344" y="479" type="line"/>
<point x="271" y="941" type="line"/>
<point x="317" y="941" type="line"/>
<point x="426" y="792" type="line"/>
<point x="426" y="593" type="line"/>
<point x="774" y="593" type="line"/>
<point x="774" y="773" type="line"/>
<point x="864" y="884" type="line"/>
<point x="949" y="884" type="line"/>
<point x="900" y="479" type="line"/>
<point x="774" y="479" type="line"/>
<point x="774" y="359" type="line"/>
<point x="483" y="359" type="line"/>
<point x="483" y="479" type="line"/>
</contour>
<contour>
<point x="543" y="710" type="line"/>
<point x="543" y="827" type="line"/>
<point x="657" y="827" type="line"/>
<point x="657" y="710" type="line"/>
</contour>
<contour>
<point x="251" y="244" type="line"/>
<point x="251" y="361" type="line"/>
<point x="369" y="361" type="line"/>
<point x="369" y="244" type="line"/>
</contour>
<contour>
<point x="891" y="244" type="line"/>
<point x="891" y="361" type="line"/>
<point x="1006" y="361" type="line"/>
<point x="1006" y="244" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/vector-polygon.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/vector-polygon.glif",
"repo_id": "cascadia-code",
"token_count": 1350
}
| 1,129 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="video-image" format="2">
<advance width="1200"/>
<unicode hex="F091A"/>
<note>
video-image
</note>
<outline>
<contour>
<point x="923" y="1033" type="line" smooth="yes"/>
<point x="923" y="1060"/>
<point x="884" y="1097"/>
<point x="857" y="1097" type="qcurve" smooth="yes"/>
<point x="83" y="1097" type="line" smooth="yes"/>
<point x="56" y="1097"/>
<point x="20" y="1060"/>
<point x="20" y="1033" type="qcurve" smooth="yes"/>
<point x="20" y="387" type="line" smooth="yes"/>
<point x="20" y="360"/>
<point x="56" y="323"/>
<point x="83" y="323" type="qcurve" smooth="yes"/>
<point x="857" y="323" type="line" smooth="yes"/>
<point x="884" y="323"/>
<point x="923" y="360"/>
<point x="923" y="387" type="qcurve" smooth="yes"/>
<point x="923" y="613" type="line"/>
<point x="1180" y="357" type="line"/>
<point x="1180" y="1063" type="line"/>
<point x="923" y="807" type="line"/>
</contour>
<contour>
<point x="793" y="517" type="line"/>
<point x="150" y="517" type="line"/>
<point x="325" y="752" type="line"/>
<point x="455" y="574" type="line"/>
<point x="503" y="610" type="line"/>
<point x="419" y="722" type="line"/>
<point x="531" y="867" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/video-image.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/video-image.glif",
"repo_id": "cascadia-code",
"token_count": 683
}
| 1,130 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="view-compact-outline" format="2">
<advance width="1200"/>
<unicode hex="F0E6C"/>
<note>
view-compact-outline
</note>
<outline>
<contour>
<point x="1180" y="1137" type="line"/>
<point x="20" y="1137" type="line"/>
<point x="20" y="283" type="line"/>
<point x="1180" y="283" type="line"/>
</contour>
<contour>
<point x="143" y="770" type="line"/>
<point x="143" y="1016" type="line"/>
<point x="1060" y="1016" type="line"/>
<point x="1060" y="770" type="line"/>
</contour>
<contour>
<point x="387" y="404" type="line"/>
<point x="143" y="404" type="line"/>
<point x="143" y="650" type="line"/>
<point x="387" y="650" type="line"/>
</contour>
<contour>
<point x="1060" y="404" type="line"/>
<point x="510" y="404" type="line"/>
<point x="510" y="650" type="line"/>
<point x="1060" y="650" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-compact-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-compact-outline.glif",
"repo_id": "cascadia-code",
"token_count": 482
}
| 1,131 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="view-headline" format="2">
<advance width="1200"/>
<unicode hex="F0571"/>
<note>
view-headline
</note>
<outline>
<contour>
<point x="1180" y="1186" type="line"/>
<point x="20" y="1186" type="line"/>
<point x="20" y="1052" type="line"/>
<point x="1180" y="1052" type="line"/>
</contour>
<contour>
<point x="20" y="915" type="line"/>
<point x="20" y="777" type="line"/>
<point x="1180" y="777" type="line"/>
<point x="1180" y="915" type="line"/>
</contour>
<contour>
<point x="20" y="368" type="line"/>
<point x="20" y="234" type="line"/>
<point x="1180" y="234" type="line"/>
<point x="1180" y="368" type="line"/>
</contour>
<contour>
<point x="20" y="643" type="line"/>
<point x="20" y="505" type="line"/>
<point x="1180" y="505" type="line"/>
<point x="1180" y="643" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-headline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-headline.glif",
"repo_id": "cascadia-code",
"token_count": 478
}
| 1,132 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="volume-minus" format="2">
<advance width="1200"/>
<unicode hex="F075E"/>
<note>
volume-minus
</note>
<outline>
<contour>
<point x="20" y="527" type="line"/>
<point x="263" y="527" type="line"/>
<point x="570" y="220" type="line"/>
<point x="570" y="1200" type="line"/>
<point x="263" y="893" type="line"/>
<point x="20" y="893" type="line"/>
</contour>
<contour>
<point x="693" y="650" type="line"/>
<point x="1180" y="650" type="line"/>
<point x="1180" y="770" type="line"/>
<point x="693" y="770" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/volume-minus.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/volume-minus.glif",
"repo_id": "cascadia-code",
"token_count": 316
}
| 1,133 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="window-open-variant" format="2">
<advance width="1200"/>
<unicode hex="F11DC"/>
<note>
window-open-variant
</note>
<outline>
<contour>
<point x="1180" y="315" type="line"/>
<point x="1074" y="315" type="line"/>
<point x="1074" y="1263" type="line"/>
<point x="126" y="1263" type="line"/>
<point x="126" y="315" type="line"/>
<point x="20" y="315" type="line"/>
<point x="20" y="157" type="line"/>
<point x="1180" y="157" type="line"/>
</contour>
<contour>
<point x="864" y="1159" type="line"/>
<point x="968" y="1159" type="line"/>
<point x="968" y="789" type="line"/>
<point x="864" y="789" type="line"/>
</contour>
<contour>
<point x="232" y="789" type="line"/>
<point x="232" y="1159" type="line"/>
<point x="336" y="1159" type="line"/>
<point x="336" y="789" type="line"/>
</contour>
<contour>
<point x="336" y="315" type="line"/>
<point x="232" y="315" type="line"/>
<point x="232" y="685" type="line"/>
<point x="336" y="685" type="line"/>
</contour>
<contour>
<point x="758" y="315" type="line"/>
<point x="442" y="315" type="line"/>
<point x="442" y="1159" type="line"/>
<point x="758" y="1159" type="line"/>
</contour>
<contour>
<point x="968" y="315" type="line"/>
<point x="864" y="315" type="line"/>
<point x="864" y="685" type="line"/>
<point x="968" y="685" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/window-open-variant.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/window-open-variant.glif",
"repo_id": "cascadia-code",
"token_count": 754
}
| 1,134 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>com.schriftgestaltung.appVersion</key>
<string>3241</string>
<key>com.schriftgestaltung.disablesAutomaticAlignment</key>
<false/>
<key>com.schriftgestaltung.font.customParameters</key>
<array>
<dict>
<key>active</key>
<false/>
<key>name</key>
<string>glyphOrder</string>
<key>value</key>
<array>
<string>.notdef</string>
<string>lozenge</string>
<string>uni2630</string>
<string>uniE0A0</string>
<string>uniE0A1</string>
<string>uniE0A2</string>
<string>uniE0A3</string>
<string>uniE0B0</string>
<string>uniE0B1</string>
<string>uniE0B2</string>
<string>uniE0B3</string>
<string>uniE0B4</string>
<string>uniE0B5</string>
<string>uniE0B6</string>
<string>uniE0B7</string>
<string>uniE0B8</string>
<string>uniE0B9</string>
<string>uniE0BA</string>
<string>uniE0BB</string>
<string>uniE0BC</string>
<string>uniE0BD</string>
<string>uniE0BE</string>
<string>uniE0BF</string>
<string>uniE0C0</string>
<string>uniE0C1</string>
<string>uniE0C2</string>
<string>uniE0C3</string>
<string>uniE0C4</string>
<string>uniE0C5</string>
<string>uniE0C6</string>
<string>uniE0C7</string>
<string>uniE0C8</string>
<string>uniE0CA</string>
<string>uniE0CC</string>
<string>uniE0CD</string>
<string>uniE0CE</string>
<string>uniE0CF</string>
<string>uniE0D0</string>
<string>uniE0D1</string>
<string>uniE0D2</string>
<string>uniE0D4</string>
<string>uniFB01</string>
<string>uniFB02</string>
<string>uniFB03</string>
<string>uniFB04</string>
<string>uniFEFF</string>
<string>uniFFFC</string>
<string>uniFFFD</string>
<string>.null</string>
<string>nonmarkingreturn</string>
<string>cyrillicbreve</string>
<string>dotlessj</string>
<string>caroncommaaccent</string>
<string>commaaccent</string>
<string>commaaccentrotate</string>
<string>zerosuperior</string>
<string>foursuperior</string>
<string>fivesuperior</string>
<string>sixsuperior</string>
<string>sevensuperior</string>
<string>eightsuperior</string>
<string>ninesuperior</string>
<string>dasiaoxia</string>
<string>circumflexacutecomb</string>
<string>circumflexgravecomb</string>
<string>circumflexhookcomb</string>
<string>circumflextildecomb</string>
<string>breveacutecomb</string>
<string>brevegravecomb</string>
<string>brevehookcomb</string>
<string>brevetildecomb</string>
<string>cyrillichookleft</string>
<string>cyrillicbighookUC</string>
<string>cyrillicbighookLC</string>
</array>
</dict>
</array>
<key>com.schriftgestaltung.fontMasterID</key>
<string>m01</string>
<key>com.schriftgestaltung.formatVersion</key>
<integer>3</integer>
<key>com.schriftgestaltung.useGlyphOrder</key>
<false/>
<key>com.schriftgestaltung.useNiceNames</key>
<false/>
<key>public.glyphOrder</key>
<array>
<string>uni2630</string>
<string>uniE0A0</string>
<string>uniE0A1</string>
<string>uniE0A2</string>
<string>uniE0A3</string>
<string>uniE0B0</string>
<string>uniE0B1</string>
<string>uniE0B2</string>
<string>uniE0B3</string>
<string>uniE0B4</string>
<string>uniE0B5</string>
<string>uniE0B6</string>
<string>uniE0B7</string>
<string>uniE0B8</string>
<string>uniE0B9</string>
<string>uniE0BA</string>
<string>uniE0BB</string>
<string>uniE0BC</string>
<string>uniE0BD</string>
<string>uniE0BE</string>
<string>uniE0BF</string>
<string>uniE0C0</string>
<string>uniE0C1</string>
<string>uniE0C2</string>
<string>uniE0C3</string>
<string>uniE0C4</string>
<string>uniE0C5</string>
<string>uniE0C6</string>
<string>uniE0C7</string>
<string>uniE0C8</string>
<string>.notdef</string>
<string>uniE0CA</string>
<string>uniE0CC</string>
<string>uniE0CD</string>
<string>uniE0CE</string>
<string>uniE0CF</string>
<string>uniE0D0</string>
<string>uniE0D1</string>
<string>uniE0D2</string>
<string>uniE0D4</string>
</array>
<key>public.postscriptNames</key>
<dict>
<key>.notdef</key>
<string>uniE0C9</string>
</dict>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/PowerlineExtraSymbols.ufo/lib.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/PowerlineExtraSymbols.ufo/lib.plist",
"repo_id": "cascadia-code",
"token_count": 2687
}
| 1,135 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="add" format="2">
<advance width="1200"/>
<unicode hex="EA60"/>
<note>
add
</note>
<outline>
<contour>
<point x="643" y="755" type="line"/>
<point x="643" y="1288" type="line"/>
<point x="552" y="1288" type="line"/>
<point x="552" y="755" type="line"/>
<point x="20" y="755" type="line"/>
<point x="20" y="665" type="line"/>
<point x="552" y="665" type="line"/>
<point x="552" y="132" type="line"/>
<point x="643" y="132" type="line"/>
<point x="643" y="665" type="line"/>
<point x="1180" y="665" type="line"/>
<point x="1180" y="755" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/add.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/add.glif",
"repo_id": "cascadia-code",
"token_count": 335
}
| 1,136 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="check" format="2">
<advance width="1200"/>
<unicode hex="EAB2"/>
<note>
check
</note>
<outline>
<contour>
<point x="1107" y="1199" type="line"/>
<point x="368" y="328" type="line"/>
<point x="93" y="715" type="line"/>
<point x="20" y="661" type="line"/>
<point x="328" y="225" type="line"/>
<point x="402" y="221" type="line"/>
<point x="1180" y="1141" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/check.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/check.glif",
"repo_id": "cascadia-code",
"token_count": 237
}
| 1,137 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="clear-all" format="2">
<advance width="1200"/>
<unicode hex="EABF"/>
<note>
clear-all
</note>
<outline>
<contour>
<point x="901" y="473" type="line"/>
<point x="768" y="340" type="line"/>
<point x="826" y="283" type="line"/>
<point x="959" y="416" type="line"/>
<point x="1091" y="283" type="line"/>
<point x="1158" y="340" type="line"/>
<point x="1016" y="473" type="line"/>
<point x="1158" y="606" type="line"/>
<point x="1091" y="672" type="line"/>
<point x="959" y="531" type="line"/>
<point x="826" y="672" type="line"/>
<point x="768" y="606" type="line"/>
</contour>
<contour>
<point x="20" y="1137" type="line"/>
<point x="20" y="1053" type="line"/>
<point x="1180" y="1053" type="line"/>
<point x="1180" y="1137" type="line"/>
</contour>
<contour>
<point x="20" y="889" type="line"/>
<point x="20" y="805" type="line"/>
<point x="1180" y="805" type="line"/>
<point x="1180" y="889" type="line"/>
</contour>
<contour>
<point x="684" y="557" type="line"/>
<point x="684" y="597" type="line"/>
<point x="684" y="637" type="line"/>
<point x="20" y="637" type="line"/>
<point x="20" y="557" type="line"/>
</contour>
<contour>
<point x="20" y="305" type="line"/>
<point x="684" y="305" type="line"/>
<point x="684" y="389" type="line"/>
<point x="20" y="389" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/clear-all.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/clear-all.glif",
"repo_id": "cascadia-code",
"token_count": 759
}
| 1,138 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>account</key>
<string>account.glif</string>
<key>activate-breakpoints</key>
<string>activate-breakpoints.glif</string>
<key>add</key>
<string>add.glif</string>
<key>archive</key>
<string>archive.glif</string>
<key>arrow-both</key>
<string>arrow-both.glif</string>
<key>arrow-down</key>
<string>arrow-down.glif</string>
<key>arrow-left</key>
<string>arrow-left.glif</string>
<key>arrow-right</key>
<string>arrow-right.glif</string>
<key>arrow-small-down</key>
<string>arrow-small-down.glif</string>
<key>arrow-small-left</key>
<string>arrow-small-left.glif</string>
<key>arrow-small-right</key>
<string>arrow-small-right.glif</string>
<key>arrow-small-up</key>
<string>arrow-small-up.glif</string>
<key>arrow-swap</key>
<string>arrow-swap.glif</string>
<key>arrow-up</key>
<string>arrow-up.glif</string>
<key>azure</key>
<string>azure.glif</string>
<key>azure-devops</key>
<string>azure-devops.glif</string>
<key>beaker</key>
<string>beaker.glif</string>
<key>beaker-stop</key>
<string>beaker-stop.glif</string>
<key>bell</key>
<string>bell.glif</string>
<key>bell-dot</key>
<string>bell-dot.glif</string>
<key>bold</key>
<string>bold.glif</string>
<key>book</key>
<string>book.glif</string>
<key>bookmark</key>
<string>bookmark.glif</string>
<key>bracket-dot</key>
<string>bracket-dot.glif</string>
<key>bracket-error</key>
<string>bracket-error.glif</string>
<key>briefcase</key>
<string>briefcase.glif</string>
<key>broadcast</key>
<string>broadcast.glif</string>
<key>browser</key>
<string>browser.glif</string>
<key>bug</key>
<string>bug.glif</string>
<key>calendar</key>
<string>calendar.glif</string>
<key>call-incoming</key>
<string>call-incoming.glif</string>
<key>call-outgoing</key>
<string>call-outgoing.glif</string>
<key>case-sensitive</key>
<string>case-sensitive.glif</string>
<key>check</key>
<string>check.glif</string>
<key>check-all</key>
<string>check-all.glif</string>
<key>checklist</key>
<string>checklist.glif</string>
<key>chevron-down</key>
<string>chevron-down.glif</string>
<key>chevron-left</key>
<string>chevron-left.glif</string>
<key>chevron-right</key>
<string>chevron-right.glif</string>
<key>chevron-up</key>
<string>chevron-up.glif</string>
<key>chrome-close</key>
<string>chrome-close.glif</string>
<key>chrome-maximize</key>
<string>chrome-maximize.glif</string>
<key>chrome-minimize</key>
<string>chrome-minimize.glif</string>
<key>chrome-restore</key>
<string>chrome-restore.glif</string>
<key>circle-filled</key>
<string>circle-filled.glif</string>
<key>circle-large-filled</key>
<string>circle-large-filled.glif</string>
<key>circle-large-outline</key>
<string>circle-large-outline.glif</string>
<key>circle-outline</key>
<string>circle-outline.glif</string>
<key>circle-slash</key>
<string>circle-slash.glif</string>
<key>circuit-board</key>
<string>circuit-board.glif</string>
<key>clear-all</key>
<string>clear-all.glif</string>
<key>clippy</key>
<string>clippy.glif</string>
<key>close</key>
<string>close.glif</string>
<key>close-all</key>
<string>close-all.glif</string>
<key>cloud</key>
<string>cloud.glif</string>
<key>cloud-download</key>
<string>cloud-download.glif</string>
<key>cloud-upload</key>
<string>cloud-upload.glif</string>
<key>code</key>
<string>code.glif</string>
<key>collapse-all</key>
<string>collapse-all.glif</string>
<key>color-mode</key>
<string>color-mode.glif</string>
<key>combine</key>
<string>combine.glif</string>
<key>comment</key>
<string>comment.glif</string>
<key>comment-discussion</key>
<string>comment-discussion.glif</string>
<key>compass</key>
<string>compass.glif</string>
<key>compass-active</key>
<string>compass-active.glif</string>
<key>compass-dot</key>
<string>compass-dot.glif</string>
<key>copy</key>
<string>copy.glif</string>
<key>credit-card</key>
<string>credit-card.glif</string>
<key>dash</key>
<string>dash.glif</string>
<key>dashboard</key>
<string>dashboard.glif</string>
<key>database</key>
<string>database.glif</string>
<key>debug</key>
<string>debug.glif</string>
<key>debug-all</key>
<string>debug-all.glif</string>
<key>debug-alt</key>
<string>debug-alt.glif</string>
<key>debug-alt-small</key>
<string>debug-alt-small.glif</string>
<key>debug-breakpoint-conditional</key>
<string>debug-breakpoint-conditional.glif</string>
<key>debug-breakpoint-conditional-unverified</key>
<string>debug-breakpoint-conditional-unverified.glif</string>
<key>debug-breakpoint-data</key>
<string>debug-breakpoint-data.glif</string>
<key>debug-breakpoint-data-unverified</key>
<string>debug-breakpoint-data-unverified.glif</string>
<key>debug-breakpoint-function</key>
<string>debug-breakpoint-function.glif</string>
<key>debug-breakpoint-function-unverified</key>
<string>debug-breakpoint-function-unverified.glif</string>
<key>debug-breakpoint-log</key>
<string>debug-breakpoint-log.glif</string>
<key>debug-breakpoint-log-unverified</key>
<string>debug-breakpoint-log-unverified.glif</string>
<key>debug-breakpoint-unsupported</key>
<string>debug-breakpoint-unsupported.glif</string>
<key>debug-console</key>
<string>debug-console.glif</string>
<key>debug-continue</key>
<string>debug-continue.glif</string>
<key>debug-continue-small</key>
<string>debug-continue-small.glif</string>
<key>debug-coverage</key>
<string>debug-coverage.glif</string>
<key>debug-disconnect</key>
<string>debug-disconnect.glif</string>
<key>debug-line-by-line</key>
<string>debug-line-by-line.glif</string>
<key>debug-pause</key>
<string>debug-pause.glif</string>
<key>debug-rerun</key>
<string>debug-rerun.glif</string>
<key>debug-restart</key>
<string>debug-restart.glif</string>
<key>debug-restart-frame</key>
<string>debug-restart-frame.glif</string>
<key>debug-reverse-continue</key>
<string>debug-reverse-continue.glif</string>
<key>debug-stackframe</key>
<string>debug-stackframe.glif</string>
<key>debug-stackframe-active</key>
<string>debug-stackframe-active.glif</string>
<key>debug-stackframe-dot</key>
<string>debug-stackframe-dot.glif</string>
<key>debug-start</key>
<string>debug-start.glif</string>
<key>debug-step-back</key>
<string>debug-step-back.glif</string>
<key>debug-step-into</key>
<string>debug-step-into.glif</string>
<key>debug-step-out</key>
<string>debug-step-out.glif</string>
<key>debug-step-over</key>
<string>debug-step-over.glif</string>
<key>debug-stop</key>
<string>debug-stop.glif</string>
<key>desktop-download</key>
<string>desktop-download.glif</string>
<key>device-camera</key>
<string>device-camera.glif</string>
<key>device-camera-video</key>
<string>device-camera-video.glif</string>
<key>device-mobile</key>
<string>device-mobile.glif</string>
<key>diff</key>
<string>diff.glif</string>
<key>diff-added</key>
<string>diff-added.glif</string>
<key>diff-ignored</key>
<string>diff-ignored.glif</string>
<key>diff-modified</key>
<string>diff-modified.glif</string>
<key>diff-removed</key>
<string>diff-removed.glif</string>
<key>diff-renamed</key>
<string>diff-renamed.glif</string>
<key>discard</key>
<string>discard.glif</string>
<key>edit</key>
<string>edit.glif</string>
<key>editor-layout</key>
<string>editor-layout.glif</string>
<key>ellipsis</key>
<string>ellipsis.glif</string>
<key>empty-window</key>
<string>empty-window.glif</string>
<key>error</key>
<string>error.glif</string>
<key>exclude</key>
<string>exclude.glif</string>
<key>expand-all</key>
<string>expand-all.glif</string>
<key>export</key>
<string>export.glif</string>
<key>extensions</key>
<string>extensions.glif</string>
<key>eye</key>
<string>eye.glif</string>
<key>eye-closed</key>
<string>eye-closed.glif</string>
<key>feedback</key>
<string>feedback.glif</string>
<key>file</key>
<string>file.glif</string>
<key>file-binary</key>
<string>file-binary.glif</string>
<key>file-code</key>
<string>file-code.glif</string>
<key>file-media</key>
<string>file-media.glif</string>
<key>file-pdf</key>
<string>file-pdf.glif</string>
<key>file-submodule</key>
<string>file-submodule.glif</string>
<key>file-symlink-directory</key>
<string>file-symlink-directory.glif</string>
<key>file-symlink-file</key>
<string>file-symlink-file.glif</string>
<key>file-zip</key>
<string>file-zip.glif</string>
<key>files</key>
<string>files.glif</string>
<key>filter</key>
<string>filter.glif</string>
<key>filter-filled</key>
<string>filter-filled.glif</string>
<key>flame</key>
<string>flame.glif</string>
<key>fold</key>
<string>fold.glif</string>
<key>fold-down</key>
<string>fold-down.glif</string>
<key>fold-up</key>
<string>fold-up.glif</string>
<key>folder</key>
<string>folder.glif</string>
<key>folder-active</key>
<string>folder-active.glif</string>
<key>folder-library</key>
<string>folder-library.glif</string>
<key>folder-opened</key>
<string>folder-opened.glif</string>
<key>gear</key>
<string>gear.glif</string>
<key>gift</key>
<string>gift.glif</string>
<key>gist-secret</key>
<string>gist-secret.glif</string>
<key>git-commit</key>
<string>git-commit.glif</string>
<key>git-compare</key>
<string>git-compare.glif</string>
<key>git-merge</key>
<string>git-merge.glif</string>
<key>git-pull-request</key>
<string>git-pull-request.glif</string>
<key>git-pull-request-closed</key>
<string>git-pull-request-closed.glif</string>
<key>git-pull-request-create</key>
<string>git-pull-request-create.glif</string>
<key>git-pull-request-draft</key>
<string>git-pull-request-draft.glif</string>
<key>github</key>
<string>github.glif</string>
<key>github-action</key>
<string>github-action.glif</string>
<key>github-alt</key>
<string>github-alt.glif</string>
<key>github-inverted</key>
<string>github-inverted.glif</string>
<key>globe</key>
<string>globe.glif</string>
<key>go-to-file</key>
<string>go-to-file.glif</string>
<key>grabber</key>
<string>grabber.glif</string>
<key>graph</key>
<string>graph.glif</string>
<key>graph-left</key>
<string>graph-left.glif</string>
<key>graph-line</key>
<string>graph-line.glif</string>
<key>graph-scatter</key>
<string>graph-scatter.glif</string>
<key>gripper</key>
<string>gripper.glif</string>
<key>group-by-ref-type</key>
<string>group-by-ref-type.glif</string>
<key>heart</key>
<string>heart.glif</string>
<key>history</key>
<string>history.glif</string>
<key>home</key>
<string>home.glif</string>
<key>horizontal-rule</key>
<string>horizontal-rule.glif</string>
<key>hubot</key>
<string>hubot.glif</string>
<key>inbox</key>
<string>inbox.glif</string>
<key>info</key>
<string>info.glif</string>
<key>inspect</key>
<string>inspect.glif</string>
<key>issue-draft</key>
<string>issue-draft.glif</string>
<key>issue-reopened</key>
<string>issue-reopened.glif</string>
<key>issues</key>
<string>issues.glif</string>
<key>italic</key>
<string>italic.glif</string>
<key>jersey</key>
<string>jersey.glif</string>
<key>json</key>
<string>json.glif</string>
<key>kebab-vertical</key>
<string>kebab-vertical.glif</string>
<key>key</key>
<string>key.glif</string>
<key>law</key>
<string>law.glif</string>
<key>layers</key>
<string>layers.glif</string>
<key>layers-active</key>
<string>layers-active.glif</string>
<key>layers-dot</key>
<string>layers-dot.glif</string>
<key>layout</key>
<string>layout.glif</string>
<key>library</key>
<string>library.glif</string>
<key>lightbulb</key>
<string>lightbulb.glif</string>
<key>lightbulb-autofix</key>
<string>lightbulb-autofix.glif</string>
<key>link</key>
<string>link.glif</string>
<key>link-external</key>
<string>link-external.glif</string>
<key>list-filter</key>
<string>list-filter.glif</string>
<key>list-flat</key>
<string>list-flat.glif</string>
<key>list-ordered</key>
<string>list-ordered.glif</string>
<key>list-selection</key>
<string>list-selection.glif</string>
<key>list-tree</key>
<string>list-tree.glif</string>
<key>list-unordered</key>
<string>list-unordered.glif</string>
<key>live-share</key>
<string>live-share.glif</string>
<key>loading</key>
<string>loading.glif</string>
<key>location</key>
<string>location.glif</string>
<key>lock</key>
<string>lock.glif</string>
<key>lock-small</key>
<string>lock-small.glif</string>
<key>magnet</key>
<string>magnet.glif</string>
<key>mail</key>
<string>mail.glif</string>
<key>mail-read</key>
<string>mail-read.glif</string>
<key>markdown</key>
<string>markdown.glif</string>
<key>megaphone</key>
<string>megaphone.glif</string>
<key>mention</key>
<string>mention.glif</string>
<key>menu</key>
<string>menu.glif</string>
<key>merge</key>
<string>merge.glif</string>
<key>milestone</key>
<string>milestone.glif</string>
<key>mirror</key>
<string>mirror.glif</string>
<key>mortar-board</key>
<string>mortar-board.glif</string>
<key>move</key>
<string>move.glif</string>
<key>multiple-windows</key>
<string>multiple-windows.glif</string>
<key>mute</key>
<string>mute.glif</string>
<key>new-file</key>
<string>new-file.glif</string>
<key>new-folder</key>
<string>new-folder.glif</string>
<key>newline</key>
<string>newline.glif</string>
<key>no-newline</key>
<string>no-newline.glif</string>
<key>note</key>
<string>note.glif</string>
<key>notebook</key>
<string>notebook.glif</string>
<key>notebook-template</key>
<string>notebook-template.glif</string>
<key>octoface</key>
<string>octoface.glif</string>
<key>open-preview</key>
<string>open-preview.glif</string>
<key>organization</key>
<string>organization.glif</string>
<key>output</key>
<string>output.glif</string>
<key>package</key>
<string>package.glif</string>
<key>paintcan</key>
<string>paintcan.glif</string>
<key>pass</key>
<string>pass.glif</string>
<key>pass-filled</key>
<string>pass-filled.glif</string>
<key>person</key>
<string>person.glif</string>
<key>person-add</key>
<string>person-add.glif</string>
<key>pie-chart</key>
<string>pie-chart.glif</string>
<key>pin</key>
<string>pin.glif</string>
<key>pinned</key>
<string>pinned.glif</string>
<key>pinned-dirty</key>
<string>pinned-dirty.glif</string>
<key>play</key>
<string>play.glif</string>
<key>play-circle</key>
<string>play-circle.glif</string>
<key>plug</key>
<string>plug.glif</string>
<key>preserve-case</key>
<string>preserve-case.glif</string>
<key>preview</key>
<string>preview.glif</string>
<key>primitive-square</key>
<string>primitive-square.glif</string>
<key>project</key>
<string>project.glif</string>
<key>pulse</key>
<string>pulse.glif</string>
<key>question</key>
<string>question.glif</string>
<key>quote</key>
<string>quote.glif</string>
<key>radio-tower</key>
<string>radio-tower.glif</string>
<key>reactions</key>
<string>reactions.glif</string>
<key>record</key>
<string>record.glif</string>
<key>record-keys</key>
<string>record-keys.glif</string>
<key>redo</key>
<string>redo.glif</string>
<key>references</key>
<string>references.glif</string>
<key>refresh</key>
<string>refresh.glif</string>
<key>regex</key>
<string>regex.glif</string>
<key>remote</key>
<string>remote.glif</string>
<key>remote-explorer</key>
<string>remote-explorer.glif</string>
<key>remove</key>
<string>remove.glif</string>
<key>replace</key>
<string>replace.glif</string>
<key>replace-all</key>
<string>replace-all.glif</string>
<key>reply</key>
<string>reply.glif</string>
<key>repo</key>
<string>repo.glif</string>
<key>repo-clone</key>
<string>repo-clone.glif</string>
<key>repo-force-push</key>
<string>repo-force-push.glif</string>
<key>repo-forked</key>
<string>repo-forked.glif</string>
<key>repo-pull</key>
<string>repo-pull.glif</string>
<key>repo-push</key>
<string>repo-push.glif</string>
<key>report</key>
<string>report.glif</string>
<key>request-changes</key>
<string>request-changes.glif</string>
<key>rocket</key>
<string>rocket.glif</string>
<key>root-folder</key>
<string>root-folder.glif</string>
<key>root-folder-opened</key>
<string>root-folder-opened.glif</string>
<key>rss</key>
<string>rss.glif</string>
<key>ruby</key>
<string>ruby.glif</string>
<key>run-above</key>
<string>run-above.glif</string>
<key>run-all</key>
<string>run-all.glif</string>
<key>run-below</key>
<string>run-below.glif</string>
<key>run-errors</key>
<string>run-errors.glif</string>
<key>save</key>
<string>save.glif</string>
<key>save-all</key>
<string>save-all.glif</string>
<key>save-as</key>
<string>save-as.glif</string>
<key>screen-full</key>
<string>screen-full.glif</string>
<key>screen-normal</key>
<string>screen-normal.glif</string>
<key>search</key>
<string>search.glif</string>
<key>search-stop</key>
<string>search-stop.glif</string>
<key>server</key>
<string>server.glif</string>
<key>server-environment</key>
<string>server-environment.glif</string>
<key>server-process</key>
<string>server-process.glif</string>
<key>settings</key>
<string>settings.glif</string>
<key>settings-gear</key>
<string>settings-gear.glif</string>
<key>shield</key>
<string>shield.glif</string>
<key>sign-in</key>
<string>sign-in.glif</string>
<key>sign-out</key>
<string>sign-out.glif</string>
<key>smiley</key>
<string>smiley.glif</string>
<key>sort-precedence</key>
<string>sort-precedence.glif</string>
<key>source-control</key>
<string>source-control.glif</string>
<key>split-horizontal</key>
<string>split-horizontal.glif</string>
<key>split-vertical</key>
<string>split-vertical.glif</string>
<key>squirrel</key>
<string>squirrel.glif</string>
<key>star-empty</key>
<string>star-empty.glif</string>
<key>star-full</key>
<string>star-full.glif</string>
<key>star-half</key>
<string>star-half.glif</string>
<key>stop-circle</key>
<string>stop-circle.glif</string>
<key>symbol-array</key>
<string>symbol-array.glif</string>
<key>symbol-boolean</key>
<string>symbol-boolean.glif</string>
<key>symbol-class</key>
<string>symbol-class.glif</string>
<key>symbol-color</key>
<string>symbol-color.glif</string>
<key>symbol-constant</key>
<string>symbol-constant.glif</string>
<key>symbol-enum</key>
<string>symbol-enum.glif</string>
<key>symbol-enum-member</key>
<string>symbol-enum-member.glif</string>
<key>symbol-event</key>
<string>symbol-event.glif</string>
<key>symbol-field</key>
<string>symbol-field.glif</string>
<key>symbol-file</key>
<string>symbol-file.glif</string>
<key>symbol-interface</key>
<string>symbol-interface.glif</string>
<key>symbol-key</key>
<string>symbol-key.glif</string>
<key>symbol-keyword</key>
<string>symbol-keyword.glif</string>
<key>symbol-method</key>
<string>symbol-method.glif</string>
<key>symbol-misc</key>
<string>symbol-misc.glif</string>
<key>symbol-namespace</key>
<string>symbol-namespace.glif</string>
<key>symbol-numeric</key>
<string>symbol-numeric.glif</string>
<key>symbol-operator</key>
<string>symbol-operator.glif</string>
<key>symbol-parameter</key>
<string>symbol-parameter.glif</string>
<key>symbol-property</key>
<string>symbol-property.glif</string>
<key>symbol-ruler</key>
<string>symbol-ruler.glif</string>
<key>symbol-snippet</key>
<string>symbol-snippet.glif</string>
<key>symbol-string</key>
<string>symbol-string.glif</string>
<key>symbol-structure</key>
<string>symbol-structure.glif</string>
<key>symbol-variable</key>
<string>symbol-variable.glif</string>
<key>sync</key>
<string>sync.glif</string>
<key>sync-ignored</key>
<string>sync-ignored.glif</string>
<key>table</key>
<string>table.glif</string>
<key>tag</key>
<string>tag.glif</string>
<key>tasklist</key>
<string>tasklist.glif</string>
<key>telescope</key>
<string>telescope.glif</string>
<key>terminal</key>
<string>terminal.glif</string>
<key>terminal-bash</key>
<string>terminal-bash.glif</string>
<key>terminal-cmd</key>
<string>terminal-cmd.glif</string>
<key>terminal-debian</key>
<string>terminal-debian.glif</string>
<key>terminal-linux</key>
<string>terminal-linux.glif</string>
<key>terminal-powershell</key>
<string>terminal-powershell.glif</string>
<key>terminal-tmux</key>
<string>terminal-tmux.glif</string>
<key>terminal-ubuntu</key>
<string>terminal-ubuntu.glif</string>
<key>text-size</key>
<string>text-size.glif</string>
<key>three-bars</key>
<string>three-bars.glif</string>
<key>thumbsdown</key>
<string>thumbsdown.glif</string>
<key>thumbsup</key>
<string>thumbsup.glif</string>
<key>tools</key>
<string>tools.glif</string>
<key>trash</key>
<string>trash.glif</string>
<key>triangle-down</key>
<string>triangle-down.glif</string>
<key>triangle-left</key>
<string>triangle-left.glif</string>
<key>triangle-right</key>
<string>triangle-right.glif</string>
<key>triangle-up</key>
<string>triangle-up.glif</string>
<key>twitter</key>
<string>twitter.glif</string>
<key>type-hierarchy</key>
<string>type-hierarchy.glif</string>
<key>type-hierarchy-sub</key>
<string>type-hierarchy-sub.glif</string>
<key>type-hierarchy-super</key>
<string>type-hierarchy-super.glif</string>
<key>unfold</key>
<string>unfold.glif</string>
<key>ungroup-by-ref-type</key>
<string>ungroup-by-ref-type.glif</string>
<key>unlock</key>
<string>unlock.glif</string>
<key>unmute</key>
<string>unmute.glif</string>
<key>unverified</key>
<string>unverified.glif</string>
<key>variable-group</key>
<string>variable-group.glif</string>
<key>verified</key>
<string>verified.glif</string>
<key>verified-filled</key>
<string>verified-filled.glif</string>
<key>versions</key>
<string>versions.glif</string>
<key>vm</key>
<string>vm.glif</string>
<key>vm-active</key>
<string>vm-active.glif</string>
<key>vm-connect</key>
<string>vm-connect.glif</string>
<key>vm-outline</key>
<string>vm-outline.glif</string>
<key>vm-running</key>
<string>vm-running.glif</string>
<key>wand</key>
<string>wand.glif</string>
<key>warning</key>
<string>warning.glif</string>
<key>watch</key>
<string>watch.glif</string>
<key>whitespace</key>
<string>whitespace.glif</string>
<key>whole-word</key>
<string>whole-word.glif</string>
<key>window</key>
<string>window.glif</string>
<key>word-wrap</key>
<string>word-wrap.glif</string>
<key>workspace-trusted</key>
<string>workspace-trusted.glif</string>
<key>workspace-unknown</key>
<string>workspace-unknown.glif</string>
<key>workspace-untrusted</key>
<string>workspace-untrusted.glif</string>
<key>zoom-in</key>
<string>zoom-in.glif</string>
<key>zoom-out</key>
<string>zoom-out.glif</string>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/contents.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/contents.plist",
"repo_id": "cascadia-code",
"token_count": 11391
}
| 1,139 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="debug-breakpoint-log" format="2">
<advance width="1200"/>
<unicode hex="EAAB"/>
<note>
debug-breakpoint-log
</note>
<outline>
<contour>
<point x="20" y="710" type="line"/>
<point x="600" y="130" type="line"/>
<point x="1180" y="710" type="line"/>
<point x="600" y="1290" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/debug-breakpoint-log.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/debug-breakpoint-log.glif",
"repo_id": "cascadia-code",
"token_count": 184
}
| 1,140 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="debug-start" format="2">
<advance width="1200"/>
<unicode hex="EAD3"/>
<note>
debug-start
</note>
<outline>
<contour>
<point x="52" y="76" type="line"/>
<point x="192" y="0" type="line"/>
<point x="1148" y="637" type="line"/>
<point x="1148" y="783" type="line"/>
<point x="192" y="1420" type="line"/>
<point x="52" y="1350" type="line"/>
</contour>
<contour>
<point x="931" y="713" type="line"/>
<point x="231" y="242" type="line"/>
<point x="231" y="1178" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/debug-start.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/debug-start.glif",
"repo_id": "cascadia-code",
"token_count": 299
}
| 1,141 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="diff" format="2">
<advance width="1200"/>
<unicode hex="EAE1"/>
<note>
diff
</note>
<outline>
<contour>
<point x="20" y="309" type="line"/>
<point x="63" y="261" type="line"/>
<point x="512" y="261" type="line"/>
<point x="555" y="309" type="line"/>
<point x="555" y="1111" type="line"/>
<point x="512" y="1159" type="line"/>
<point x="63" y="1159" type="line"/>
<point x="20" y="1111" type="line"/>
</contour>
<contour>
<point x="106" y="891" type="line"/>
<point x="464" y="891" type="line"/>
<point x="464" y="352" type="line"/>
<point x="106" y="352" type="line"/>
</contour>
<contour>
<point x="106" y="1068" type="line"/>
<point x="464" y="1068" type="line"/>
<point x="464" y="977" type="line"/>
<point x="106" y="977" type="line"/>
</contour>
<contour>
<point x="645" y="1111" type="line"/>
<point x="645" y="309" type="line"/>
<point x="688" y="261" type="line"/>
<point x="1137" y="261" type="line"/>
<point x="1180" y="309" type="line"/>
<point x="1180" y="1111" type="line"/>
<point x="1137" y="1159" type="line"/>
<point x="688" y="1159" type="line"/>
</contour>
<contour>
<point x="736" y="533" type="line"/>
<point x="1094" y="533" type="line"/>
<point x="1094" y="352" type="line"/>
<point x="736" y="352" type="line"/>
</contour>
<contour>
<point x="736" y="1068" type="line"/>
<point x="1094" y="1068" type="line"/>
<point x="1094" y="710" type="line"/>
<point x="736" y="710" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/diff.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/diff.glif",
"repo_id": "cascadia-code",
"token_count": 832
}
| 1,142 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="folder-opened" format="2">
<advance width="1200"/>
<unicode hex="EAF7"/>
<note>
folder-opened
</note>
<outline>
<contour>
<point x="20" y="272" type="line"/>
<point x="58" y="234" type="line"/>
<point x="930" y="234" type="line"/>
<point x="968" y="259" type="line"/>
<point x="1180" y="818" type="line"/>
<point x="1142" y="869" type="line"/>
<point x="1053" y="869" type="line"/>
<point x="1053" y="1064" type="line"/>
<point x="1011" y="1106" type="line"/>
<point x="553" y="1106" type="line"/>
<point x="481" y="1174" type="line"/>
<point x="456" y="1186" type="line"/>
<point x="58" y="1186" type="line"/>
<point x="20" y="1144" type="line"/>
</contour>
<contour>
<point x="100" y="517" type="line"/>
<point x="100" y="1106" type="line"/>
<point x="439" y="1106" type="line"/>
<point x="507" y="1038" type="line"/>
<point x="536" y="1025" type="line"/>
<point x="973" y="1025" type="line"/>
<point x="973" y="869" type="line"/>
<point x="613" y="869" type="line"/>
<point x="587" y="856" type="line"/>
<point x="520" y="788" type="line"/>
<point x="219" y="788" type="line"/>
<point x="181" y="759" type="line"/>
</contour>
<contour>
<point x="1091" y="788" type="line"/>
<point x="901" y="310" type="line"/>
<point x="113" y="310" type="line"/>
<point x="244" y="708" type="line"/>
<point x="536" y="708" type="line"/>
<point x="562" y="721" type="line"/>
<point x="630" y="788" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/folder-opened.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/folder-opened.glif",
"repo_id": "cascadia-code",
"token_count": 819
}
| 1,143 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="inspect" format="2">
<advance width="1200"/>
<unicode hex="EBD1"/>
<note>
inspect
</note>
<outline>
<contour>
<point x="20" y="529" type="line"/>
<point x="100" y="449" type="line"/>
<point x="491" y="449" type="line"/>
<point x="491" y="529" type="line"/>
<point x="100" y="529" type="line"/>
<point x="100" y="1160" type="line"/>
<point x="1046" y="1160" type="line"/>
<point x="1046" y="685" type="line"/>
<point x="1121" y="685" type="line"/>
<point x="1121" y="1160" type="line"/>
<point x="1046" y="1240" type="line"/>
<point x="100" y="1240" type="line"/>
<point x="20" y="1160" type="line"/>
</contour>
<contour>
<point x="865" y="395" type="line"/>
<point x="1180" y="395" type="line"/>
<point x="650" y="924" type="line"/>
<point x="650" y="180" type="line"/>
</contour>
<contour>
<point x="831" y="470" type="line"/>
<point x="730" y="370" type="line"/>
<point x="730" y="731" type="line"/>
<point x="987" y="470" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/inspect.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/inspect.glif",
"repo_id": "cascadia-code",
"token_count": 562
}
| 1,144 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="mail" format="2">
<advance width="1200"/>
<unicode hex="EB1C"/>
<note>
mail
</note>
<outline>
<contour>
<point x="20" y="338" type="line"/>
<point x="60" y="294" type="line"/>
<point x="1140" y="294" type="line"/>
<point x="1180" y="338" type="line"/>
<point x="1180" y="1082" type="line"/>
<point x="1140" y="1126" type="line"/>
<point x="60" y="1126" type="line"/>
<point x="20" y="1082" type="line"/>
</contour>
<contour>
<point x="573" y="635" type="line"/>
<point x="627" y="635" type="line"/>
<point x="1100" y="998" type="line"/>
<point x="1100" y="378" type="line"/>
<point x="104" y="378" type="line"/>
<point x="104" y="998" type="line"/>
</contour>
<contour>
<point x="600" y="719" type="line"/>
<point x="184" y="1042" type="line"/>
<point x="1016" y="1042" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/mail.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/mail.glif",
"repo_id": "cascadia-code",
"token_count": 475
}
| 1,145 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="note" format="2">
<advance width="1200"/>
<unicode hex="EB26"/>
<note>
note
</note>
<outline>
<contour>
<point x="20" y="1124" type="line"/>
<point x="20" y="296" type="line"/>
<point x="60" y="252" type="line"/>
<point x="1140" y="252" type="line"/>
<point x="1180" y="296" type="line"/>
<point x="1180" y="1124" type="line"/>
<point x="1140" y="1168" type="line"/>
<point x="60" y="1168" type="line"/>
</contour>
<contour>
<point x="1100" y="1084" type="line"/>
<point x="1100" y="336" type="line"/>
<point x="104" y="336" type="line"/>
<point x="104" y="1084" type="line"/>
</contour>
<contour>
<point x="268" y="836" type="line"/>
<point x="932" y="836" type="line"/>
<point x="932" y="916" type="line"/>
<point x="268" y="916" type="line"/>
</contour>
<contour>
<point x="768" y="668" type="line"/>
<point x="768" y="752" type="line"/>
<point x="268" y="752" type="line"/>
<point x="268" y="668" type="line"/>
</contour>
<contour>
<point x="268" y="504" type="line"/>
<point x="600" y="504" type="line"/>
<point x="600" y="584" type="line"/>
<point x="268" y="584" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/note.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/note.glif",
"repo_id": "cascadia-code",
"token_count": 649
}
| 1,146 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="request-changes" format="2">
<advance width="1200"/>
<unicode hex="EB43"/>
<note>
request-changes
</note>
<outline>
<contour>
<point x="1180" y="1246" type="line"/>
<point x="1140" y="1286" type="line"/>
<point x="60" y="1286" type="line"/>
<point x="20" y="1246" type="line"/>
<point x="20" y="413" type="line"/>
<point x="60" y="374" type="line"/>
<point x="268" y="374" type="line"/>
<point x="268" y="165" type="line"/>
<point x="339" y="134" type="line"/>
<point x="578" y="374" type="line"/>
<point x="1140" y="374" type="line"/>
<point x="1180" y="413" type="line"/>
</contour>
<contour>
<point x="1100" y="1206" type="line"/>
<point x="1100" y="458" type="line"/>
<point x="560" y="458" type="line"/>
<point x="529" y="444" type="line"/>
<point x="352" y="267" type="line"/>
<point x="352" y="413" type="line"/>
<point x="308" y="458" type="line"/>
<point x="104" y="458" type="line"/>
<point x="104" y="1206" type="line"/>
</contour>
<contour>
<point x="768" y="626" type="line"/>
<point x="436" y="626" type="line"/>
<point x="436" y="542" type="line"/>
<point x="768" y="542" type="line"/>
</contour>
<contour>
<point x="560" y="954" type="line"/>
<point x="392" y="954" type="line"/>
<point x="392" y="874" type="line"/>
<point x="560" y="874" type="line"/>
<point x="560" y="706" type="line"/>
<point x="640" y="706" type="line"/>
<point x="640" y="874" type="line"/>
<point x="808" y="874" type="line"/>
<point x="808" y="954" type="line"/>
<point x="640" y="954" type="line"/>
<point x="640" y="1122" type="line"/>
<point x="560" y="1122" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/request-changes.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/request-changes.glif",
"repo_id": "cascadia-code",
"token_count": 901
}
| 1,147 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="search" format="2">
<advance width="1200"/>
<unicode hex="EA6D"/>
<note>
search
</note>
<outline>
<contour>
<point x="755" y="1326" type="qcurve"/>
<point x="755" y="1326" type="line"/>
<point x="627" y="1326"/>
<point x="417" y="1189"/>
<point x="313" y="958"/>
<point x="351" y="710"/>
<point x="433" y="619" type="qcurve"/>
<point x="20" y="144" type="line"/>
<point x="78" y="94" type="line"/>
<point x="491" y="565" type="line"/>
<point x="603" y="479"/>
<point x="879" y="470"/>
<point x="1111" y="627"/>
<point x="1202" y="892"/>
<point x="1119" y="1160"/>
<point x="892" y="1326"/>
</contour>
<contour>
<point x="755" y="549" type="qcurve"/>
<point x="755" y="553" type="line"/>
<point x="660" y="553"/>
<point x="499" y="644"/>
<point x="404" y="805"/>
<point x="404" y="995"/>
<point x="499" y="1156"/>
<point x="660" y="1247"/>
<point x="850" y="1247"/>
<point x="1007" y="1156"/>
<point x="1102" y="995"/>
<point x="1102" y="805"/>
<point x="1007" y="644"/>
<point x="850" y="549"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/search.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/search.glif",
"repo_id": "cascadia-code",
"token_count": 645
}
| 1,148 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="star-full" format="2">
<advance width="1200"/>
<unicode hex="EB59"/>
<note>
star-full
</note>
<outline>
<contour>
<point x="1180" y="856" type="line"/>
<point x="733" y="856" type="line"/>
<point x="600" y="1290" type="line"/>
<point x="467" y="856" type="line"/>
<point x="20" y="856" type="line"/>
<point x="383" y="573" type="line"/>
<point x="246" y="130" type="line"/>
<point x="600" y="405" type="line"/>
<point x="954" y="130" type="line"/>
<point x="817" y="573" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/star-full.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/star-full.glif",
"repo_id": "cascadia-code",
"token_count": 302
}
| 1,149 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="symbol-method" format="2">
<advance width="1200"/>
<unicode hex="EA8C"/>
<note>
symbol-method
</note>
<outline>
<contour>
<point x="1180" y="1001" type="line"/>
<point x="1128" y="1084" type="line"/>
<point x="649" y="1372" type="line"/>
<point x="551" y="1372" type="line"/>
<point x="66" y="1084" type="line"/>
<point x="20" y="1001" type="line"/>
<point x="20" y="419" type="line"/>
<point x="66" y="336" type="line"/>
<point x="551" y="48" type="line"/>
<point x="649" y="48" type="line"/>
<point x="1128" y="336" type="line"/>
<point x="1180" y="419" type="line"/>
</contour>
<contour>
<point x="551" y="682" type="line"/>
<point x="551" y="161" type="line"/>
<point x="113" y="419" type="line"/>
<point x="113" y="919" type="line"/>
</contour>
<contour>
<point x="597" y="764" type="line"/>
<point x="139" y="1017" type="line"/>
<point x="597" y="1290" type="line"/>
<point x="1056" y="1017" type="line"/>
</contour>
<contour>
<point x="1082" y="919" type="line"/>
<point x="1082" y="419" type="line"/>
<point x="649" y="161" type="line"/>
<point x="649" y="682" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/symbol-method.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/symbol-method.glif",
"repo_id": "cascadia-code",
"token_count": 643
}
| 1,150 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="tasklist" format="2">
<advance width="1200"/>
<unicode hex="EB67"/>
<note>
tasklist
</note>
<outline>
<contour>
<point x="171" y="739" type="line"/>
<point x="233" y="734" type="line"/>
<point x="706" y="1146" type="line"/>
<point x="649" y="1208" type="line"/>
<point x="206" y="827" type="line"/>
<point x="82" y="978" type="line"/>
<point x="20" y="925" type="line"/>
</contour>
<contour>
<point x="1180" y="1044" type="line"/>
<point x="693" y="1044" type="line"/>
<point x="503" y="876" type="line"/>
<point x="1180" y="876" type="line"/>
</contour>
<contour>
<point x="436" y="544" type="line"/>
<point x="1180" y="544" type="line"/>
<point x="1180" y="712" type="line"/>
<point x="436" y="712" type="line"/>
</contour>
<contour>
<point x="1180" y="212" type="line"/>
<point x="1180" y="380" type="line"/>
<point x="436" y="380" type="line"/>
<point x="436" y="212" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/tasklist.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/tasklist.glif",
"repo_id": "cascadia-code",
"token_count": 536
}
| 1,151 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="triangle-down" format="2">
<advance width="1200"/>
<unicode hex="EB6E"/>
<note>
triangle-down
</note>
<outline>
<contour>
<point x="551" y="421" type="line"/>
<point x="634" y="421" type="line"/>
<point x="1180" y="947" type="line"/>
<point x="1139" y="999" type="line"/>
<point x="56" y="999" type="line"/>
<point x="20" y="947" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/triangle-down.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/triangle-down.glif",
"repo_id": "cascadia-code",
"token_count": 223
}
| 1,152 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="versions" format="2">
<advance width="1200"/>
<unicode hex="EB78"/>
<note>
versions
</note>
<outline>
<contour>
<point x="1100" y="1126" type="line"/>
<point x="600" y="1126" type="line"/>
<point x="516" y="1042" type="line"/>
<point x="516" y="378" type="line"/>
<point x="600" y="294" type="line"/>
<point x="1100" y="294" type="line"/>
<point x="1180" y="378" type="line"/>
<point x="1180" y="1042" type="line"/>
</contour>
<contour>
<point x="1100" y="1042" type="line"/>
<point x="1100" y="378" type="line"/>
<point x="600" y="378" type="line"/>
<point x="600" y="1042" type="line"/>
</contour>
<contour>
<point x="352" y="462" type="line"/>
<point x="352" y="626" type="line"/>
<point x="352" y="958" type="line"/>
<point x="436" y="958" type="line"/>
<point x="436" y="1042" type="line"/>
<point x="308" y="1042" type="line"/>
<point x="268" y="1002" type="line"/>
<point x="268" y="418" type="line"/>
<point x="308" y="378" type="line"/>
<point x="436" y="378" type="line"/>
<point x="436" y="462" type="line"/>
</contour>
<contour>
<point x="104" y="546" type="line"/>
<point x="104" y="710" type="line"/>
<point x="104" y="878" type="line"/>
<point x="184" y="878" type="line"/>
<point x="184" y="958" type="line"/>
<point x="60" y="958" type="line"/>
<point x="20" y="918" type="line"/>
<point x="20" y="502" type="line"/>
<point x="60" y="462" type="line"/>
<point x="184" y="462" type="line"/>
<point x="184" y="546" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/versions.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/versions.glif",
"repo_id": "cascadia-code",
"token_count": 837
}
| 1,153 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>ascender</key>
<integer>480</integer>
<key>capHeight</key>
<integer>0</integer>
<key>descender</key>
<integer>-32</integer>
<key>familyName</key>
<string>font-awesome-extension</string>
<key>guidelines</key>
<array/>
<key>italicAngle</key>
<integer>0</integer>
<key>openTypeHeadCreated</key>
<string>2016/05/22 04:13:42</string>
<key>openTypeHheaAscender</key>
<integer>505</integer>
<key>openTypeHheaDescender</key>
<integer>0</integer>
<key>openTypeNamePreferredSubfamilyName</key>
<string>awesome-extension</string>
<key>openTypeOS2Panose</key>
<array>
<integer>2</integer>
<integer>0</integer>
<integer>5</integer>
<integer>9</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
</array>
<key>openTypeOS2StrikeoutPosition</key>
<integer>132</integer>
<key>openTypeOS2StrikeoutSize</key>
<integer>25</integer>
<key>openTypeOS2TypoAscender</key>
<integer>480</integer>
<key>openTypeOS2TypoDescender</key>
<integer>-32</integer>
<key>openTypeOS2TypoLineGap</key>
<integer>46</integer>
<key>openTypeOS2UnicodeRanges</key>
<array>
<integer>60</integer>
</array>
<key>openTypeOS2VendorID</key>
<string>PfEd</string>
<key>openTypeOS2WeightClass</key>
<integer>400</integer>
<key>openTypeOS2WidthClass</key>
<integer>5</integer>
<key>openTypeOS2WinAscent</key>
<integer>505</integer>
<key>openTypeOS2WinDescent</key>
<integer>65529</integer>
<key>postscriptBlueValues</key>
<array/>
<key>postscriptFamilyBlues</key>
<array/>
<key>postscriptFamilyOtherBlues</key>
<array/>
<key>postscriptFontName</key>
<string>font-awesome-extension</string>
<key>postscriptOtherBlues</key>
<array/>
<key>postscriptStemSnapH</key>
<array/>
<key>postscriptStemSnapV</key>
<array/>
<key>styleMapFamilyName</key>
<string>font-awesome-extension awesome-extension</string>
<key>styleMapStyleName</key>
<string>regular</string>
<key>styleName</key>
<string>awesome-extension</string>
<key>unitsPerEm</key>
<integer>2048</integer>
<key>versionMajor</key>
<integer>1</integer>
<key>versionMinor</key>
<integer>0</integer>
<key>xHeight</key>
<integer>0</integer>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/font-awesome-extension.ufo/fontinfo.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-awesome-extension.ufo/fontinfo.plist",
"repo_id": "cascadia-code",
"token_count": 1192
}
| 1,154 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>apple-fruit</key>
<string>apple-fruit.glif</string>
<key>atom</key>
<string>atom.glif</string>
<key>bacteria</key>
<string>bacteria.glif</string>
<key>banana</key>
<string>banana.glif</string>
<key>bath</key>
<string>bath.glif</string>
<key>bed</key>
<string>bed.glif</string>
<key>benzene</key>
<string>benzene.glif</string>
<key>bigger</key>
<string>bigger.glif</string>
<key>biohazard</key>
<string>biohazard.glif</string>
<key>blogger-circle</key>
<string>blogger-circle.glif</string>
<key>blogger-square</key>
<string>blogger-square.glif</string>
<key>bones</key>
<string>bones.glif</string>
<key>book-open</key>
<string>book-open.glif</string>
<key>book-open-o</key>
<string>book-open-o.glif</string>
<key>brain</key>
<string>brain.glif</string>
<key>bread</key>
<string>bread.glif</string>
<key>butterfly</key>
<string>butterfly.glif</string>
<key>carot</key>
<string>carot.glif</string>
<key>cc-by</key>
<string>cc-by.glif</string>
<key>cc-cc</key>
<string>cc-cc.glif</string>
<key>cc-nc</key>
<string>cc-nc.glif</string>
<key>cc-nc-eu</key>
<string>cc-nc-eu.glif</string>
<key>cc-nc-jp</key>
<string>cc-nc-jp.glif</string>
<key>cc-nd</key>
<string>cc-nd.glif</string>
<key>cc-remix</key>
<string>cc-remix.glif</string>
<key>cc-sa</key>
<string>cc-sa.glif</string>
<key>cc-share</key>
<string>cc-share.glif</string>
<key>cc-zero</key>
<string>cc-zero.glif</string>
<key>checklist-o</key>
<string>checklist-o.glif</string>
<key>cherry</key>
<string>cherry.glif</string>
<key>chess-bishop</key>
<string>chess-bishop.glif</string>
<key>chess-horse</key>
<string>chess-horse.glif</string>
<key>chess-king</key>
<string>chess-king.glif</string>
<key>chess-pawn</key>
<string>chess-pawn.glif</string>
<key>chess-queen</key>
<string>chess-queen.glif</string>
<key>chess-tower</key>
<string>chess-tower.glif</string>
<key>chesse</key>
<string>chesse.glif</string>
<key>chicken-thigh</key>
<string>chicken-thigh.glif</string>
<key>chilli</key>
<string>chilli.glif</string>
<key>chip</key>
<string>chip.glif</string>
<key>cicling</key>
<string>cicling.glif</string>
<key>cloud</key>
<string>cloud.glif</string>
<key>cockroach</key>
<string>cockroach.glif</string>
<key>coffe-beans</key>
<string>coffe-beans.glif</string>
<key>coins</key>
<string>coins.glif</string>
<key>comb</key>
<string>comb.glif</string>
<key>comet</key>
<string>comet.glif</string>
<key>crown</key>
<string>crown.glif</string>
<key>cup-coffe</key>
<string>cup-coffe.glif</string>
<key>dice</key>
<string>dice.glif</string>
<key>disco</key>
<string>disco.glif</string>
<key>dna</key>
<string>dna.glif</string>
<key>donut</key>
<string>donut.glif</string>
<key>dress</key>
<string>dress.glif</string>
<key>drop</key>
<string>drop.glif</string>
<key>ello</key>
<string>ello.glif</string>
<key>envelope-open</key>
<string>envelope-open.glif</string>
<key>envelope-open-o</key>
<string>envelope-open-o.glif</string>
<key>equal</key>
<string>equal.glif</string>
<key>equal-bigger</key>
<string>equal-bigger.glif</string>
<key>feedly</key>
<string>feedly.glif</string>
<key>file-export</key>
<string>file-export.glif</string>
<key>file-import</key>
<string>file-import.glif</string>
<key>fingerprint</key>
<string>fingerprint.glif</string>
<key>floppy</key>
<string>floppy.glif</string>
<key>footprint</key>
<string>footprint.glif</string>
<key>freecodecamp</key>
<string>freecodecamp.glif</string>
<key>galaxy</key>
<string>galaxy.glif</string>
<key>galery</key>
<string>galery.glif</string>
<key>gift-card</key>
<string>gift-card.glif</string>
<key>glass</key>
<string>glass.glif</string>
<key>google-drive</key>
<string>google-drive.glif</string>
<key>google-play</key>
<string>google-play.glif</string>
<key>gps</key>
<string>gps.glif</string>
<key>grav</key>
<string>grav.glif</string>
<key>guitar</key>
<string>guitar.glif</string>
<key>gut</key>
<string>gut.glif</string>
<key>halter</key>
<string>halter.glif</string>
<key>hamburger</key>
<string>hamburger.glif</string>
<key>hat</key>
<string>hat.glif</string>
<key>hexagon</key>
<string>hexagon.glif</string>
<key>high-heel</key>
<string>high-heel.glif</string>
<key>hotdog</key>
<string>hotdog.glif</string>
<key>ice-cream</key>
<string>ice-cream.glif</string>
<key>id-card</key>
<string>id-card.glif</string>
<key>imdb</key>
<string>imdb.glif</string>
<key>infinity</key>
<string>infinity.glif</string>
<key>injection</key>
<string>injection.glif</string>
<key>isle</key>
<string>isle.glif</string>
<key>java</key>
<string>java.glif</string>
<key>layers</key>
<string>layers.glif</string>
<key>lips</key>
<string>lips.glif</string>
<key>lipstick</key>
<string>lipstick.glif</string>
<key>liver</key>
<string>liver.glif</string>
<key>lollipop</key>
<string>lollipop.glif</string>
<key>loyalty-card</key>
<string>loyalty-card.glif</string>
<key>lung</key>
<string>lung.glif</string>
<key>makeup-brushes</key>
<string>makeup-brushes.glif</string>
<key>maximize</key>
<string>maximize.glif</string>
<key>meat</key>
<string>meat.glif</string>
<key>medicine</key>
<string>medicine.glif</string>
<key>microscope</key>
<string>microscope.glif</string>
<key>milk-bottle</key>
<string>milk-bottle.glif</string>
<key>minimize</key>
<string>minimize.glif</string>
<key>molecule</key>
<string>molecule.glif</string>
<key>moon-cloud</key>
<string>moon-cloud.glif</string>
<key>mountains</key>
<string>mountains.glif</string>
<key>mushroom</key>
<string>mushroom.glif</string>
<key>mustache</key>
<string>mustache.glif</string>
<key>mysql</key>
<string>mysql.glif</string>
<key>nintendo</key>
<string>nintendo.glif</string>
<key>orange</key>
<string>orange.glif</string>
<key>palette-color</key>
<string>palette-color.glif</string>
<key>peach</key>
<string>peach.glif</string>
<key>pear</key>
<string>pear.glif</string>
<key>pi</key>
<string>pi.glif</string>
<key>pizza</key>
<string>pizza.glif</string>
<key>planet</key>
<string>planet.glif</string>
<key>plant</key>
<string>plant.glif</string>
<key>playstation</key>
<string>playstation.glif</string>
<key>poison</key>
<string>poison.glif</string>
<key>popcorn</key>
<string>popcorn.glif</string>
<key>popsicle</key>
<string>popsicle.glif</string>
<key>pulse</key>
<string>pulse.glif</string>
<key>python</key>
<string>python.glif</string>
<key>quora-circle</key>
<string>quora-circle.glif</string>
<key>quora-square</key>
<string>quora-square.glif</string>
<key>radioactive</key>
<string>radioactive.glif</string>
<key>raining</key>
<string>raining.glif</string>
<key>real-heart</key>
<string>real-heart.glif</string>
<key>refrigerator</key>
<string>refrigerator.glif</string>
<key>restore</key>
<string>restore.glif</string>
<key>ring</key>
<string>ring.glif</string>
<key>ruby</key>
<string>ruby.glif</string>
<key>ruby-o</key>
<string>ruby-o.glif</string>
<key>ruler</key>
<string>ruler.glif</string>
<key>shirt</key>
<string>shirt.glif</string>
<key>slash</key>
<string>slash.glif</string>
<key>smaller</key>
<string>smaller.glif</string>
<key>snowing</key>
<string>snowing.glif</string>
<key>soda</key>
<string>soda.glif</string>
<key>sofa</key>
<string>sofa.glif</string>
<key>soup</key>
<string>soup.glif</string>
<key>spermatozoon</key>
<string>spermatozoon.glif</string>
<key>spin-double</key>
<string>spin-double.glif</string>
<key>stomach</key>
<string>stomach.glif</string>
<key>storm</key>
<string>storm.glif</string>
<key>sun-cloud</key>
<string>sun-cloud.glif</string>
<key>sushi</key>
<string>sushi.glif</string>
<key>tacos</key>
<string>tacos.glif</string>
<key>telegram</key>
<string>telegram.glif</string>
<key>telegram-circle</key>
<string>telegram-circle.glif</string>
<key>telescope</key>
<string>telescope.glif</string>
<key>thermometer</key>
<string>thermometer.glif</string>
<key>thermometer-high</key>
<string>thermometer-high.glif</string>
<key>thermometer-low</key>
<string>thermometer-low.glif</string>
<key>thin-close</key>
<string>thin-close.glif</string>
<key>toilet</key>
<string>toilet.glif</string>
<key>tools</key>
<string>tools.glif</string>
<key>tooth</key>
<string>tooth.glif</string>
<key>tree</key>
<string>tree.glif</string>
<key>triangle-ruler</key>
<string>triangle-ruler.glif</string>
<key>umbrella</key>
<string>umbrella.glif</string>
<key>uterus</key>
<string>uterus.glif</string>
<key>virus</key>
<string>virus.glif</string>
<key>w3c</key>
<string>w3c.glif</string>
<key>walking</key>
<string>walking.glif</string>
<key>wallet</key>
<string>wallet.glif</string>
<key>wind</key>
<string>wind.glif</string>
<key>xbox</key>
<string>xbox.glif</string>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/font-awesome-extension.ufo/glyphs/contents.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-awesome-extension.ufo/glyphs/contents.plist",
"repo_id": "cascadia-code",
"token_count": 4852
}
| 1,155 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="dwm" format="2">
<advance width="1200"/>
<unicode hex="F356"/>
<note>
dwm
</note>
<outline>
<contour>
<point x="20" y="468" type="line"/>
<point x="117" y="468" type="line"/>
<point x="117" y="662" type="line"/>
<point x="310" y="662" type="line"/>
<point x="310" y="565" type="line"/>
<point x="117" y="565" type="line"/>
<point x="117" y="468" type="line"/>
<point x="793" y="468" type="line"/>
<point x="793" y="662" type="line"/>
<point x="890" y="662" type="line"/>
<point x="890" y="468" type="line"/>
<point x="987" y="468" type="line"/>
<point x="987" y="662" type="line"/>
<point x="1083" y="662" type="line"/>
<point x="1083" y="468" type="line"/>
<point x="1180" y="468" type="line"/>
<point x="1180" y="758" type="line"/>
<point x="697" y="758" type="line"/>
<point x="697" y="565" type="line"/>
<point x="600" y="565" type="line"/>
<point x="600" y="758" type="line"/>
<point x="503" y="758" type="line"/>
<point x="503" y="565" type="line"/>
<point x="407" y="565" type="line"/>
<point x="407" y="952" type="line"/>
<point x="310" y="952" type="line"/>
<point x="310" y="758" type="line"/>
<point x="20" y="758" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/glyphs/dwm.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/glyphs/dwm.glif",
"repo_id": "cascadia-code",
"token_count": 659
}
| 1,156 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="i_seti_npm" format="2">
<advance width="1200"/>
<unicode hex="E616"/>
<note>
i_seti_npm
</note>
<outline>
<contour>
<point x="1180" y="937" type="line"/>
<point x="20" y="937" type="line"/>
<point x="20" y="551" type="line"/>
<point x="349" y="551" type="line"/>
<point x="349" y="483" type="line"/>
<point x="605" y="483" type="line"/>
<point x="605" y="546" type="line"/>
<point x="1180" y="546" type="line"/>
</contour>
<contour>
<point x="339" y="869" type="line"/>
<point x="339" y="613" type="line"/>
<point x="276" y="613" type="line"/>
<point x="276" y="802" type="line"/>
<point x="213" y="802" type="line"/>
<point x="213" y="613" type="line"/>
<point x="83" y="613" type="line"/>
<point x="83" y="869" type="line"/>
</contour>
<contour>
<point x="658" y="613" type="curve"/>
<point x="527" y="613" type="line"/>
<point x="527" y="546" type="line"/>
<point x="397" y="546" type="line"/>
<point x="397" y="865" type="line"/>
<point x="653" y="865" type="line"/>
<point x="658" y="787"/>
<point x="658" y="700"/>
</contour>
<contour>
<point x="1112" y="869" type="line"/>
<point x="1112" y="613" type="line"/>
<point x="1049" y="613" type="line"/>
<point x="1049" y="802" type="line"/>
<point x="987" y="802" type="line"/>
<point x="987" y="613" type="line"/>
<point x="919" y="613" type="line"/>
<point x="919" y="802" type="line"/>
<point x="856" y="802" type="line"/>
<point x="856" y="613" type="line"/>
<point x="726" y="613" type="line"/>
<point x="726" y="869" type="line"/>
</contour>
<contour>
<point x="595" y="676" type="line"/>
<point x="595" y="802" type="line"/>
<point x="532" y="802" type="line"/>
<point x="532" y="676" type="line"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:42:08 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_npm.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_npm.glif",
"repo_id": "cascadia-code",
"token_count": 1060
}
| 1,157 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>com.schriftgestaltung.appVersion</key>
<string>3241</string>
<key>com.schriftgestaltung.disablesAutomaticAlignment</key>
<false/>
<key>com.schriftgestaltung.fontMasterID</key>
<string>m01</string>
<key>com.schriftgestaltung.formatVersion</key>
<integer>3</integer>
<key>com.schriftgestaltung.useGlyphOrder</key>
<true/>
<key>com.schriftgestaltung.useNiceNames</key>
<false/>
<key>public.glyphOrder</key>
<array>
<string>i_custom_folder_npm</string>
<string>i_custom_folder_git</string>
<string>i_custom_folder_config</string>
<string>i_custom_folder_github</string>
<string>i_custom_folder_open</string>
<string>i_custom_folder</string>
<string>i_seti_stylus</string>
<string>i_seti_project</string>
<string>i_custom_play_arrow</string>
<string>i_seti_sass</string>
<string>i_seti_rails</string>
<string>i_seti_ruby</string>
<string>i_seti_python</string>
<string>i_seti_heroku</string>
<string>i_seti_php</string>
<string>i_seti_markdown</string>
<string>i_seti_license</string>
<string>i_seti_json</string>
<string>i_seti_javascript</string>
<string>i_seti_image</string>
<string>i_seti_html</string>
<string>i_seti_mustache</string>
<string>i_seti_gulp</string>
<string>i_seti_grunt</string>
<string>i_custom_default</string>
<string>i_seti_folder</string>
<string>i_seti_css</string>
<string>i_seti_config</string>
<string>i_seti_npm</string>
<string>i_custom_home</string>
<string>i_seti_ejs</string>
<string>i_seti_xml</string>
<string>i_seti_bower</string>
<string>i_seti_coffee</string>
<string>i_seti_twig</string>
<string>i_custom_cpp</string>
<string>i_custom_c</string>
<string>i_seti_haskell</string>
<string>i_seti_lua</string>
<string>i_indent_line</string>
<string>i_seti_karma</string>
<string>i_seti_favicon</string>
<string>i_seti_julia</string>
<string>i_seti_react</string>
<string>i_custom_go</string>
<string>i_seti_go</string>
<string>i_seti_typescript</string>
<string>i_custom_msdos</string>
<string>i_custom_windows</string>
<string>i_custom_vim</string>
<string>i_custom_elm</string>
<string>i_custom_elixir</string>
<string>i_custom_electron</string>
<string>i_custom_crystal</string>
<string>i_custom_purescript</string>
<string>i_seti_puppet</string>
<string>i_custom_emacs</string>
<string>i_custom_orgmode</string>
<string>i_custom_kotlin</string>
<string>i_seti_apple</string>
<string>i_seti_argdown</string>
<string>i_seti_asm</string>
<string>i_seti_audio</string>
<string>i_seti_babel</string>
<string>i_seti_bazel</string>
<string>i_seti_bicep</string>
<string>i_seti_bsl</string>
<string>i_seti_cake_php</string>
<string>i_seti_cake</string>
<string>i_seti_checkbox</string>
<string>i_seti_checkbox_unchecked</string>
<string>i_seti_clock</string>
<string>i_seti_clojure</string>
<string>i_seti_code_climate</string>
<string>i_seti_code_search</string>
<string>i_seti_coldfusion</string>
<string>i_seti_cpp</string>
<string>i_seti_crystal_embedded</string>
<string>i_seti_c_sharp</string>
<string>i_seti_c</string>
<string>i_seti_csv</string>
<string>i_seti_cu</string>
<string>i_seti_dart</string>
<string>i_seti_db</string>
<string>i_seti_default</string>
<string>i_seti_deprecation_cop</string>
<string>i_seti_docker</string>
<string>i_seti_d</string>
<string>i_seti_editorconfig</string>
<string>i_seti_elixir_script</string>
<string>i_seti_error</string>
<string>i_seti_eslint</string>
<string>i_seti_ethereum</string>
<string>i_seti_firebase</string>
<string>i_seti_firefox</string>
<string>i_seti_font</string>
<string>i_seti_f_sharp</string>
<string>i_seti_github</string>
<string>i_seti_gitlab</string>
<string>i_seti_git</string>
<string>i_seti_go2</string>
<string>i_seti_godot</string>
<string>i_seti_gradle</string>
<string>i_seti_grails</string>
<string>i_seti_graphql</string>
<string>i_seti_hacklang</string>
<string>i_seti_haml</string>
<string>i_seti_happenings</string>
<string>i_seti_haxe</string>
<string>i_seti_hex</string>
<string>i_seti_ignored</string>
<string>i_seti_illustrator</string>
<string>i_seti_info</string>
<string>i_seti_ionic</string>
<string>i_seti_jade</string>
<string>i_seti_java</string>
<string>i_seti_jenkins</string>
<string>i_seti_jinja</string>
<string>i_seti_liquid</string>
<string>i_seti_livescript</string>
<string>i_seti_lock</string>
<string>i_seti_makefile</string>
<string>i_seti_maven</string>
<string>i_seti_mdo</string>
<string>i_seti_new_file</string>
<string>i_seti_nim</string>
<string>i_seti_notebook</string>
<string>i_seti_nunjucks</string>
<string>i_seti_ocaml</string>
<string>i_seti_odata</string>
<string>i_seti_pddl</string>
<string>i_seti_pdf</string>
<string>i_seti_perl</string>
<string>i_seti_photoshop</string>
<string>i_seti_pipeline</string>
<string>i_seti_plan</string>
<string>i_seti_platformio</string>
<string>i_seti_powershell</string>
<string>i_seti_prisma</string>
<string>i_seti_prolog</string>
<string>i_seti_pug</string>
<string>i_seti_reasonml</string>
<string>i_seti_rescript</string>
<string>i_seti_rollup</string>
<string>i_seti_r</string>
<string>i_seti_rust</string>
<string>i_seti_salesforce</string>
<string>i_seti_sbt</string>
<string>i_seti_scala</string>
<string>i_seti_search</string>
<string>i_seti_settings</string>
<string>i_seti_shell</string>
<string>i_seti_slim</string>
<string>i_seti_smarty</string>
<string>i_seti_spring</string>
<string>i_seti_stylelint</string>
<string>i_seti_sublime</string>
<string>i_seti_svelte</string>
<string>i_seti_svg</string>
<string>i_seti_swift</string>
<string>i_seti_terraform</string>
<string>i_seti_tex</string>
<string>i_seti_todo</string>
<string>i_seti_tsconfig</string>
<string>i_seti_vala</string>
<string>i_seti_video</string>
<string>i_seti_vue</string>
<string>i_seti_wasm</string>
<string>i_seti_wat</string>
<string>i_seti_webpack</string>
<string>i_seti_wgt</string>
<string>i_seti_word</string>
<string>i_seti_xls</string>
<string>i_seti_yarn</string>
<string>i_seti_yml</string>
<string>i_seti_zig</string>
<string>i_seti_zip</string>
<string>i_custom_asm</string>
<string>i_custom_v_lang</string>
<string>i_custom_folder_oct</string>
<string>i_custom_neovim</string>
<string>i_custom_fennel</string>
<string>i_custom_common_lisp</string>
<string>i_custom_scheme</string>
<string>i_custom_toml</string>
</array>
<key>public.postscriptNames</key>
<dict>
<key>i_custom_asm</key>
<string>uniE5AB</string>
<key>i_custom_c</key>
<string>uniE51E</string>
<key>i_custom_common_lisp</key>
<string>uniE5B0</string>
<key>i_custom_cpp</key>
<string>uniE51D</string>
<key>i_custom_crystal</key>
<string>uniE52F</string>
<key>i_custom_default</key>
<string>uniE512</string>
<key>i_custom_electron</key>
<string>uniE52E</string>
<key>i_custom_elixir</key>
<string>uniE52D</string>
<key>i_custom_elm</key>
<string>uniE52C</string>
<key>i_custom_emacs</key>
<string>uniE532</string>
<key>i_custom_fennel</key>
<string>uniE5AF</string>
<key>i_custom_folder</key>
<string>uniE4FF</string>
<key>i_custom_folder_config</key>
<string>uniE4FC</string>
<key>i_custom_folder_git</key>
<string>uniE4FB</string>
<key>i_custom_folder_github</key>
<string>uniE4FD</string>
<key>i_custom_folder_npm</key>
<string>uniE4FA</string>
<key>i_custom_folder_oct</key>
<string>uniE5AD</string>
<key>i_custom_folder_open</key>
<string>uniE4FE</string>
<key>i_custom_go</key>
<string>uniE526</string>
<key>i_custom_home</key>
<string>uniE517</string>
<key>i_custom_kotlin</key>
<string>uniE534</string>
<key>i_custom_msdos</key>
<string>uniE529</string>
<key>i_custom_neovim</key>
<string>uniE5AE</string>
<key>i_custom_orgmode</key>
<string>uniE533</string>
<key>i_custom_play_arrow</key>
<string>uniE502</string>
<key>i_custom_purescript</key>
<string>uniE530</string>
<key>i_custom_scheme</key>
<string>uniE5B1</string>
<key>i_custom_toml</key>
<string>uniE5B2</string>
<key>i_custom_v_lang</key>
<string>uniE5AC</string>
<key>i_custom_vim</key>
<string>uniE52B</string>
<key>i_custom_windows</key>
<string>uniE52A</string>
<key>i_indent_line</key>
<string>uniE521</string>
<key>i_seti_apple</key>
<string>uniE535</string>
<key>i_seti_argdown</key>
<string>uniE536</string>
<key>i_seti_asm</key>
<string>uniE537</string>
<key>i_seti_audio</key>
<string>uniE538</string>
<key>i_seti_babel</key>
<string>uniE539</string>
<key>i_seti_bazel</key>
<string>uniE53A</string>
<key>i_seti_bicep</key>
<string>uniE53B</string>
<key>i_seti_bower</key>
<string>uniE51A</string>
<key>i_seti_bsl</key>
<string>uniE53C</string>
<key>i_seti_c</key>
<string>uniE549</string>
<key>i_seti_c_sharp</key>
<string>uniE548</string>
<key>i_seti_cake</key>
<string>uniE53E</string>
<key>i_seti_cake_php</key>
<string>uniE53D</string>
<key>i_seti_checkbox</key>
<string>uniE53F</string>
<key>i_seti_checkbox_unchecked</key>
<string>uniE540</string>
<key>i_seti_clock</key>
<string>uniE541</string>
<key>i_seti_clojure</key>
<string>uniE542</string>
<key>i_seti_code_climate</key>
<string>uniE543</string>
<key>i_seti_code_search</key>
<string>uniE544</string>
<key>i_seti_coffee</key>
<string>uniE51B</string>
<key>i_seti_coldfusion</key>
<string>uniE545</string>
<key>i_seti_config</key>
<string>uniE515</string>
<key>i_seti_cpp</key>
<string>uniE546</string>
<key>i_seti_crystal_embedded</key>
<string>uniE547</string>
<key>i_seti_css</key>
<string>uniE514</string>
<key>i_seti_csv</key>
<string>uniE54A</string>
<key>i_seti_cu</key>
<string>uniE54B</string>
<key>i_seti_d</key>
<string>uniE551</string>
<key>i_seti_dart</key>
<string>uniE54C</string>
<key>i_seti_db</key>
<string>uniE54D</string>
<key>i_seti_default</key>
<string>uniE54E</string>
<key>i_seti_deprecation_cop</key>
<string>uniE54F</string>
<key>i_seti_docker</key>
<string>uniE550</string>
<key>i_seti_editorconfig</key>
<string>uniE552</string>
<key>i_seti_ejs</key>
<string>uniE518</string>
<key>i_seti_elixir_script</key>
<string>uniE553</string>
<key>i_seti_error</key>
<string>uniE554</string>
<key>i_seti_eslint</key>
<string>uniE555</string>
<key>i_seti_ethereum</key>
<string>uniE556</string>
<key>i_seti_f_sharp</key>
<string>uniE55A</string>
<key>i_seti_favicon</key>
<string>uniE523</string>
<key>i_seti_firebase</key>
<string>uniE557</string>
<key>i_seti_firefox</key>
<string>uniE558</string>
<key>i_seti_folder</key>
<string>uniE513</string>
<key>i_seti_font</key>
<string>uniE559</string>
<key>i_seti_git</key>
<string>uniE55D</string>
<key>i_seti_github</key>
<string>uniE55B</string>
<key>i_seti_gitlab</key>
<string>uniE55C</string>
<key>i_seti_go</key>
<string>uniE527</string>
<key>i_seti_go2</key>
<string>uniE55E</string>
<key>i_seti_godot</key>
<string>uniE55F</string>
<key>i_seti_gradle</key>
<string>uniE560</string>
<key>i_seti_grails</key>
<string>uniE561</string>
<key>i_seti_graphql</key>
<string>uniE562</string>
<key>i_seti_grunt</key>
<string>uniE511</string>
<key>i_seti_gulp</key>
<string>uniE510</string>
<key>i_seti_hacklang</key>
<string>uniE563</string>
<key>i_seti_haml</key>
<string>uniE564</string>
<key>i_seti_happenings</key>
<string>uniE565</string>
<key>i_seti_haskell</key>
<string>uniE51F</string>
<key>i_seti_haxe</key>
<string>uniE566</string>
<key>i_seti_heroku</key>
<string>uniE507</string>
<key>i_seti_hex</key>
<string>uniE567</string>
<key>i_seti_html</key>
<string>uniE50E</string>
<key>i_seti_ignored</key>
<string>uniE568</string>
<key>i_seti_illustrator</key>
<string>uniE569</string>
<key>i_seti_image</key>
<string>uniE50D</string>
<key>i_seti_info</key>
<string>uniE56A</string>
<key>i_seti_ionic</key>
<string>uniE56B</string>
<key>i_seti_jade</key>
<string>uniE56C</string>
<key>i_seti_java</key>
<string>uniE56D</string>
<key>i_seti_javascript</key>
<string>uniE50C</string>
<key>i_seti_jenkins</key>
<string>uniE56E</string>
<key>i_seti_jinja</key>
<string>uniE56F</string>
<key>i_seti_json</key>
<string>uniE50B</string>
<key>i_seti_julia</key>
<string>uniE524</string>
<key>i_seti_karma</key>
<string>uniE522</string>
<key>i_seti_license</key>
<string>uniE50A</string>
<key>i_seti_liquid</key>
<string>uniE570</string>
<key>i_seti_livescript</key>
<string>uniE571</string>
<key>i_seti_lock</key>
<string>uniE572</string>
<key>i_seti_lua</key>
<string>uniE520</string>
<key>i_seti_makefile</key>
<string>uniE573</string>
<key>i_seti_markdown</key>
<string>uniE509</string>
<key>i_seti_maven</key>
<string>uniE574</string>
<key>i_seti_mdo</key>
<string>uniE575</string>
<key>i_seti_mustache</key>
<string>uniE50F</string>
<key>i_seti_new_file</key>
<string>uniE576</string>
<key>i_seti_nim</key>
<string>uniE577</string>
<key>i_seti_notebook</key>
<string>uniE578</string>
<key>i_seti_npm</key>
<string>uniE516</string>
<key>i_seti_nunjucks</key>
<string>uniE579</string>
<key>i_seti_ocaml</key>
<string>uniE57A</string>
<key>i_seti_odata</key>
<string>uniE57B</string>
<key>i_seti_pddl</key>
<string>uniE57C</string>
<key>i_seti_pdf</key>
<string>uniE57D</string>
<key>i_seti_perl</key>
<string>uniE57E</string>
<key>i_seti_photoshop</key>
<string>uniE57F</string>
<key>i_seti_php</key>
<string>uniE508</string>
<key>i_seti_pipeline</key>
<string>uniE580</string>
<key>i_seti_plan</key>
<string>uniE581</string>
<key>i_seti_platformio</key>
<string>uniE582</string>
<key>i_seti_powershell</key>
<string>uniE583</string>
<key>i_seti_prisma</key>
<string>uniE584</string>
<key>i_seti_project</key>
<string>uniE501</string>
<key>i_seti_prolog</key>
<string>uniE585</string>
<key>i_seti_pug</key>
<string>uniE586</string>
<key>i_seti_puppet</key>
<string>uniE531</string>
<key>i_seti_python</key>
<string>uniE506</string>
<key>i_seti_r</key>
<string>uniE58A</string>
<key>i_seti_rails</key>
<string>uniE504</string>
<key>i_seti_react</key>
<string>uniE525</string>
<key>i_seti_reasonml</key>
<string>uniE587</string>
<key>i_seti_rescript</key>
<string>uniE588</string>
<key>i_seti_rollup</key>
<string>uniE589</string>
<key>i_seti_ruby</key>
<string>uniE505</string>
<key>i_seti_rust</key>
<string>uniE58B</string>
<key>i_seti_salesforce</key>
<string>uniE58C</string>
<key>i_seti_sass</key>
<string>uniE503</string>
<key>i_seti_sbt</key>
<string>uniE58D</string>
<key>i_seti_scala</key>
<string>uniE58E</string>
<key>i_seti_search</key>
<string>uniE58F</string>
<key>i_seti_settings</key>
<string>uniE590</string>
<key>i_seti_shell</key>
<string>uniE591</string>
<key>i_seti_slim</key>
<string>uniE592</string>
<key>i_seti_smarty</key>
<string>uniE593</string>
<key>i_seti_spring</key>
<string>uniE594</string>
<key>i_seti_stylelint</key>
<string>uniE595</string>
<key>i_seti_stylus</key>
<string>uniE500</string>
<key>i_seti_sublime</key>
<string>uniE596</string>
<key>i_seti_svelte</key>
<string>uniE597</string>
<key>i_seti_svg</key>
<string>uniE598</string>
<key>i_seti_swift</key>
<string>uniE599</string>
<key>i_seti_terraform</key>
<string>uniE59A</string>
<key>i_seti_tex</key>
<string>uniE59B</string>
<key>i_seti_todo</key>
<string>uniE59C</string>
<key>i_seti_tsconfig</key>
<string>uniE59D</string>
<key>i_seti_twig</key>
<string>uniE51C</string>
<key>i_seti_typescript</key>
<string>uniE528</string>
<key>i_seti_vala</key>
<string>uniE59E</string>
<key>i_seti_video</key>
<string>uniE59F</string>
<key>i_seti_vue</key>
<string>uniE5A0</string>
<key>i_seti_wasm</key>
<string>uniE5A1</string>
<key>i_seti_wat</key>
<string>uniE5A2</string>
<key>i_seti_webpack</key>
<string>uniE5A3</string>
<key>i_seti_wgt</key>
<string>uniE5A4</string>
<key>i_seti_word</key>
<string>uniE5A5</string>
<key>i_seti_xls</key>
<string>uniE5A6</string>
<key>i_seti_xml</key>
<string>uniE519</string>
<key>i_seti_yarn</key>
<string>uniE5A7</string>
<key>i_seti_yml</key>
<string>uniE5A8</string>
<key>i_seti_zig</key>
<string>uniE5A9</string>
<key>i_seti_zip</key>
<string>uniE5AA</string>
</dict>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/lib.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/lib.plist",
"repo_id": "cascadia-code",
"token_count": 10162
}
| 1,158 |
# syntax=docker/dockerfile:1

# Base stage: AzureML OpenMPI + CUDA 11.8 + cuDNN 8 image, pinned by digest for reproducibility.
FROM mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.8-cudnn8-ubuntu22.04@sha256:fb6968427928df5d38a88b736f47f45f640eb874277678ea91a8a34649d9792d as base
USER root

# remove the unused conda environments
RUN conda install anaconda-clean && \
anaconda-clean -y && \
rm -rf /opt/miniconda

# System build deps; symlink python3 -> python and assert the interpreter is 3.10 as expected.
RUN apt-get update && \
apt-get install -y graphviz-dev python3-dev python3-pip && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
ln -s /usr/bin/python3 /usr/bin/python && \
python -c 'import sys; assert sys.version_info[:2] == (3, 10)'

# Poetry config: disable venv creation so packages install into the system interpreter; pin Poetry.
ENV POETRY_CACHE_DIR="/root/.cache/pypoetry" \
POETRY_NO_INTERACTION=1 \
POETRY_VIRTUALENVS_CREATE=false \
POETRY_VIRTUALENVS_IN_PROJECT=false \
POETRY_VERSION=1.7.1

RUN python -m pip install -U pip setuptools wheel
RUN python -m pip install poetry==$POETRY_VERSION

# Install dependencies separately to allow dependency caching
# Note: Temporarily create dummy content to allow installing the dev dependencies.
WORKDIR /workspaces/causica
COPY pyproject.toml poetry.lock ./
# Cache mount persists the Poetry cache across builds; --no-root/--no-directory installs deps only.
RUN --mount=type=cache,target=/root/.cache/pypoetry,sharing=locked \
poetry install --only main --no-root --no-directory

# Deploy stage: project source on top of the pre-installed dependencies.
FROM base as deploy
COPY . /workspaces/causica

# Dev stage: interactive shell (zsh + powerlevel10k + scm_breeze) and Azure tooling.
FROM base as dev

# Install development shell and utils
COPY .devcontainer/.p10k.zsh /root/
RUN <<EOT
apt-get update
apt-get install -y zsh ruby-full moby-cli
curl -sL https://aka.ms/InstallAzureCLIDeb | bash
apt-get clean -y
rm -rf /var/lib/apt/lists/*
git clone --depth=1 https://github.com/scmbreeze/scm_breeze.git ~/.scm_breeze
~/.scm_breeze/install.sh
git clone --depth=1 https://github.com/romkatv/powerlevel10k.git ~/powerlevel10k
mkdir -p ~/command_history
cat <<' EOF' >> ~/.zshrc
# Set history file in mountable location
export HISTFILE=~/command_history/.zsh_history
export HISTFILESIZE=10000000
export HISTSIZE=10000000
export SAVEHIST=10000000
export HISTTIMEFORMAT="[%F %T] "
setopt HIST_IGNORE_ALL_DUPS
setopt EXTENDED_HISTORY
setopt INC_APPEND_HISTORY
setopt APPENDHISTORY
source ~/powerlevel10k/powerlevel10k.zsh-theme
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
[ -s "$HOME/.scm_breeze/scm_breeze.sh" ] && source "$HOME/.scm_breeze/scm_breeze.sh"
# Set up keybindings for word navigation using ctrl + left/right
# The original key bindings are esc + b/f
bindkey "^[[1;5C" forward-word
bindkey "^[[1;5D" backward-word
EOF
EOT

# Dev dependencies (again deps only; the project itself is mounted/copied at run time).
RUN --mount=type=cache,target=/root/.cache/pypoetry,sharing=locked \
poetry install --with dev --no-root --no-directory
causica/Dockerfile/0
|
{
"file_path": "causica/Dockerfile",
"repo_id": "causica",
"token_count": 1138
}
| 1,159 |
# Poetry packaging metadata for the SCOTCH research experiments
# (neural structure learning with stochastic differential equations).
[tool.poetry]
name = "scotch"
version = "0.1.0"
description = "Neural structure learning with stochastic differential equations"
readme = "README.md"
authors = ["Benjie Wang", "Joel Jennings", "Wenbo Gong"]
packages = [
{ include = "scotch", from = "src" }
]
license = "MIT"

[tool.poetry.dependencies]
python = "~3.10"
causica = "0.3.4"
torchsde = "0.2.5"
# NOTE(review): isort is a development tool; presumably it belongs in a dev
# dependency group rather than the runtime dependencies — confirm before moving.
isort = "^5.10.1"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
|
causica/research_experiments/scotch/pyproject.toml/0
|
{
"file_path": "causica/research_experiments/scotch/pyproject.toml",
"repo_id": "causica",
"token_count": 198
}
| 1,160 |
from dataclasses import dataclass, field
import torch
from tensordict import TensorDictBase
from causica.datasets.causica_dataset_format import CounterfactualWithEffects, InterventionWithEffects
@dataclass
class CausalDataset:
    """Bundle of observational data together with its generating causal structure.

    Attributes:
        observations: TensorDictBase containing the observations.
        graph: Adjacency matrix of the causal graph.
        node_names: Names of the observed variables, derived from ``observations`` (set automatically).
        noise: Optional, TensorDictBase containing the noise used for generating the observations.
        counterfactuals: Optional, List of CounterfactualWithEffects.
        interventions: Optional, List of InterventionWithEffects.
    """

    observations: TensorDictBase
    graph: torch.Tensor
    node_names: list[str] = field(init=False)  # the nodes that are neither conditioned nor sampled
    noise: TensorDictBase | None = None
    counterfactuals: list[CounterfactualWithEffects] | None = None
    interventions: list[InterventionWithEffects] | None = None

    def __post_init__(self):
        """Validate consistency of all components and derive ``node_names``."""
        # Observations must form a single flat batch of samples.
        if len(self.observations.batch_size) != 1:
            raise ValueError(f"Batch size must be a scalar, got {self.observations.batch_size}")

        # The graph must be a square adjacency matrix over exactly the observed variables.
        if self.graph.ndim != 2:
            raise ValueError("Expected graph dimension to be 2")
        if self.graph.shape[-1] != self.graph.shape[-2]:
            raise ValueError("The graph must be a square matrix.")
        if self.graph.shape[-1] != len(list(self.observations.keys())):
            raise ValueError(
                f"Graph shape {self.graph.shape} does not match observations: {list(self.observations.keys())}"
            )

        observation_keys = set(self.observations.keys())

        # Noise, if provided, must line up with the observations both in batch and variables.
        if self.noise is not None:
            if self.noise.batch_size != self.observations.batch_size:
                raise ValueError("Noise batch size does not match observations batch size")
            if set(self.noise.keys()) != observation_keys:
                raise ValueError("Noise keys do not match observations keys")

        # Every counterfactual's factual data must cover the same variables as the observations.
        if self.counterfactuals is not None:
            for cf in self.counterfactuals:
                if set(cf[0].factual_data.keys()) != observation_keys:
                    raise ValueError("Some counterfactual factual data keys do not match observations keys")

        # Every intervention must cover the same variables as the observations.
        if self.interventions is not None:
            for intervention in self.interventions:
                if set(intervention[0].intervention_data.keys()) != observation_keys:
                    raise ValueError("Some intervention data keys do not match observations keys")

        self.node_names = list(self.observations.keys())
|
causica/src/causica/datasets/causal_dataset.py/0
|
{
"file_path": "causica/src/causica/datasets/causal_dataset.py",
"repo_id": "causica",
"token_count": 945
}
| 1,161 |
from typing import Optional
import torch
import torch.distributions as td
import torch.nn.functional as F
from torch import nn
from torch.distributions.utils import logits_to_probs
from causica.distributions.adjacency.adjacency_distributions import AdjacencyDistribution
from causica.distributions.distribution_module import DistributionModule
from causica.distributions.gumbel_binary import gumbel_softmax_binary
from causica.triangular_transformations import fill_triangular
class ENCOAdjacencyDistribution(AdjacencyDistribution):
    """
    The parameterization from the ENCO paper (https://arxiv.org/pdf/2107.10483.pdf).

    For each edge, it parameterizes the existence and orientation separately.

    We represent the orientation logit probabilities (θ) as a vector of length n(n - 1) / 2, (the strictly lower triangle)
    We represent the existence logit probabilities (γ) as a matrix of size (n, n), where the main diagonal is ignored.

    The probability of an edge existing is pᵢⱼ = σ(γᵢⱼ)σ(θᵢⱼ), where:
        σ is the sigmoid function
        θᵢⱼ is the orientation logit and θᵢⱼ = -θⱼᵢ, so the probabilities sum to one.
        γᵢⱼ is the logit the edge exists and is independent for each i, j. Note γᵢⱼ need not equal -γⱼᵢ

    The methods for this class construct logit(pᵢⱼ), which is then a matrix of independent Bernoulli.
    """

    arg_constraints = {"logits_exist": td.constraints.real, "logits_orient": td.constraints.real}

    def __init__(
        self,
        logits_exist: torch.Tensor,
        logits_orient: torch.Tensor,
        validate_args: Optional[bool] = None,
    ):
        """
        Args:
            logits_exist: The logits for the existence of an edge, a batch_shape + (n, n) tensor
            logits_orient: The logits for the orientation of each edge, a tensor with shape batch_shape + n(n-1)/2
            validate_args: Whether to validate the arguments. Passed to the superclass
        """
        num_nodes = logits_exist.shape[-1]
        if validate_args:
            assert len(logits_exist.shape) >= 2, "Logits_exist must be a matrix, batching is not supported"
            assert logits_exist.shape[-2:] == (num_nodes, num_nodes), "Invalid logits_exist shape"
            # Bug fix: this assertion validates logits_orient; the message previously said "Logits_exist".
            assert len(logits_orient.shape) >= 1, "Logits_orient must be 1 dimensional, batching is not supported"
            assert logits_orient.shape[-1] == (num_nodes * (num_nodes - 1)) // 2, "Invalid logits_orient shape"
            assert logits_exist.device == logits_orient.device, "Logits must exist on the same device"
        self.logits_exist = logits_exist
        self.logits_orient = logits_orient
        super().__init__(num_nodes, validate_args=validate_args)

    def _get_independent_bernoulli_logits(self) -> torch.Tensor:
        """
        Construct the matrix logit(pᵢⱼ).

        See the class docstring. We use the following derivation

            logit(pᵢⱼ) = - log(pᵢⱼ⁻¹ - 1)
                       = - log((1 + exp(-γᵢⱼ))(1 + exp(-θᵢⱼ)) - 1)
                       = - log(exp(-γᵢⱼ) + exp(-θᵢⱼ) + exp(-θᵢⱼ - γᵢⱼ))
                       = - logsumexp([-γᵢⱼ, -θᵢⱼ, -θᵢⱼ - γᵢⱼ])
        """
        # Expand the orientation vector to a full antisymmetric matrix of -θ: (..., num_nodes, num_nodes)
        neg_theta = fill_triangular(self.logits_orient, upper=True) - fill_triangular(self.logits_orient, upper=False)
        # logsumexp over the three exponential terms gives a numerically stable logit(pᵢⱼ).
        return -torch.logsumexp(
            torch.stack([-self.logits_exist, neg_theta, neg_theta - self.logits_exist], dim=-1), dim=-1
        )

    @staticmethod
    def base_dist(logits: torch.Tensor) -> td.Distribution:
        """A matrix of independent Bernoulli distributions."""
        return td.Independent(td.Bernoulli(logits=logits), 2)

    def relaxed_sample(self, sample_shape: torch.Size = torch.Size(), temperature: float = 0.0) -> torch.Tensor:
        """
        Sample from a relaxed distribution. We use a Gumbel Softmax.

        Args:
            sample_shape: the shape of the samples to return
            temperature: temperature of the Gumbel-Softmax relaxation (passed as ``tau``)
        Returns:
            A tensor of shape sample_shape + batch_shape + (num_nodes, num_nodes)
        """
        logits = self._get_independent_bernoulli_logits()
        expanded_logits = logits.expand(*(sample_shape + logits.shape))
        samples = gumbel_softmax_binary(logits=expanded_logits, tau=temperature, hard=True)
        # Zero the diagonal: no self-loops in the adjacency matrix.
        return samples * (1.0 - torch.eye(self.num_nodes, device=logits.device))

    def sample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor:
        """
        Sample from the underlying independent Bernoulli distribution.

        Gradients will not flow through this method, use relaxed_sample instead.

        Args:
            sample_shape: the shape of the samples to return
        Returns:
            A tensor of shape sample_shape + batch_shape + (num_nodes, num_nodes)
        """
        logits = self._get_independent_bernoulli_logits()
        samples = self.base_dist(logits).sample(sample_shape=sample_shape)
        torch.diagonal(samples, dim1=-2, dim2=-1).zero_()  # zero the diagonal elements
        return samples

    def entropy(self) -> torch.Tensor:
        """
        Get the entropy of the independent Bernoulli distribution.

        Returns:
            A tensor of shape batch_shape, with the entropy of the distribution
        """
        logits = self._get_independent_bernoulli_logits()
        # BCE of a Bernoulli's logits against its own probabilities equals its entropy, elementwise.
        entropy = F.binary_cross_entropy_with_logits(logits, logits_to_probs(logits, is_binary=True), reduction="none")
        # Sum over the matrix, subtracting the (meaningless) diagonal contributions.
        return torch.sum(entropy, dim=(-2, -1)) - torch.sum(torch.diagonal(entropy, dim1=-2, dim2=-1), dim=-1)

    @property
    def mean(self) -> torch.Tensor:
        """
        Return the mean of the underlying independent Bernoulli distribution.

        This will be a matrix with all entries in the interval [0, 1].

        Returns:
            A tensor of shape batch_shape + (num_nodes, num_nodes)
        """
        logits = self._get_independent_bernoulli_logits()
        return self.base_dist(logits).mean * (1.0 - torch.eye(self.num_nodes, device=logits.device))

    @property
    def mode(self) -> torch.Tensor:
        """
        Return the mode of the underlying independent Bernoulli distribution.

        This will be an adjacency matrix.

        Returns:
            A tensor of shape batch_shape + (num_nodes, num_nodes)
        """
        logits = self._get_independent_bernoulli_logits()
        # bernoulli mode can be nan for very small logits, favour sparseness and set to 0
        return torch.nan_to_num(self.base_dist(logits).mode, nan=0.0) * (
            1.0 - torch.eye(self.num_nodes, device=logits.device)
        )

    def log_prob(self, value: torch.Tensor) -> torch.Tensor:
        """
        Get the log probability of each tensor from the sample space

        Args:
            value: a binary matrix of shape value_shape + batch_shape + (n, n)
        Returns:
            A tensor of shape value_shape + batch_shape, with the log probabilities of each tensor in the batch.
        """
        # need to subtract the diagonal log probabilities
        logits = self._get_independent_bernoulli_logits()
        full_log_prob = self.base_dist(logits).log_prob(value)
        diag_log_prob = td.Independent(td.Bernoulli(logits=torch.diagonal(logits, dim1=-2, dim2=-1)), 1).log_prob(
            torch.diagonal(value, dim1=-2, dim2=-1)
        )
        return full_log_prob - diag_log_prob
class ENCOAdjacencyDistributionModule(DistributionModule[ENCOAdjacencyDistribution]):
    """Holds the learnable ENCO logits and builds `ENCOAdjacencyDistribution`s from them."""

    def __init__(self, num_nodes: int) -> None:
        """
        Args:
            num_nodes: Number of nodes in the graph; fixes the shapes of the logit parameters.
        """
        super().__init__()
        # Number of unordered node pairs, i.e. the size of the strictly lower triangle.
        num_node_pairs = int(num_nodes * (num_nodes - 1) / 2)
        # Edge-existence logits γ, one per ordered pair (the diagonal is ignored downstream).
        self.logits_exist = nn.Parameter(torch.zeros(num_nodes, num_nodes), requires_grad=True)
        # Edge-orientation logits θ, one per unordered pair.
        self.logits_orient = nn.Parameter(torch.zeros(num_node_pairs), requires_grad=True)

    def forward(self) -> ENCOAdjacencyDistribution:
        """Create a distribution over adjacency matrices from the current parameters."""
        return ENCOAdjacencyDistribution(logits_exist=self.logits_exist, logits_orient=self.logits_orient)
|
causica/src/causica/distributions/adjacency/enco.py/0
|
{
"file_path": "causica/src/causica/distributions/adjacency/enco.py",
"repo_id": "causica",
"token_count": 3344
}
| 1,162 |
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Optional
import mlflow
import pytorch_lightning as pl
import torch
from pytorch_lightning.cli import LightningArgumentParser, Namespace, SaveConfigCallback
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities.types import STEP_OUTPUT
from causica.training.auglag import AugLagLossCalculator, AugLagLR
class AuglagLRCallback(pl.Callback):
    """Wrapper Class to make the Auglag Learning Rate Scheduler compatible with Pytorch Lightning"""

    def __init__(self, scheduler: AugLagLR, log_auglag: bool = False, disabled_epochs: Optional[set[int]] = None):
        """
        Args:
            scheduler: The auglag learning rate scheduler to wrap.
            log_auglag: Whether to log the auglag state as metrics at the end of each epoch.
            disabled_epochs: Optional set of epoch indices; while the current epoch is in this set
                the scheduler is disabled (see `on_train_batch_end`).
        """
        self.scheduler = scheduler
        self._log_auglag = log_auglag
        self._disabled_epochs = disabled_epochs

    def on_train_batch_end(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, outputs: STEP_OUTPUT, batch: Any, batch_idx: int
    ) -> None:
        """Step the auglag scheduler after each training batch and stop training once it converges.

        Expects `outputs` to be a dict containing "loss" and "constraint" (the Lagrangian penalty).
        """
        # NOTE(review): trainer is used below (current_epoch / should_stop), so this discard is redundant.
        _ = trainer
        _ = batch
        _ = batch_idx
        assert isinstance(outputs, dict)
        optimizer = pl_module.optimizers()
        assert isinstance(optimizer, torch.optim.Optimizer)
        # The module is expected to expose the AugLag loss calculator as `auglag_loss`.
        auglag_loss: AugLagLossCalculator = pl_module.auglag_loss  # type: ignore
        # Disable if we reached a disabled epoch - disable, otherwise make sure the scheduler is enabled
        if self._disabled_epochs and trainer.current_epoch in self._disabled_epochs:
            self.scheduler.disable(auglag_loss)
        else:
            self.scheduler.enable(auglag_loss)
        is_converged, convergence_reasons = self.scheduler.step(
            optimizer=optimizer,
            loss=auglag_loss,
            loss_value=outputs["loss"],
            lagrangian_penalty=outputs["constraint"],
        )
        # Notify trainer to stop if the auglag algorithm has converged
        if is_converged:
            # Record why the inner and outer optimization loops were deemed converged.
            pl_module.log(
                "auglag_inner_convergence_reason",
                convergence_reasons.inner_convergence_reason.value,
                on_epoch=True,
                rank_zero_only=True,
                prog_bar=False,
            )
            pl_module.log(
                "auglag_outer_convergence_reason",
                convergence_reasons.outer_convergence_reason.value,
                on_epoch=True,
                rank_zero_only=True,
                prog_bar=False,
            )
            trainer.should_stop = True

    def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        """Optionally log the internal auglag scheduler state as epoch-level metrics."""
        _ = trainer
        if self._log_auglag:
            # Snapshot of the scheduler's bookkeeping counters, useful for debugging convergence.
            auglag_state = {
                "num_lr_updates": self.scheduler.num_lr_updates,
                "outer_opt_counter": self.scheduler.outer_opt_counter,
                "step_counter": self.scheduler.step_counter,
                "outer_below_penalty_tol": self.scheduler.outer_below_penalty_tol,
                "outer_max_rho": self.scheduler.outer_max_rho,
                "last_best_step": self.scheduler.last_best_step,
                "last_lr_update_step": self.scheduler.last_lr_update_step,
            }
            pl_module.log_dict(auglag_state, on_epoch=True, rank_zero_only=True, prog_bar=False)
class MLFlowSaveConfigCallback(SaveConfigCallback):
    """Logs the config using MLFlow if there is an active run, otherwise saves locally as the superclass."""

    def __init__(
        self,
        parser: LightningArgumentParser,
        config: Namespace,
        config_filename: str = "config.yaml",
        multifile: bool = False,
    ) -> None:
        # Always overwrite so repeated fit calls do not fail on an existing local config file.
        super().__init__(
            parser=parser, config=config, config_filename=config_filename, overwrite=True, multifile=multifile
        )

    def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: TrainerFn) -> None:  # type: ignore
        # Only rank zero writes the config, and only when fitting.
        if not (trainer.is_global_zero and stage == TrainerFn.FITTING):
            return
        with TemporaryDirectory() as staging_dir:
            staged_config_path = str(Path(staging_dir) / self.config_filename)
            self.parser.save(
                self.config,
                staged_config_path,
                skip_none=False,
                overwrite=self.overwrite,
                multifile=self.multifile,
            )
            # AzureML throws a raw Exception if the artifact already exists, so we check the error message
            try:
                mlflow.log_artifact(staged_config_path)
            # pylint: disable=broad-exception-caught
            except Exception as e:
                if "Resource Conflict" not in str(e.args):
                    raise e
|
causica/src/causica/lightning/callbacks.py/0
|
{
"file_path": "causica/src/causica/lightning/callbacks.py",
"repo_id": "causica",
"token_count": 2263
}
| 1,163 |
import pytest
import torch
from tensordict import TensorDict, TensorDictBase
from causica.datasets.tensordict_utils import expand_tensordict_groups, tensordict_shapes, unbind_values, unbound_items
def _assert_tensordict_allclose(a: TensorDictBase, b: TensorDictBase) -> None:
    """Assert that two tensordicts have identical nested keys and allclose leaf values.

    Both item sequences are materialized and compared for length first: plain
    ``zip`` truncates to the shorter input, so without the length check a
    tensordict with extra or missing keys could pass undetected.
    """
    items_a = list(a.items(include_nested=True, leaves_only=True))
    items_b = list(b.items(include_nested=True, leaves_only=True))
    assert len(items_a) == len(items_b)
    assert all(
        key_a == key_b and torch.allclose(value_a, value_b)
        for (key_a, value_a), (key_b, value_b) in zip(items_a, items_b)
    )
def test_expand_tensordict_groups():
    """Expanding a grouped TensorDict should split groups into per-variable columns."""
    td = TensorDict({"group": torch.ones((2, 3)), "categorical": torch.ones((2, 4))}, batch_size=2)
    variable_groups = {"group": ["var1", "var2", "var3"], "categorical": ["categorical"]}

    expanded = expand_tensordict_groups(td, variable_groups)

    # The group yields one single-column entry per member; ungrouped keys survive intact.
    assert set(expanded.keys()) == {"var1", "var2", "var3", "categorical"}
    for name in ("var1", "var2", "var3"):
        assert expanded[name].shape == (2, 1)
    assert expanded["categorical"].shape == (2, 4)
def test_unbound_items():
    """Unbinding items along dim 0 should yield one stringified sub-key per slice."""
    td = TensorDict({"a": torch.tensor([[[3]], [[4]]])}, batch_size=2)
    expected = {
        ("a", "0"): torch.tensor([[3]]),
        ("a", "1"): torch.tensor([[4]]),
    }
    assert dict(unbound_items(td, dim=0)) == expected
def test_unbind_values():
    """Equivalent positive and negative dims must produce identical results."""
    td = TensorDict({"a": torch.rand(2, 3, 4), "b": torch.rand(2, 3, 5)}, batch_size=2)

    by_positive = unbind_values(td, dim=1)
    by_negative = unbind_values(td, dim=-2)

    assert by_positive.batch_size == by_negative.batch_size == torch.Size([2])

    # Each of the three slices keeps the trailing feature dimension of its entry.
    expected_shapes = {("a", str(i)): torch.Size([4]) for i in range(3)}
    expected_shapes.update({("b", str(i)): torch.Size([5]) for i in range(3)})
    assert tensordict_shapes(by_positive) == expected_shapes
    _assert_tensordict_allclose(by_positive, by_negative)
def test_unbind_values_along_batch_dim():
    """Unbinding along the batch dimension removes it from the batch size."""
    td = TensorDict({"a": torch.rand(2, 3, 4), "b": torch.rand(2, 3, 5)}, batch_size=2)
    expected_shapes = {
        ("a", "0"): torch.Size([3, 4]),
        ("a", "1"): torch.Size([3, 4]),
        ("b", "0"): torch.Size([3, 5]),
        ("b", "1"): torch.Size([3, 5]),
    }

    by_positive = unbind_values(td, dim=0)
    by_negative = unbind_values(td, dim=-3)

    # The batch dim is consumed, so the resulting tensordicts are scalar-batched.
    assert by_positive.batch_size == by_negative.batch_size == torch.Size([])
    assert tensordict_shapes(by_positive) == expected_shapes
    _assert_tensordict_allclose(by_positive, by_negative)
def test_unbind_values_different_dims():
    """Unbinding entries of different ranks: shapes per dim, and invalid dims raise."""
    td = TensorDict({"a": torch.rand(1, 2), "b": torch.rand(1, 2, 3), "c": torch.rand(1, 2, 3, 4)}, batch_size=1)

    result = unbind_values(td, dim=0)
    assert result.batch_size == torch.Size([])
    assert tensordict_shapes(result) == {
        ("a", "0"): torch.Size([2]),
        ("b", "0"): torch.Size([2, 3]),
        ("c", "0"): torch.Size([2, 3, 4]),
    }

    result = unbind_values(td, dim=1)
    assert result.batch_size == torch.Size([1])
    assert tensordict_shapes(result) == {
        ("a", "0"): torch.Size([]),
        ("a", "1"): torch.Size([]),
        ("b", "0"): torch.Size([3]),
        ("b", "1"): torch.Size([3]),
        ("c", "0"): torch.Size([3, 4]),
        ("c", "1"): torch.Size([3, 4]),
    }

    result = unbind_values(td, dim=-1)
    assert result.batch_size == torch.Size([1])
    assert tensordict_shapes(result) == {
        ("a", "0"): torch.Size([]),
        ("a", "1"): torch.Size([]),
        ("b", "0"): torch.Size([2]),
        ("b", "1"): torch.Size([2]),
        ("b", "2"): torch.Size([2]),
        ("c", "0"): torch.Size([2, 3]),
        ("c", "1"): torch.Size([2, 3]),
        ("c", "2"): torch.Size([2, 3]),
        ("c", "3"): torch.Size([2, 3]),
    }

    # We cannot unbind along an axis that will be a batch dimension for at least one samples, when it won't be the same
    # batch dimension for all samples.
    for bad_dim in (-2, -4, 2):
        with pytest.raises(IndexError):
            unbind_values(td, dim=bad_dim)
|
causica/test/datasets/test_tensordict_utils.py/0
|
{
"file_path": "causica/test/datasets/test_tensordict_utils.py",
"repo_id": "causica",
"token_count": 2023
}
| 1,164 |
from pathlib import Path
import mlflow
from causica.lightning.loggers import BufferingMlFlowLogger
def test_buffering_mlflow_logger(tmp_path: Path):
    """Metrics are buffered up to `buffer_size` and flushed when full or on demand."""
    mlflow.set_tracking_uri(tmp_path)
    client = mlflow.tracking.MlflowClient(tracking_uri=str(tmp_path))
    run = client.create_run(experiment_id=client.create_experiment("test"))
    logger = BufferingMlFlowLogger(buffer_size=3, run_id=run.info.run_id, tracking_uri=str(tmp_path))

    logger.log_metrics({"a": 1})
    logger.log_metrics({"a": 2})
    assert logger.get_buffer_count() == 2

    # The third metric fills the buffer and triggers an automatic flush.
    logger.log_metrics({"a": 3})
    assert logger.get_buffer_count() == 0

    logger.log_metrics({"a": 4})
    assert logger.get_buffer_count() == 1
    # An explicit flush drains whatever is buffered.
    logger.flush()
    assert logger.get_buffer_count() == 0
|
causica/test/lightning/test_loggers.py/0
|
{
"file_path": "causica/test/lightning/test_loggers.py",
"repo_id": "causica",
"token_count": 317
}
| 1,165 |
"""Inspired by https://github.com/pygae/clifford"""
import functools
import itertools
import operator
import torch
# copied from the itertools docs
def _powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s) + 1))
class ShortLexBasisBladeOrder:
    """Short-lex (grade-then-lexicographic) ordering of the 2**n basis blades.

    Builds the forward map ``index_to_bitmap``, the per-index ``grades``, and
    the inverse map ``bitmap_to_index`` for an algebra over ``n_vectors`` vectors.
    """

    def __init__(self, n_vectors):
        n_blades = 2**n_vectors
        self.index_to_bitmap = torch.empty(n_blades, dtype=int)
        self.grades = torch.empty(n_blades, dtype=int)
        self.bitmap_to_index = torch.empty(n_blades, dtype=int)

        # Enumerate vector subsets by ascending size, lexicographic within a size.
        subsets = itertools.chain.from_iterable(
            itertools.combinations(range(n_vectors), grade) for grade in range(n_vectors + 1)
        )
        for index, vectors in enumerate(subsets):
            bitmap = 0
            for v in vectors:
                bitmap |= 1 << v
            self.index_to_bitmap[index] = bitmap
            self.grades[index] = len(vectors)
            self.bitmap_to_index[bitmap] = index
def set_bit_indices(x: int):
    """Iterate over the indices of bits set to 1 in `x`, in ascending order"""
    position = 0
    while x > 0:
        if x & 1:
            yield position
        x >>= 1
        position += 1
def count_set_bits(bitmap: int) -> int:
    """Counts the number of bits set to 1 in bitmap"""
    # bin() renders the non-negative bitmap in base 2, so counting "1" digits is
    # a C-level popcount; the original generator loop visited bits one at a time.
    # int() also accepts 0-dim integer tensors, which callers in this file pass.
    return bin(int(bitmap)).count("1")
def canonical_reordering_sign_euclidean(bitmap_a, bitmap_b):
    """
    Computes the sign for the product of bitmap_a and bitmap_b
    assuming a euclidean metric
    """
    # Count the basis-vector swaps needed to bring the product into canonical order.
    shifted = bitmap_a >> 1
    swap_count = 0
    while shifted != 0:
        swap_count += count_set_bits(shifted & bitmap_b)
        shifted = shifted >> 1
    # An even number of swaps leaves the sign positive.
    return 1 if (swap_count & 1) == 0 else -1
def canonical_reordering_sign(bitmap_a, bitmap_b, metric):
    """
    Computes the sign for the product of bitmap_a and bitmap_b
    given the supplied metric
    """
    # Start from the euclidean reordering sign, then fold in the metric factor
    # of every basis vector the two blades share (those pairs contract).
    sign = canonical_reordering_sign_euclidean(bitmap_a, bitmap_b)
    common = bitmap_a & bitmap_b
    vector = 0
    while common != 0:
        if (common & 1) != 0:
            sign *= metric[vector]
        vector = vector + 1
        common = common >> 1
    return sign
def gmt_element(bitmap_a, bitmap_b, sig_array):
    """
    Element of the geometric multiplication table given blades a, b.
    The implementation used here is described in :cite:`ga4cs` chapter 19.
    """
    # The product blade is the symmetric difference of the inputs; the sign
    # accounts for reordering plus the metric signature.
    sign = canonical_reordering_sign(bitmap_a, bitmap_b, sig_array)
    return bitmap_a ^ bitmap_b, sign
def construct_gmt(index_to_bitmap, bitmap_to_index, signature):
    """Construct the geometric multiplication table as a sparse (n, n, n) tensor.

    Args:
        index_to_bitmap: Map from short-lex index to basis-blade bitmap.
        bitmap_to_index: Inverse map from bitmap back to short-lex index.
        signature: Metric signature (one +1/-1 entry per basis vector).

    Returns:
        A ``torch.sparse_coo_tensor`` ``G`` such that multiplying multivectors
        ``a`` and ``b`` contracts through ``G[i, :, j] * a[i] * b[j]``.
    """
    n = len(index_to_bitmap)
    array_length = int(n * n)
    # Sparse COO indices must be int64; the previous uint8 coordinates silently
    # overflowed for algebras with more than 256 basis blades (n_vectors >= 8).
    coords = torch.zeros((3, array_length), dtype=torch.int64)
    k_list = coords[0, :]
    l_list = coords[1, :]
    m_list = coords[2, :]

    # use as small a type as possible to minimize type promotion
    mult_table_vals = torch.zeros(array_length)

    for i in range(n):
        bitmap_i = index_to_bitmap[i]

        for j in range(n):
            bitmap_j = index_to_bitmap[j]
            bitmap_v, mul = gmt_element(bitmap_i, bitmap_j, signature)
            v = bitmap_to_index[bitmap_v]

            list_ind = i * n + j
            k_list[list_ind] = i
            l_list[list_ind] = v
            m_list[list_ind] = j

            mult_table_vals[list_ind] = mul

    return torch.sparse_coo_tensor(indices=coords, values=mult_table_vals, size=(n, n, n))
|
cliffordlayers/cliffordlayers/basisbladeorder.py/0
|
{
"file_path": "cliffordlayers/cliffordlayers/basisbladeorder.py",
"repo_id": "cliffordlayers",
"token_count": 1575
}
| 1,166 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional, Union
import torch
import torch.nn as nn
from .batchnorm import clifford_batch_norm, complex_batch_norm
def complex_group_norm(
    x: torch.Tensor,
    num_groups: int = 1,
    running_mean: Optional[torch.Tensor] = None,
    running_cov: Optional[torch.Tensor] = None,
    weight: Optional[Union[torch.Tensor, nn.Parameter]] = None,
    bias: Optional[Union[torch.Tensor, nn.Parameter]] = None,
    training: bool = True,
    momentum: float = 0.1,
    eps: float = 1e-05,
):
    """Group normalization for complex-valued tensors.

    Args:
        x (torch.Tensor): The input complex-valued data is expected to be at least 2d, with
            shape `(B, C, *D)`, where `B` is the batch dimension, `C` the
            channels/features, and *D the remaining dimensions (if present).
        num_groups (int): Number of groups for which normalization is calculated. Defaults to 1.
            For `num_groups == 1`, it effectively applies complex-valued layer normalization;
            for `num_groups == C`, it effectively applies complex-valued instance normalization.
        running_mean (torch.Tensor, optional): The tensor with running mean statistics having shape `(2, C / num_groups)`. Defaults to None.
        running_cov (torch.Tensor, optional): The tensor with running real-imaginary covariance statistics having shape `(2, 2, C / num_groups)`. Defaults to None.
        weight (Union[torch.Tensor, nn.Parameter], optional): Additional weight tensor which is applied post normalization, and has the shape `(2, 2, C/ num_groups)`. Defaults to None.
        bias (Union[torch.Tensor, nn.Parameter], optional): Additional bias tensor which is applied post normalization, and has the shape `(2, C / num_groups)`. Defaults to None.
        training (bool, optional): Whether to use the running mean and variance. Defaults to True.
        momentum (float, optional): Momentum for the running mean and variance. Defaults to 0.1.
        eps (float, optional): Epsilon for the running mean and variance. Defaults to 1e-05.

    Returns:
        (torch.Tensor): Normalized input as complex tensor of shape `(B, C, *D)`.
    """
    # Check arguments.
    # Mean/cov buffers and weight/bias must be supplied as pairs or not at all.
    assert (running_mean is None and running_cov is None) or (running_mean is not None and running_cov is not None)
    assert (weight is None and bias is None) or (weight is not None and bias is not None)

    B, C, *D = x.shape
    assert C % num_groups == 0, "Number of channels should be evenly divisible by the number of groups."
    assert num_groups <= C
    if weight is not None and bias is not None:
        # Check if weight and bias tensors are of correct shape.
        assert weight.shape == (2, 2, int(C / num_groups))
        assert bias.shape == (2, int(C / num_groups))
        # Tile the affine parameters across the batch: after the reshape below
        # the effective channel axis has size B * C / num_groups.
        weight = weight.repeat(1, 1, B)
        bias = bias.repeat(1, B)

    def _instance_norm(
        x,
        num_groups,
        running_mean,
        running_cov,
        weight,
        bias,
        training,
        momentum,
        eps,
    ):
        if running_mean is not None and running_cov is not None:
            assert running_mean.shape == (2, int(C / num_groups))
            # Keep handles on the caller's buffers: the tiled copies used below
            # are averaged back into them after normalization.
            running_mean_orig = running_mean
            running_mean = running_mean_orig.repeat(1, B)
            assert running_cov.shape == (2, 2, int(C / num_groups))
            running_cov_orig = running_cov
            running_cov = running_cov_orig.repeat(1, 1, B)

        # Reshape such that batch normalization can be applied.
        # For num_groups == 1, it defaults to layer normalization,
        # for num_groups == C, it defaults to instance normalization.
        x_reshaped = x.view(1, int(B * C / num_groups), num_groups, *D)

        x_norm = complex_batch_norm(
            x_reshaped,
            running_mean,
            running_cov,
            weight=weight,
            bias=bias,
            training=training,
            momentum=momentum,
            eps=eps,
        )

        # Reshape back running mean and running var.
        # Batch replicas live on dim 1 of the (2, B, C/num_groups) mean view and
        # dim 2 of the (2, 2, B, C/num_groups) covariance view.
        if running_mean is not None:
            running_mean_orig.copy_(running_mean.view(2, B, int(C / num_groups)).mean(1, keepdim=False))
        if running_cov is not None:
            running_cov_orig.copy_(running_cov.view(2, 2, B, int(C / num_groups)).mean(2, keepdim=False))

        return x_norm.view(B, C, *D)

    return _instance_norm(
        x,
        num_groups,
        running_mean,
        running_cov,
        weight=weight,
        bias=bias,
        training=training,
        momentum=momentum,
        eps=eps,
    )
def clifford_group_norm(
    x: torch.Tensor,
    n_blades: int,
    num_groups: int = 1,
    running_mean: Optional[torch.Tensor] = None,
    running_cov: Optional[torch.Tensor] = None,
    weight: Optional[Union[torch.Tensor, nn.Parameter]] = None,
    bias: Optional[Union[torch.Tensor, nn.Parameter]] = None,
    training: bool = True,
    momentum: float = 0.1,
    eps: float = 1e-05,
) -> torch.Tensor:
    """Clifford group normalization

    Args:
        x (torch.Tensor): Input tensor of shape `(B, C, *D, I)` where I is the blade of the algebra.

        n_blades (int): Number of blades of the Clifford algebra.

        num_groups (int): Number of groups for which normalization is calculated. Defaults to 1.
            For `num_groups == 1`, it effectively applies Clifford layer normalization, for `num_groups == C`, it effectively applies Clifford instance normalization.

        running_mean (torch.Tensor, optional): The tensor with running mean statistics having shape `(I, C / num_groups)`. Defaults to None.
        running_cov (torch.Tensor, optional): The tensor with running real-imaginary covariance statistics having shape `(I, I, C / num_groups)`. Defaults to None.

        weight (Union[torch.Tensor, nn.Parameter], optional): Additional weight tensor which is applied post normalization, and has the shape `(I, I, C / num_groups)`. Defaults to None.

        bias (Union[torch.Tensor, nn.Parameter], optional): Additional bias tensor which is applied post normalization, and has the shape `(I, C / num_groups)`. Defaults to None.

        training (bool, optional): Whether to use the running mean and variance. Defaults to True.
        momentum (float, optional): Momentum for the running mean and variance. Defaults to 0.1.
        eps (float, optional): Epsilon for the running mean and variance. Defaults to 1e-05.

    Returns:
        (torch.Tensor): Group normalized input of shape `(B, C, *D, I)`.
    """
    # Check arguments.
    # Mean/cov buffers and weight/bias must be supplied as pairs or not at all.
    assert (running_mean is None and running_cov is None) or (running_mean is not None and running_cov is not None)
    assert (weight is None and bias is None) or (weight is not None and bias is not None)

    B, C, *D, I = x.shape
    assert num_groups <= C
    assert C % num_groups == 0, "Number of channels should be evenly divisible by the number of groups."
    assert I == n_blades
    if weight is not None and bias is not None:
        # Check if weight and bias tensors are of correct shape.
        assert weight.shape == (I, I, int(C / num_groups))
        assert bias.shape == (I, int(C / num_groups))
        # Tile the affine parameters across the batch: after the reshape below
        # the effective channel axis has size B * C / num_groups.
        weight = weight.repeat(1, 1, B)
        bias = bias.repeat(1, B)

    def _instance_norm(
        x,
        num_groups,
        running_mean,
        running_cov,
        weight,
        bias,
        training,
        momentum,
        eps,
    ):
        if running_mean is not None and running_cov is not None:
            assert running_mean.shape == (I, int(C / num_groups))
            # Keep handles on the caller's buffers: the tiled copies used below
            # are averaged back into them after normalization.
            running_mean_orig = running_mean
            running_mean = running_mean_orig.repeat(1, B)
            assert running_cov.shape == (I, I, int(C / num_groups))
            running_cov_orig = running_cov
            running_cov = running_cov_orig.repeat(1, 1, B)

        # Reshape such that batch normalization can be applied.
        # For num_groups == 1, it defaults to layer normalization,
        # for num_groups == C, it defaults to instance normalization.
        x_reshaped = x.reshape(1, int(B * C / num_groups), num_groups, *D, I)

        x_norm = clifford_batch_norm(
            x_reshaped,
            n_blades,
            running_mean,
            running_cov,
            weight,
            bias,
            training,
            momentum,
            eps,
        )

        # Reshape back running mean and running var.
        if running_mean is not None:
            running_mean_orig.copy_(running_mean.view(I, B, int(C / num_groups)).mean(1, keepdim=False))
        if running_cov is not None:
            # Average over the batch replicas, which sit on dim 2 of the
            # (I, I, B, C/num_groups) view — mirroring `complex_group_norm`.
            # The previous `.mean(1, ...)` collapsed a blade dimension instead,
            # yielding a tensor incompatible with `running_cov_orig`'s
            # (I, I, C/num_groups) shape.
            running_cov_orig.copy_(running_cov.view(I, I, B, int(C / num_groups)).mean(2, keepdim=False))

        return x_norm.view(B, C, *D, I)

    return _instance_norm(
        x,
        num_groups,
        running_mean,
        running_cov,
        weight,
        bias,
        training,
        momentum,
        eps,
    )
|
cliffordlayers/cliffordlayers/nn/functional/groupnorm.py/0
|
{
"file_path": "cliffordlayers/cliffordlayers/nn/functional/groupnorm.py",
"repo_id": "cliffordlayers",
"token_count": 3745
}
| 1,167 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn.functional as F
from cliffordlayers.models.utils import partialclass
from cliffordlayers.models.basic.twod import (
CliffordFluidNet2d,
CliffordBasicBlock2d,
CliffordFourierBasicBlock2d,
)
def test_clifford_resnet():
    """Test shape compatibility of CliffordFluidNet2d ResNet model."""
    in_channels, out_channels = 4, 1
    inputs = torch.randn(8, in_channels, 128, 128, 3)
    model = CliffordFluidNet2d(
        g=[1, 1],
        block=CliffordBasicBlock2d,
        num_blocks=[2, 2, 2, 2],
        in_channels=in_channels,
        out_channels=out_channels,
        hidden_channels=32,
        activation=F.gelu,
        norm=False,
        rotation=False,
    )
    if torch.cuda.is_available():
        inputs = inputs.to("cuda")
        model = model.to("cuda")
    assert model(inputs).shape == (8, out_channels, 128, 128, 3)
def test_clifford_resnet_norm():
    """Test shape compatibility of CliffordFluidNet2d ResNet model using normalization."""
    in_channels, out_channels = 4, 1
    inputs = torch.randn(8, in_channels, 128, 128, 3)
    model = CliffordFluidNet2d(
        g=[1, 1],
        block=CliffordBasicBlock2d,
        num_blocks=[2, 2, 2, 2],
        in_channels=in_channels,
        out_channels=out_channels,
        hidden_channels=32,
        activation=F.gelu,
        norm=True,
        rotation=False,
    )
    if torch.cuda.is_available():
        inputs = inputs.to("cuda")
        model = model.to("cuda")
    assert model(inputs).shape == (8, out_channels, 128, 128, 3)
def test_clifford_rotational_resnet_norm():
    """Test shape compatibility of CliffordFluidNet2d rotational ResNet model using normalization."""
    in_channels, out_channels = 4, 1
    inputs = torch.randn(8, in_channels, 128, 128, 3)
    model = CliffordFluidNet2d(
        g=[-1, -1],
        block=CliffordBasicBlock2d,
        num_blocks=[2, 2, 2, 2],
        in_channels=in_channels,
        out_channels=out_channels,
        hidden_channels=32,
        activation=F.gelu,
        norm=True,
        rotation=True,
    )
    if torch.cuda.is_available():
        inputs = inputs.to("cuda")
        model = model.to("cuda")
    assert model(inputs).shape == (8, out_channels, 128, 128, 3)
def test_clifford_fourier_net():
    """Test shape compatibility of CliffordFluidNet2d Fourier model."""
    in_channels, out_channels = 4, 1
    inputs = torch.randn(8, in_channels, 128, 128, 3)
    model = CliffordFluidNet2d(
        g=[1, 1],
        block=partialclass("CliffordFourierBasicBlock2d", CliffordFourierBasicBlock2d, modes1=32, modes2=32),
        num_blocks=[1, 1, 1, 1],
        in_channels=in_channels,
        out_channels=out_channels,
        hidden_channels=32,
        activation=F.gelu,
        norm=False,
        rotation=False,
    )
    if torch.cuda.is_available():
        inputs = inputs.to("cuda")
        model = model.to("cuda")
    assert model(inputs).shape == (8, out_channels, 128, 128, 3)
def test_clifford_fourier_net_norm():
    """Test shape compatibility of CliffordFluidNet2d Fourier model using normalization."""
    in_channels, out_channels = 4, 1
    inputs = torch.randn(8, in_channels, 128, 128, 3)
    model = CliffordFluidNet2d(
        g=[1, 1],
        block=partialclass("CliffordFourierBasicBlock2d", CliffordFourierBasicBlock2d, modes1=32, modes2=32),
        num_blocks=[1, 1, 1, 1],
        in_channels=in_channels,
        out_channels=out_channels,
        hidden_channels=32,
        activation=F.gelu,
        norm=True,
        rotation=False,
    )
    if torch.cuda.is_available():
        inputs = inputs.to("cuda")
        model = model.to("cuda")
    assert model(inputs).shape == (8, out_channels, 128, 128, 3)
def test_clifford_fourier_rotational_net_norm():
    """Test shapes compatibility of CliffordFluidNet2d Fourier model using normalization (and rotation)."""
    in_channels, out_channels = 4, 1
    inputs = torch.randn(8, in_channels, 128, 128, 3)
    model = CliffordFluidNet2d(
        g=[-1, -1],
        block=partialclass("CliffordFourierBasicBlock2d", CliffordFourierBasicBlock2d, modes1=32, modes2=32),
        num_blocks=[1, 1, 1, 1],
        in_channels=in_channels,
        out_channels=out_channels,
        hidden_channels=32,
        activation=F.gelu,
        norm=True,
        rotation=True,
    )
    if torch.cuda.is_available():
        inputs = inputs.to("cuda")
        model = model.to("cuda")
    assert model(inputs).shape == (8, out_channels, 128, 128, 3)
|
cliffordlayers/tests/test_CliffordFluidNet2d.py/0
|
{
"file_path": "cliffordlayers/tests/test_CliffordFluidNet2d.py",
"repo_id": "cliffordlayers",
"token_count": 2186
}
| 1,168 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from starlette.testclient import TestClient
from app.api import app
def test_docs_redirect():
    """The root URL should redirect (302) to the interactive API docs."""
    response = TestClient(app).get("/")
    assert response.history[0].status_code == 302
    assert response.status_code == 200
    assert response.url == "http://testserver/docs"
def test_api():
    """The /spacy_entities skill returns deduplicated, sorted entities for a record."""
    client = TestClient(app)
    text = """But Google is starting from behind. The company made a late push
    into hardware, and Apple's Siri, available on iPhones, and Amazon's Alexa
    software, which runs on its Echo and Dot devices, have clear leads in
    consumer adoption."""
    request_data = {
        "values": [{"recordId": "a1", "data": {"text": text, "language": "en"}}]
    }
    response = client.post("/spacy_entities", json=request_data)
    assert response.status_code == 200
    first_record = response.json()["values"][0]
    assert first_record["recordId"] == "a1"
    # `is None` rather than `== None` (PEP 8 / flake8 E711).
    assert first_record["errors"] is None
    assert first_record["warnings"] is None
    assert first_record["data"]["entities"] == [
        "Alexa",
        "Amazon",
        "Apple",
        "Echo and Dot",
        "Google",
        "iPhones",
        "Siri",
    ]
|
cookiecutter-spacy-fastapi/{{cookiecutter.project_slug}}/app/tests/test_api.py/0
|
{
"file_path": "cookiecutter-spacy-fastapi/{{cookiecutter.project_slug}}/app/tests/test_api.py",
"repo_id": "cookiecutter-spacy-fastapi",
"token_count": 455
}
| 1,169 |
# This file is in the root folder for VS Code.
# Abstract contracts do not work yet: https://github.com/duaraghav8/Ethlint/issues/281
# grep -R --files-with-matches '^abstract contract' client/src/contracts/
client/src/contracts/classification/Classifier.sol
client/src/contracts/incentive/IncentiveMechanism.sol
|
0xDeCA10B/demo/.soliumignore/0
|
{
"file_path": "0xDeCA10B/demo/.soliumignore",
"repo_id": "0xDeCA10B",
"token_count": 100
}
| 0 |
{
"short_name": "React App",
"name": "Create React App Sample",
"icons": [
{
"src": "favicon.ico",
"sizes": "64x64 32x32 24x24 16x16",
"type": "image/x-icon"
}
],
"start_url": "./index.html",
"display": "standalone",
"theme_color": "#000000",
"background_color": "#ffffff"
}
|
0xDeCA10B/demo/client/public/manifest.json/0
|
{
"file_path": "0xDeCA10B/demo/client/public/manifest.json",
"repo_id": "0xDeCA10B",
"token_count": 145
}
| 1 |
pragma solidity ^0.6;
pragma experimental ABIEncoderV2;
import "../../../lib/Math.sol";
import {Classifier64} from "./Classifier.sol";
/**
* A nearest centroid classifier that uses Euclidean distance to predict the closest centroid.
*
* https://en.wikipedia.org/wiki/Nearest_centroid_classifier
*/
contract NearestCentroidClassifier is Classifier64 {
    /** A class has been added. */
    event AddClass(
        /** The name of the class. */
        string name,
        /** The index for the class in the members of this classifier. */
        uint index
    );

    uint256 constant public UINT256_MAX = ~uint256(0);
    // Cap on per-class sample counts so the moving-average update cannot overflow.
    uint256 constant public dataCountLimit = 2 ** (256 - 64 - 1);

    // One centroid vector per class, indexed in parallel with `classifications`.
    int64[][] public centroids;
    // Number of samples that contributed to each centroid.
    uint[] public dataCounts;

    constructor(
        string[] memory _classifications,
        int64[][] memory _centroids,
        uint[] memory _dataCounts)
        Classifier64(_classifications) public {
        require(_centroids.length == _classifications.length, "The number of centroids and classifications must be the same.");
        require(_classifications.length > 0, "At least one class is required.");
        require(_classifications.length < 2 ** 64, "Too many classes given.");
        centroids = _centroids;
        dataCounts = _dataCounts;
        // All centroids must share the dimensionality of the first one.
        uint dimensions = centroids[0].length;
        for (uint i = 1; i < centroids.length; ++i) {
            require(centroids[i].length == dimensions, "Inconsistent number of dimensions.");
        }
    }

    /**
     * Extend the number of dimensions of a centroid.
     * Made to be called just after the contract is created and never again.
     * @param extension The values to append to a centroid vector.
     * @param classification The class to add the extension to.
     */
    function extendCentroid(int64[] memory extension, uint64 classification) public onlyOwner {
        require(classification < classifications.length, "This classification has not been added yet.");
        require(centroids[classification].length + extension.length < 2 ** 63, "Centroid would be too long.");
        for (uint i = 0; i < extension.length; ++i) {
            centroids[classification].push(extension[i]);
        }
    }

    // Register a new class with its initial centroid and sample count.
    function addClass(int64[] memory centroid, string memory classification, uint dataCount) public onlyOwner {
        require(classifications.length + 1 < 2 ** 64, "There are too many classes already.");
        require(centroid.length == centroids[0].length, "Data doesn't have the correct number of dimensions.");
        require(dataCount < dataCountLimit, "Data count is too large.");
        classifications.push(classification);
        centroids.push(centroid);
        dataCounts.push(dataCount);
        emit AddClass(classification, classifications.length - 1);
    }

    // Euclidean norm of `data` in the fixed-point representation.
    function norm(int64[] memory data) public override pure returns (uint result) {
        result = 0;
        for (uint i = 0; i < data.length; ++i) {
            result = result.add(uint(int128(data[i]) * data[i]));
        }
        result = Math.sqrt(result);
    }

    // Predict the class whose centroid is closest (squared Euclidean distance).
    function predict(int64[] memory data) public override view returns (uint64 bestClass) {
        require(data.length == centroids[0].length, "Data doesn't have the correct length.");
        uint minDistance = UINT256_MAX;
        bestClass = 0;
        for (uint64 currentClass = 0; currentClass < centroids.length; ++currentClass) {
            uint distance = 0;
            for (uint j = 0; j < data.length; ++j) {
                // Safe calculation because both values are int64.
                int diff = data[j];
                diff -= centroids[currentClass][j];
                diff *= diff;
                // Convert back to our float representation.
                diff /= toFloat;
                distance = distance.add(uint(diff));
                // Early exit: this class can no longer beat the current best.
                if (distance >= minDistance) {
                    break;
                }
            }
            if (distance < minDistance) {
                minDistance = distance;
                bestClass = currentClass;
            }
        }
    }

    // Fold one (unit-norm) sample into the class centroid via a moving average.
    function update(int64[] memory data, uint64 classification) public override onlyOwner {
        require(data.length == centroids[classification].length, "Data doesn't have the correct number of dimensions.");
        require(classification < classifications.length, "Classification is out of bounds.");
        int64[] memory centroid = centroids[classification];
        uint n = dataCounts[classification];
        uint newN;
        // Keep n small enough for multiplication.
        if (n >= dataCountLimit) {
            newN = dataCounts[classification];
        } else {
            newN = dataCounts[classification] + 1;
            dataCounts[classification] = newN;
        }

        // Update centroid using moving average calculation.
        int64[] memory c = new int64[](data.length);
        uint _norm = 0;
        for (uint j = 0; j < data.length; ++j) {
            int128 datum = int128(data[j]);
            _norm = _norm.add(uint(datum * datum));
            c[j] = int64((int(centroid[j]) * int(n) + datum) / int(newN));
        }
        centroids[classification] = c;

        // Must be almost within `toFloat` of `toFloat*toFloat` because we only care about the first `toFloat` digits.
        uint oneSquared = uint(toFloat).mul(toFloat);
        uint offset = uint(toFloat) * 100;
        require(oneSquared - offset < _norm && _norm < oneSquared + offset, "The provided data does not have a norm of 1.");
    }

    // Useful methods to view the underlying data:
    // To match the `SparseNearestCentroidClassifier`.
    // These methods are not really needed now but they are added in case the implementation of the class
    // changes later after some gas cost analysis.
    function getNumSamples(uint classIndex) public view returns (uint) {
        return dataCounts[classIndex];
    }

    function getCentroidValue(uint classIndex, uint featureIndex) public view returns (int64) {
        return centroids[classIndex][featureIndex];
    }
}
|
0xDeCA10B/demo/client/src/contracts/classification/NearestCentroidClassifier.sol/0
|
{
"file_path": "0xDeCA10B/demo/client/src/contracts/classification/NearestCentroidClassifier.sol",
"repo_id": "0xDeCA10B",
"token_count": 2352
}
| 2 |
global.fetch = require('node-fetch')
const UniversalSentenceEncoder = require('@tensorflow-models/universal-sentence-encoder')
const tf = require('@tensorflow/tfjs')
const fs = require('fs')
const path = require('path')
// Location of the SNIPS NLU benchmark data used to build the centroids.
const dataPath = '../../../nlu-benchmark/2017-06-custom-intent-engines'

// Map from dataset folder name to the deployed intent label.
const intents = {
    'GetWeather': "WEATHER_GET",
    'PlayMusic': "MUSIC_PLAY",
}

// Re-train with all data before deploying.
const trainSplit = 1
// Normalize each sample like what will happen in production to avoid changing the centroid by too much.
const normalizeEachEmbedding = true
// Normalizing the centroid didn't change performance.
const normalizeCentroid = false

// Persistent embedding cache keyed by query text, loaded from disk if present.
let embeddingCache
const embeddingCachePath = path.join(__dirname, 'embedding_cache.json')
if (fs.existsSync(embeddingCachePath)) {
    embeddingCache = fs.readFileSync(embeddingCachePath, 'utf8')
    embeddingCache = JSON.parse(embeddingCache)
} else {
    embeddingCache = {}
}
// Return the (optionally L2-normalized) sentence embedding for `query`,
// consulting the in-memory cache first.
// NOTE(review): `sentenceEncoder` is declared inside main() below, so a cache
// miss here would throw a ReferenceError at module scope — confirm that the
// intended call sites (predict/evaluate) only run with a warm cache.
async function getEmbedding(query) {
    let result = embeddingCache[query]
    if (result !== undefined) {
        // Cache stores plain arrays; rebuild the tensor.
        result = tf.tensor1d(result)
    } else {
        const newEmbeddings = await sentenceEncoder.embed([query])
        result = newEmbeddings.gather(0)
        embeddingCache[query] = result.arraySync()
    }
    if (normalizeEachEmbedding) {
        result = normalize1d(result)
    }
    return result
}
// L2-normalize a rank-1 tensor.
function normalize1d(x) {
    return x.div(tf.norm(x, 2))
}
// L2-normalize each row of a rank-2 tensor.
function normalize2d(x) {
    const rowNorms = tf.norm(x, 2, 1).expandDims(1)
    return x.div(rowNorms)
}
// Nearest-centroid classification: return the intent whose centroid has the
// smallest squared Euclidean distance to the query embedding.
async function predict(model, query) {
    let minDistance = Number.MAX_VALUE
    let result
    const emb = await getEmbedding(query)
    Object.entries(model).forEach(([intent, centroidInfo]) => {
        const centroid = tf.tensor1d(centroidInfo.centroid)
        const distance = centroid.sub(emb).pow(2).sum()
        // NOTE(review): after the first improvement, `minDistance` holds a tf
        // scalar rather than a JS number; `less` accepts both. Intermediate
        // tensors are never dispose()d — confirm this is acceptable here.
        if (distance.less(minDistance).dataSync()[0]) {
            result = intent
            minDistance = distance
        }
    })
    return result
}
// Hold-out evaluation of the centroid model: computes per-intent recall and a
// confusion breakdown. A no-op when all data was used for training.
async function evaluate(intents, model) {
    if (trainSplit == 1) {
        return
    }
    const evalStats = []
    const evalIntents = Object.entries(intents)
    for (let i = 0; i < evalIntents.length; ++i) {
        const [intent, expectedIntent] = evalIntents[i]
        const filename = `train_${intent}_full.json`
        let data = fs.readFileSync(path.join(dataPath, intent, filename), 'utf8')
        data = JSON.parse(data)[intent]
        // Evaluate only on the tail split that was held out from training.
        data = data.slice(Math.round(trainSplit * data.length))
        const queries = data.map(datum => datum.data.map(o => o.text).join(""))
        console.log(`${expectedIntent}: Evaluating with ${queries.length} samples.`)
        const stats = {
            intent: expectedIntent,
            recall: undefined,
            numCorrect: 0,
            confusion: {},
        }
        // Note: this inner `i` intentionally shadows the outer loop index (block-scoped `let`).
        for (let i = 0; i < queries.length; ++i) {
            const query = queries[i]
            const prediction = await predict(model, query)
            if (prediction === expectedIntent) {
                stats.numCorrect += 1
            } else {
                // Track how often each wrong intent was predicted.
                if (!(prediction in stats.confusion)) {
                    stats.confusion[prediction] = 0
                }
                stats.confusion[prediction] += 1
            }
        }
        stats.recall = stats.numCorrect / queries.length
        evalStats.push(stats)
    }
    console.log(`normalizeEachEmbedding: ${normalizeEachEmbedding}`)
    console.log(`normalizeCentroid: ${normalizeCentroid}`)
    console.log(JSON.stringify(evalStats, null, 2))
}
/**
 * Train the centroid classifier: embed each intent's training queries,
 * average them into a centroid, save the centroids, then evaluate and
 * persist the embedding cache.
 */
async function main() {
	const sentenceEncoder = await UniversalSentenceEncoder.load()
	// The code for the encoder gives too many warnings.
	tf.disableDeprecationWarnings()

	/**
	 * Compute the centroid of the embeddings of one intent's training queries.
	 *
	 * @param {string} intent The intent's dataset key.
	 * @returns {Promise<{centroid: number[], dataCount: number}>}
	 */
	async function getCentroid(intent) {
		const filename = `train_${intent}_full.json`
		let data = fs.readFileSync(path.join(dataPath, intent, filename), 'utf8')
		data = JSON.parse(data)[intent]
		console.log(`Found ${data.length} samples for ${intents[intent]}.`)
		if (trainSplit < 1) {
			data = data.slice(0, Math.round(trainSplit * data.length))
		}
		const queries = data.map(datum => datum.data.map(o => o.text).join(""))
		console.log(` Training with ${queries.length} samples.`)
		const chunkSize = 128
		const allEmbeddings = []
		for (let i = 0; i < queries.length; i += chunkSize) {
			console.log(` ${intents[intent]}: ${(100 * i / queries.length).toFixed(1)}% (${i}/${queries.length})`)
			// Serve cached embeddings immediately; batch the rest through the encoder.
			const queriesNeedingEmbedding = []
			const currentQueries = queries.slice(i, i + chunkSize)
			for (let j = 0; j < currentQueries.length; ++j) {
				const query = currentQueries[j]
				let emb = embeddingCache[query]
				if (emb !== undefined) {
					emb = tf.tensor2d([emb])
					if (normalizeEachEmbedding) {
						emb = normalize2d(emb)
					}
					allEmbeddings.push(emb)
				} else {
					queriesNeedingEmbedding.push(query)
				}
			}
			if (queriesNeedingEmbedding.length > 0) {
				let newEmbeddings = await sentenceEncoder.embed(queriesNeedingEmbedding)
				const n = newEmbeddings.arraySync()
				for (let j = 0; j < queriesNeedingEmbedding.length; ++j) {
					embeddingCache[queriesNeedingEmbedding[j]] = n[j]
				}
				if (normalizeEachEmbedding) {
					newEmbeddings = normalize2d(newEmbeddings)
				}
				allEmbeddings.push(newEmbeddings)
			}
		}
		const allEmbTensor = tf.concat(allEmbeddings)
		if (allEmbTensor.shape[0] !== queries.length) {
			// `Error`, not `Exception`: JavaScript has no built-in `Exception`,
			// so the original `new Exception(...)` would itself throw a ReferenceError.
			throw new Error(`Some embeddings are missing: allEmbTensor.shape[0] !== queries.length: ${allEmbTensor.shape[0]} !== ${queries.length}`)
		}
		// `mean(0)`: JS has no keyword arguments; the original `mean(axis = 0)`
		// assigned an implicit global `axis` as a side effect and just passed 0.
		let centroid = allEmbTensor.mean(0)
		if (normalizeCentroid) {
			centroid = normalize1d(centroid)
		}
		return {
			centroid: centroid.arraySync(),
			dataCount: queries.length,
		}
	}

	Promise.all(Object.keys(intents).map(getCentroid))
		.then(async centroidInfos => {
			const model = {}
			Object.values(intents).forEach((intent, i) => {
				model[intent] = centroidInfos[i]
			})
			// Renamed from `path` so the `path` module is not shadowed here.
			const centroidsPath = `${__dirname}/vpa-classifier-centroids.json`
			console.log(`Saving centroids to "${centroidsPath}".`)
			fs.writeFileSync(centroidsPath, JSON.stringify(model))
			evaluate(intents, model)
			fs.writeFile(embeddingCachePath, JSON.stringify(embeddingCache), (err) => {
				if (err) {
					console.error("Error writing embedding cache.")
					console.error(err)
				} else {
					console.debug(`Wrote embedding cache to \"${embeddingCachePath}\".`)
				}
			})
		})
}
main()
|
0xDeCA10B/demo/client/src/ml-models/vpa/train-classifier.js/0
|
{
"file_path": "0xDeCA10B/demo/client/src/ml-models/vpa/train-classifier.js",
"repo_id": "0xDeCA10B",
"token_count": 2351
}
| 3 |
This folder is just for smart contract tests and is used by Truffle.
Other test files go in a `__tests__` folder in the folder with the source code (within the `src/` folder).
`custom-test-env.js` is not a test file.
It is a custom environment for Jest and is in this folder so that it is not picked up by default test file pattern rules.
|
0xDeCA10B/demo/client/test/README.md/0
|
{
"file_path": "0xDeCA10B/demo/client/test/README.md",
"repo_id": "0xDeCA10B",
"token_count": 91
}
| 4 |
# Fail fast on the first error.
set -e

# Pass `--fix` as the first argument to auto-repair what the linters can fix;
# otherwise run in check-only mode.
if [ "${1}" == "--fix" ]; then
	is_fix="true"
	# Not ready yet:
	# eslint . --fix --ext .js,.jsx,.ts,.tsx
	# Some files are ready.
	eslint --fix server.js
	eslint . --fix --ext .ts,.tsx
	solium --dir client/src/contracts/ --fix
else
	# Not ready yet:
	# eslint . --ext .js,.jsx,.ts,.tsx
	# Some files are ready.
	eslint server.js
	eslint . --ext .ts,.tsx
	solium --dir client/src/contracts/ --fix-dry-run
fi

# Enforce single-quoted import paths in JS/TS sources.
# `|| true` because `git grep` exits non-zero when nothing matches.
matches=`git grep --name-only '^import .* "' -- *.{js,jsx,ts,tsx}` || true
if [ "${matches}" != "" ]; then
	if [ "${is_fix}" == "true" ]; then
		# Normally I don't like double quotes in strings for machines to read
		# but I couldn't get it working with single quotes.
		sed -i -r -e "s/^(import[^\"]*)\"([^\"]*)\"/\1'\2'/g" ${matches}
	else
		>&2 echo -e "Imports should use single quotes. Check:\n${matches}"
		exit 1
	fi
fi
|
0xDeCA10B/demo/scripts/custom_lint.sh/0
|
{
"file_path": "0xDeCA10B/demo/scripts/custom_lint.sh",
"repo_id": "0xDeCA10B",
"token_count": 353
}
| 5 |
from dataclasses import dataclass, field
from logging import Logger
from typing import Dict
from injector import inject, singleton
from decai.simulation.contract.objects import Address
@inject
@singleton
@dataclass
class Balances(object):
    """
    Tracks balances in the simulation.
    """

    _logger: Logger
    _balances: Dict[Address, float] = field(default_factory=dict, init=False)

    def __contains__(self, address: Address):
        """
        Tell whether `address` currently holds a balance in the simulation.

        :param address: A participant's address.
        :return: `True` if the address is in the simulation, `False` otherwise.
        """
        return address in self._balances

    def __getitem__(self, address: Address) -> float:
        """
        Look up the balance held by `address`.

        :param address: A participant's address.
        :return: The balance for `address`.
        """
        return self._balances[address]

    def get_all(self) -> Dict[Address, float]:
        """
        :return: A shallow copy of every tracked balance.
        """
        return dict(self._balances)

    def initialize(self, address: Address, start_balance: float):
        """ Give `address` its starting balance; it must not already have one. """
        assert address not in self._balances, f"'{address}' already has a balance."
        self._balances[address] = start_balance

    def send(self, sending_address: Address, receiving_address: Address, amount):
        """ Move `amount` from one participant to another, clamping to the sender's funds. """
        assert amount >= 0
        if amount == 0:
            return
        available = self._balances[sending_address]
        if available < amount:
            # Best effort: warn and send whatever the sender actually has.
            self._logger.warning(f"'{sending_address} has {available} < {amount}.\n"
                                 f"Will only send {available}.")
            amount = available
        self._balances[sending_address] -= amount
        if receiving_address in self._balances:
            self._balances[receiving_address] += amount
        else:
            self.initialize(receiving_address, amount)
|
0xDeCA10B/simulation/decai/simulation/contract/balances.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/contract/balances.py",
"repo_id": "0xDeCA10B",
"token_count": 805
}
| 6 |
import random
from collections import Counter, defaultdict
from dataclasses import dataclass, field
from enum import Enum
from hashlib import sha256
from logging import Logger
from typing import Dict, List, Optional, Tuple
import math
import numpy as np
from injector import ClassAssistedBuilder, inject, Module, provider, singleton
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.data.data_handler import StoredData
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.objects import Address, Msg, RejectException, TimeMock
class MarketPhase(Enum):
    """ Chronological phases of the prediction market's lifecycle. """

    # Being set up; waiting for the requested test-set portion to be revealed.
    INITIALIZATION = 0
    # Open to data contributions.
    PARTICIPATION = 1
    # Contributions closed; the test set must be revealed before rewards can be calculated.
    REVEAL_TEST_SET = 2
    # No more contributions accepted; rewards are being calculated.
    REWARD = 3
    # Same as REWARD but contributions were just filtered out and the
    # iteration restarts over the remaining contributions.
    REWARD_RESTART = 4
    # Reward values are final and ready to be collected.
    REWARD_COLLECT = 5
@dataclass
class _Contribution:
    """
    A contribution to train data.

    This is stored for convenience but for some applications, storing the data could be very expensive,
    instead, hashes could be stored and during the reward phase,
    the hash can be used to verify data as data is re-submitted.
    Note: this is not in the spirit of the prediction market (the current state should be public)
    since the model would not actually be updated and the submitted data would be private
    so new data contributors have very limited information.
    """

    # The address of the participant that submitted this sample.
    contributor_address: Address
    # The sample's features. (Annotation corrected: `np.ndarray` is the array
    # type; `np.array` is the factory function.)
    data: np.ndarray
    # The label the contributor claims for `data`.
    classification: int

    balance: int
    """
    Initially this is the amount deposited with this contribution.
    If contributions are not grouped by contributor, then while calculating rewards this gets updated to be the balance
    for this particular contribution, to know if it should get kicked out of the reward phase.
    """

    score: Optional[int] = field(default=None, init=False)
    """
    The score for this contribution.
    Mainly used for when contributions are not grouped.
    """

    accuracy: Optional[float] = field(default=None, init=False)
    """ The accuracy of the model on the test set after adding this contribution. """
class PredictionMarket(IncentiveMechanism):
    """
    An IM where rewards are computed based on how the model's performance changes with respect to a test set.

    For now, for the purposes of the simulation, the market is only intended to be run once.
    Eventually this class and the actual smart contract implementation of it
    should support restarting the market with a new bounty once a market has ended.
    """

    @inject
    def __init__(self,
                 # Injected
                 balances: Balances,
                 logger: Logger,
                 model: Classifier,
                 time_method: TimeMock,
                 # Parameters
                 any_address_claim_wait_time_s=60 * 60 * 24 * 7,
                 # Configuration Options
                 allow_greater_deposit=False,
                 group_contributions=False,
                 reset_model_during_reward_phase=False,
                 ):
        super().__init__(any_address_claim_wait_time_s=any_address_claim_wait_time_s)

        self._balances = balances
        self._logger = logger
        self.model = model
        self._time = time_method

        # Configuration Options
        self._allow_greater_deposit = allow_greater_deposit
        self._group_contributions = group_contributions
        self._reset_model_during_reward_phase = reset_model_during_reward_phase

        self._market_earliest_end_time_s = None
        self._market_balances: Dict[Address, float] = defaultdict(float)
        """ Keeps track of balances in the market. """

        self._next_data_index = None

        self.min_stake = 1
        """
        The minimum required amount to deposit.
        Should be at least 1 to handle the worst case where the contribution takes the accuracy from 1 to 0.
        """

        self.state = None

    @property
    def reset_model_during_reward_phase(self):
        """ Whether the model is retrained from scratch while iterating contributions in the reward phase. """
        return self._reset_model_during_reward_phase

    def distribute_payment_for_prediction(self, sender, value):
        # Predictions are not paid for in this IM.
        pass

    def get_num_contributions_in_market(self):
        """
        :return: The total number of contributions currently in the market.
            This can decrease as "bad" contributors are removed during the reward phase.
        """
        return len(self._market_data)

    # Methods in chronological order of the PM.
    @staticmethod
    def hash_test_set(test_set):
        """
        :param test_set: A test set.
        :return: The hash of `test_set`.
        """
        return sha256(str(test_set).encode()).hexdigest()

    @staticmethod
    def get_test_set_hashes(num_pieces, x_test, y_test) -> Tuple[list, list]:
        """
        Helper to break the test set into `num_pieces` to initialize the market.

        :param num_pieces: The number of pieces to break the test set into.
        :param x_test: The features for the test set.
        :param y_test: The labels for `x_test`.
        :return: tuple
            A list of `num_pieces` hashes for each portion of the test set.
            The test set divided into `num_pieces`.
        """
        test_sets = []
        test_dataset_hashes = []
        assert len(x_test) == len(y_test) >= num_pieces
        for i in range(num_pieces):
            start = int(i / num_pieces * len(x_test))
            end = int((i + 1) / num_pieces * len(x_test))
            test_set = list(zip(x_test[start:end], y_test[start:end]))
            test_sets.append(test_set)
            test_dataset_hashes.append(PredictionMarket.hash_test_set(test_set))
        assert sum(len(t) for t in test_sets) == len(x_test)
        return test_dataset_hashes, test_sets

    def initialize_market(self, msg: Msg,
                          test_dataset_hashes: List[str],
                          # Ending criteria:
                          min_length_s: int, min_num_contributions: int) -> int:
        """
        Initialize the prediction market.

        :param msg: Indicates the one posting the bounty and the amount being committed for the bounty.
            The total bounty should be an integer since it also represents the number of "rounds" in the PM.
        :param test_dataset_hashes: The committed hashes for the portions of the test set.
        :param min_length_s: The minimum length in seconds of the market.
        :param min_num_contributions: The minimum number of contributions before ending the market.
        :return: The index of the test set that must be revealed.
        """
        assert self._market_earliest_end_time_s is None
        assert self._next_data_index is None, "The market end has already been triggered."
        assert self.state is None

        self.bounty_provider = msg.sender
        self.total_bounty = msg.value
        self.remaining_bounty_rounds = self.total_bounty
        self.test_set_hashes = test_dataset_hashes
        assert len(self.test_set_hashes) > 1
        # Pick one portion that the initializer must reveal up front; it is
        # excluded from the later reward-phase verification sequence.
        self.test_reveal_index = random.randrange(len(self.test_set_hashes))
        self.next_test_set_index_to_verify = 0
        if self.next_test_set_index_to_verify == self.test_reveal_index:
            self.next_test_set_index_to_verify += 1

        self._market_data: List[_Contribution] = []
        self.min_num_contributions = min_num_contributions
        self._market_earliest_end_time_s = self._time() + min_length_s

        self.reward_phase_end_time_s = None
        self.prev_acc = None
        self.original_acc = None

        # Pay the owner since it will be the owner distributing funds using `handle_refund` and `handle_reward` later.
        self._balances.send(self.bounty_provider, self.owner, self.total_bounty)

        self.state = MarketPhase.INITIALIZATION

        return self.test_reveal_index

    def add_test_set_hashes(self, msg: Msg, more_test_set_hashes: List[str]) -> int:
        """
        (Optional)
        Add more hashes for portions of the test set to reveal.
        This helps in case not all hashes can be sent in one transaction.

        :param msg: The message for this transaction.
            The sender must be the bounty provider.
        :param more_test_set_hashes: More committed hashes for the portions of the test set.
        :return: The index of the test set that must be revealed.
        """
        assert self.state == MarketPhase.INITIALIZATION
        assert msg.sender == self.bounty_provider
        # Ensure that a new test set is given and the sender isn't just trying to get a new random index.
        assert len(more_test_set_hashes) > 0, "You must give at least one hash."
        self.test_set_hashes += more_test_set_hashes
        self.test_reveal_index = random.randrange(len(self.test_set_hashes))
        self.next_test_set_index_to_verify = 0
        if self.next_test_set_index_to_verify == self.test_reveal_index:
            self.next_test_set_index_to_verify += 1
        return self.test_reveal_index

    def verify_test_set(self, index: int, test_set_portion):
        """
        Verify that a portion of the test set matches the committed to hash.

        :param index: The index of the test set in the originally committed list of hashes.
        :param test_set_portion: The portion of the test set to reveal.
        """
        assert 0 <= index < len(self.test_set_hashes)
        assert len(test_set_portion) > 0
        test_set_hash = self.hash_test_set(test_set_portion)
        assert test_set_hash == self.test_set_hashes[index]

    def reveal_init_test_set(self, test_set_portion):
        """
        Reveal the required portion of the full test set.

        :param test_set_portion: The portion of the test set that must be revealed before started the Participation Phase.
        """
        assert self.state == MarketPhase.INITIALIZATION
        self.verify_test_set(self.test_reveal_index, test_set_portion)
        self.state = MarketPhase.PARTICIPATION

    def handle_add_data(self, contributor_address: Address, msg_value: float, data, classification) -> (float, bool):
        """
        Accept a data contribution and record its stake.

        :return: (amount charged, whether the model should be updated now).
            The model is never updated immediately in this IM.
        """
        assert self.state == MarketPhase.PARTICIPATION, f'Current state is: {self.state}.'
        if msg_value < self.min_stake:
            raise RejectException(f"Did not pay enough. Sent {msg_value} < {self.min_stake}")
        if self._allow_greater_deposit:
            # Allow them to stake as much as they want to ensure they get included in future rounds.
            cost = msg_value
        else:
            cost = self.min_stake
        update_model = False
        self._market_data.append(_Contribution(contributor_address, data, classification, cost))
        self._market_balances[contributor_address] += cost
        return (cost, update_model)

    def end_market(self):
        """
        Signal the end of the prediction market.
        """
        assert self.state == MarketPhase.PARTICIPATION, f'Current state is: {self.state}.'
        if self.get_num_contributions_in_market() < self.min_num_contributions \
                and self._time() < self._market_earliest_end_time_s:
            raise RejectException("Can't end the market yet.")

        self._logger.info("Ending market.")
        self.state = MarketPhase.REVEAL_TEST_SET
        self._next_data_index = 0
        self.test_data, self.test_labels = [], []

    def verify_next_test_set(self, test_set_portion):
        """
        Verify and accumulate the next committed portion of the test set.
        Once every portion (except the already-revealed one) is verified,
        the market moves to the reward phase.
        """
        assert self.state == MarketPhase.REVEAL_TEST_SET
        self.verify_test_set(self.next_test_set_index_to_verify, test_set_portion)
        test_data, test_labels = zip(*test_set_portion)
        self.test_data += test_data
        self.test_labels += test_labels
        self.next_test_set_index_to_verify += 1
        # Skip over the portion that was already revealed during initialization.
        if self.next_test_set_index_to_verify == self.test_reveal_index:
            self.next_test_set_index_to_verify += 1
        if self.next_test_set_index_to_verify == len(self.test_set_hashes):
            self.state = MarketPhase.REWARD_RESTART
            self.test_data = np.array(self.test_data)
            self.test_labels = np.array(self.test_labels)

    def process_contribution(self):
        """
        Reward Phase:
        Process the next data contribution.
        """
        assert self.remaining_bounty_rounds > 0, "The market has ended."

        if self.state == MarketPhase.REWARD_RESTART:
            self._next_data_index = 0
            self._logger.debug("Remaining bounty rounds: %s", self.remaining_bounty_rounds)
            self._scores = defaultdict(float)

            if self._reset_model_during_reward_phase:
                # The paper implies that we should not retrain the model and instead only train once.
                # The problem there is that a contributor is affected by bad contributions
                # between them and the last counted contribution after bad contributions are filtered out.
                self.model.reset_model()

            if self.prev_acc is None:
                # XXX This evaluation can be expensive and likely won't work in Ethereum.
                # We need to find a more efficient way to do this or let a contributor proved they did it.
                self.prev_acc = self.model.evaluate(self.test_data, self.test_labels)
                self.original_acc = self.prev_acc
                self._logger.debug("Accuracy: %0.2f%%", self.prev_acc * 100)
            elif not self._reset_model_during_reward_phase:
                # When calculating rewards, the score, the same accuracy for the initial model should be used.
                self.prev_acc = self.original_acc

            self._num_market_contributions: Dict[Address, int] = Counter()
            self._worst_contribution: Optional[_Contribution] = None
            self._worst_contributor: Optional[Address] = None
            self._min_score = math.inf
            self.state = MarketPhase.REWARD
        else:
            assert self.state == MarketPhase.REWARD

        contribution = self._market_data[self._next_data_index]
        self._num_market_contributions[contribution.contributor_address] += 1
        self.model.update(contribution.data, contribution.classification)
        if not self._reset_model_during_reward_phase and contribution.accuracy is None:
            # XXX Potentially expensive gas cost.
            contribution.accuracy = self.model.evaluate(self.test_data, self.test_labels)

        self._next_data_index += 1
        iterated_through_all_contributions = self._next_data_index >= self.get_num_contributions_in_market()
        if iterated_through_all_contributions \
                or not self._group_contributions \
                or self._market_data[self._next_data_index].contributor_address != contribution.contributor_address:
            # Need to compute score.
            if self._reset_model_during_reward_phase:
                # XXX Potentially expensive gas cost.
                acc = self.model.evaluate(self.test_data, self.test_labels)
            else:
                acc = contribution.accuracy
            score_change = acc - self.prev_acc
            if self._group_contributions:
                new_score = self._scores[contribution.contributor_address] = \
                    self._scores[contribution.contributor_address] + score_change
            else:
                new_score = contribution.score = score_change
            if new_score < self._min_score:
                self._min_score = new_score
                if self._group_contributions:
                    self._worst_contributor = contribution.contributor_address
                else:
                    self._worst_contribution = contribution
            elif self._group_contributions and self._worst_contributor == contribution.contributor_address:
                # Their score increased, they might not be the worst anymore.
                # Optimize: use a heap.
                self._worst_contributor, self._min_score = min(self._scores.items(), key=lambda x: x[1])
            self.prev_acc = acc

        if iterated_through_all_contributions:
            # Find min score and remove that address from the list.
            self._logger.debug("Minimum score: %.2f", self._min_score)
            if self._min_score < 0:
                # Simulate as many rounds as the worst participant can afford,
                # capped by the remaining bounty.
                if self._group_contributions:
                    num_rounds = self._market_balances[self._worst_contributor] / -self._min_score
                else:
                    num_rounds = self._worst_contribution.balance / -self._min_score
                if num_rounds > self.remaining_bounty_rounds:
                    num_rounds = self.remaining_bounty_rounds
                self._logger.debug("Will simulate %.2f rounds.", num_rounds)
                self.remaining_bounty_rounds -= num_rounds
                if self.remaining_bounty_rounds == 0:
                    self._end_reward_phase(num_rounds)
                else:
                    if self._group_contributions:
                        participants_to_remove = set()
                        for participant, score in self._scores.items():
                            self._logger.debug("Score for \"%s\": %.2f", participant, score)
                            self._market_balances[participant] += score * num_rounds
                            if self._market_balances[participant] < self._num_market_contributions[participant]:
                                # They don't have enough left to stake next time.
                                participants_to_remove.add(participant)
                        self._market_data: List[_Contribution] = list(
                            filter(lambda c: c.contributor_address not in participants_to_remove,
                                   self._market_data))
                    else:
                        for contribution in self._market_data:
                            contribution.balance += contribution.score * num_rounds
                            if contribution.balance < 1:
                                # Contribution is going to get kicked out.
                                self._market_balances[contribution.contributor_address] += contribution.balance
                        self._market_data: List[_Contribution] = \
                            list(filter(lambda c: c.balance >= 1, self._market_data))
                    if self.get_num_contributions_in_market() == 0:
                        self.state = MarketPhase.REWARD_COLLECT
                        self.remaining_bounty_rounds = 0
                        self.reward_phase_end_time_s = self._time()
                    else:
                        self.state = MarketPhase.REWARD_RESTART
            else:
                num_rounds = self.remaining_bounty_rounds
                self.remaining_bounty_rounds = 0
                self._end_reward_phase(num_rounds)

    def _end_reward_phase(self, num_rounds):
        """
        Distribute rewards.

        :param num_rounds: The number of rounds remaining.
        """
        self._logger.debug("Dividing remaining bounty amongst all remaining contributors to simulate %.2f rounds.",
                           num_rounds)
        self.reward_phase_end_time_s = self._time()
        self.state = MarketPhase.REWARD_COLLECT
        if self._group_contributions:
            for participant, score in self._scores.items():
                self._logger.debug("Score for \"%s\": %.2f", participant, score)
                self._market_balances[participant] += score * num_rounds
        else:
            for contribution in self._market_data:
                self._market_balances[contribution.contributor_address] += \
                    contribution.score * num_rounds

        self._market_data = []

    def handle_refund(self, submitter: Address, stored_data: StoredData,
                      claimable_amount: float, claimed_by_submitter: bool,
                      prediction) -> float:
        assert self.remaining_bounty_rounds == 0, "The reward phase has not finished processing contributions."
        assert self.state == MarketPhase.REWARD_COLLECT
        result = self._market_balances[submitter]
        self._logger.debug("Reward for \"%s\": %.2f", submitter, result)
        if result > 0:
            del self._market_balances[submitter]
        else:
            result = 0
        return result

    def handle_report(self, reporter: Address, stored_data: StoredData, claimed_by_reporter: bool, prediction) -> float:
        assert self.state == MarketPhase.REWARD_COLLECT, "The reward phase has not finished processing contributions."
        assert self.remaining_bounty_rounds == 0
        assert self.reward_phase_end_time_s > 0
        if self._time() - self.reward_phase_end_time_s >= self.any_address_claim_wait_time_s:
            submitter = stored_data.sender
            result = self._market_balances[submitter]
            if result > 0:
                self._logger.debug("Giving reward for \"%s\" to \"%s\". Reward: %s", submitter, reporter, result)
                # Fix: remove the SUBMITTER's entry, whose balance was just paid out,
                # matching `handle_refund`. The original deleted the reporter's entry,
                # which raises KeyError for a reporter with no balance and leaves the
                # submitter's reward claimable again.
                del self._market_balances[submitter]
        else:
            result = 0
        return result
@dataclass
class PredictionMarketImModule(Module):
    """ Injector module that binds `PredictionMarket` as the `IncentiveMechanism`. """

    # Charge contributors their full deposit instead of just the minimum stake.
    allow_greater_deposit: bool = field(default=False)
    # Score contributions per contributor instead of per individual sample.
    group_contributions: bool = field(default=False)
    # Retrain the model from scratch on each reward-phase iteration.
    reset_model_during_reward_phase: bool = field(default=False)

    @provider
    @singleton
    def provide_data_loader(self, builder: ClassAssistedBuilder[PredictionMarket]) -> IncentiveMechanism:
        """ Build the singleton `PredictionMarket` with this module's configuration options. """
        return builder.build(
            allow_greater_deposit=self.allow_greater_deposit,
            group_contributions=self.group_contributions,
            reset_model_during_reward_phase=self.reset_model_during_reward_phase,
        )
|
0xDeCA10B/simulation/decai/simulation/contract/incentive/prediction_market.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/contract/incentive/prediction_market.py",
"repo_id": "0xDeCA10B",
"token_count": 9560
}
| 7 |
import unittest
import numpy as np
import scipy.sparse
from injector import Injector
from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapper
from decai.simulation.logging_module import LoggingModule
class TestFeatureIndexMapper(unittest.TestCase):
    """ Tests for `FeatureIndexMapper`, which compacts sparse feature indices to a dense range. """

    @classmethod
    def setUpClass(cls):
        # Build the mapper once for all tests via dependency injection.
        inj = Injector([
            LoggingModule,
        ])
        cls.f = inj.get(FeatureIndexMapper)

    def test_map_dense(self):
        # Dense inputs should pass through unchanged (same objects, no mapping).
        x_train = np.random.random_sample((10, 3))
        x_test = np.random.random_sample((4, x_train.shape[1]))
        train, test, feature_index_mapping = self.f.map(x_train, x_test)
        self.assertIs(train, x_train)
        self.assertIs(test, x_test)
        self.assertIsNone(feature_index_mapping)

    def test_map_sparse(self):
        x_train = np.array([[0, 0, 1, 1, 0], [0, 2, 0, 0, 0]])
        x_test = np.array([[1, 0, 1, 0, 1], [0, 0, 3, 0, 0]])
        # Very wide sparse matrices force the sparse path; only a few cells are set.
        x_train_sparse = scipy.sparse.csr_matrix((17348, 4288315073), dtype=np.uint8)
        x_train_sparse[x_train.nonzero()] = x_train[x_train.nonzero()]
        x_test_sparse = scipy.sparse.csr_matrix((3333, 21312344), dtype=np.uint8)
        x_test_sparse[x_test.nonzero()] = x_test[x_test.nonzero()]
        mapped_train, mapped_test, feature_index_mapping = self.f.map(x_train_sparse, x_test_sparse)
        self.assertEqual(int, type(feature_index_mapping[0]))
        # Only training columns 1, 2, 3 contain non-zero values, so they form the mapping.
        self.assertEqual([1, 2, 3], feature_index_mapping)
        self.assertTrue(mapped_train.sum(axis=0).all(),
                        "Every column should have at least one non-zero value.")
        x_train_expected = np.zeros((x_train_sparse.shape[0], len(feature_index_mapping)), dtype=np.uint8)
        x_train_expected[0, 1] = 1
        x_train_expected[0, 2] = 1
        x_train_expected[1, 0] = 2
        self.assertTrue(np.array_equal(x_train_expected, mapped_train), mapped_train)
        # Test-set columns that are not in the training mapping are dropped.
        x_test_expected = np.zeros((x_test_sparse.shape[0], len(feature_index_mapping)), dtype=np.uint8)
        x_test_expected[0, 1] = 1
        x_test_expected[1, 1] = 3
        self.assertTrue(np.array_equal(x_test_expected, mapped_test), mapped_test)
|
0xDeCA10B/simulation/decai/simulation/data/featuremapping/tests/test_feature_index_mapper.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/data/featuremapping/tests/test_feature_index_mapper.py",
"repo_id": "0xDeCA10B",
"token_count": 999
}
| 8 |
import os
import re
import sys
from injector import Injector
from sklearn.naive_bayes import MultinomialNB
from decai.simulation.contract.classification.ncc_module import NearestCentroidClassifierModule
from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.featuremapping.hashing.murmurhash3 import MurmurHash3Module
from decai.simulation.data.fitness_data_loader import FitnessDataModule
from decai.simulation.data.imdb_data_loader import ImdbDataModule
from decai.simulation.data.news_data_loader import NewsDataModule
from decai.simulation.data.offensive_data_loader import OffensiveDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator
# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
datasets = dict(
fitness=dict(module=FitnessDataModule,
train_size=3500, test_size=1500,
),
imdb=dict(module=ImdbDataModule(num_words=1000),
train_size=None, test_size=None,
),
news=dict(module=NewsDataModule,
train_size=None, test_size=None,
),
offensive=dict(module=OffensiveDataModule,
train_size=None, test_size=None,
),
)
models = dict(
nb=dict(module=SciKitClassifierModule(MultinomialNB),
baseline_accuracy=dict(
# train_size, test_size = 3500, 1500
fitness=0.97,
# train_size, test_size = None, None
imdb=0.8323,
# train_size, test_size = None, None
news=0.8181,
)),
ncc=dict(module=NearestCentroidClassifierModule,
baseline_accuracy=dict(
# train_size, test_size = 3500, 1500
fitness=0.9513,
# train_size, test_size = None, None
imdb=0.7445,
# train_size, test_size = None, None
news=0.6727,
)),
perceptron=dict(module=PerceptronModule,
baseline_accuracy=dict(
# train_size, test_size = 3500, 1500
fitness=0.9507,
# train_size, test_size = None, None
imdb=0.73,
# train_size, test_size = None, None
news=0.9003,
)),
)
# Set up the agents that will act in the simulation.
agents = [
# Good
Agent(address="Good",
start_balance=10_000,
mean_deposit=50,
stdev_deposit=10,
mean_update_wait_s=10 * 60,
prob_mistake=0.0001,
),
# Malicious: A determined agent with the goal of disrupting others.
Agent(address="Bad",
start_balance=10_000,
mean_deposit=100,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
# One that just calls the model and pays to use the model.
Agent(address="Caller",
start_balance=30_000,
mean_deposit=0,
stdev_deposit=0,
mean_update_wait_s=2 * 60 * 60,
calls_model=True,
pay_to_call=50
),
]
def main():
    """ Configure and launch the simulation for the chosen dataset and model type. """
    global agents

    # This file is set up to use different models and datasets.
    dataset = 'offensive'
    model_type = 'nb'
    assert dataset in datasets
    assert model_type in models

    train_size = datasets[dataset]['train_size']
    test_size = datasets[dataset]['test_size']
    # Seed the simulation with a small initial portion of the training data.
    if train_size is None:
        init_train_data_portion = 0.08
    else:
        init_train_data_portion = 100 / train_size

    # No caller (assume free to call).
    agents = agents[:-1]

    # Set up the data, model, and incentive mechanism.
    inj = Injector([
        DefaultCollaborativeTrainerModule,
        datasets[dataset]['module'],
        MurmurHash3Module,
        LoggingModule,
        models[model_type]['module'],
        StakeableImModule,
    ])
    s = inj.get(Simulator)

    # Start the simulation.
    # NOTE(review): `.get(dataset)` may be None when no baseline accuracy is
    # recorded for this dataset (e.g. 'offensive') — presumably Simulator
    # accepts that; confirm.
    s.simulate(agents,
               baseline_accuracy=models[model_type]['baseline_accuracy'].get(dataset),
               init_train_data_portion=init_train_data_portion,
               train_size=train_size,
               test_size=test_size,
               filename_indicator=f"{dataset}-{model_type}"
               )
# Run with `bokeh serve PATH`.
if re.match('bk_script_|bokeh_app_', __name__):
main()
else:
print("`__name__` didn't match the pattern. Bokeh app will not run.")
|
0xDeCA10B/simulation/decai/simulation/simulate_entry_point.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/simulate_entry_point.py",
"repo_id": "0xDeCA10B",
"token_count": 2279
}
| 9 |
# Lab 7 - 分布式训练任务练习
## 实验目的
1. 学习使用Horovod库。
2. 通过调用不同的通信后端实现数据并行的并行/分布式训练,了解各种后端的基本原理和适用范围。
3. 通过实际操作,灵活掌握安装部署。
## 实验环境
* Ubuntu 18.04
* CUDA 10.0
* PyTorch==1.5.0
* Horovod==0.19.4
## 实验原理
通过测试MPI、NCCL、Gloo、oneCCL后端完成相同的allreduce通信,通过不同的链路实现数据传输。
## 实验内容
### 实验流程图

### 具体步骤
1. 安装依赖支持:OpenMPI, Horovod。
2. 运行Horovod MNIST测试用例(`Lab7/pytorch_mnist_horovod.py`),验证Horovod正确安装。
3. 按照MPI/Gloo/NCCL的顺序,选用不同的通信后端,测试不同GPU数、不同机器数时,MNIST样例下iteration耗时和吞吐率,记录GPU和机器数目,以及测试结果,并完成表格绘制。
1. 安装MPI,并测试多卡、多机并行训练耗时和吞吐率。可参考如下命令:
```
//单机多CPU
$horovodrun -np 2 python pytorch_mnist_horovod.py --no-cuda
//多机单GPU
$horovodrun -np 4 -H server1:1,server2:1,server3:1,server4:1 python pytorch_mnist_horovod_basic.py
```
2. 测试Gloo下的多卡、多机并行训练耗时。
3. 安装NCCL2后重新安装horovod并测试多卡、多机并行训练耗时和吞吐率。
4. (可选)安装支持GPU通信的MPI后重新安装horovod并测试多卡、多机并行训练耗时和吞吐率。
```
$ HOROVOD_GPU_ALLREDUCE=MPI pip install --no-cache-dir horovod
```
5. (可选)若机器有Tesla/Quadro GPU + RDMA环境,尝试设置GPUDirect RDMA 以达到更高的通信性能
6. 统计数据,绘制系统的scalability曲线
7. (可选)选取任意RNN网络进行并行训练,测试horovod并行训练耗时和吞吐率。
## 实验报告
### 实验环境
||||
|--------|--------------|--------------------------|
|硬件环境|服务器数目| |
||网卡型号、数目||
||GPU型号、数目||
||GPU连接方式||
|软件环境|OS版本||
||GPU driver、(opt. NIC driver)||
||深度学习框架<br>python包名称及版本||
||CUDA版本||
||NCCL版本||
||||
### 实验结果
1. 测试服务器内多显卡加速比
|||||||
|-----|-----|-----|-----|------|------|
| 通信后端 | 服务器数量 | 每台服务器显卡数量 | 平均每步耗时 | 平均吞吐率 | 加速比 |
| MPI | 1 | | | | |
| MPI | 1 | | | | |
| ... | | | | | |
| Gloo | 1 | | | | |
| Gloo | 1 | | | | |
| ... | | | | | |
| NCCL | 1 | | | | |
| NCCL |1 | | | | |
| ... | | | | | |
|||||||
2. 测试服务器间加速比
|||||||
|-----|-----|-----|-----|------|------|
| 通信后端 | 服务器数量 | 每台服务器显卡数量 | 平均每步耗时 | 平均吞吐率 | 加速比 |
| MPI | | 1 ||||
| MPI | | 1 ||||
| Gloo | | 1 ||||
| Gloo | | 1 ||||
| NCCL | | 1 ||||
| NCCL | | 1 ||||
|||||||
3. 总结加速比的图表、比较不同通信后端的性能差异、分析可能的原因
<br />
<br />
<br />
<br />
4. (可选)比较不同模型的并行加速差异、分析可能的原因(提示:计算/通信比)
<br />
<br />
<br />
<br />
## 参考代码
1. 安装依赖支持
安装OpenMPI:`sudo apt install openmpi-bin`
安装Horovod:`python3 -m pip install horovod==0.19.4 --user`
2. 验证Horovod正确安装
运行mnist样例程序
```
python pytorch_mnist_horovod_basic.py
```
3. 选用不同的通信后端测试命令
1. 安装MPI,并测试多卡、多机并行训练耗时和吞吐率。
```
//单机多CPU
$horovodrun -np 2 python pytorch_mnist_horovod.py --no-cuda
//单机多GPU
$horovodrun -np 2 python pytorch_mnist_horovod.py
//多机单GPU
$horovodrun -np 4 -H server1:1,server2:1,server3:1,server4:1 python pytorch_mnist_horovod_basic.py
//多机多CPU
$horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python pytorch_mnist_horovod_basic.py --no-cuda
//多机多GPU
$horovodrun -np 16 -H server1:4,server2:4,server3:4,server4:4 python pytorch_mnist_horovod_basic.py
```
2. 测试Gloo下的多卡、多机并行训练耗时。
```
$horovodrun --gloo -np 2 python pytorch_mnist_horovod.py --no-cuda
$horovodrun -np 4 -H server1:1,server2:1,server3:1,server4:1 python pytorch_mnist_horovod_basic.py
$horovodrun --gloo -np 16 -H server1:4,server2:4,server3:4,server4:4 python pytorch_mnist_horovod_basic.py --no-cuda
```
3. 安装NCCL2后重新安装horovod并测试多卡、多机并行训练耗时和吞吐率。
```
$HOROVOD_GPU_OPERATIONS=NCCL pip install --no-cache-dir horovod
$horovodrun -np 2 -H server1:1,server2:1 python pytorch_mnist_horovod.py
```
4. 安装支持GPU通信的MPI后重新安装horovod并测试多卡、多机并行训练耗时和吞吐率。
```
HOROVOD_GPU_ALLREDUCE=MPI pip install --no-cache-dir horovod
```
## 参考资料
* PyTorch MNIST测试用例:https://github.com/horovod/horovod/blob/master/examples/pytorch/pytorch_mnist.py
* Horovod on GPU: https://github.com/horovod/horovod/blob/master/docs/gpus.rst
* NCCL2 download: https://developer.nvidia.com/nccl/nccl-download
* OpenMPI: https://www.open-mpi.org/software/ompi/v4.0/
|
AI-System/Labs/AdvancedLabs/Lab7/README.md/0
|
{
"file_path": "AI-System/Labs/AdvancedLabs/Lab7/README.md",
"repo_id": "AI-System",
"token_count": 3491
}
| 10 |
inference_address=http://0.0.0.0:8080
management_address=http://0.0.0.0:8081
number_of_netty_threads=32
job_queue_size=1000
model_store=/home/model-server/model-store
|
AI-System/Labs/BasicLabs/Lab5/config.properties/0
|
{
"file_path": "AI-System/Labs/BasicLabs/Lab5/config.properties",
"repo_id": "AI-System",
"token_count": 68
}
| 11 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 术语表
## A
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Artificial Intelligence|人工智能|AI|yanjie|
|Activation|激活函数||xiaowu|
|[Amdahl's Law](https://en.wikipedia.org/wiki/Amdahl%27s_law)|阿姆达尔定律||yanjie|
|Accounting|计数||yanjie|
|Action|动作||xuehui|
|Actor|采样器||xuehui|
|API Remoting|应用程序编程接口远程处理||yanjie|
|Accuracy|准确度||yanjie|
|Additive Increase|加性增加||yanjie|
|Agent|智能体||xuehui|
|Artificial Neural Network|人工神经网络||yanjie|
|[Application-Specific Integrated Circuit](https://ja.wikipedia.org/wiki/ASIC)|专用集成电路|ASIC|yanjie|
|Accelerator|加速器||yanjie|
|Artificial Intelligence|人工智能|AI|yanjie|
|Allocation|分配||yanjie|
|Adversarial Training|对抗训练||xian|
|Automatic Feature Engineering|自动特征工程||xuehui|
|Adaptive Bit Rate|比特率自适应|ABR|mike|
|Attention|注意力机制||xiaowu|
|AutoML|自动机器学习||mike|
|Automatic Differentiation|自动微分||yanjie|
|AIOps|智能运维||yanjie|
|Agile Chip Development|敏捷芯片开发||yanjie|
|Address Coalescing|地址合并||yanjie|
## B
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Batch|批次||yanjie|
|Batch Size|批尺寸||yanjie|
|Batch Job|批处理作业||yanjie|
|Bias|偏移参数||xiaowu|
|Back Propagation|反向传播||xiaowu|
|Batch Normalization|批归一化||xiaowu|
|Block|块||yanjie|
|[Bin Packing](https://en.wikipedia.org/wiki/Bin_packing_problem)|装箱||yanjie|
|Bus|总线||yanjie|
|Behavior Policy|行为策略||xuehui|
|Bundled Mode|捆绑模式||xuehui|
|Benchmarks|系统评测||mike|
|Big Data System|大数据系统||yanjie|
|Branch Prediction|分支预测||yanjie|
## C
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Classification|分类||xiaowu|
|CrossEntropy|交叉熵损失函数||xiaowu|
|[Convolutional Neural Network](https://en.wikipedia.org/wiki/Convolutional_neural_network)|卷积神经网络|CNN|xiaowu|
|Convolution|卷积||yanjie|
|Convolution Layer|卷积层||yanjie|
|Control|控制||yanjie|
|Control Groups|控制组|cgroups|yanjie|
|Caching|缓存||yanjie|
|Cache Line|缓存线||yanjie|
|Cache Misses Rate|缓存未命中率||yanjie|
|Cached Allocator|缓存分配器||yanjie|
|Channel Number|通道数||quanlu|
|Container|容器 ||yanjie|
|Click-through Prediction|点击率预测||lynex|
|Collective Communication|集体式通信||lynex|
|Confidence Interval|置信区间||mike|
|Congestion Control|拥塞控制||mike|
|Compute-Bound|计算受限||yanjie|
|Code Completion|代码自动补全||yanjie|
|Code Snippet|代码片段||yanjie|
|Cursor-Following|光标跟随||yanjie|
|Code Refactoring|代码重构||yanjie|
|Computation Graph|计算图||yanjie|
|Cell|单元格||yanjie|
|Control Plane Components|控制平面组件||yanjie|
|Continuous Integration|持续集成|CI|yanjie|
|Continuous Delivery|持续交付|CD|yanjie|
|Confusion Matrix|混淆矩阵||yanjie|
|Cost-Effective|成本效益||yanjie|
|Context Switching|上下文切换||yanjie|
|Floorplanning|布图规划||shijie|
|Code Version Control|代码版本管理||yanjie|
|Code Book|代码书||yanjie|
|Copy on Write|写时复制||yanjie|
|Constraint Satisfaction Problem|约束满足问题||yanjie|
|Context Switching|上下文切换||yanjie|
## D
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Data Flow Graph|数据流图|DFG|quanlu|
|Deep Learning|深度学习||yanjie|
|Deep Learning System|深度学习系统||yanjie|
|Deployment|部署||yanjie|
|Deep Neural Network|深度神经网络|DNN|xiaowu|
|Dropout|丢弃法||xiaowu|
|Dropout Rate|丢弃率||quanlu|
|Data Augmentation|数据增强||xiaowu|
|Defect Tolerance|缺陷容忍||yanjie|
|Dimension|维度||yanjie|
|Docker Hub|镜像中心||yanjie|
|Direct GPU Pass-Through|直接GPU直通||yanjie|
|Device Emulation|设备仿真||yanjie|
|Device|设备||yanjie|
|Data Privacy|数据隐私||yanjie|
|Decoder|解码器||xiaowu|
|Deep Neural Network|深度神经网络||yanjie|
|Deallocation|释放||yanjie|
|Deterministic Policy|决定性策略||xuehui|
|Distilation|蒸馏||xian|
|Distributed Reinforcement Learning|分布式强化学习||xuehui|
|Data-Parallelism|数据并行||lynex|
|Data Driven|数据驱动||yanjie|
|Debugging|调试||yanjie|
|Development Experience|开发体验||yanjie|
|Directly Responsible Individuals|直接责任人||yanjie|
|Dissaggregation|分解||yanjie|
|Dissaggregation Memory|分解内存||yanjie|
|Domain Expert|领域专家||quanlu|
|Dependability|可靠性||yanjie|
## E
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Early Stop|早停法||xiaowu|
|Edge|边缘||yanjie|
|Edge Device|端侧设备||quanlu|
|Ensemble Learning|集成学习||xiaowu|
|Environment|环境||xuehui|
|Evict|驱逐||yanjie|
|Error Correction Code|纠错码|ECC|yanjie|
|Encoding|编码||yanjie|
|Encodder|编码器||xiaowu|
|Ensemble|融合||yanjie|
|Emitter|发射器||yanjie|
|Exploration|探索||xuehui|
|Exploit|利用||xuehui|
|Edge|边缘 ||yanjie|
|Enhanced Security|增强的安全性||yanjie|
|Economy of Scale|规模经济||yanjie|
|Exokernel|外核||yanjie|
## F
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Feasible Region|可行域||quanlu|
|Fit|拟合||xiaowu|
|Forward Calculation|前向计算||xiaowu|
|Forward Propagation|前向传播||yanjie|
|Full Connection Layer|全连接层||xiaowu|
|Flexibility|灵活性||yanjie|
|Floorplanning|布图规划||shijie|
|Flow Control|流量控制||mike|
|Fusion|融合||yanjie|
|Fragmentation|碎片||yanjie|
|Field-Programmable Gate Array|现场可编程门阵列||yanjie|
|Forward Propagation|前向传播||yanjie|
|Fault Injection|缺陷注入||yanjie|
|Fuzzing|模糊测试||yanjie|
|Fault|故障||yanjie|
|Floating-point|浮点数||shijie|
|Fixed-point|定点数||shijie|
|Faithfully|可信地||yanjie|
## G
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Gradient|梯度||xuehui|
|Gradient Descent|梯度下降|GD|xiaowu|
|GPU|图形处理器||yanjie|
|Gaussian Process|高斯过程|GP|mike|
|Geometric|几何||yanjie|
## H
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|High Performance Computing|高性能计算|HPC|yanjie|
|High Throughput|高吞吐||yanjie|
|High Efficiency|高效率||yanjie|
|Hyper-parameters Optimization|超参数优化||xuehui|
|Heterogeneous hardware|异构硬件||yanjie|
|Hotfix|热修复||yanjie|
|Heartbeat|心跳机制||yanjie|
|Huffman Encoding|霍夫曼编码||yanjie|
## I
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Input|输入||yanjie|
|Inference|推理||yanjie|
|Inference System|推理系统||yanjie|
|Input Layer|输入层||xiaowu|
|Introspective|内省||yanjie|
|[Instruction Set Architecture](https://en.wikipedia.org/wiki/Instruction_set_architecture)|指令集架构|ISA|yanjie|
|Image|镜像||yanjie|
|[Intermediate Representation](https://en.wikipedia.org/wiki/Intermediate_representation)|中间表达|IR|yanjie|
|Integrated Development Environment|集成开发环境|IDE|yanjie|
|Intellisense|智能感知||yanjie|
|Intelligent Code Completion|智能代码完成||yanjie|
|Imperative Execution|命令式执行||yanjie|
|Incremental Scheduling|增量调度||yanjie|
|Incident|事件||yanjie|
## J
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Job|作业|||
|Just In Time Compiling|即时编译||yanjie|
|Job Completion Times|作业完成时间||yanjie|
## K
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Kernel|内核||yanjie|
|Knowledge Distillation|知识精炼||yanjie|
|Kernel Bypassing|内核旁路||yanjie|
## L
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Latency|延迟||yanjie|
|Learning Performance|学习性能||yanjie|
|Loss Function|损失函数||xiaowu|
|Loss|损失函数的值||xuehui|
|[Long Short-term Memory](https://en.wikipedia.org/wiki/Long_short-term_memory)|长短时记忆|LSTM|yanjie|
|Locality|局部性||yanjie|
|Linear Algebra|线性代数||yanjie|
|Loop|循环||yanjie|
|Linux Containers|Linux容器|LXC|yanjie|
|Life Cycle|生命周期||yanjie|
|Label|标签||yanjie|
|Low Latency|低延迟||yanjie|
|Long Tail Traffic|长尾延迟||yanjie|
|Locality|局部性||yanjie|
|Low-Rank Factorization|低秩分解||yanjie|
|Load Balance|负载均衡|LB|yanjie|
|Learner|学习器||xuehui|
|Logistic Regression|逻辑回归|LR|mike|
|Learning Curve|学习曲线||yanjie|
|Learning Rate|学习率||quanlu|
|Layering|层次化||yanjie|
|Library Operating Systems|库操作系统|LibOS|yanjie|
|Live-variable Analysis|存活变量分析||yanjie|
## M
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Machine Learning Model|机器学习模型||quanlu|
|Machine Learning System|机器学习系统||yanjie|
|Model Zoo|模型动物园||yanjie|
|MSE|均方差损失函数||xiaowu|
|Moore's law|摩尔定律||yanjie|
|Memory Hierarchy|内存层级结构||yanjie|
|Maximum Payload|最大有效载荷||yanjie|
|Multi-Tenancy|多租户||yanjie|
|Mediated Pass-Through|中介直通||yanjie|
|Model Compression|模型压缩||yanjie|
|Model Selection|模型选择||quanlu|
|Model Space|模型空间||quanlu|
|Multiplicative Decrease|乘性减少||yanjie|
|Mobile|移动端||yanjie|
|Model Size|模型尺寸||yanjie|
|Model Compression|模型压缩||yanjie|
|Model Quantization|模型量化||yanjie|
|[Multiply–Accumulate Operation](https://en.wikipedia.org/wiki/Multiply%E2%80%93accumulate_operation)|乘积累加运算|MAC|yanjie|
|Multilayer Perceptron|多层感知器||lynex|
|Micro-Batch|微批次||lynex|
|Model-Parallelism|模型并行||lynex|
|Memory-Bound|内存受限||yanjie|
|Multiple Instruction Multiple Data|多指令流多数据流|MIMD|yanjie|
|Multiply Accumulate|乘加运算|MAC|yanjie|
|Monolithic|单片||yanjie|
|Modularity|模块化||yanjie|
|Moore's law|摩尔定律||yanjie|
|Max Thermal Design Power|最大热设计功率||yanjie|
|Multiprocessing|多道处理||yanjie|
|Multithreading|多线程||yanjie|
|Microkernel|微内核||yanjie|
|Microkernelification|微内核化||yanjie|
## N
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Neural Net|神经网络|NN|xiaowu|
|Namespace|命名空间||yanjie|
|[Neural Architecture Search](https://en.wikipedia.org/wiki/Neural_architecture_search)|神经网络结构搜索|NAS|yanjie|
|Non-Deterministic Policy|非决定性策略||xuehui|
|Neural Language Model|神经语言模型|NLM|yanjie|
## O
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Optimizer|梯度下降优化器、优化算法||xiaowu|
|Over Fitting|过拟合||xiaowu|
|Output Layer|输出层||xiaowu|
|Output|输出||yanjie|
|Operating System|操作系统||yanjie|
|Online Service|在线服务||yanjie|
|Operator|算子||yanjie|
|On-Chip Memory|片上存储器||yanjie|
|Online Service|在线服务||yanjie|
|On-Premises Deployment|本地部署||yanjie|
|Opportunistic Job|机会任务||yanjie|
|On-Call|候命||yanjie|
|Offloading|卸载||yanjie|
|Open Instruction Sets|开放的指令集||yanjie|
|Out-of-order Execution|乱序执行||yanjie|
## P
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Pooling|池化||yanjie|
|Pooling Layer|池化层||yanjie|
|Policy|策略||xuehui|
|Prefetch|预取||yanjie|
|Parallelism|并行性||yanjie|
|Precision Calibration|精度校准||yanjie|
|Parameter Pruning|参数裁剪||yanjie|
|Pruning|剪枝||yanjie|
|Parser|解析器||yanjie|
|Power Consumption|功耗||yanjie|
|Parameter Server|参数服务器||xuehui|
|Peer-to-Peer Communication|点对点通信||lynex|
|Pipeline-Parallelism|流水并行||lynex|
|Pipeline Stage|流水阶段||lynex|
|Perceptron|感知机||yanjie|
|Performance|表现||quanlu|
|Performance/Watt|性能每瓦特||yanjie|
|Performance/TCO|性能每总计拥有成本||yanjie|
|Packge Management System|包管理器系统||yanjie|
|Pay-as-You-Go|按需付费||yanjie|
## Q
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Quantization|量化||yanjie|
|[Queries Per Second](https://en.wikipedia.org/wiki/Queries_per_second)|每秒查询数|QPS|yanjie|
|Q Network|Q 网络||xuehui|
|Quality of Experience|体验指标|QoE|mike|
## R
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Raw Dataset|原始数据集||quanlu|
|Random Forest|随机森林||quanlu|
|Reinforcement Learning|强化学习|RL|yanjie|
|Reliability|可靠性||yanjie|
|Regularization|正则化||xiaowu|
|[Recurrent Neural Network](https://en.wikipedia.org/wiki/Recurrent_neural_network)|循环神经网络|RNN|xiaowu|
|Regression|回归||xiaowu|
|Reliability|可靠性||yanjie|
|Request|请求||yanjie|
|Replica|副本||yanjie|
|Remote Procedure Call|远程过程调用|RPC|yanjie|
|Reward|奖励||xuehui|
|Replay Buffer|重放缓冲区||xuehui|
|Runtime|运行期||yanjie|
|Rule Of The Thumb|经验法则||yanjie|
|Randomly Accessed Pages|随机访问磁盘页||yanjie|
|Redundancy|冗余||yanjie|
|Resilience|韧性||yanjie|
## S
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Scheduling|调度||yanjie|
|Search Space|搜索空间||quanlu|
|Service-Level Agreement|服务等级协议|SLA|yanjie|
|Seq2Seq|序列到序列||xiaowu|
|Scalability|可扩展性||yanjie|
|SGD|随机梯度下降||xiaowu|
|Single Instruction Multiple Data|单指令多数据流|SIMD|yanjie|
|Spatially Expanded Network|空间扩展网络||yanjie|
|Stream|流||yanjie|
|State|状态||xuehui|
|Shape Inference|形状推导||yanjie|
|Sparsity|稀疏性||yanjie|
|Sample|样本||yanjie|
|Sub-Second|次秒||yanjie|
|Supervised Learning|有监督学习|SL|xuehui|
|Space|空间||yanjie|
|Sharing|共享||yanjie|
|Spatial locality|空间局部性||yanjie|
|System Call|系统调用||yanjie|
|Systematic|系统的||yanjie|
|Syntax Highlighting|语法高亮||yanjie|
|Program Synthesis|程序综合||yanjie|
|Symbolic Execution|符号执行||yanjie|
|Scheduler Extender|调度器扩展||yanjie|
|Scheduler|调度器||yanjie|
|Single Instruction Multiple Threads|单指令多线程|SIMT|yanjie|
|Side Effect|副作用||yanjie|
|System Performance Evaluation Cooperative|通用计算的系统性能评估合作|SPEC|yanjie|
|Systolic Array|脉动阵列||yanjie|
|Separation of Concerns|关注点分离||yanjie|
|Slowdown|减速||yanjie|
|Shutdown|停止||yanjie|
|Speculative Prefetching|推测预取||yanjie|
|Source-to-source Compiler|源到源编译器||yanjie|
|State Machine|状态机||yanjie|
## T
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Tensor|张量||yanjie|
|Tensor Core|张量核||yanjie|
|Tensor Shape|张量形状||yanjie|
|Time-Multiplexed|时分复用||yanjie|
|Tile|块||yanjie|
|Tail Latency|尾部延迟||yanjie|
|Trick|技巧||yanjie|
|Temporal locality|时间局部性||yanjie|
|Target Policy|目标策略||xuehui|
|Telemetry|遥测||yanjie|
|Trade Offs|权衡取舍||yanjie|
|Transformer|变换器||yanjie|
|Transformer Engine|变换器引擎||yanjie|
|Trial|试验||quanlu|
|Temperature|温度||yanjie|
|Total Cost of Ownership|总计拥有成本|TCO|yanjie|
|Type System|类型系统||yanjie|
## U
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Union Mount|联合挂载||yanjie|
|Utilization|利用率||yanjie|
|Unsupervised Learning|无监督学习||xuehui|
|Unified Management|统一管理||yanjie|
|Up-front Commitment|预先承诺||yanjie|
## V
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Validation Accuracy|验证精度||quanlu|
|Virtual Machine|虚拟机||yanjie|
|Virtual Memory|虚拟内存||yanjie|
|Vectorization|向量化||yanjie|
|Visual Programming Language|可视化编程语言|VPL|yanjie|
## W
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Weights|权重参数||xiaowu|
|Weight Update|梯度更新||yanjie|
|Warp|束||yanjie|
|Weighted Mean|加权平均||yanjie|
|Warp Voting|束投票||yanjie|
|Warp Divergence|束发散||yanjie|
## X
|英文|中文|缩写|首次提交人|
|---|---|---|---|
## Y
|英文|中文|缩写|首次提交人|
|---|---|---|---|
## Z
|英文|中文|缩写|首次提交人|
|---|---|---|---|
|Zero-Copy|零拷贝||lynex|
# 更新与备注
- |Forward Calculation|前向计算|问题
- yanjie担心fully connected layer问题,待讨论
- 2022.4.12 改版为分字母章节书写,增加新备注项
# 参考链接
|
AI-System/Textbook/术语表.md/0
|
{
"file_path": "AI-System/Textbook/术语表.md",
"repo_id": "AI-System",
"token_count": 8867
}
| 12 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 1.3 深度学习系统组成与生态
如同 Jeff Dean 所描述的那样--过去是深度学习计算机系统和应用的黄金十年 “[A Golden Decade of Deep Learning: Computing Systems & Applications](https://direct.mit.edu/daed/article/151/2/58/110623/A-Golden-Decade-of-Deep-Learning-Computing-Systems)”[<sup>[1]</sup>](#goldenage)。随着深度学习系统的发展,其已经形成了一个较为独立的系统研究方向和开源系统生态。
通过之前深度学习的发展,以及模型,硬件与框架的趋势介绍,我们已经了解了深度学习系统的重要性。那么本章将介绍深度学习系统的设计目标,组成和生态,让读者形成人工智能系统的知识体系,为后续展开每个章节的内容做好铺垫。
- [1.3 深度学习系统组成与生态](#13-深度学习系统组成与生态)
- [1.3.1 深度学习系统的设计目标](#131-深度学习系统的设计目标)
- [1.3.2 深度学习系统的大致组成](#132-深度学习系统的大致组成)
- [1.3.3 深度学习系统生态](#133-深度学习系统生态)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
## 1.3.1 深度学习系统的设计目标
深度学习系统的设计目标我们可以总结为以下几个部分。
- 提供更加高效的编程语言、框架和工具:
- 设计更具表达能力和简洁的神经网络计算原语和编程语言。让用户能够提升开发效率,屏蔽底层细节,更灵活的原语支持。当前深度学习模型除了特定领域模型的算子和流程可以复用(例如,语言模型在自然语言处理领域被广泛作为基础结构,一些库专门提供模型中心供预训练模型共享和微调)。其新结构新算子的设计与开发仍遵循试错(Trial And Error)的方式进行,那么如何灵活表达新的算子,以及算子间的组合形式,算法工程师需要语言,库与框架层所提供相应的功能支持。
- 更直观的编辑、调试和实验工具。让用户可以完整的开发,测试,调整诊断与修复和优化程序,提升所开发深度学习程序的性能与鲁棒性。训练过程不是一蹴而就,其中伴随着不收敛,NaN,内存溢出等算法问题与系统缺陷(Bug),工具与系统本身如何在设计之初就考虑到这点,提供良好的可观测性,可调试性,允许用户注册自定义扩展等支持,是需要工具与系统设计者所需要在系统设计之初就需要提上日程的,否则之后更多是缝缝补补的修复,造成不好的开发体验与不能满足的需求。
- 支持深度学习生命周期中的各个环节,例如,模型压缩、推理、安全、隐私保护等。不仅能构建深度学习模型,能够支持全生命周期的深度学习程序开发,并在系统内对全生命周期进行分析与优化。当前的深度学习工程化场景,已经不是靠灵感一现单一的优化就能迅速取得领先优势,更多的是能否有完善的基础设施,快速复现社区工作,批量验证新的想法进行试错,所以一套好的完善的全流程的生命周期管理能够大幅度提升深度学习算法层面的生产力。
- 提供全面多样的深度学习任务需求的系统支持:
- 除了传统的深度学习训练与推理的支持,还能支持强化学习、自动化机器学习等新的训练范式。除了推理需求之外,训练作业中新的范式层出不穷,例如,需要不断和环境或模拟器交互以获取新数据的强化学习方式,批量大规模提交多作业的自动化机器学习方式等,这些新的范式造成对之前单一支持单模型之外,在多模型层面,训练与推理任务层面产生了新的系统抽象与资源,作业管理需求。
- 提供更强大和可扩展的计算能力。让用户的深度学习程序可扩展并部署于可以并行计算的节点或者集群,应对大数据和大模型的挑战。因为当前深度学习模型不断通过大模型层数更多的模型产生更好的算法层面的效果,促使系统需要支持更大的模型,同时由于企业 IT 基础设施不但完善,能够不断沉淀新的数据,也会伴随着大数据的问题。大模型与大数据促使存储与计算层面系统在摩尔定律失效的大背景下迫切需要通过并行与分布式计算的范式,扩展算力与存储的支持。
- 自动编译优化算法,包括不限于:
- 自动推导计算图:尽可能的通过符号执行或即时编译技术,获取更多的计算图信息,进而数据驱动方式做定制化的优化。
- 根据不同体系结构自动并行化:面对部署场景的多样化体系结构,训练阶段异构硬件的趋势,框架自动的进行算子算法选择,并行化,以期以最为优化的方式在指定硬件配置下,并行化与减少 I/O,逼近硬件提供的性能上限。
- 自动分布式化,并扩展到多个计算节点:面对云与集群场景,如何自动将任务扩展与部署,进而支撑分布式计算,弹性计算,让用户按需使用资源,这些也是云原生背景下人工智能系统所需要考虑和支持的。
- 持续优化:由于深度学习训练作业是迭代且周期较长,给系统以内省优化的机会,即不断监控,不断优化当前系统配置与运行时策略,以期弥补纯静态优化获取信息不足,运行时干扰造成的相比最优化的策略的差距。
- 探索并解决新挑战下的系统设计、实现和演化的问题。例如:动态性的支持,利用稀疏性进行加速优化,混合精度训练与部署,混合训练范式(强化学习),多任务(自动化机器学习)等。
- 提供在更大规模的企业级环境的部署需求:
- 多租环境的训练部署需求:面对多组织,多用户共享集群资源,以及大家迫切使用 GPU 资源的日益增长的需求,如何提供公平,稳定,高效的多租环境也是平台系统需要首先考虑的。
- 跨平台的推理部署需求:面对割裂的边缘侧硬件与软件栈,如何让模型训练一次,跨平台部署到不同软硬件平台,也是推理场景需要解决的重要问题。
- 安全与隐私的需求:由于深度学习模型类似传统程序,接受输入,处理后产生输出,但是相比传统程序,其解释性差,造成更容易产生安全问题,容易被攻击。同时模型本身的重要信息为权重,我们也要注意模型本身的隐私保护。同时如果是企业级环境或公有云环境,会有更高的安全和隐私保护要求。
有了宏观的目标,我们在接下来的章节会进一步介绍,当前的整个生态中整体的人工智能系统的技术栈是如何构成的,整个技术栈中各类人工智能系统处于哪个抽象层次,互相之间的关系是什么。
## 1.3.2 深度学习系统的大致组成
如图 1-3-1 所示,我们大致可以将深度学习系统分为以下方向:
- 开发体验层:负责提供用户前端的编程语言,接口和工具链。本层尽可能让用户表达目标任务与算法,尽量少让用户关注底层实现(例如,通过声明式编程的方式)是提升开发体验的较好的手段,但是过度的抽象会丧失灵活性的表达,在模型发展较快迭代频繁的时期用户还需要体验层兼顾灵活性和可调试性。开发体验层会调用,编排底层框架的接口提供更加简洁的用户开发体验。包括并不限于以下领域:
- 模型构建:卷积,循环神经网络,控制流等基本结构和算子支持与实现。语言的基本语法和框架的 API 接口提供基本算子的支持,当前主要以在 Python 语言内内嵌调用深度学习框架的方式进行深度学习模型的开发,但是也出现控制流在原生语言层与模型中间表达割裂等问题。
- 算法实现:同步与异步优化算法等。算法一般被封装为框架的配置或 API 供用户选择,有些框架也提供拦截接口给用户一定程度灵活性定制自定义算法。
- 流水线和工作流支持:高性能数据加载器等。流水线和工作流是实现模块解耦复用,可视化编程的前提,通过复用与可视化编程可以大幅降低组织内作业书写的门槛。
- 实验规划与配置:批量超参数调优与模型结构搜索等。由于当前模型试错(Trial And Error)的开发模式,让算法工程师在设计模型过程中有大量的超参数与模型结构需要尝试,自动化机器学习工具应运而生。
- 工具链: 模型转换,调试,可视化,类型系统等。就像传统的软件工程中调试器,可视化,类型系统等工具链的支撑,让整个开发过程中,跨平台,跨平台,问题诊断,缺陷验证等得以高效实现,目前深度学习系统领域也不断有类似工具产生以支持整个深度学习工程化实践。
- 生命周期管理:数据读取,训练与推理等流程开发与管理。机器学习领域的 DevOps 也就是 MLOps 的基础工具支持。其可以让重复模块被复用,同时让底层工具有精确的信息进行模块间的调度与多任务的优化,同时让各个环节模块化解耦,独立和更为快速的演进。
- 框架层:负责静态程序分析与计算图构建,编译优化等。框架本身通过提供供用户编程的 API 获取用户表达的模型,数据读取等意图,在静态程序分析阶段完成尽可能的自动前向计算图构建,自动求导补全反向传播计算图,计算图整体编译优化,算子内循环编译优化等。包括并不限于以下领域:
- 计算图构建:静态,动态计算图构建等。不同的框架类型决定了其使用静态还是动态图进行构建,静态图有利于获取更多信息做全图优化,动态图有利于调试。
- 自动求导:高效与高精度自动求导等。由于深度学习模型中大部分算子较为通用,框架提前封装好算子的自动求导函数,待用户触发训练过程自动透明的进行全模型的自动求导,以支持梯度下降等训练算法需要的权重梯度数据的获取。
- 中间表达构建:多层次中间表达等。通过构建深度学习模型的中间表达及多层中间表达,让模型本身可以更好的被编译器编译生成高效的后端代码。
- 编译优化:内核融合等。编译器或框架根据算子的语义,对适合进行内核融合(例如,多个算子和并为一个算子)进行融合,降低内核启动与访存代价。同时深度学习编译器还支持循环优化等类似传统编译器的优化策略和面向深度学习的优化策略(例如,牺牲一定精度的计算图等价代换等)。
- 运行时:负责系统的运行时的系统动态调度与优化。当获取的深度学习模型计算图部署于单卡,多卡或分布式的环境,运行期的框架需要对整体的计算图按照执行顺序调度算子与任务的执行,多路复用资源,做好内存等资源的分配与释放。包括并不限于以下部分。
- 优化器:运行时即时(Just-in-Time)优化,内省(Introspective)优化等。运行时根据硬件,隐藏的软件栈信息,数据分布等只能运行时所获取的信息,进一步对模型进行优化。
- 调度器:算子并行与调度。根据设备提供的软件栈和硬件调度策略,以及模型的算子间并行机会,进行类装箱的并行调度。
- 执行器:多线程等。算子执行过程中,如果特定设备没有做过多的运行时调度与干预,框架可以设计高效的运行时算子内的线程调度策略。
- 资源管理与硬件体系结构:负责程序的执行,互联与加速。在更广的层面,作业与作业间需要平台提供调度,运行期资源分配与环境隔离。包括并不限于以下部分:
- 硬件接口抽象:GPU,CPU,FPGA 和 ASIC 等。统一的硬件接口抽象可以复用编译优化策略,让优化与具体底层设备和体系结构适当解耦。
- 资源池化管理与调度:异构资源集群管理等。将服务器资源池化,通过高效的调度器结合深度学习作业特点和异构硬件拓扑进行高效调度。
- 可扩展的网络栈:RDMA,InifiBand,NVLink 等。提供更高效的加速器到加速器的互联(例如,NVLink 等),更高的带宽,更灵活的通信原语与高效的通信聚合算法(例如,AllReduce 算法)。
<center><img src="./img/3/3-3-1-dl-sys-stack.png" /></center>
<center>图 1.3.1 深度学习系统的大致组成</center>
我们将图 1.3.1 中的大致组成可以进一步细化为图 1.3.2 深度学习系统详图,由于篇幅所限其中还有很多系统技术点与方向没有罗列,我们将在后续对应章节详细介绍。我们可以看到深度学习系统整体的技术栈包罗万象且复杂,且由硬件到软件层有多个层次,形成系统化和层次化看系统的视角对未来理解程序是如何在底层系统执行,并做系统性能预估与技术选型至关重要。
<center><img src="./img/3/3-3-3-deeplearningsysstack.png" /></center>
<center>图 1.3.2 深度学习系统详图</center>
## 1.3.3 深度学习系统生态
除了以上重要的深度学习系统构成之外,随着人工智能应用范围越来越广,我们还可以将深度学习系统推广到更广泛的构成与生态。如图 1.3.3 所示,其生态包含以下领域:
- 核心系统软硬件:通过核心系统软硬件,对底层的基础架构进行抽象,向上层提供计算,存储,网络等资源池,按需给待执行的深度学习作业分配与隔离出指定规格的资源,执行深度学习作业。其面对的很多系统问题类似传统操作系统所解决的问题。其完成底层硬件的抽象与资源隔离后,只需要用户的应用提交到系统中被执行和管理。
- 深度学习任务运行和优化环境:提供更高效的运行时,资源分配隔离与任务调度。当深度学习作业启动,深度学习框架或运行时提供任务调度(例如,内核粒度任务等),内存管理,I/O 管理。未来随着作业愈发复杂,异构硬件厂商提供基本原语的支持,提供作业的多路复用(Multiplexing)等更高效的资源共享,打破设备商封装的运行时库的局限性。
- 通用资源管理和调度系统:提供公平,高效率和稳定的多租户深度学习平台支持。性能并不是系统设计本身的唯一考虑因素,在多租环境,还要兼顾公平,效率和稳定性,为用户提供更加可靠,易用的平台。
- 新型硬件及相关高性能网络和计算栈:随着加速器技术不断发展,网络互连技术提供更高的带宽,硬件层提供更高的算力与带宽支持更大规模的模型训练与推理。系统需要更加灵活的支持在不同的硬件和规格假设下,不同模型如何静态与动态结合的自动优化与高性能执行。同时由于硬件的发展趋势不同,潜在可能会让性能瓶颈产生变化,系统设计需要有前瞻性的判断新的系统设计机会。
- 深度学习算法和框架:通过深度学习算法与框架,用户可以表达模型结构设计,训练配置与工作流等需求,就像给深度学习提供了一套特定领域的“编程语言”,并且提供了相应的编译器及工具链可以翻译成运行时特定硬件环境可以执行的指令。
- 新型通用 AI 算法与模型的支持:提供更多样的模型支持,支持和推进模型效果的提升。支持新的算子(例如,控制流等),更加灵活的模型结构(例如,图模型等),模型的融合(例如,多专家系统等)支持。
- 多种深度学习框架的支持与进化:在深度学习领域,由于多种框架与工具的存在,如何为用户提供更多样的框架的统一支持与优化对提升用户体验,复用已有代码有很强的实用价值。
- 深度神经网络编译架构及优化:在编译期,通过静态分析与优化的方法,提供更优化的后端代码生成,提升模型的性能,正确性等。类似传统编译器,深度学习模型的计算图可以通过融合等手段优化,算子内可以应用循环优化等手段加速。同时面向深度学习模型本身的特点,也逐渐有工作利用一些等价和非等价计算图转换进行优化。
- 更广泛的人工智能系统生态:随着深度学习发展,更大的搜索空间,由于动态性造成运行时才能获取的信息与数据,模型安全与隐私,部署推理的多样化需求变得日益迫切,我们需要考虑除训练以外更多的人工智能系统问题。
- 机器学习新模式(例如,强化学习):提供新训练范式的灵活执行,部署与进程间同步支持等。例如,由于训练数据可能需要以与环境交互的过程中才能获取,造成需要通过强化学习等新的训练范式进行模型训练,需要设计新的系统以支持灵活的训练范式。
- 自动机器学习(例如自动化机器学习):当用户进行试错(Trial And Error)进行超参数搜索的搜索空间达到一定量级,用户通过自动化机器学习工具与算法可以更高效的进行模型的探索与训练。自动化机器学习系统可以提供多任务的高效管理与调度支持,提供支持搜索空间定义的程序语言等。
- 安全(Security)与隐私(Privacy):数据与模型,类似传统的信息安全要保护的数据与程序,除了数据本身,模型类似传统程序本身的安全与隐私问题提出了新的挑战。我们需要思考人工智能模型与应用的安全与隐私保护支持。
- 模型推理(Inference)、压缩(Compression)与优化:如果我们不需要训练,只需要执行前向传播过程,则是用户开始使用模型进行推理,基于深度学习特有性质进行高效的模型部署推理是除我们关注的训练之外的很重要的系统问题。在推理部署前,深度学习模型本身还可以通过模型压缩,量化等手段精简计算量与内存消耗,加速模型的部署。模型推理相比训练有更低的延迟要求,更严苛的资源供给,同时由于不需要求解梯度和训练,有更低的精度要求等。面对新的目标和约束,如何设计面向推理的深度学习系统提出了新的挑战和机会。
<center><img src="./img/3/3-3-2-dl-ecosystem.png" /></center>
<center>图 1.3.3 深度学习系统生态</center>
我们将在后续章节围绕核心系统软硬件,深度学习算法和框架,以及更广泛的人工智能系统生态中的重要内容展开介绍。
## 小结与讨论
本章我们主要围绕深度学习系统的组成和生态进行介绍,在初学人工智能系统时,我们可能会只关注框架,但当我们把系统放眼到整个基础架构,我们会发现当前深度学习系统涉及很多方面,类似传统的操作系统(异构资源管理系统),编译器(深度学习编译优化),Web 服务(推理系统),软件安全(模型安全)等问题在深度学习系统的场景中仍然会遇到,一些经典的理论与系统设计在今天仍然发挥着重要的影响。
在接下来的章节我们将通过一个实例介绍整体的深度学习系统的技术栈,快速了解深度学习系统的核心作用。
请读者思考深度学习系统中有哪些新挑战和问题是传统系统所没有遇到的?
## 参考文献
<div id="goldenage"></div>
1. [Jeffrey Dean; A Golden Decade of Deep Learning: Computing Systems & Applications. Daedalus 2022; 151 (2): 58–74.](https://direct.mit.edu/daed/article/151/2/58/110623/A-Golden-Decade-of-Deep-Learning-Computing-Systems)
|
AI-System/Textbook/第1章-人工智能系统概述/1.3-深度学习系统组成与生态.md/0
|
{
"file_path": "AI-System/Textbook/第1章-人工智能系统概述/1.3-深度学习系统组成与生态.md",
"repo_id": "AI-System",
"token_count": 15120
}
| 13 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
## 2.9 Transformer模型
Transformer 模型在 NLP/CV 领域都取得了很令人瞩目的成绩,因此在本小节中,我们来简要介绍一下其原理。作为铺垫,先要从序列到序列的模型说起,然后提出注意力机制,再过渡到主题。
- [2.9 Transformer模型](#29-transformer模型)
- [2.9.1 序列到序列模型(Seq2Seq)](#291-序列到序列模型seq2seq)
- [编码-解码结构(Encoder-Decoder)](#编码-解码结构encoder-decoder)
- [编码过程](#编码过程)
- [解码过程](#解码过程)
- [2.9.2 注意力机制(Attention)](#292-注意力机制attention)
- [计算区域](#计算区域)
- [所用信息](#所用信息)
- [模型结构](#模型结构)
- [2.9.3 Transformer](#293-transformer)
  - [Embedding 层](#enbedding-层)
- [位置编码](#位置编码)
- [编码器层](#编码器层)
- [注意力机制](#注意力机制)
- [多头注意力机制](#多头注意力机制)
- [前馈全连接层](#前馈全连接层)
- [归一化层](#归一化层)
- [掩码及其作用](#掩码及其作用)
- [解码器层](#解码器层)
- [模型输出](#模型输出)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
### 2.9.1 序列到序列模型(Seq2Seq)
序列到序列模型在自然语言处理中应用广泛,是重要的模型结构。本小节对序列到序列模型的提出和结构进行简要介绍,没有涉及代码实现部分。
前面章节讲到的RNN模型和实例,都属于序列预测问题,或是通过序列中一个时间步的输入值,预测下一个时间步输出值(如二进制减法问题);或是对所有输入序列得到一个输出作为分类(如名字分类问题)。它们的共同特点是:输出序列与输入序列等长,或输出长度为1。
还有一类序列预测问题,以序列作为输入,需要输出也是序列,并且输入和输出序列长度不确定,并不断变化。这类问题被称为序列到序列(Sequence-to-Sequence, Seq2Seq)预测问题。
序列到序列问题有很多应用场景:比如机器翻译、问答系统(QA)、文档摘要生成等。简单的RNN或LSTM结构无法处理这类问题,于是科学家们提出了一种新的结构 —— 编码解码(Encoder-Decoder)结构。
#### 编码-解码结构(Encoder-Decoder)
图 2.9.1 为Encoder-Decoder结构的示意图。
<center>
<img src="./img/encoder-decoder.png" />
图 2.9.1 Encoder-Decoder结构示意图
</center>
Encoder-Decoder结构的处理流程非常简单直观。
- 示意图中,输入序列和输出序列分别为中文语句和翻译之后的英文语句,它们的长度不一定相同。通常会将输入序列嵌入(Embedding)成一定维度的向量,传入编码器。
- Encoder为编码器,将输入序列编码成为固定长度的状态向量,通常称为语义编码向量。
- Decoder为解码器,将语义编码向量作为原始输入,解码成所需要的输出序列。
在具体实现中,编码器、解码器可以有不同选择,可自由组合。常见的选择有CNN、RNN、GRU、LSTM等。
应用Encoder-Decoder结构,可构建出序列到序列模型。
Seq2Seq模型有两种常见结构。我们以RNN网络作为编码和解码器来进行讲解。
图 2.9.2, 2.9.3 分别展示了这两种结构。
<center>
<img src="./img/Seq2Seq_structure1.png" />
图 2.9.2 Seq2Seq结构一
</center>
<center>
<img src="./img/Seq2Seq_structure2.png" />
图 2.9.3 Seq2Seq结构二
</center>
#### 编码过程
两种结构的编码过程完全一致。
输入序列为 $x=[x1, x2, x3]$。
RNN网络中,每个时间节点隐层状态为:
$$
h_t = f(h_{t-1}, x_t), \quad t \in [1,3]
$$
编码器中输出的语义编码向量可以有三种不同选取方式,分别是:
$$
\begin{aligned}
c &= h_3 \\\\
c &= g(h_3) \\\\
c &= g(h1, h2, h3) \\\\
\end{aligned}
$$
#### 解码过程
两种结构解码过程的不同点在于,语义编码向量是否应用于每一时刻输入。
第一种结构,每一时刻的输出$y_t$由前一时刻的输出$y_{t-1}$、前一时刻的隐层状态$h^\prime_{t-1}$和$c$共同决定,即: $y_t = f(y_{t-1}, h^\prime_{t-1}, c)$。
第二种结构,$c$只作为初始状态传入解码器,并不参与每一时刻的输入,即:
$$
\begin{cases}
y_1 = f(y_0, h^\prime_{0}, c) \\\\
y_t = f(y_{t-1}, h^\prime_{t-1}), t \in [2,4]
\end{cases}
$$
以上是序列到序列模型的结构简介。
### 2.9.2 注意力机制(Attention)
Attention(注意力)机制其实来源于人类的认识认知能力。比如当人们观察一个场景或处理一件事情时,人们往往会关注场景的显著性物体,处理事情时则希望抓住主要矛盾。注意力机制使得人类能够关注事物的重要部分,忽略次要部分,更高效的处理所面临的各种事情。
注意力机制在NLP领域被真正的发扬光大,其具有参数少、速度快、效果好的特点,如2018年的BERT、GPT 领跑各项 NLP 任务效果。由此在此领域,transformer和attention结构受到了极大的重视。
Attention的思路非常的简单,即一个加权求和过程,其原理可以表述如下:
<center>
<img src='./img/attention.png'/>
图 2.9.4 注意力机制示意图
</center>
第一步:通过Query和Key计算权重;
第二步:使用权重对Value进行加权求和从而得到Attention Value。
注意力机制的三大优点是:
- 参数少
模型复杂度跟 CNN、RNN 相比,复杂度更小,参数也更少。所以对算力的要求也就更小。
- 速度快
Attention 解决了 RNN 不能并行计算的问题。Attention机制每一步计算不依赖于上一步的计算结果,因此可以和CNN一样并行处理。
- 效果好
在 Attention 机制引入之前,有一个问题大家一直很苦恼:长距离的信息会被弱化,就好像记忆能力弱的人,记不住过去的事情是一样的。
Attention 的使用形式可以具体分类为三大类,每个大类下面又有几个小类。
#### 计算区域
- Soft Attention
这是比较常见的Attention方式,对所有key求权重概率,每个key都有一个对应的权重,是一种全局的计算方式(也可以叫Global Attention)。这种方式比较理性,参考了所有key的内容,再进行加权。但是计算量可能会比较大一些。
- Hard Attention,
这种方式是直接精准定位到某个key,其余key就都不管了,相当于这个key的概率是1,其余key的概率全部是0。因此这种对齐方式要求很高,要求一步到位,如果没有正确对齐,会带来很大的影响。另一方面,因为不可导,一般需要用强化学习的方法进行训练。(或者使用gumbel softmax之类的)
#### 所用信息
- Local Attention
这种方式其实是以上两种方式的一个折中,对一个窗口区域进行计算。先用Hard方式定位到某个地方,以这个点为中心可以得到一个窗口区域,在这个小区域内用Soft方式来算Attention。
- General Attention
这种方式利用到了外部信息,常用于需要构建两段文本关系的任务,query一般包含了额外信息,根据外部query对原文进行对齐。
- Self Attention
这种方式也可以称为Self Attention,因为其只是用了内部信息,key和value以及query只和输入原文有关,在self attention中,key=value=query。既然没有外部信息,那么在原文中的每个词可以跟该句子中的所有词进行Attention计算,相当于寻找原文内部的关系。
#### 模型结构
- 单层Attention
这是比较普遍的做法,用一个query对一段原文进行一次attention。
- 多层Attention
一般用于文本具有层次关系的模型,假设我们把一个document划分成多个句子,在第一层,我们分别对每个句子使用attention计算出一个句向量(也就是单层attention);在第二层,我们对所有句向量再做attention计算出一个文档向量(也是一个单层attention),最后再用这个文档向量去做任务。
- 多头Attention
这是Attention is All You Need中提到的multi-head attention,用到了多个query对一段原文进行了多次attention,每个query都关注到原文的不同部分,相当于重复做多次单层attention,最后再把这些结果拼接起来。
### 2.9.3 Transformer
2017年谷歌在一篇名为《Attention Is All You Need》的论文中,提出了一个基于attention(自注意力机制)结构来处理序列相关的问题的模型,名为Transformer。Transformer在很多不同nlp任务中获得了成功,例如:文本分类、机器翻译、阅读理解等。在解决这类问题时,Transformer模型摒弃了固有的定式,并没有用任何CNN或者RNN的结构,而是使用了Attention注意力机制,自动捕捉输入序列不同位置处的相对关联,善于处理较长文本,并且该模型可以高度并行地工作,训练速度很快。见图 2.9.5。
<center>
<img src='./img/transformer.png' height='600'/>
图 2.9.5 Transformer模型结构图
</center>
其最大的特点是没有使用CNN、RNN,仅使用Attention实现这一模型。与Seq2Seq一样,模型也分为encoder和decoder部分,encoder主要使用了multi-head的self-attention,而decoder则多了一层attention,第一层multi-head self-attention是将之前生成的输出作为输入,再将该层输出作为query输入到下一层attention中,下一层attention的key和value来自于encoder。
第一级decoder的key, query, value均来自前一层decoder的输出,但加入了Mask操作,即我们只能attend到前面已经翻译过的输出的词语,因为翻译过程我们当前还并不知道下一个输出词语,这是我们之后才会推测到的。
而第二级decoder也被称作encoder-decoder attention layer,即它的query来自于之前一级的decoder层的输出,但其key和value来自于encoder的输出,这使得decoder的每一个位置都可以attend到输入序列的每一个位置。
总结一下,k和v的来源总是相同的,q在encoder及第一级decoder中与k,v来源相同,在encoder-decoder attention layer中与k,v来源不同。
#### Enbedding 层
Embedding层的作用是将某种格式的输入数据,例如文本,转变为模型可以处理的向量表示,来描述原始数据所包含的信息。Embedding层输出的可以理解为当前时间步的特征,如果是文本任务,这里就可以是Word Embedding,如果是其他任务,就可以是任何合理方法所提取的特征。构建Embedding层的代码很简单,核心是借助torch提供的nn.Embedding。
#### 位置编码
Positional Encodding位置编码的作用是为模型提供当前时间步的前后出现顺序的信息。因为Transformer不像RNN那样的循环结构有前后不同时间步输入间天然的先后顺序,所有的时间步是同时输入,并行推理的,因此在时间步的特征中融合进位置编码的信息是合理的。位置编码可以有很多选择,可以是固定的,也可以设置成可学习的参数。这里,我们使用固定的位置编码。
此外有一个点刚刚接触Transformer的同学可能不太理解,编码器和解码器两个部分都包含输入,且两部分的输入的结构是相同的,只是推理时的用法不同,编码器只推理一次,而解码器是类似RNN那样循环推理,不断生成预测结果的。
怎么理解?假设我们现在做的是一个法语-英语的机器翻译任务,想把Je suis étudiant翻译为I am a student。那么我们输入给编码器的就是时间步数为3的embedding数组,编码器只进行一次并行推理,即获得了对于输入的法语句子所提取的若干特征信息。而对于解码器,是循环推理,逐个单词生成结果的。最开始,由于什么都还没预测,我们会将编码器提取的特征,以及一个句子起始符传给解码器,解码器预期会输出一个单词I。然后有了预测的第一个单词,我们就将I输入给解码器,会再预测出下一个单词am,再然后我们将I am作为输入喂给解码器,以此类推直到预测出句子终止符完成预测。
#### 编码器层
编码器作用是用于对输入进行特征提取,为解码环节提供有效的语义信息整体来看编码器由N个编码器层简单堆叠而成。每个编码器层由两个子层连接结构组成:第一个子层包括一个多头自注意力层和规范化层以及一个残差连接;第二个子层包括一个前馈全连接层和规范化层以及一个残差连接
#### 注意力机制
人类在观察事物时,无法同时仔细观察眼前的一切,只能聚焦到某一个局部。通常我们大脑在简单了解眼前的场景后,能够很快把注意力聚焦到最有价值的局部来仔细观察,从而作出有效判断。或许是基于这样的启发,大家想到了在算法中利用注意力机制。注意力计算:它需要三个指定的输入Q(query),K(key),V(value),然后通过下面公式得到注意力的计算结果。
可以这么简单的理解,当前时间步的注意力计算结果,是一个组系数 * 每个时间步的特征向量value的累加,而这个系数,通过当前时间步的query和其他时间步对应的key做内积得到,这个过程相当于用自己的query对别的时间步的key做查询,判断相似度,决定以多大的比例将对应时间步的信息继承过来。
#### 多头注意力机制
刚刚介绍了attention机制,在搭建EncoderLayer时候所使用的Attention模块,实际使用的是多头注意力,可以简单理解为多个注意力模块组合在一起。
多头注意力机制的作用:这种结构设计能让每个注意力机制去优化每个词汇的不同特征部分,从而均衡同一种注意力机制可能产生的偏差,让词义拥有来自更多元表达,实验表明可以从而提升模型效果。
举个更形象的例子,bank是银行的意思,如果只有一个注意力模块,那么它大概率会学习去关注类似money、loan贷款这样的词。如果我们使用多个多头机制,那么不同的头就会去关注不同的语义,比如bank还有一种含义是河岸,那么可能有一个头就会去关注类似river这样的词汇,这时多头注意力的价值就体现出来了。
#### 前馈全连接层
EncoderLayer 中另一个核心的子层是 Feed Forward Layer,我们这就介绍一下。在进行了 Attention 操作之后,encoder 和 decoder 中的每一层都包含了一个全连接前向网络,对每个 position 的向量分别进行相同的操作,包括两个线性变换和一个ReLU激活输出。
Feed Forward Layer 其实就是简单的由两个前向全连接层组成,核心在于,Attention模块每个时间步的输出都整合了所有时间步的信息,而Feed Forward Layer每个时间步只是对自己的特征的一个进一步整合,与其他时间步无关。
#### 归一化层
归一化层的作用:它是所有深层网络模型都需要的标准网络层,因为随着网络层数的增加,通过多层的计算后输出可能开始出现过大或过小的情况,这样可能会导致学习过程出现异常,模型可能收敛非常慢。因此都会在一定层后接归一化层进行数值的归一化,使其特征数值在合理范围内。Transformer中使用的normalization手段是layer norm。
#### 掩码及其作用
掩码:掩代表遮掩,码就是我们张量中的数值,它的尺寸不定,里面一般只有0和1;代表位置被遮掩或者不被遮掩。掩码的作用:在 transformer 中,掩码主要的作用有两个,一个是屏蔽掉无效的 padding 区域,一个是屏蔽掉来自“未来”的信息。Encoder 中的掩码主要是起到第一个作用,Decoder 中的掩码则同时发挥着两种作用。屏蔽掉无效的 padding 区域:我们训练需要组 batch 进行,就以机器翻译任务为例,一个 batch 中不同样本的输入长度很可能是不一样的,此时我们要设置一个最大句子长度,然后对空白区域进行 padding 填充,而填充的区域无论在 Encoder 还是Decoder 的计算中都是没有意义的,因此需要用mask进行标识,屏蔽掉对应区域的响应。屏蔽掉来自未来的信息:我们已经学习了attention 的计算流程,它是会综合所有时间步的计算的,那么在解码的时候,就有可能获取到未来的信息,这是不行的。因此,这种情况也需要我们使用 mask 进行屏蔽。现在还没介绍到 Decoder,如果没完全理解,可以之后再回过头来思考下
#### 解码器层
解码器的作用:根据编码器的结果以及上一次预测的结果,输出序列的下一个结果。整体结构上,解码器也是由N个相同层堆叠而成。
每个解码器层由三个子层连接结构组成,第一个子层连接结构包括一个多头自注意力子层和规范化层以及一个残差连接,第二个子层连接结构包括一个多头注意力子层和规范化层以及一个残差连接,第三个子层连接结构包括一个前馈全连接子层和规范化层以及一个残差连接。
解码器层中的各个子模块,如,多头注意力机制,规范化层,前馈全连接都与编码器中的实现相同。
有一个细节需要注意,第一个子层的多头注意力和编码器中完全一致,第二个子层,它的多头注意力模块中,query来自上一个子层,key 和 value 来自编码器的输出。可以这样理解,就是第二层负责,利用解码器已经预测出的信息作为query,去编码器提取的各种特征中,查找相关信息并融合到当前特征中,来完成预测。
#### 模型输出
输出部分就很简单了,每个时间步都过一个 线性层 + softmax层。
线性层的作用:通过对上一步的线性变化得到指定维度的输出,也就是转换维度的作用。转换后的维度对应着输出类别的个数,如果是翻译任务,那就对应的是文字字典的大小。
## 小结与讨论
请读者参考已有的 Transformer 的实现,使用 Pytorch 来搭建一个网络,训练一个很小的任务,比如,输入 [1,2,3,4,5],要求网络输出 [2,3,4,5],即去掉第一个字符。
## 参考文献
1. Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin, Attention is all you need, arXiv:1706.03762
2. The Annotated Transformer, http://nlp.seas.harvard.edu/2018/04/03/attention.html
|
AI-System/Textbook/第2章-神经网络基础/2.9-注意力机制和Transformer.md/0
|
{
"file_path": "AI-System/Textbook/第2章-神经网络基础/2.9-注意力机制和Transformer.md",
"repo_id": "AI-System",
"token_count": 12826
}
| 14 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 8.4 推理系统的运行期优化
<center> <img src="./img/4/8-4-6-runtime.png" /></center>
<center>图 8.4.1 推理系统运行期优化 </center>
推理系统类似传统的 Web 服务,需要应对不断增加的用户请求数量,提高吞吐量(Throughput),提升资源利用率(Utilization)。如图所示,本小节将围绕推理系统中涉及到的运行期(Runtime)优化,以及吞吐和效率问题进行展开。
- [8.4 推理系统的运行期优化](#84-推理系统的运行期优化)
- [8.4.1 推理系统的吞吐量](#841-推理系统的吞吐量)
- [8.4.2 加速器模型并发执行](#842-加速器模型并发执行)
- [8.4.3 动态批尺寸](#843-动态批尺寸)
- [8.4.4 多模型装箱(Bin Packing)](#844-多模型装箱bin-packing)
- [8.4.5 内存分配策略调优](#845-内存分配策略调优)
- [8.4.6 深度学习模型内存分配算法实验与模拟研究](#846-深度学习模型内存分配算法实验与模拟研究)
- [(1) 数据获取](#1-数据获取)
- [(2) 评测指标设定](#2-评测指标设定)
- [(3) 算法实现与评测](#3-算法实现与评测)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
## 8.4.1 推理系统的吞吐量
推理系统不仅要追求低延迟,在服务客户端请求的过程中,要提供高吞吐量请求服务的支持。推理系统需要高吞吐的目的:
- 应对突发的请求数量暴增
- 不断扩展的用户和设备的需求
推理系统达到高吞吐的常见优化策略有:
- 利用加速器并行
- 批处理请求(Request)
- 利用优化的 BLAS 矩阵运算库,SIMD 指令和 GPU 等加速器加速,提升利用率
- 自适应批尺寸(Batch Size)
- 多模型装箱使用加速器
- 扩展到多模型副本(Replica)部署
同时推理运行期的内存管理也会影响延迟和吞吐性能,我们在之后也会展开介绍。
在接下来的内容中,我们将介绍增加模型推理吞吐量的常用策略。
## 8.4.2 加速器模型并发执行
加速器的低效率使用常常由于所执行的负载的运算量不够高或者由于等待请求或 I/O 等造成资源空置和浪费。如图 8.4.2 所示,如果设备(Device)中只部署了单个模型,由于等待批处理请求,可能造成 GPU 空闲。
<center> <img src="./img/3/8-3-2-lowutil.png" /></center>
<center>图 8.4.2 加速器的低效率使用</center>
为了应对,单加速器运行多模型,推理系统可以通过时分复用策略,并发(Concurrent)执行模型[<sup>[1]</sup>](#clipper),如图 8.4.3 所示,将等待时的计算资源分配给其他模型进行执行,提升整体的推理吞吐量(Throughput)和设备利用率(Utilization)。
<center> <img src="./img/3/8-3-3-multimodel.png" /></center>
<center>图 8.4.3 并发执行提升加速器的效率使用</center>
例如,我们可以通过下面伪代码实例进行模拟,我们启动两个进程,每个进程内不断的监测 GPU 利用率,如果利用率低于一定阈值,触发执行推理任务,此实例只是为了说明当前执行机制,没有采用中心化的调度方式。
```python
from multiprocessing import Process
import os
import time
...

def profile_gpu_util():
    # Query the current GPU utilization via NVML (implementation elided).
    ...
    return util

def model_inference(model_path):
    # Run one round of deep-learning inference for the given model (elided).
    ...
    return

def model_task(model_path):
    ...
    while True:
        # Only trigger inference while the device is under-utilized.
        if profile_gpu_util() < threshold:
            # Run model inference.
            model_inference(model_path)
        # Sleep between utilization probes. Must go through the imported
        # `time` module: a bare sleep(interval) raises NameError here.
        time.sleep(interval)

if __name__ == '__main__':
    # Create one inference process per model.
    p1 = Process(target=model_task, args=('model1',))
    p2 = Process(target=model_task, args=('model2',))
    # Start both processes before joining either one; the original
    # start/join interleaving (p1.start(); p1.join(); p2.start(); ...)
    # ran the two models sequentially, defeating the point of the demo.
    p1.start()
    p2.start()
    p1.join()
    p2.join()
```
***经典回顾***
[并发计算(Concurrent Computing)](https://en.wikipedia.org/wiki/Concurrent_computing):“并发计算是多个计算在重叠的时间段内同时执行,而不是完全顺序执行的计算模式。并发计算是模块化编程的一种范例,整体计算任务被分解为可以同时执行的子计算任务。并发计算领域的先驱者包括 Edsger Dijkstra、Per Brinch Hansen 和 C.A.R. Hoare 等。并发计算的概念经常与并行计算(Parallel Computing)概念混淆,因为两者在很多资料被描述为在同一时间段内执行的多个进程。在并行计算中,执行发生在同一物理时刻:例如,在多处理器机器的不同处理器上,同时刻执行多个任务。相比之下,并发计算由重叠的进程生命周期组成,但执行不必在同一时刻发生,例如单核交替执行两个任务,一个任务I/O的时候让出计算核给另一个任务。”
## 8.4.3 动态批尺寸
例如,如下图所示,NVIDIA 所做的在 V100 上的推理性能基准测试[<sup>[2]</sup>](#nvperf),从图中可以看到,随着批尺寸(Batch Size)不断增加,模型推理的吞吐量在不断上升,但同时推理延迟(Latency)在下降。
<center> <img src="./img/3/8-3-1-throughput.png" /></center>
<center>图 8.4.4 NVIDIA深度学习推理性能测试 <a href="https://developer.nvidia.com/deep-learning-performance-training-inference">图片引用 NVIDIA 文档</a> </center>
由于通过提升批尺寸(Batch Size)可以提升吞吐量[<sup>[1]</sup>](#clipper),对于较高请求数量和频率的场景,通过大的批次可以提升吞吐量。但是推理系统要注意,没有免费的午餐,随着吞吐量上升的还有延迟,推理系统推理在动态调整批尺寸时需要满足一定的延迟约束。
优化问题定义:
$$max_{BatchSize}\{Throughput(BatchSize)\}$$
$$max_{BatchSize}: 在批尺寸变量上求最大化$$
$$Throughput: 吞吐量$$
$$BatchSize: 批尺寸$$
约束:
$$Latency(BatchSize) + Overhead(BatchSize) \leq LatencySLA $$
$$Latency: 延迟$$
$$Overhead: 其他开销(例如,组合,等待输入数据达到指定批尺寸)$$
$$LatencySLA: 延迟服务等级协议(例如,100ms)$$
动态批处理尺寸(Batch Size)的尺寸增长和减少,在相关工作 [Clipper](https://www.usenix.org/system/files/conference/nsdi17/nsdi17-crankshaw.pdf)[<sup>[1]</sup>](#clipper) 中有借鉴 [Additive Increase Multiplicative Decrease (AIMD)](https://en.wikipedia.org/wiki/Additive_increase/multiplicative_decrease) 策略。
***经典回顾***
[加性增加/乘性减少(AIMD)](https://en.wikipedia.org/wiki/Additive_increase/multiplicative_decrease) 算法:“是一种反馈控制算法,其被应用在TCP拥塞控制中。AIMD 将没有拥塞时拥塞窗口的线性增长与检测到拥塞时的指数减少相结合。使用 AIMD 拥塞控制的多个流最终将收敛到均衡使用共享链路。”
AIMD 在动态批尺寸中使用的策略:
- 加性增加(Addictive Increase):
- 将批次大小累加增加固定数量,直到处理批次的延迟超过目标延迟为止。
- 乘性减少(Multiplicative Decrease):
- 当达到后,执行一个小的乘法回退。例如,将批次大小减少 10%。
- 因为最佳批次大小不会大幅波动,所以使用的退避常数要比其他应用场景使用的AIMD 方案小得多。
接下来我们以 NVIDIA Triton 推理服务器为例看实际系统中支持的动态批尺寸功能。NVIDIA Triton 支持[动态批尺寸器(Dynamic Batcher)](https://github.com/triton-inference-server/server/blob/main/docs/model_configuration.md#dynamic-batcher)[<sup>[3]</sup>](#triton),动态批处理允许服务器组合推理请求,从而动态创建批处理。创建一批请求通常会增加吞吐量。动态批处理器应该用于无状态模型。动态创建的批次分布到为模型配置的所有模型实例。一般可以通过下面的流程实施:
以下步骤([参考 Triton 文档](https://github.com/triton-inference-server/server/blob/main/docs/model_analyzer.md))是为每个模型调整动态批处理器的推荐过程。其还推荐,可以使用[模型分析器](https://github.com/triton-inference-server/server/blob/main/docs/model_analyzer.md)自动搜索不同的动态批处理器配置。
- 确定模型的最大批量大小 max_batch_size 。
- 将以下内容添加到模型配置中以启用动态批处理器。默认策略是:动态批处理器将创建尽可能大的批次并且成批时不会耽误(和下面介绍的此配置相关 max_queue_delay_microseconds),直到最大批次大小(max_batch_size) 。
```yml
dynamic_batching { }
```
- 使用性能分析器确定默认动态批处理器配置提供的延迟和吞吐量,相比于上面提到的AIMD 策略,当前为人工介入,读者也可以将AIMD的思路应用于配置批尺寸的实验尝试中。根据性能约束,确定合适的批尺寸,之后再进行配置。
如果默认配置导致延迟值在您的延迟预算范围内,请尝试以下一种或两种方法来权衡增加的延迟以增加吞吐量:
- 增加最大批量大小。max_batch_size 属性表示模型支持的最大批量大小,可用于 Triton 可以利用的批处理类型。
- 将批处理延迟(Batch Delay)max_queue_delay_microseconds 设置为非零值,动态批处理器可以配置为允许请求在调度器中延迟有限的时间,以允许其他请求加入动态批处理。尝试增加延迟值直到超过延迟预算以查看对吞吐量的影响。
- 大多数模型不应使用首选批尺寸(Preferred Batch Sizes)。仅当该批大小导致性能明显高于其他批大小时,才应配置首选批大小。
preferred_batch_size 属性指示动态批处理器应尝试创建的批处理大小。
下面的[代码实例(参考 Triton文档)](https://github.com/triton-inference-server/server/blob/main/docs/model_configuration.md#preferred-batch-sizes)配置 Triton 应用动态批尺寸,并且配置首选批尺寸(Preferred Batch Sizes)为 4 或者 8。
```yml
dynamic_batching {
preferred_batch_size: [ 4, 8 ]
}
```
## 8.4.4 多模型装箱(Bin Packing)
在延迟服务等级协议(SLA)约束下,模型在指定的GPU下按最大吞吐量进行分配,但是可能仍有空闲资源,造成加速器的低效率使用。
如图所示,有些设备(Device)上的算力较高,部署的模型运算量又较小,使得设备上可以装箱(Bin Packing)多个模型,共享使用设备。
<center> <img src="./img/3/8-3-4-fragments.png" /></center>
<center>图 8.4.5 模型没有用满GPU产生空闲GPU资源</center>
如图所示,推理系统可以通过[最佳匹配(Best Fit)](https://en.wikipedia.org/wiki/Best-fit_bin_packing)策略装箱(Bin Packing)模型,将碎片化的模型(例如,model1和model2)请求由共享的设备进行推理。这样不仅提升了推理系统的吞吐(Throughput),也提升了设备的利用率(Utilization)。装箱在数据中心资源调度中是经典与传统的问题,我们可以看到系统抽象的很多问题会在不同应用场景与不同层再次出现,但是抽象出的问题与系统算法会由于假设和约束不同产生新的变化,所以系统工作本身既要熟悉经典,也需要了解新场景与变化趋势。
<center> <img src="./img/3/8-3-5-packing.png" /></center>
<center>图 8.4.6 通过装箱(Bin Packing)共享GPU资源</center>
接下来我们以 NVIDIA Triton 推理服务器为例看实际系统中支持的动态批尺寸功能与策略。NVIDIA Triton 支持[并发模型执行(Concurrent Model Execution)](https://github.com/triton-inference-server/server/blob/main/docs/architecture.md#concurrent-model-execution)[<sup>[4]</sup>](#tritonconcurrent)机制,充分利用现有加速器。Triton提供了实例组(Instance-Group)的模型配置选项,它允许每个模型指定应该允许该模型的并行执行次数。 每个此类启用的并行执行称为一个实例。默认情况下,Triton 为系统中的每个可用 GPU 为每个模型提供一个实例。
如下面实例所示模型配置 ModelInstanceGroup 属性用于指定应该可用的执行实例的数量以及应该为这些实例使用的计算资源。实例组(instance_group)设置可用于在每个 GPU 上或仅在某些 GPU 上放置模型的多个执行实例。 这样相当于利用上面提到的装箱策略,提升并发度和加速器利用率,充分利用资源。例如,以下配置将在每个系统 GPU 上放置模型的两个执行实例。以下配置将在 GPU 0 上放置一个执行实例,在 GPU 1 和 2 上放置两个执行实例([实例来源](https://github.com/triton-inference-server/server/blob/main/docs/model_configuration.md#multiple-model-instances))。
```yml
instance_group [
{
count: 1
kind: KIND_GPU
gpus: [ 0 ]
},
{
count: 2
kind: KIND_GPU
gpus: [ 1, 2 ]
}
]
```
***经典回顾***
[装箱(Bin Packing)](https://en.wikipedia.org/wiki/Bin_packing_problem):“装箱问题是一个组合优化问题,其中不同大小的物品必须装入有限数量的箱或容器中,每个箱或容器具有固定的给定容量,其方式为最小化使用的箱数量。该方法有许多应用,例如填充容器、物流车辆装载、大规模平台资源调度等。当前我们可以认为 GPU 是箱子,而模型就是要装入的物品,将运行时的模型调度抽象为装箱问题。”
## 8.4.5 内存分配策略调优
“All the science is either physics or stamp collecting” -- Ernest Rutherford
由于设备或服务端内存是紧缺资源,推理系统常常也需要考虑做内存的分配策略的优化,进而能够服务更大的模型。当前深度学习的框架中默认提供内存管理器进行内存管理,进而减少设备 API 调用提升性能。有些推理系统通过插件化方式允许用户注册内存管理逻辑的回调(Call Back),例如,TensorRT 的 [IGpuAllocator](https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/infer/Core/GpuAllocator.html)[<sup>[5]</sup>](#tensorrtallocator) 用于控制 GPU 分配的应用程序实现的类。如下面的代码实例,应用程序实现的回调(tensorrt.tensorrt.IGpuAllocator),用于处理 GPU 内存的获取。
```C++
classtensorrt.IGpuAllocator(self: tensorrt.tensorrt.IGpuAllocator)→ None
__init__(self: tensorrt.tensorrt.IGpuAllocator)→ None
allocate(self: tensorrt.tensorrt.IGpuAllocator, size: int, alignment: int, flags: int)→ capsule
```
***经典回顾***
[库操作系统(Library Operating Systems)简称LibOS与外核(Exokernel)](https://en.wikipedia.org/wiki/Exokernel):“背后的想法是尽可能少地强制应用程序开发人员进行抽象,使他们能够对硬件抽象做出尽可能多的决定。这种低级硬件访问允许程序员实现自定义抽象,并省略不必要的抽象,最常见的是提高程序的性能。当前深度学习框架自身实现内存管理器,例如,默认 TensorFlow 获取全部 GPU 内存,进而通过框架自身内存管理器进行内存管理,减少 cudaMalloc 和 cudaFree 等设备内存分配与释放调用,本身类似库操作系统抽象,由框架和应用自身做对应资源管理进而提升性能。”
我们可以将内存管理问题抽象为以下的优化问题:
- 目标:最小化内存占用和内存分配调用(malloc 和 free 等)开销
- 约束:保证延迟服务等级协议(SLA)
- 优化策略:
- 缓存分配器(Cached Allocator):推理系统预先申请设备内存,构建推理系统的内存管理器,减少设备内存分配释放等 API 调用代价(例如,[cudaFree调用可能阻塞它的调用者,直到所有GPU上所有先前排队的工作完成](https://arxiv.org/abs/1912.01703))。
- [预取(Prefetch)和卸载(Off-loading)](https://arxiv.org/abs/1602.08124):异步地将设备内存数据在读取前和产生后和主存进行换入换出,减少设备内存的数据压力。
- [算子融合(Fusion)](https://github.com/microsoft/nnfusion):将中间结果在缓存层给下一阶段的内核使用,减少中间结果回写吞吐和延迟更低的设备内存或者主存的开销。
例如,我们可以通过下面模拟实例思考,如何通过 Swap 机制,将中间结果暂存在主存,等需要时再拷贝到 GPU 显存。但是这种方式减少内存同时会增加数据搬运的开销,读者可以静态估计权衡最优方案。
```python
# 本伪代码实例使用 PyTorch 框架为演示
class TestModel(nn.Module):
    """Toy residual block showing how to swap an intermediate tensor out to
    host memory while it is not needed, then copy it back to the GPU for the
    residual add. Trades extra host<->device transfer time for lower peak
    GPU memory.
    """

    def __init__(self, ni):
        # The original called super(block1, self).__init__() with an
        # undefined name `block1` (copied from another class); use the
        # zero-argument form, which is always correct here.
        super().__init__()
        self.conv1 = nn.Conv2d(ni, ni, 1)
        self.conv2 = nn.Conv2d(ni, ni, 3, 1, 1)
        # 3x3 conv with padding 1 preserves the 24x24 spatial size,
        # so the flattened feature length is ni*24*24.
        self.classifier = nn.Linear(ni * 24 * 24, 751)

    def forward(self, x):
        residual = x
        out = F.relu(self.conv1(x))
        # Not needed for the next op: stage to main memory. Tensor.cpu()
        # returns a copy, so the result must be rebound — the original
        # discarded it and never actually off-loaded anything.
        residual = residual.cpu()
        out = F.relu(self.conv2(out))
        # Copy back to GPU memory before the residual add (requires CUDA).
        residual = residual.cuda()
        out += residual
        out = out.view(out.size(0), -1)
        return self.classifier(out)
model = TestModel(16)
x = torch.randn(1, 16, 24, 24)
output = model(x)
```
读者也可以参考相关工作(例如,[DNNMem ESEC/FSE '20](https://dl.acm.org/doi/abs/10.1145/3368089.3417050),[vDNN MICRO '16](https://dl.acm.org/doi/10.5555/3195638.3195660 )等)进一步了解深度学习模型内存的分配占用分类和优化策略。
同时请读者对比和思考,推理内存管理和训练内存管理异同,以及相关工作趋势。
1. 训练作业与框架内存管理:
1. 张量为单位,有反向传播和权重更新,进而前向传播张量存活时间(Liveness)更久。
2. 吞吐量为主要目的。
3. 批尺寸大,对内存尺寸(Memory Size)要求更高。
2. 推理系统内存管理:
1. 张量为单位,无反向传播和权重更新,张量依赖少,用完即可释放。
2. 延迟为主要约束。
3. 服务端推理批尺寸小,但是边缘侧设备内存尺寸小约束强。
所以推理系统的内存管理可以借鉴传统 Web 服务和移动 APP 的内存管理设计思想结合深度学习负载特点进行设计。而训练作业更多工作朝着批处理作业或者延迟不敏感应用发展,一部分工作借助虚拟内存思想,靠“外存”(主存或者 NVMe 等高速存储)和异步化 IO 进行内存管理优化,例如 [vDNN MICRO '16](https://dl.acm.org/doi/10.5555/3195638.3195660),[NVIDIA Unified Memory](https://developer.nvidia.com/blog/unified-memory-cuda-beginners/) 等工作,另一部分是从算法分析层面入手,从算法层面减少冗余( 本地不留副本与按序加载的 [DeepSpeed ZeRO](https://arxiv.org/pdf/1910.02054.pdf) 等,其整体思路类似传统的参数服务器,按需加载权重到本地计算)或者重算([Gradient Checkpointing](https://github.com/cybertronai/gradient-checkpointing)等)。
例如,传统程序中,[Facebook jemalloc](https://engineering.fb.com/2011/01/03/core-data/scalable-memory-allocation-using-jemalloc/) 为服务端应用设计,其设计思路为:
- 性能与并发管理:支持快速的分配回收,并发请求的内存管理,尽量减少锁争用问题。
- 空间与碎片:减少元数据大小,根据应用选择块尺寸类型。
- 工作负载驱动:隔离小对象,重用时优先使用低地址。
- 全面考虑互操作:例如,操作系统交互方面的优化,减少页换出策略,减少激活页集合等策略。
同时 jemalloc 参考了大量已有的内存管理器的设计。希望以上的设计思路能启发读者针对推理服务特点设计区别于训练作业的内存管理策略。以上思路可以指导我们对数据中心推理服务的内存管理设计。
感兴趣的读者可以在 8.4.6 进行内存管理观测与实验,深入理解深度学习的内存管理技术。
## 8.4.6 深度学习模型内存分配算法实验与模拟研究
此实验需要读者自行参考下面方式收集数据集。
### (1) 数据获取
读者可以参考库中提供的脚本读取数据并了解数据模式。
- 日志收集
我们通过下面的实例或者脚本进行深度学习作业的日志收集,进而获取张量尺寸信息,分配(Allocation)和释放(Deallocation)信息。
```shell
# 假设 TensorFlow 1.13 版本
export TF_CPP_MIN_VLOG_LEVEL=2
python tf_infer.py # 程序中为常见的 TensorFlow 推理或训练程序
```
- 张量分配与释放相关日志抽取,进而获取张量的大小与分配释放顺序
例如,其中一条张量分配日志如下
```
20XX-XX-XX 12:20:44.472769: I tensorflow/core/framework/log_memory.cc:35] __LOG_MEMORY__ MemoryLogTensorAllocation { step_id: 2 kernel_name: "vgg_16/pool3/MaxPool" tensor { dtype: DT_FLOAT shape { dim { size: 64 } dim { size: 256 } dim { size: 28 } dim { size: 28 } } allocation_description { requested_bytes: 51380224 allocated_bytes: 51380224 allocator_name: "GPU_0_bfc" allocation_id: 101 has_single_reference: true ptr: 140615920648192 } } }
```
分配张量日志实例与关键字
```
MemoryLogTensorAllocation
```
释放张量日志实例与关键字
```
MemoryLogTensorDeallocation
```
[参考文档](https://stackoverflow.com/questions/36331419/tensorflow-how-to-measure-how-much-gpu-memory-each-tensor-takes/43189934#43189934)
### (2) 评测指标设定
读者可以设计以下评测指标,进行算法策略设计:
- 最小化时间开销
- malloc()、free() 例程在一般情况下应该尽可能快。
- 最小化空间(Space)占用
- 分配器不应该浪费空间,它应该从系统中获取尽可能少的内存,并且应该以最小化碎片(Fragmentation)的方式维护内存。碎片是程序不使用的连续内存块中的无法再分配的内存空闲浪费区域。
- 最小化局部性(Locality)
- 分配通常在彼此附近一起使用的内存块。 这有助于在程序执行期间最大限度地减少页面和缓存未命中。
- 其他
### (3) 算法实现与评测
- 内存分配器模拟与算法设计
- 假设设计的内存分配器依赖调用底层 NVIDIA [cudaFree](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1ga042655cbbf3408f01061652a075e094) 和 [cudaMalloc](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1g37d37965bfb4803b6d4e59ff26856356) 原语获取设备原始内存,但是此 API 调用有一定的时间代价。参考本文或其他测试数据中进行量化模拟 API 调用时间开销:[当使用 cudaMalloc 和 cudaFree 时,增加矩阵大小时性能退化分析](https://arxiv.org/pdf/1510.05041.pdf)。
- 设计实现 malloc(),free() 接口并实现内部算法
读者可以选用以经典算法作为基准测试(例如,[DNNMem](https://dl.acm.org/doi/10.1145/3368089.3417050) 中介绍的主流框架内存分配算法,或者传统[内存分配器策略](https://www.cs.tufts.edu/~nr/cs257/archive/doug-lea/malloc.html)),设计新的算法,并通过收集的数据模拟,看能否提升当前目标,超越基准算法,并进行结果分析,形成分析报告或论文。
***经典回顾***
[伙伴内存分配(Buddy Memory Allocation)](https://en.wikipedia.org/wiki/Buddy_memory_allocation):“伙伴内存分配技术是一种内存分配算法,它将内存划分为多个分区,以尽可能地满足内存请求。该系统利用将内存分成两半来尝试提供最佳拟合。根据 Donald Knuth 的说法,伙伴系统由 Harry Markowitz 于 1963 年发明,并由 Kenneth C. Knowlton 首次描述(1965 年出版)。 伙伴系统有多种形式。将每个块细分为两个较小块的那些是常见的变体。该系统中的每个内存块都有一个顺序(Order),其中顺序是一个整数,范围从 0 到指定的上限。n阶块的大小与$2^n$成正比,因此块的大小正好是低一个阶块大小的两倍。 2 的幂块大小使地址计算变得简单,因为所有伙伴都在内存地址边界上对齐,该边界是 2 的幂。 当一个较大的块被分割时,它被分成两个较小的块,每个较小的块成为另一个唯一的伙伴。拆分块只能与其唯一的伙伴块合并,然后重新组合它们被拆分的更大块。其好处是,有更小的外碎片(因为更容易让相邻空闲块合并),由于2的指数次块大小,更方便和操作系统的页系统(Paging System)映射内存,相比最佳匹配(Best Fit)更低的搜索时间复杂度。缺点是由于块合并产生一定的性能损失,容易产生一定内碎片。读者如果感兴趣其他内存管理器工作,可以参考[文献](https://slideplayer.com/slide/3544296/)。”
## 小结与讨论
本小节主要围绕推理系统的高吞吐与高效率的优化展开讨论,我们总结了推理系统高吞吐和高效率需求,以及围绕这个设计目标,推理系统常常使用的优化策略。
看完本章内容后,我们可以思考以下几点问题:
当前吞吐量和效率的优化策略是否会对延迟产生影响?
设计其他策略进行吞吐量或使用效率的优化?
## 参考文献
<div id="clipper"></div>
1. [Crankshaw, Daniel et al. “Clipper: A Low-Latency Online Prediction Serving System.” NSDI (2017).](https://www.usenix.org/system/files/conference/nsdi17/nsdi17-crankshaw.pdf)
<div id="nvperf"></div>
2. [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference)
<div id="triton"></div>
3. [NVIDIA Triton Dynamic Batcher](https://github.com/triton-inference-server/server/blob/main/docs/model_configuration.md#dynamic-batcher)
<div id="tritonconcurrent"></div>
4. [Triton Concurrent Model Execution](https://github.com/triton-inference-server/server/blob/main/docs/architecture.md#concurrent-model-execution)
<div id="tensorrtallocator"></div>
5. [TensorRT GPU Allocator](https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/infer/Core/GpuAllocator.html#tensorrt.AllocatorFlag)
|
AI-System/Textbook/第8章-深度学习推理系统/8.4-推理系统的运行期优化.md/0
|
{
"file_path": "AI-System/Textbook/第8章-深度学习推理系统/8.4-推理系统的运行期优化.md",
"repo_id": "AI-System",
"token_count": 16987
}
| 15 |
# BatchScoringDeepLearningModelsWithAML Pipeline
# CI pipeline that smoke-tests the batch-scoring tutorial end to end:
# builds the conda environment, authenticates with a service principal,
# executes notebooks 01-05 via papermill, deletes the Azure resource group
# if the run fails or is cancelled, and files a work item on failure.
trigger:
  batch: true            # coalesce queued pushes into a single run
  branches:
    include:
    - master
variables:
- group: AzureKeyVault   # supplies $(sptenent), $(spidentity), $(spsecret), ...
jobs:
- job: BatchScoringDeepLearningModelsWithAMLJob
  timeoutInMinutes: 300        # notebooks provision real Azure resources
  cancelTimeoutInMinutes: 2
  pool:
    vmImage: 'Ubuntu-16.04'
  steps:
  # Create the conda environment and log the Azure CLI in.
  - bash: |
      source /usr/share/miniconda/etc/profile.d/conda.sh
      which conda
      conda env create -f environment.yml
      conda activate batchscoringdl_aml
      conda env list
      echo Login Azure Account
      az login -t $(sptenent) --service-principal -u $(spidentity) --password $(spsecret)
    displayName: 'Initial Step'
  # Run each tutorial notebook in order with papermill.
  - bash: |
      source /usr/share/miniconda/etc/profile.d/conda.sh
      conda activate batchscoringdl_aml
      conda env list
      cd notebooks
      echo Execute 01_local_testing.ipynb
      papermill 01_local_testing.ipynb 01_local_testing_output.ipynb --log-output --no-progress-bar -k python3
    displayName: '01_local_testing.ipynb'
  - bash: |
      source /usr/share/miniconda/etc/profile.d/conda.sh
      conda activate batchscoringdl_aml
      conda env list
      cd notebooks
      echo Execute 02_setup_aml.ipynb
      papermill 02_setup_aml.ipynb 02_setup_aml_output.ipynb --log-output --no-progress-bar -k python3 \
        -p subscription_id $(subscriptionid) \
        -p resource_group $(azurergname) \
        -p workspace_name $(workspacename) \
        -p workspace_region $(azureregion) \
        -p storage_account_name $(azurestorage)
    displayName: '02_setup_aml.ipynb'
  - bash: |
      source /usr/share/miniconda/etc/profile.d/conda.sh
      conda activate batchscoringdl_aml
      conda env list
      cd notebooks
      echo Execute 03_develop_pipeline.ipynb
      papermill 03_develop_pipeline.ipynb 03_develop_pipeline_output.ipynb --log-output --no-progress-bar -k python3
    displayName: '03_develop_pipeline.ipynb'
  - bash: |
      source /usr/share/miniconda/etc/profile.d/conda.sh
      conda activate batchscoringdl_aml
      conda env list
      cd notebooks
      echo Execute 04_deploy_logic_apps.ipynb
      papermill 04_deploy_logic_apps.ipynb 04_deploy_logic_apps_output.ipynb --log-output --no-progress-bar -k python3
    displayName: '04_deploy_logic_apps.ipynb'
  - bash: |
      source /usr/share/miniconda/etc/profile.d/conda.sh
      conda activate batchscoringdl_aml
      conda env list
      cd notebooks
      echo Execute 05_clean_up.ipynb
      papermill 05_clean_up.ipynb 05_clean_up_output.ipynb --log-output --no-progress-bar -k python3
    displayName: '05_clean_up.ipynb'
  # Safety net: if the run failed or was cancelled before notebook 05 could
  # clean up, delete the whole resource group so nothing keeps billing.
  - bash: |
      source /usr/share/miniconda/etc/profile.d/conda.sh
      conda activate batchscoringdl_aml
      echo Execute Resource Group Delete
      existResponse=$(az group exists -n $(azurergname))
      if [ "$existResponse" == "true" ]; then
        echo Deleting project resource group
        az group delete --name $(azurergname) --yes
      else
        echo Project resource group did not exist
      fi
      echo Done Cleanup
    displayName: 'Backup Cleanup'
    condition: or(canceled(),failed())
  # File a tracking issue when the build fails.
  - task: CreateWorkItem@1
    inputs:
      workItemType: 'Issue'
      title: $(System.TeamProject) - Build $(Build.BuildNumber) Failed
      assignedTo: 'JS <[email protected]>'
      associate: true
      teamProject: $(System.TeamProject)
      fieldMappings: |
        Description=Branch: Branch $(Build.SourceBranch) failed to build. Go to Boards>WorkItems and tag the failure type.
    displayName: 'Create work item on failure'
    condition: failed()
|
AI/.ci/python-keras-scoring.yml/0
|
{
"file_path": "AI/.ci/python-keras-scoring.yml",
"repo_id": "AI",
"token_count": 1469
}
| 16 |
# Reusable stage template (v5): wraps deploy_notebook_steps_v5.yml in one
# stage/job so each workload pipeline only supplies its own parameters.
# Values of "#" are required and must be provided by the caller.
parameters:
  Agent: Hosted Ubuntu 1604
  Demands: "python3"
  stageName: 'defaultStageName'
  jobDisplayName: 'defaultDisplayName'
  jobTimeoutInMinutes: 180
  TridentWorkloadTypeShort: #
  DeployLocation: #
  TestPostfix: # "" | "-release" | "-preview"
  Deploy_Location_Short: #
  DefaultWorkingDirectory: #
  Template: #
  aksimagename: 'myimage'
  aks_name: "akscluster"
  aks_service_name: "aksservice"
  ProjectLocation: #
  PythonPath: #
  cluster_name: #
  flighting_release: false
  flighting_preview: false
  flighting_master: false
  doCleanup: True
  sub_vars: ../vars/agce_devops_sub_vars.yml
  workload_vars: #
  sql_server_name: "x"
  sql_database_name: "x"
  sql_username: "x"
  sql_password: "x"
  data_prep: true
  train: true
  post_cleanup: true
  container_name: "x"
  account_name: "x"
  account_key: "x"
  datastore_rg: "x"
  conda: #
stages:
- stage: ${{parameters.stageName}}
  dependsOn: []
  jobs:
  - job: deploy_notebook_steps
    displayName: ${{parameters.jobDisplayName}}
    pool:
      name: ${{parameters.Agent}}
      demands: ${{parameters.Demands}}
    timeoutInMinutes: ${{parameters.jobTimeoutInMinutes}}
    # Flighting runs (release/preview/master SDK candidates) may fail
    # without failing the whole pipeline.
    continueOnError: ${{or(or(eq(parameters.flighting_release,'true'), eq(parameters.flighting_preview,'true')), eq(parameters.flighting_master,'true'))}}
    workspace:
      clean: all
    variables:
    - template: ${{parameters.sub_vars}}   # subscription-level variables
    steps:
    # Resource names (resource group, workspace, cluster) are derived from
    # the workload short name + region + test postfix.
    - template: ../steps/deploy_notebook_steps_v5.yml
      parameters:
        template: ${{parameters.Template}}
        azureSubscription: ${{variables.azureSubscription}}
        azure_subscription: ${{variables.azure_subscription}}
        azureresourcegroup: ${{parameters.TridentWorkloadTypeShort}}-${{parameters.DeployLocation}}${{parameters.TestPostfix}}
        workspacename: ${{parameters.TridentWorkloadTypeShort}}-${{parameters.DeployLocation}}
        azureregion: ${{parameters.DeployLocation}}
        aksimagename: ${{parameters.aksimagename}}
        aks_service_name: ${{parameters.aks_service_name}}
        aks_name: ${{parameters.aks_name}}
        location: ${{parameters.ProjectLocation}}
        python_path: ${{parameters.DefaultWorkingDirectory}}${{parameters.PythonPath}}
        cluster_name: ${{parameters.TridentWorkloadTypeShort}}${{parameters.TestPostfix}}
        flighting_release: ${{parameters.flighting_release}}
        flighting_preview: ${{parameters.flighting_preview}}
        flighting_master: ${{parameters.flighting_master}}
        doCleanup: ${{parameters.doCleanup}}
        sql_server_name: ${{parameters.sql_server_name}}
        sql_database_name: ${{parameters.sql_database_name}}
        sql_username: ${{parameters.sql_username}}
        sql_password: ${{parameters.sql_password}}
        data_prep: ${{parameters.data_prep}}
        train: ${{parameters.train}}
        post_cleanup: ${{parameters.post_cleanup}}
        container_name: ${{parameters.container_name}}
        account_name: ${{parameters.account_name}}
        account_key: ${{parameters.account_key}}
        datastore_rg: ${{parameters.datastore_rg}}
        conda: ${{parameters.conda}}
|
AI/.ci/stage/deploy_notebooks_stage_v5.yml/0
|
{
"file_path": "AI/.ci/stage/deploy_notebooks_stage_v5.yml",
"repo_id": "AI",
"token_count": 1269
}
| 17 |
# Step template for the MLAKSDeployAML workload: configures conda, runs the
# tutorial notebooks 00-05 through azpapermill.yml, and finishes with the
# shared cleanup task.
parameters:
  azureSubscription: ''
  azure_subscription: ''
  location: submodules/DeployMLModelKubernetes/{{cookiecutter.project_name}}
  azureresourcegroup: dciborowhp
  workspacename: dciborowhpws
  azureregion: westus2
  aksimagename: dciborowhpaks
  aks_name: dciborowhpaks
  aks_service_name: myimage
  conda: MLAKSDeployAML
  doCleanup: true
  python_path: "$(System.DefaultWorkingDirectory)/submodules/DeployMLModelKubernetes/{{cookiecutter.project_name}}"
  flighting_release: false
  flighting_preview: false
  flighting_master: false
steps:
# Build/activate the conda environment used by all notebook steps.
- template: config_conda.yml
  parameters:
    conda_location: ${{parameters.location}}
    azureSubscription: ${{parameters.azureSubscription}}
    conda: ${{parameters.conda}}
    flighting_release: ${{parameters.flighting_release}}
    flighting_preview: ${{parameters.flighting_preview}}
    flighting_master: ${{parameters.flighting_master}}
# 00: create/attach the AML workspace.
- template: azpapermill.yml
  parameters:
    notebook: 00_AMLConfiguration.ipynb
    location: ${{parameters.location}}
    azureSubscription: ${{parameters.azureSubscription}}
    conda: ${{parameters.conda}}
    azure_subscription: ${{parameters.azure_subscription}}
    azureresourcegroup: ${{parameters.azureresourcegroup}}
    workspacename: ${{parameters.workspacename}}
    azureregion: ${{parameters.azureregion}}
    aksimagename: ${{parameters.aksimagename}}
# 01: prepare the training data.
- template: azpapermill.yml
  parameters:
    notebook: 01_DataPrep.ipynb
    location: ${{parameters.location}}
    azureSubscription: ${{parameters.azureSubscription}}
    conda: ${{parameters.conda}}
# Fan the prepared .tsv files out to both deployment targets (IoT Edge, AKS).
- bash: |
    mkdir -p ${{parameters.location}}/iotedge/data_folder
    mkdir -p ${{parameters.location}}/aks/data_folder
    cd ${{parameters.location}}
    cp data_folder/*.tsv iotedge/data_folder
    cp data_folder/*.tsv aks/data_folder
  displayName: 'Copying data'
# 02-04: train, write the scoring script, build the container image.
- template: azpapermill.yml
  parameters:
    notebook: 02_TrainOnLocal.ipynb
    location: ${{parameters.location}}
    azureSubscription: ${{parameters.azureSubscription}}
    conda: ${{parameters.conda}}
- template: azpapermill.yml
  parameters:
    notebook: 03_DevelopScoringScript.ipynb
    location: ${{parameters.location}}
    azureSubscription: ${{parameters.azureSubscription}}
    conda: ${{parameters.conda}}
- template: azpapermill.yml
  parameters:
    notebook: 04_CreateImage.ipynb
    location: ${{parameters.location}}
    azureSubscription: ${{parameters.azureSubscription}}
    conda: ${{parameters.conda}}
# 05: deploy the image to AKS (note the notebook lives under the aks/ subdir).
- template: azpapermill.yml
  parameters:
    notebook: 05_DeployOnAKS.ipynb
    location: ${{parameters.location}}/aks
    azureSubscription: ${{parameters.azureSubscription}}
    conda: ${{parameters.conda}}
    aks_name: ${{parameters.aks_name}}
    azureregion: ${{parameters.azureregion}}
    aks_service_name: ${{parameters.aks_service_name}}
    aksimagename: ${{parameters.aksimagename}}
    python_path: ${{parameters.python_path}}
# Tear down Azure resources when doCleanup is true.
- template: cleanuptask.yml
  parameters:
    azureSubscription: ${{parameters.azureSubscription}}
    conda: ${{parameters.conda}}
    azureresourcegroup: ${{parameters.azureresourcegroup}}
    doCleanup: ${{parameters.doCleanup}}
|
AI/.ci/steps/MLAKSDeployAMLJob.yml/0
|
{
"file_path": "AI/.ci/steps/MLAKSDeployAMLJob.yml",
"repo_id": "AI",
"token_count": 1170
}
| 18 |
# Step template that prepares an agent for the R real-time-scoring workload:
# installs system libraries, sets up a private R library path, installs R
# package dependencies, and injects deployment parameters into
# resource_specs.R via sed. "#" parameters must be supplied by the caller.
parameters:
  tenant: 72f988bf-86f1-41af-91ab-2d7cd011db47
  azureresourcegroup: "rmlrts"
  workspacename: "rmlrtsws"
  azureregion: "eastus"
  aksimagename: "myimage"
  aks_name: "rmlrtsaks"
  aks_service_name: "rmlrts"
  acr_name: "rmlrtsacr"
  CRAN: 'https://cloud.r-project.org'
  R_LIBS_USER: '$(Agent.BuildDirectory)/R/library'
  azure_subscription: #
  sp_appid: #
  sp_password: #
steps:
# Native libraries required to compile R packages (xml2, openssl).
- script: sudo apt-get update && sudo apt-get install -y libxml2-dev libssl-dev
  displayName: "Install System Dependencies"
# Point R at the chosen CRAN mirror and a writable per-agent library path.
- bash: |
    echo "options(repos = '${{parameters.CRAN}}')" > ~/.Rprofile
    echo ".libPaths(c('${{parameters.R_LIBS_USER}}', .libPaths()))" >> ~/.Rprofile
    mkdir -p ${{parameters.R_LIBS_USER}}
  displayName: 'Setup R library directory'
# Install the Azure R packages plus the project's declared dependencies.
- bash: |
    Rscript -e "pkgs <- c('remotes', 'rcmdcheck', 'drat', 'AzureGraph', 'AzureRMR', 'AzureContainers'); if(length(find.package(pkgs, quiet=TRUE)) != length(pkgs)) install.packages(pkgs)"
    Rscript -e "remotes::install_deps(dependencies=TRUE)"
  displayName: 'Installing package dependencies'
# Substitute the placeholder strings in resource_specs.R with real values;
# each sed pattern must match the placeholder text in the checked-in file.
- bash: |
    sed -i -e 's/your AAD tenant here/${{parameters.tenant}}/g' resource_specs.R
    sed -i -e 's/your subscription here/${{parameters.azure_subscription}}/g' resource_specs.R
    sed -i -e 's/resource group name/${{parameters.azureresourcegroup}}/g' resource_specs.R
    sed -i -e 's/resource group location/${{parameters.azureregion}}/g' resource_specs.R
    sed -i -e 's/container registry name/${{parameters.acr_name}}/g' resource_specs.R
    sed -i -e 's/cluster name/${{parameters.aks_name}}/g' resource_specs.R
    sed -i -e 's/your ap id/${{parameters.sp_appid}}/g' resource_specs.R
    sed -i -e 's/stop("Must specify a password!")/"${{parameters.sp_password}}"/g' resource_specs.R
  displayName: 'Setup R parameters'
|
AI/.ci/steps/config_r.yml/0
|
{
"file_path": "AI/.ci/steps/config_r.yml",
"repo_id": "AI",
"token_count": 731
}
| 19 |
# Step template that writes workspace/project config files, runs the pytest
# suite with coverage, lints converted notebooks, and publishes test and
# coverage results. "#" parameters must be supplied by the caller.
parameters:
  conda: #
  azureSubscription: #
  azure_subscription: #
  azureresourcegroup: #
  workspacename: defaultworkspacename
  azureregion: #
  location: #
  pylint_fail: True
  set_vars: true
  aksimagename: "x"
  aks_service_name: "x"
  aks_name: "x"
  deep_aksimagename: "mydeepimage"
  deep_aks_service_name: "deepaksservice"
  deep_aks_name: "deepakscluster"
  sql_server_name: "x"
  sql_database_name: "x"
  sql_username: "x"
  sql_password: "x"
  datastore_rg: "x"
  container_name: "x"
  account_name: "x"
  account_key: "x"
steps:
# Materialize workspace_conf.yml and project.yml from template parameters
# and copy them one directory up, where the tests expect to find them.
- task: AzureCLI@1
  displayName: "Set Vars"
  condition: eq('${{ parameters.set_vars }}', true)
  inputs:
    azureSubscription: ${{parameters.azureSubscription}}
    scriptLocation: 'inlineScript'
    failOnStderr: True
    inlineScript: |
      pwd
      source activate ${{parameters.conda}}
      rm -f workspace_conf.yml
      echo "subscription_id: ${{parameters.azure_subscription}}" >> workspace_conf.yml
      echo "resource_group: ${{parameters.azureresourcegroup}}" >> workspace_conf.yml
      echo "workspace_name: ${{parameters.workspacename}}" >> workspace_conf.yml
      echo "workspace_region: ${{parameters.azureregion}}" >> workspace_conf.yml
      echo "image_name: ${{parameters.aksimagename}}" >> workspace_conf.yml
      echo "aks_service_name: ${{parameters.aks_service_name}}" >> workspace_conf.yml
      echo "aks_name: ${{parameters.aks_name}}" >> workspace_conf.yml
      echo "aks_location: ${{parameters.azureregion}}" >> workspace_conf.yml
      echo "sql_server_name: ${{parameters.sql_server_name}}" >> workspace_conf.yml
      echo "sql_database_name: ${{parameters.sql_database_name}}" >> workspace_conf.yml
      echo "sql_username: ${{parameters.sql_username}}" >> workspace_conf.yml
      echo "sql_password: ${{parameters.sql_password}}" >> workspace_conf.yml
      echo "datastore_rg: ${{parameters.datastore_rg}}" >> workspace_conf.yml
      echo "container_name: ${{parameters.container_name}}" >> workspace_conf.yml
      echo "account_name: ${{parameters.account_name}}" >> workspace_conf.yml
      echo "account_key: ${{parameters.account_key}}" >> workspace_conf.yml
      cp workspace_conf.yml ../
      rm -f project.yml
      echo "project_name: AI Default Project" >> project.yml
      echo "settings:" >> project.yml
      echo "- subscription_id:" >> project.yml
      echo " - description: Azure Subscription Id" >> project.yml
      echo " - value: ${{parameters.azure_subscription}}" >> project.yml
      echo "- resource_group:" >> project.yml
      echo " - description: Azure Resource Group Name" >> project.yml
      echo " - value: ${{parameters.azureresourcegroup}}" >> project.yml
      echo "- workspace_name:" >> project.yml
      echo " - description: Azure ML Workspace Name" >> project.yml
      echo " - value: ${{parameters.workspacename}}" >> project.yml
      echo "- workspace_region:" >> project.yml
      echo " - description: Azure ML Workspace Region" >> project.yml
      echo " - value: ${{parameters.azureregion}}" >> project.yml
      echo "- image_name:" >> project.yml
      echo " - description: Docker Container Image Name" >> project.yml
      echo " - value: ${{parameters.aksimagename}}" >> project.yml
      echo "- aks_service_name:" >> project.yml
      echo " - description: AKS Service Name" >> project.yml
      echo " - value: ${{parameters.aks_service_name}}" >> project.yml
      echo "- aks_name:" >> project.yml
      echo " - description: AKS Cluster Name" >> project.yml
      echo " - value: ${{parameters.aks_name}}" >> project.yml
      echo "- aks_location:" >> project.yml
      echo " - description: AKS Azure Region" >> project.yml
      echo " - value: ${{parameters.azureregion}}" >> project.yml
      echo "- deep_image_name:" >> project.yml
      echo " - description: Docker Container Image Name" >> project.yml
      echo " - value: ${{parameters.deep_aksimagename}}" >> project.yml
      echo "- deep_aks_service_name:" >> project.yml
      echo " - description: AKS Service Name" >> project.yml
      echo " - value: ${{parameters.deep_aks_service_name}}" >> project.yml
      echo "- deep_aks_name:" >> project.yml
      echo " - description: AKS Cluster Name" >> project.yml
      echo " - value: ${{parameters.deep_aks_name}}" >> project.yml
      echo "- deep_aks_location:" >> project.yml
      echo " - description: AKS Azure Region" >> project.yml
      echo " - value: ${{parameters.azureregion}}" >> project.yml
      cat project.yml
      cp project.yml ../
# Install the package under test and run pytest with NUnit + coverage output.
- task: AzureCLI@1
  displayName: "pytests - tests"
  inputs:
    azureSubscription: ${{parameters.azureSubscription}}
    scriptLocation: 'inlineScript'
    failOnStderr: True
    inlineScript: |
      source activate ${{parameters.conda}}
      pip install -U .
      pip install pytest-nunit
      pytest tests --doctest-modules --nunit-xml=test-nunitresults.xml --cov=. --cov-report=xml
# Convert notebooks to .py and lint them; advisory only (continueOnError).
- task: AzureCLI@1
  displayName: "PyLint Notebooks Check"
  condition: succeededOrFailed()
  continueOnError: true
  inputs:
    azureSubscription: ${{parameters.azureSubscription}}
    scriptLocation: 'inlineScript'
    inlineScript: |
      source activate ${{parameters.conda}}
      if [ -d "notebooks" ]; then
        jupyter nbconvert --to python notebooks/*.ipynb
        pylint --output-format=junit notebooks/*.py >> test-pylint-results.xml
        pylint notebooks/*.py
      fi
# Publish unit-test results (NUnit), lint results, and code coverage.
- task: PublishTestResults@2
  condition: succeededOrFailed()
  inputs:
    testResultsFormat: NUnit
    testResultsFiles: '**/test-nunitresults.xml'
    failTaskOnFailedTests: true
- task: PublishTestResults@2
  displayName: 'Publish Test Results **/test-*.xml'
  condition: succeededOrFailed()
  inputs:
    testResultsFiles: '**/test-*.xml'
- task: PublishCodeCoverageResults@1
  condition: succeededOrFailed()
  inputs:
    codeCoverageTool: Cobertura
    summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml'
|
AI/.ci/steps/pytest_steps.yml/0
|
{
"file_path": "AI/.ci/steps/pytest_steps.yml",
"repo_id": "AI",
"token_count": 2438
}
| 20 |
# Per-workload variables for the ML hyperparameter-tuning pipeline; consumed
# by the shared stage/step templates under .ci/stage and .ci/steps.
variables:
  TridentWorkloadTypeShort: aimlscore   # short name used to derive resource names
  DeployLocation: eastus                # Azure region for all resources
  ProjectLocation: "."                  # path to the project root
  PythonPath: "."                       # appended to the agent's PYTHONPATH
  Template: MLTrainDeployAMLJob.yml     # step template to run for this workload
|
AI/.ci/vars/mlhyperparametertuning_vars.yml/0
|
{
"file_path": "AI/.ci/vars/mlhyperparametertuning_vars.yml",
"repo_id": "AI",
"token_count": 51
}
| 21 |
#!/bin/bash
#
# This script is for training with updated ann driver
#
# The design for this ann driver is to have 2 separate processes for training: one for passage/query
# inference using trained checkpoint to generate ann data and calcuate ndcg, another for training the model
# using the ann data generated. Data between processes is shared on common directory, model_dir for checkpoints
# and model_ann_data_dir for ann data.
#
# This script initialize the training and start the model training process
# It first preprocess the msmarco data into indexable cache, then generate a single initial ann data
# version to train on, after which it start training on the generated ann data, continously looking for
# newest ann data generated in model_ann_data_dir
#
# To start training, you'll need to run this script first
# after intial ann data is created (you can tell by either finding "successfully created
# initial ann training data" in console output or if you start seeing new model on tensorboard),
# start run_ann_data_gen.sh in another dlts job (or same dlts job using split GPU)
#
# Note if preprocess directory or ann data directory already exist, those steps will be skipped
# and training will start immediately

# Preset configurations: uncomment exactly one of the blocks below (or edit
# the active one) to pick the model/data variant to train.

# # Passage ANCE(FirstP)
# gpu_no=4
# seq_length=512
# model_type=rdot_nll
# tokenizer_type="roberta-base"
# base_data_dir="../data/raw_data/"
# preprocessed_data_dir="${base_data_dir}ann_data_${tokenizer_type}_${seq_length}/"
# job_name="OSPass512"
# pretrained_checkpoint_dir="warmup or trained checkpoint path"
# data_type=1
# warmup_steps=5000
# per_gpu_train_batch_size=8
# gradient_accumulation_steps=2
# learning_rate=1e-6

# # Document ANCE(FirstP)
# gpu_no=4
# seq_length=512
# tokenizer_type="roberta-base"
# model_type=rdot_nll
# base_data_dir="../data/raw_data/"
# preprocessed_data_dir="${base_data_dir}ann_data_${tokenizer_type}_${seq_length}/"
# job_name="OSDoc512"
# pretrained_checkpoint_dir="warmup or trained checkpoint path"
# data_type=0
# warmup_steps=3000
# per_gpu_train_batch_size=8
# gradient_accumulation_steps=2
# learning_rate=5e-6

# # Document ANCE(MaxP)  -- active configuration
gpu_no=8
seq_length=2048
tokenizer_type="roberta-base"
model_type=rdot_nll_multi_chunk
base_data_dir="../data/raw_data/"
preprocessed_data_dir="${base_data_dir}ann_data_${tokenizer_type}_${seq_length}/"
job_name="OSDoc2048"
pretrained_checkpoint_dir="warmup or trained checkpoint path"
data_type=0
warmup_steps=500
per_gpu_train_batch_size=2
gradient_accumulation_steps=8
learning_rate=1e-5

##################################### Data Preprocessing ################################
model_dir="${base_data_dir}${job_name}/"
model_ann_data_dir="${model_dir}ann_data/"

preprocess_cmd="\
python ../data/msmarco_data.py --data_dir $base_data_dir --out_data_dir $preprocessed_data_dir --model_type $model_type \
--model_name_or_path roberta-base --max_seq_length $seq_length --data_type $data_type\
"

echo $preprocess_cmd
eval $preprocess_cmd
# Capture the exit status immediately: the original read $? again after an
# intervening echo, so the failure branch always reported status 0.
status=$?

if [[ $status = 0 ]]; then
    echo "successfully created preprocessed data"
else
    echo "preprocessing failed"
    echo "failure: $status"
    exit 1
fi

##################################### Inital ANN Data generation ################################
initial_data_gen_cmd="\
python -m torch.distributed.launch --nproc_per_node=$gpu_no ../drivers/run_ann_data_gen.py --training_dir $model_dir \
--init_model_dir $pretrained_checkpoint_dir --model_type $model_type --output_dir $model_ann_data_dir \
--cache_dir "${model_ann_data_dir}cache/" --data_dir $preprocessed_data_dir --max_seq_length $seq_length \
--per_gpu_eval_batch_size 16 --topk_training 200 --negative_sample 20 --end_output_num 0 \
"

echo $initial_data_gen_cmd
eval $initial_data_gen_cmd
status=$?

if [[ $status = 0 ]]; then
    echo "successfully created initial ann training data"
else
    echo "initial data generation failed"
    echo "failure: $status"
    exit 1
fi

############################################# Training ########################################
train_cmd="\
python -m torch.distributed.launch --nproc_per_node=$gpu_no ../drivers/run_ann.py --model_type $model_type \
--model_name_or_path $pretrained_checkpoint_dir --task_name MSMarco --triplet --data_dir $preprocessed_data_dir \
--ann_dir $model_ann_data_dir --max_seq_length $seq_length --per_gpu_train_batch_size=$per_gpu_train_batch_size \
--gradient_accumulation_steps $gradient_accumulation_steps --learning_rate $learning_rate --output_dir $model_dir \
--warmup_steps $warmup_steps --logging_steps 100 --save_steps 10000 --optimizer lamb --single_warmup \
"

echo $train_cmd
eval $train_cmd
|
ANCE/commands/run_train.sh/0
|
{
"file_path": "ANCE/commands/run_train.sh",
"repo_id": "ANCE",
"token_count": 1490
}
| 22 |
from transformers.configuration_utils import PretrainedConfig
#from transformers.utils import logging
#logger = logging.get_logger(__name__)
import logging
logger = logging.getLogger(__name__)
# DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
# "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
# "microsoft/deberta-v2-xlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json",
# "microsoft/deberta-v2-xxlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json",
# }
class SEEDEncoderConfig(PretrainedConfig):
    r"""
    Configuration class for a SEED-Encoder model: a transformer encoder paired
    with a weak auxiliary decoder, using fairseq-style parameter names.

    NOTE(review): the previous docstring was copied verbatim from
    ``DebertaV2Config`` and documented parameters (``hidden_size``,
    ``num_hidden_layers``, ``pos_att_type``, ...) that this class does not
    accept. It has been replaced with a description of the actual arguments.

    Key arguments (all stored verbatim on the config object):
        pad_token_id (int, defaults to 1): id of the padding token.
        vocab_size (int, defaults to 32769): size of the token vocabulary.
        encoder_layers / decoder_layers: number of transformer layers per stack.
        encoder_embed_dim / decoder_embed_dim: hidden size of each stack.
        encoder_ffn_embed_dim / decoder_ffn_embed_dim: feed-forward inner size.
        encoder_attention_heads / decoder_attention_heads: heads per layer.
        dropout / attention_dropout / activation_dropout: dropout probabilities.
        encoder_layerdrop: probability of dropping an encoder layer (LayerDrop).
        max_positions (int, defaults to 512): maximum sequence length; also used
            for both ``max_source_positions`` and ``max_target_positions``.
        activation_fn / pooler_activation_fn: activation names ('gelu', 'tanh').
        quant_noise_pq / quant_noise_pq_block_size: quant-noise settings.
        train_ratio (str, defaults to '0.5:0.5'): presumably the mixing ratio of
            the pre-training objectives — TODO confirm against the trainer.
        decoder_atten_window (int, defaults to 2): attention window for the
            (restricted) decoder — assumption, verify in the model code.
        share_* / tie_adaptive_weights / adaptive_*: embedding-sharing and
            adaptive-softmax flags passed through to the decoder.
        initializer_range (float, defaults to 0.02): stddev of the
            truncated-normal initializer for weight matrices.
    """
    model_type = "seed_encoder"
    def __init__(
        self,
        pad_token_id=1,
        vocab_size=32769,
        encoder_layers=12,
        encoder_embed_dim=768,
        encoder_ffn_embed_dim=3072,
        encoder_attention_heads=12,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        encoder_layerdrop=0.0,
        max_positions=512,
        activation_fn='gelu',
        quant_noise_pq=0.0,
        quant_noise_pq_block_size=8,
        train_ratio='0.5:0.5',
        decoder_atten_window=2,
        pooler_activation_fn='tanh',
        pooler_dropout=0.0,
        encoder_layers_to_keep=None,
        decoder_layers=3,
        decoder_embed_path=None,
        decoder_embed_dim=768,
        decoder_ffn_embed_dim=3072,
        decoder_attention_heads=12,
        decoder_normalize_before=True,
        decoder_learned_pos=True,
        adaptive_softmax_cutoff=None,
        adaptive_softmax_dropout=0,
        share_decoder_input_output_embed=True,
        share_all_embeddings=True,
        no_token_positional_embeddings=False,
        adaptive_input=False,
        no_cross_attention=False,
        cross_self_attention=False,
        no_scale_embedding=True,
        layernorm_embedding=True,
        tie_adaptive_weights=True,
        decoder_layers_to_keep=None,
        initializer_range=0.02,
        **kwargs
    ):
        # Extra kwargs (e.g. id2label, torchscript flags) are handled by the base class.
        super().__init__(**kwargs)
        self.pad_token_id=pad_token_id
        self.vocab_size=vocab_size
        self.encoder_layers=encoder_layers
        self.encoder_embed_dim=encoder_embed_dim
        self.encoder_ffn_embed_dim=encoder_ffn_embed_dim
        self.encoder_attention_heads=encoder_attention_heads
        self.dropout=dropout
        self.attention_dropout=attention_dropout
        self.activation_dropout=activation_dropout
        self.encoder_layerdrop=encoder_layerdrop
        self.max_positions=max_positions
        self.activation_fn=activation_fn
        self.quant_noise_pq=quant_noise_pq
        self.quant_noise_pq_block_size=quant_noise_pq_block_size
        self.train_ratio=train_ratio
        self.decoder_atten_window=decoder_atten_window
        self.pooler_activation_fn=pooler_activation_fn
        self.pooler_dropout=pooler_dropout
        self.encoder_layers_to_keep=encoder_layers_to_keep
        self.decoder_layers=decoder_layers
        self.decoder_embed_path=decoder_embed_path
        self.decoder_embed_dim=decoder_embed_dim
        self.decoder_ffn_embed_dim=decoder_ffn_embed_dim
        self.decoder_attention_heads=decoder_attention_heads
        self.decoder_normalize_before=decoder_normalize_before
        self.decoder_learned_pos=decoder_learned_pos
        self.adaptive_softmax_cutoff=adaptive_softmax_cutoff
        self.adaptive_softmax_dropout=adaptive_softmax_dropout
        self.share_decoder_input_output_embed=share_decoder_input_output_embed
        self.share_all_embeddings=share_all_embeddings
        self.no_token_positional_embeddings=no_token_positional_embeddings
        self.adaptive_input=adaptive_input
        self.no_cross_attention=no_cross_attention
        self.cross_self_attention=cross_self_attention
        # Decoder input/output dims are tied to the decoder embedding dim (not parameters).
        self.decoder_output_dim=decoder_embed_dim
        self.decoder_input_dim=decoder_embed_dim
        self.no_scale_embedding=no_scale_embedding
        self.layernorm_embedding=layernorm_embedding
        self.tie_adaptive_weights=tie_adaptive_weights
        self.decoder_layers_to_keep=decoder_layers_to_keep
        # Decoder LayerDrop is fixed at 0 (not configurable).
        self.decoder_layerdrop=0
        # Source and target share the same maximum length.
        self.max_source_positions=max_positions
        self.max_target_positions=max_positions
        self.initializer_range = initializer_range
|
ANCE/model/SEED_Encoder/configuration_seed_encoder.py/0
|
{
"file_path": "ANCE/model/SEED_Encoder/configuration_seed_encoder.py",
"repo_id": "ANCE",
"token_count": 3684
}
| 23 |
"""
Code for self-training with weak supervision.
Author: Giannis Karamanolakis ([email protected])
"""
import logging
def get_logger(logfile, name='mylogger', level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s'):
    """Create (or reconfigure) a named logger writing to *logfile* and the console.

    Args:
        logfile: Path of the log file (opened by ``logging.FileHandler``).
        name: Logger name registered with the logging module.
        level: Minimum severity the logger records.
        format: Record format string applied to both handlers.

    Returns:
        The configured ``logging.Logger`` instance.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Fix: logging.getLogger returns a per-name singleton, so a repeated call
    # used to stack a second pair of handlers and duplicate every log line.
    # Drop any handlers from a previous call before attaching fresh ones.
    logger.handlers = []
    # File Handler: prints to logfile
    fileHandler = logging.FileHandler(logfile)
    fileHandler.setFormatter(logging.Formatter(format))
    logger.addHandler(fileHandler)
    # Console Handler: prints to console
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logging.Formatter(format))
    logger.addHandler(consoleHandler)
    # Do not forward records to ancestor loggers (avoids double printing).
    logger.propagate = False
    return logger
def close(logger):
    """Detach every handler from *logger* so it stops emitting records."""
    while logger.handlers:
        logger.handlers.pop()
|
ASTRA/astra/Logger.py/0
|
{
"file_path": "ASTRA/astra/Logger.py",
"repo_id": "ASTRA",
"token_count": 256
}
| 24 |
### Evaluation code for E2E, WebNLG and Dart
* Code for evaluating E2E https://github.com/tuetschek/e2e-metrics
* Code for evaluating WebNLG and Dart https://github.com/WebNLG/GenerationEval.git
Before running evaluation for the first time you must run
`bash download_evalscript.sh`
|
AdaMix/NLG/eval/README.md/0
|
{
"file_path": "AdaMix/NLG/eval/README.md",
"repo_id": "AdaMix",
"token_count": 89
}
| 25 |
# Fine-tune GPT-2 medium on the E2E NLG dataset with LoRA + Adamix
# (8 experts, shared B matrices), then run evaluation.
. ./venv/bin/activate

seed=110
n_experts=8

# Single-process distributed launch (nproc_per_node=1).
python -m torch.distributed.launch --nproc_per_node=1 src/gpt2_ft.py \
    --train_data ./data/e2e/train.jsonl \
    --valid_data ./data/e2e/valid.jsonl \
    --train_batch_size 8 \
    --grad_acc 1 \
    --valid_batch_size 4 \
    --seq_len 512 \
    --model_card gpt2.md \
    --init_checkpoint ./pretrained_checkpoints/gpt2-medium-pytorch_model.bin \
    --platform local \
    --clip 0.0 \
    --lr 0.0002 \
    --weight_decay 0.01 \
    --correct_bias \
    --adam_beta2 0.999 \
    --scheduler linear \
    --warmup_step 2000 \
    --max_epoch 20 \
    --eval_interval 5000 \
    --save_interval 5000 \
    --lora_dim 4 \
    --lora_alpha 32 \
    --lora_dropout 0.1 \
    --label_smooth 0.1 \
    --work_dir ./trained_models/GPT2_M/e2e/$seed/lora_adamix \
    --random_seed $seed \
    --n_experts $n_experts \
    --share_A 0 \
    --share_B 1

# Evaluate the trained checkpoint with the same seed / expert count.
bash run_eval_e2e.sh --seed $seed --n_experts $n_experts
|
AdaMix/NLG/run_train_e2e.sh/0
|
{
"file_path": "AdaMix/NLG/run_train_e2e.sh",
"repo_id": "AdaMix",
"token_count": 365
}
| 26 |
# ------------------------------------------------------------------------------------------
# Copyright (c). All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import argparse
import time
import math
import os, sys
import numpy as np
import itertools
import torch
import random
from torch.utils.data import DataLoader
torch.set_printoptions(threshold=100000)
from gpu import (
add_gpu_params,
parse_gpu,
distributed_opt,
distributed_gather,
distributed_sync,
cleanup
)
from optimizer import (
create_adam_optimizer,
create_optimizer_scheduler,
add_optimizer_params,
create_adam_optimizer_from_args
)
from data_utils import FT_Dataset
from model import GPT2Config, GPT2LMModel
from exp_utils import create_exp_dir
import loralib as lora
# Command-line interface. GPU/distributed flags and optimizer flags are
# registered by the helper modules; everything below is task-specific.
parser = argparse.ArgumentParser(description='PyTorch GPT2 ft script')
add_gpu_params(parser)
add_optimizer_params(parser)
# Data and batching.
parser.add_argument('--train_data', required=True, help='location of training data corpus')
parser.add_argument('--valid_data', required=True, help='location of validation data corpus')
parser.add_argument('--train_batch_size', type=int, default=8, help='training batch size')
parser.add_argument('--valid_batch_size', type=int, default=4, help='validation batch size')
parser.add_argument('--grad_acc', type=int, default=1, help='gradient accumulation steps')
parser.add_argument('--clip', type=float, default=0.0, help='gradient clip')
parser.add_argument('--seq_len', type=int, default=512, help='number of tokens to predict.')
# Model selection and checkpointing.
parser.add_argument('--model_card', default='gpt2.md', choices=['gpt2.sm', 'gpt2.md', 'gpt2.lg'],
                    help='model names')
parser.add_argument('--init_checkpoint', default=None, help='pretrained checkpoint path')
parser.add_argument('--fp16', action='store_true', help='train model with fp16')
# Logging / evaluation cadence and output location.
parser.add_argument('--log_interval', type=int, default=100, help='log interval')
parser.add_argument('--eval_interval', type=int, default=2000, help='eval interval')
parser.add_argument('--save_interval', type=int, default=500, help='save interval')
parser.add_argument('--work_dir', type=str, default=os.getenv('PT_OUTPUT_DIR', 'gpt2_model'),
                    help='working folder.')
# LoRA hyper-parameters (lora_dim == 0 disables LoRA marking below).
parser.add_argument('--lora_dim', type=int, default=0, help='lora attn dimension')
parser.add_argument('--lora_alpha', type=int, default=128, help='lora attn alpha')
parser.add_argument('--obj', default='clm', choices=['jlm', 'clm'],
                    help='language model training objective')
parser.add_argument('--lora_dropout', default=0.0, type=float,
                    help='dropout probability for lora layers')
parser.add_argument('--label_smooth', default=0.0, type=float, help='label smoothing')
# "Rolling" options — semantics not visible in this file; presumably staged LR
# restarts. TODO confirm against the training driver.
parser.add_argument('--roll_interval', type=int, default=-1, help='rolling interval')
parser.add_argument('--roll_lr', type=float, default=0.00001, help='rolling learning rate')
parser.add_argument('--roll_step', type=int, default=100, help='rolling step')
parser.add_argument('--eval_epoch', type=int, default=1, help='eval per number of epochs')
# Adamix (mixture-of-experts adaptation) options.
parser.add_argument('--n_experts', type=int, default=1, help='number of experts')
parser.add_argument('--share_A', type=int, default=0, help='share LoRA A')
parser.add_argument('--share_B', type=int, default=0, help='share LoRA B')
parser.add_argument('--lora_only', type=int, default=0, help='Run Lora without Adamix')
parser.add_argument('--adamix_only', type=int, default=0, help='Run Adamix without Lora')
parser.add_argument('--expert_weight_tune', type=int, default=0, help='Tune expert weights during training')
# influence model, calculate the influence score between two samples.
def print_args(args):
    """Pretty-print every parsed argument, on the rank-0 process only."""
    if args.rank != 0:
        return
    separator = '=' * 100
    print(separator)
    for key, value in args.__dict__.items():
        print(f'    - {key} : {value}')
    print(separator)
class AverageMeter(object):
    """Keep the most recent sample and a count-weighted running average.

    Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """
    def __init__(self):
        self.reset()

    def reset(self):
        # Forget everything seen so far.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        # Fold in `n` occurrences of `val`, then recompute the mean.
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def optimizer_step(_loss, _optimizer, _model, _schedule, args, is_update=True):
    """Backpropagate *_loss*; when *is_update* is True also clip, step and zero grads.

    With gradient accumulation, callers pass ``is_update=False`` on the
    intermediate micro-batches so gradients accumulate across calls.

    NOTE(review): ``amp`` is only imported in ``__main__`` when ``args.fp16``
    is set — this function relies on that module-global being present for the
    fp16 paths.
    """
    if args.fp16:
        # Scale the loss to avoid fp16 gradient underflow (apex amp).
        with amp.scale_loss(_loss, _optimizer) as _scaled_loss:
            _scaled_loss.backward()
    else:
        _loss.backward()
    if is_update:
        # Optional global-norm gradient clipping; clip <= 0 disables it.
        if args.clip > 0:
            if args.fp16:
                torch.nn.utils.clip_grad_norm_(amp.master_params(_optimizer), args.clip)
            else:
                torch.nn.utils.clip_grad_norm_(_model.parameters(), args.clip)
        _optimizer.step()
        _optimizer.zero_grad()
        # Advance the LR schedule once per optimizer step, if one is attached.
        if _schedule is not None:
            _schedule.step()
def evaluate(model, valid_loader, args):
    """Run one full pass over *valid_loader*; return ``(avg_loss, perplexity)``.

    Puts the model in eval mode and disables autograd; the caller is
    responsible for restoring train mode afterwards.
    """
    model.eval()
    total_loss = 0.  # NOTE(review): never accumulated below — apparently dead.
    start_time = time.time()
    avg_lm_loss = AverageMeter()
    with torch.no_grad():
        for idx, data in enumerate(valid_loader):
            data = {key: value for key, value in data.items()}
            _input = data['input'].to(args.device)
            _target = data['target'].to(args.device)
            _msk = data['mask'].to(args.device)
            _lm_logits, _loss = model(_input, lm_labels=_target, lm_mask=_msk)
            # Average across GPUs/batch elements before logging.
            loss = _loss.mean()
            avg_lm_loss.update(loss.item())
            if idx % 100 == 0:
                print('eval samples:', idx, 'loss:', loss.float())
    total_time = time.time() - start_time  # NOTE(review): computed but unused.
    print('average loss', avg_lm_loss.avg)
    return avg_lm_loss.avg, math.exp(avg_lm_loss.avg)
def train_validate(
    model,
    optimizer,
    scheduler,
    train_loader,
    valid_loader,
    args,
    train_step=0,
    epoch=0
):
    """Train for one epoch with periodic logging, checkpointing and evaluation.

    Args:
        model/optimizer/scheduler: training objects (scheduler stepped inside
            ``optimizer_step``).
        train_loader/valid_loader: data loaders; the train sampler is re-seeded
            per epoch for distributed shuffling.
        args: parsed CLI namespace (device, intervals, rank, grad_acc, ...).
        train_step: global step carried across epochs.
        epoch: current epoch number (used by the sampler and log lines).

    Returns:
        The updated global ``train_step``.
    """
    model.train()
    avg_lm_loss = AverageMeter()
    print('start to train the model................', epoch)
    log_start_time = time.time()
    # NOTE(review): reset on every call, so "best ppl" is per-epoch, not global.
    best_val_ppl = None
    train_loader.sampler.set_epoch(epoch)
    for idx, data in enumerate(train_loader):
        data = {key: value for key, value in data.items()}
        _input = data['input'].to(args.device)
        _target = data['target'].to(args.device)
        _msk = data['mask'].to(args.device)
        _lm_logits, _lm_loss = model(
            _input, lm_labels=_target, lm_mask=_msk, label_smooth=args.label_smooth
        )
        _lm_loss = _lm_loss.mean()
        train_step += 1
        # Only apply the optimizer every grad_acc micro-batches (accumulation).
        is_update = True if train_step % args.grad_acc == 0 else False
        avg_lm_loss.update(_lm_loss.item())
        optimizer_step(
            _lm_loss/(args.grad_acc), optimizer, model, scheduler, args, is_update=is_update
        )
        if train_step % args.log_interval == 0:
            elapsed = time.time() - log_start_time
            lr = optimizer.param_groups[0]['lr']
            log_str = f'| epoch {epoch:3d} step {train_step:>8d} | { idx + 1:>6d} batches | ' \
                      f'lr {lr:.3g} | ms/batch {elapsed * 1000 / args.log_interval:5.2f} | ' \
                      f'loss {avg_lm_loss.val:5.2f} | avg loss {avg_lm_loss.avg:5.2f} | ' \
                      f'ppl {math.exp(avg_lm_loss.avg):5.2f}'
            if args.rank == 0:
                print(log_str)
            log_start_time = time.time()
            avg_lm_loss.reset()
        if train_step % args.save_interval == 0:
            if args.rank == 0:
                # Interval checkpoints store only the LoRA weights (small files).
                model_path = os.path.join(args.work_dir, f'model.{train_step}.pt')
                print('saving checkpoint', model_path)
                torch.save({'model_state_dict': lora.lora_state_dict(model)}, model_path)
            distributed_sync(args)
        # evaluation interval
        if train_step % args.eval_interval == 0:
            eval_start_time = time.time()
            valid_loss, valid_ppl = evaluate(model, valid_loader, args)
            if best_val_ppl is None or valid_ppl < best_val_ppl:
                best_val_ppl = valid_ppl
            log_str = f'| Eval {train_step // args.eval_interval:3d} at step {train_step:>8d} | ' \
                      f'time: {time.time() - eval_start_time:5.2f}s | valid loss {valid_loss:5.2f} | ' \
                      f'valid ppl {valid_ppl:5.2f} | best ppl {best_val_ppl:5.2f} '
            if args.rank == 0:
                print('-' * 100)
                print(log_str)
                print('-' * 100)
            # evaluate() left the model in eval mode; switch back.
            model.train()
            distributed_sync(args)
        if train_step == args.max_step:
            break
    if args.rank == 0:
        # End-of-epoch checkpoint stores the FULL state dict (unlike intervals).
        model_path = os.path.join(args.work_dir, f'model.{train_step}.pt')
        print('saving checkpoint', model_path)
        torch.save({'model_state_dict': model.state_dict()}, model_path)
    distributed_sync(args)
    return train_step
if __name__ == '__main__':
    # ---- setup: CLI, distributed state, seeding ------------------------------
    args = parser.parse_args()
    parse_gpu(args)
    print_args(args)

    if args.fp16:
        try:
            from apex import amp
        except Exception as e:
            # Fix: `warnings` was referenced here without ever being imported,
            # so the apex-missing fallback raised NameError instead of warning.
            import warnings
            warnings.warn('Could not import amp, apex may not be installed')

    torch.manual_seed(args.random_seed)
    random.seed(args.random_seed)

    if args.rank == 0:
        args.logging = create_exp_dir(args.work_dir)

    # ---- data ----------------------------------------------------------------
    train_data = FT_Dataset(
        args.train_data, args.train_batch_size, args.seq_len,
        joint_lm=args.obj=='jlm'
    )
    valid_data = FT_Dataset(
        args.valid_data, args.valid_batch_size, args.seq_len,
    )

    # Shuffling is delegated to the seeded DistributedSampler (shuffle=False here).
    train_loader = DataLoader(
        train_data, batch_size=args.train_batch_size, num_workers=0,
        shuffle=False, pin_memory=False, drop_last=True,
        sampler=torch.utils.data.distributed.DistributedSampler(train_data, seed=args.random_seed)
    )
    valid_loader = DataLoader(
        valid_data, batch_size=args.valid_batch_size, num_workers=0,
        shuffle=False, pin_memory=False, drop_last=False,
        sampler=torch.utils.data.distributed.DistributedSampler(valid_data, seed=args.random_seed)
    )

    # ---- model configuration (sizes per GPT-2 small/medium/large) ------------
    if args.model_card == 'gpt2.sm':
        config = GPT2Config(
            n_embd=768, n_layer=12, n_head=12,
            lora_attn_dim=args.lora_dim,
            lora_attn_alpha=args.lora_alpha,
            lora_dropout=args.lora_dropout,
            n_experts=args.n_experts,
            share_A=args.share_A,
            share_B=args.share_B,
            lora_only=args.lora_only,
            adamix_only=args.adamix_only
        )
    elif args.model_card == 'gpt2.md':
        config = GPT2Config(
            n_embd=1024, n_layer=24, n_head=16,
            lora_attn_dim=args.lora_dim,
            lora_attn_alpha=args.lora_alpha,
            lora_dropout=args.lora_dropout,
            n_experts=args.n_experts,
            share_A=args.share_A,
            share_B=args.share_B,
            lora_only=args.lora_only,
            adamix_only=args.adamix_only
        )
    elif args.model_card == 'gpt2.lg':
        config = GPT2Config(
            n_embd=1280, n_layer=36, n_head=20,
            lora_attn_dim=args.lora_dim,
            lora_attn_alpha=args.lora_alpha,
            lora_dropout=args.lora_dropout,
            n_experts=args.n_experts,
            share_A=args.share_A,
            share_B=args.share_B,
            lora_only=args.lora_only,
            adamix_only=args.adamix_only
        )

    lm_net = GPT2LMModel(config)
    if args.init_checkpoint is not None:
        print('loading model pretrained weight.')
        lm_net.load_weight(torch.load(args.init_checkpoint))

    lm_net = lm_net.cuda()

    # ---- choose which parameters are trainable -------------------------------
    if args.lora_dim > 0:
        lora.mark_only_lora_as_trainable(lm_net)

        if args.adamix_only == 1:
            # Adamix without LoRA: unfreeze only the expert parameters.
            for n, p in lm_net.named_parameters():
                if "deepspeed_experts" in n:
                    p.requires_grad = True

        if args.expert_weight_tune == 1:
            for n, p in lm_net.named_parameters():
                if "expert_score_weight" in n:
                    p.requires_grad = True

    for n, p in lm_net.named_parameters():
        print(n, p.requires_grad)

    optimizer = create_adam_optimizer_from_args(lm_net, args)

    if args.max_step is None:
        # Derive total optimizer steps from epochs, rounding up across workers.
        args.max_step = (args.max_epoch * train_data.num_batches + args.world_size - 1) // args.world_size
        print('set max_step:', args.max_step)

    scheduler = create_optimizer_scheduler(optimizer, args)
    if args.fp16:
        lm_net, optimizer = amp.initialize(lm_net, optimizer, opt_level="O1")
    lm_net, optimizer = distributed_opt(args, lm_net, optimizer, grad_acc=args.grad_acc)

    # ---- training loop --------------------------------------------------------
    try:
        train_step = 0
        for epoch in itertools.count(start=1):
            train_step = train_validate(
                lm_net, optimizer, scheduler, train_loader, valid_loader, args,
                train_step=train_step, epoch=epoch
            )

            if train_step >= args.max_step or (args.max_epoch is not None and epoch >= args.max_epoch):
                if args.rank == 0:
                    print('-' * 100)
                    print('End of training')
                break
    except KeyboardInterrupt:
        if args.rank == 0:
            print('-' * 100)
            print('Exiting from training early')

    # Final checkpoint (full state dict, rank 0 only), then tear down distributed.
    if args.rank == 0:
        model_path = os.path.join(args.work_dir, f'model.final.pt')
        print('saving checkpoint', model_path)
        torch.save({'model_state_dict': lm_net.state_dict()}, model_path)
    distributed_sync(args)

    print('cleanup dist ...')
    cleanup(args)
AdaMix/NLG/src/gpt2_ft.py/0
|
{
"file_path": "AdaMix/NLG/src/gpt2_ft.py",
"repo_id": "AdaMix",
"token_count": 6408
}
| 27 |
# NOTE(review): the first FROM stage is never referenced (there is no
# COPY --from=0), so the effective base image is only the second FROM.
# Confirm whether the CUDA stage can be dropped.
FROM nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04
FROM yaqing/pytorch-efficient-tuning:v0.1
LABEL maintainer="Hugging Face"
LABEL repository="transformers"
# Copy the repository into the image and install it with pip (no cache layer bloat).
COPY . transformers/
RUN cd transformers/ && \
    python -m pip install --no-cache-dir .
CMD ["/bin/bash"]
|
AdaMix/docker/transformers-pytorch-gpu-adamix/Dockerfile/0
|
{
"file_path": "AdaMix/docker/transformers-pytorch-gpu-adamix/Dockerfile",
"repo_id": "AdaMix",
"token_count": 110
}
| 28 |
/* Our DOM objects */
/* Colab dropdown */
table.center-aligned-table td {
text-align: center;
}
table.center-aligned-table th {
text-align: center;
vertical-align: middle;
}
.colab-dropdown {
position: relative;
display: inline-block;
}
.colab-dropdown-content {
display: none;
position: absolute;
background-color: #f9f9f9;
min-width: 117px;
box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
z-index: 1;
}
.colab-dropdown-content button {
color: #6670FF;
background-color: #f9f9f9;
font-size: 12px;
border: none;
min-width: 117px;
padding: 5px 5px;
text-decoration: none;
display: block;
}
.colab-dropdown-content button:hover {background-color: #eee;}
.colab-dropdown:hover .colab-dropdown-content {display: block;}
/* Version control */
.version-button {
background-color: #6670FF;
color: white;
border: none;
padding: 5px;
font-size: 15px;
cursor: pointer;
}
.version-button:hover, .version-button:focus {
background-color: #A6B0FF;
}
.version-dropdown {
display: none;
background-color: #6670FF;
min-width: 160px;
overflow: auto;
font-size: 15px;
}
.version-dropdown a {
color: white;
padding: 3px 4px;
text-decoration: none;
display: block;
}
.version-dropdown a:hover {
background-color: #A6B0FF;
}
.version-show {
display: block;
}
/* Framework selector */
.framework-selector {
display: flex;
flex-direction: row;
justify-content: flex-end;
margin-right: 30px;
}
.framework-selector > button {
background-color: white;
color: #6670FF;
border: 1px solid #6670FF;
padding: 5px;
}
.framework-selector > button.selected{
background-color: #6670FF;
color: white;
border: 1px solid #6670FF;
padding: 5px;
}
/* Copy button */
a.copybtn {
margin: 3px;
}
/* The literal code blocks */
.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal {
color: #6670FF;
}
/* To keep the logo centered */
.wy-side-scroll {
width: auto;
font-size: 20px;
}
/* The div that holds the Hugging Face logo */
.HuggingFaceDiv {
width: 100%
}
/* The research field on top of the toc tree */
.wy-side-nav-search{
padding-top: 0;
background-color: #6670FF;
}
/* The toc tree */
.wy-nav-side{
background-color: #6670FF;
}
/* The section headers in the toc tree */
.wy-menu-vertical p.caption{
background-color: #4d59ff;
line-height: 40px;
}
/* The selected items in the toc tree */
.wy-menu-vertical li.current{
background-color: #A6B0FF;
}
/* When a list item that does belong to the selected block from the toc tree is hovered */
.wy-menu-vertical li.current a:hover{
background-color: #B6C0FF;
}
/* When a list item that does NOT belong to the selected block from the toc tree is hovered. */
.wy-menu-vertical li a:hover{
background-color: #A7AFFB;
}
/* The text items on the toc tree */
.wy-menu-vertical a {
color: #FFFFDD;
font-family: Calibre-Light, sans-serif;
}
.wy-menu-vertical header, .wy-menu-vertical p.caption{
color: white;
font-family: Calibre-Light, sans-serif;
}
/* The color inside the selected toc tree block */
.wy-menu-vertical li.toctree-l2 a, .wy-menu-vertical li.toctree-l3 a, .wy-menu-vertical li.toctree-l4 a {
color: black;
}
/* Inside the depth-2 selected toc tree block */
.wy-menu-vertical li.toctree-l2.current>a {
background-color: #B6C0FF
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a {
background-color: #C6D0FF
}
/* Inside the depth-3 selected toc tree block */
.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{
background-color: #D6E0FF
}
/* Inside code snippets */
.rst-content dl:not(.docutils) dt{
font-size: 15px;
}
/* Links */
a {
color: #6670FF;
}
/* Content bars */
.rst-content dl:not(.docutils) dt {
background-color: rgba(251, 141, 104, 0.1);
border-right: solid 2px #FB8D68;
border-left: solid 2px #FB8D68;
color: #FB8D68;
font-family: Calibre-Light, sans-serif;
border-top: none;
font-style: normal !important;
}
/* Expand button */
.wy-menu-vertical li.toctree-l2 span.toctree-expand,
.wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current>a span.toctree-expand,
.wy-menu-vertical li.toctree-l3 span.toctree-expand{
color: black;
}
/* Max window size */
.wy-nav-content{
max-width: 1200px;
}
/* Mobile header */
.wy-nav-top{
background-color: #6670FF;
}
/* Source spans */
.rst-content .viewcode-link, .rst-content .viewcode-back{
color: #6670FF;
font-size: 110%;
letter-spacing: 2px;
text-transform: uppercase;
}
/* It would be better for table to be visible without horizontal scrolling */
.wy-table-responsive table td, .wy-table-responsive table th{
white-space: normal;
}
.footer {
margin-top: 20px;
}
.footer__Social {
display: flex;
flex-direction: row;
}
.footer__CustomImage {
margin: 2px 5px 0 0;
}
/* class and method names in doc */
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) code.descclassname{
font-family: Calibre, sans-serif;
font-size: 20px !important;
}
/* class name in doc*/
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname{
margin-right: 10px;
font-family: Calibre-Medium, sans-serif;
}
/* Method and class parameters */
.sig-param{
line-height: 23px;
}
/* Class introduction "class" string at beginning */
.rst-content dl:not(.docutils) .property{
font-size: 18px;
color: black;
}
/* FONTS */
body{
font-family: Calibre, sans-serif;
font-size: 16px;
}
h1 {
font-family: Calibre-Thin, sans-serif;
font-size: 70px;
}
h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend{
font-family: Calibre-Medium, sans-serif;
}
@font-face {
font-family: Calibre-Medium;
src: url(./Calibre-Medium.otf);
font-weight:400;
}
@font-face {
font-family: Calibre;
src: url(./Calibre-Regular.otf);
font-weight:400;
}
@font-face {
font-family: Calibre-Light;
src: url(./Calibre-Light.ttf);
font-weight:400;
}
@font-face {
font-family: Calibre-Thin;
src: url(./Calibre-Thin.otf);
font-weight:400;
}
/**
 * Nav Links to other parts of huggingface.co
 */
/* Pinned to the top-right corner, above the rest of the page (z-index). */
div.menu {
position: absolute;
top: 0;
right: 0;
padding-top: 20px;
padding-right: 20px;
z-index: 1000;
}
/* Pill-style gradient buttons for each nav link. */
div.menu a {
font-size: 14px;
letter-spacing: 0.3px;
text-transform: uppercase;
color: white;
-webkit-font-smoothing: antialiased;
background: linear-gradient(0deg, #6671ffb8, #9a66ffb8 50%);
padding: 10px 16px 6px 16px;
border-radius: 3px;
margin-left: 12px;
position: relative;
}
/* 1px press-down effect on click. */
div.menu a:active {
top: 1px;
}
/* Mid-width screens: push the breadcrumbs down so the absolute-positioned
   menu does not overlap them. */
@media (min-width: 768px) and (max-width: 1750px) {
.wy-breadcrumbs {
margin-top: 32px;
}
}
/* Hide the menu entirely on narrow (mobile) screens. */
@media (max-width: 768px) {
div.menu {
display: none;
}
}
|
AdaMix/docs/source/_static/css/huggingface.css/0
|
{
"file_path": "AdaMix/docs/source/_static/css/huggingface.css",
"repo_id": "AdaMix",
"token_count": 3123
}
| 29 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
BARThez
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The BARThez model was proposed in `BARThez: a Skilled Pretrained French Sequence-to-Sequence Model
<https://arxiv.org/abs/2010.12321>`__ by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis on 23 Oct,
2020.
The abstract of the paper:
*Inductive transfer learning, enabled by self-supervised learning, have taken the entire Natural Language Processing
(NLP) field by storm, with models such as BERT and BART setting new state of the art on countless natural language
understanding tasks. While there are some notable exceptions, most of the available models and research have been
conducted for the English language. In this work, we introduce BARThez, the first BART model for the French language
(to the best of our knowledge). BARThez was pretrained on a very large monolingual French corpus from past research
that we adapted to suit BART's perturbation schemes. Unlike already existing BERT-based French language models such as
CamemBERT and FlauBERT, BARThez is particularly well-suited for generative tasks, since not only its encoder but also
its decoder is pretrained. In addition to discriminative tasks from the FLUE benchmark, we evaluate BARThez on a novel
summarization dataset, OrangeSum, that we release with this paper. We also continue the pretraining of an already
pretrained multilingual BART on BARThez's corpus, and we show that the resulting model, which we call mBARTHez,
provides a significant boost over vanilla BARThez, and is on par with or outperforms CamemBERT and FlauBERT.*
The Authors' code can be found `here <https://github.com/moussaKam/BARThez>`__.
Examples
_______________________________________________________________________________________________________________________
- BARThez can be fine-tuned on sequence-to-sequence tasks in a similar way as BART, check:
:prefix_link:`examples/seq2seq/ <examples/seq2seq/README.md>`.
BarthezTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.BarthezTokenizer
:members:
BarthezTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.BarthezTokenizerFast
:members:
|
AdaMix/docs/source/model_doc/barthez.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/barthez.rst",
"repo_id": "AdaMix",
"token_count": 755
}
| 30 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Encoder Decoder Models
-----------------------------------------------------------------------------------------------------------------------
The :class:`~transformers.EncoderDecoderModel` can be used to initialize a sequence-to-sequence model with any
pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks
was shown in `Leveraging Pre-trained Checkpoints for Sequence Generation Tasks <https://arxiv.org/abs/1907.12461>`__ by
Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
After such an :class:`~transformers.EncoderDecoderModel` has been trained/fine-tuned, it can be saved/loaded just like
any other models (see the examples for more information).
An application of this architecture could be to leverage two pretrained :class:`~transformers.BertModel` as the encoder
and decoder for a summarization model as was shown in: `Text Summarization with Pretrained Encoders
<https://arxiv.org/abs/1908.08345>`__ by Yang Liu and Mirella Lapata.
EncoderDecoderConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.EncoderDecoderConfig
:members:
EncoderDecoderModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.EncoderDecoderModel
:members: forward, from_encoder_decoder_pretrained
|
AdaMix/docs/source/model_doc/encoderdecoder.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/encoderdecoder.rst",
"repo_id": "AdaMix",
"token_count": 531
}
| 31 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
MPNet
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The MPNet model was proposed in `MPNet: Masked and Permuted Pre-training for Language Understanding
<https://arxiv.org/abs/2004.09297>`__ by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
MPNet adopts a novel pre-training method, named masked and permuted language modeling, to inherit the advantages of
masked language modeling and permuted language modeling for natural language understanding.
The abstract from the paper is the following:
*BERT adopts masked language modeling (MLM) for pre-training and is one of the most successful pre-training models.
Since BERT neglects dependency among predicted tokens, XLNet introduces permuted language modeling (PLM) for
pre-training to address this problem. However, XLNet does not leverage the full position information of a sentence and
thus suffers from position discrepancy between pre-training and fine-tuning. In this paper, we propose MPNet, a novel
pre-training method that inherits the advantages of BERT and XLNet and avoids their limitations. MPNet leverages the
dependency among predicted tokens through permuted language modeling (vs. MLM in BERT), and takes auxiliary position
information as input to make the model see a full sentence and thus reducing the position discrepancy (vs. PLM in
XLNet). We pre-train MPNet on a large-scale dataset (over 160GB text corpora) and fine-tune on a variety of
down-streaming tasks (GLUE, SQuAD, etc). Experimental results show that MPNet outperforms MLM and PLM by a large
margin, and achieves better results on these tasks compared with previous state-of-the-art pre-trained methods (e.g.,
BERT, XLNet, RoBERTa) under the same model setting.*
Tips:
- MPNet doesn't have :obj:`token_type_ids`, you don't need to indicate which token belongs to which segment. just
separate your segments with the separation token :obj:`tokenizer.sep_token` (or :obj:`[sep]`).
The original code can be found `here <https://github.com/microsoft/MPNet>`__.
MPNetConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.MPNetConfig
:members:
MPNetTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.MPNetTokenizer
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
create_token_type_ids_from_sequences, save_vocabulary
MPNetTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.MPNetTokenizerFast
:members:
MPNetModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.MPNetModel
:members: forward
MPNetForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.MPNetForMaskedLM
:members: forward
MPNetForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.MPNetForSequenceClassification
:members: forward
MPNetForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.MPNetForMultipleChoice
:members: forward
MPNetForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.MPNetForTokenClassification
:members: forward
MPNetForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.MPNetForQuestionAnswering
:members: forward
TFMPNetModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFMPNetModel
:members: call
TFMPNetForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFMPNetForMaskedLM
:members: call
TFMPNetForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFMPNetForSequenceClassification
:members: call
TFMPNetForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFMPNetForMultipleChoice
:members: call
TFMPNetForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFMPNetForTokenClassification
:members: call
TFMPNetForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFMPNetForQuestionAnswering
:members: call
|
AdaMix/docs/source/model_doc/mpnet.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/mpnet.rst",
"repo_id": "AdaMix",
"token_count": 1367
}
| 32 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
XLM-ProphetNet
-----------------------------------------------------------------------------------------------------------------------
**DISCLAIMER:** If you see something strange, file a `Github Issue
<https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`__ and assign
@patrickvonplaten
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The XLM-ProphetNet model was proposed in `ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training
<https://arxiv.org/abs/2001.04063>`__, by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei
Zhang, Ming Zhou on 13 Jan, 2020.
XLM-ProphetNet is an encoder-decoder model and can predict n-future tokens for "ngram" language modeling instead of
just the next token. Its architecture is identical to ProphetNet, but the model was trained on the multi-lingual
"wiki100" Wikipedia dump.
The abstract from the paper is the following:
*In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel
self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of
the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by
n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time
step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent
overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale
dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for
abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new
state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.*
The Authors' code can be found `here <https://github.com/microsoft/ProphetNet>`__.
XLMProphetNetConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.XLMProphetNetConfig
:members:
XLMProphetNetTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.XLMProphetNetTokenizer
:members:
XLMProphetNetModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.XLMProphetNetModel
XLMProphetNetEncoder
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.XLMProphetNetEncoder
XLMProphetNetDecoder
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.XLMProphetNetDecoder
XLMProphetNetForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.XLMProphetNetForConditionalGeneration
XLMProphetNetForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.XLMProphetNetForCausalLM
|
AdaMix/docs/source/model_doc/xlmprophetnet.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/xlmprophetnet.rst",
"repo_id": "AdaMix",
"token_count": 961
}
| 33 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Summary of the tokenizers
-----------------------------------------------------------------------------------------------------------------------
On this page, we will have a closer look at tokenization. As we saw in :doc:`the preprocessing tutorial
<preprocessing>`, tokenizing a text is splitting it into words or subwords, which then are converted to ids through a
look-up table. Converting words or subwords to ids is straightforward, so in this summary, we will focus on splitting a
text into words or subwords (i.e. tokenizing a text). More specifically, we will look at the three main types of
tokenizers used in 🤗 Transformers: :ref:`Byte-Pair Encoding (BPE) <byte-pair-encoding>`, :ref:`WordPiece <wordpiece>`,
and :ref:`SentencePiece <sentencepiece>`, and show examples of which tokenizer type is used by which model.
Note that on each model page, you can look at the documentation of the associated tokenizer to know which tokenizer
type was used by the pretrained model. For instance, if we look at :class:`~transformers.BertTokenizer`, we can see
that the model uses :ref:`WordPiece <wordpiece>`.
Introduction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Splitting a text into smaller chunks is a task that is harder than it looks, and there are multiple ways of doing so.
For instance, let's look at the sentence ``"Don't you love 🤗 Transformers? We sure do."`` A simple way of tokenizing
this text is to split it by spaces, which would give:
.. code-block::
["Don't", "you", "love", "🤗", "Transformers?", "We", "sure", "do."]
This is a sensible first step, but if we look at the tokens ``"Transformers?"`` and ``"do."``, we notice that the
punctuation is attached to the words ``"Transformer"`` and ``"do"``, which is suboptimal. We should take the
punctuation into account so that a model does not have to learn a different representation of a word and every possible
punctuation symbol that could follow it, which would explode the number of representations the model has to learn.
Taking punctuation into account, tokenizing our exemplary text would give:
.. code-block::
["Don", "'", "t", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."]
Better. However, it is disadvantageous how the tokenization dealt with the word ``"Don't"``. ``"Don't"`` stands for
``"do not"``, so it would be better tokenized as ``["Do", "n't"]``. This is where things start getting complicated, and
part of the reason each model has its own tokenizer type. Depending on the rules we apply for tokenizing a text, a
different tokenized output is generated for the same text. A pretrained model only performs properly if you feed it an
input that was tokenized with the same rules that were used to tokenize its training data.
`spaCy <https://spacy.io/>`__ and `Moses <http://www.statmt.org/moses/?n=Development.GetStarted>`__ are two popular
rule-based tokenizers. Applying them on our example, *spaCy* and *Moses* would output something like:
.. code-block::
["Do", "n't", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."]
As can be seen space and punctuation tokenization, as well as rule-based tokenization, is used here. Space and
punctuation tokenization and rule-based tokenization are both examples of word tokenization, which is loosely defined
as splitting sentences into words. While it's the most intuitive way to split texts into smaller chunks, this
tokenization method can lead to problems for massive text corpora. In this case, space and punctuation tokenization
usually generates a very big vocabulary (the set of all unique words and tokens used). *E.g.*, :doc:`Transformer XL
<model_doc/transformerxl>` uses space and punctuation tokenization, resulting in a vocabulary size of 267,735!
Such a big vocabulary size forces the model to have an enormous embedding matrix as the input and output layer, which
causes both an increased memory and time complexity. In general, transformers models rarely have a vocabulary size
greater than 50,000, especially if they are pretrained only on a single language.
So if simple space and punctuation tokenization is unsatisfactory, why not simply tokenize on characters? While
character tokenization is very simple and would greatly reduce memory and time complexity it makes it much harder for
the model to learn meaningful input representations. *E.g.* learning a meaningful context-independent representation
for the letter ``"t"`` is much harder than learning a context-independent representation for the word ``"today"``.
Therefore, character tokenization is often accompanied by a loss of performance. So to get the best of both worlds,
transformers models use a hybrid between word-level and character-level tokenization called **subword** tokenization.
Subword tokenization
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Subword tokenization algorithms rely on the principle that frequently used words should not be split into smaller
subwords, but rare words should be decomposed into meaningful subwords. For instance ``"annoyingly"`` might be
considered a rare word and could be decomposed into ``"annoying"`` and ``"ly"``. Both ``"annoying"`` and ``"ly"`` as
stand-alone subwords would appear more frequently while at the same time the meaning of ``"annoyingly"`` is kept by the
composite meaning of ``"annoying"`` and ``"ly"``. This is especially useful in agglutinative languages such as Turkish,
where you can form (almost) arbitrarily long complex words by stringing together subwords.
Subword tokenization allows the model to have a reasonable vocabulary size while being able to learn meaningful
context-independent representations. In addition, subword tokenization enables the model to process words it has never
seen before, by decomposing them into known subwords. For instance, the :class:`~transformers.BertTokenizer` tokenizes
``"I have a new GPU!"`` as follows:
.. code-block::
>>> from transformers import BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> tokenizer.tokenize("I have a new GPU!")
["i", "have", "a", "new", "gp", "##u", "!"]
Because we are considering the uncased model, the sentence was lowercased first. We can see that the words ``["i",
"have", "a", "new"]`` are present in the tokenizer's vocabulary, but the word ``"gpu"`` is not. Consequently, the
tokenizer splits ``"gpu"`` into known subwords: ``["gp" and "##u"]``. ``"##"`` means that the rest of the token should
be attached to the previous one, without space (for decoding or reversal of the tokenization).
As another example, :class:`~transformers.XLNetTokenizer` tokenizes our previously exemplary text as follows:
.. code-block::
>>> from transformers import XLNetTokenizer
>>> tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
>>> tokenizer.tokenize("Don't you love 🤗 Transformers? We sure do.")
["▁Don", "'", "t", "▁you", "▁love", "▁", "🤗", "▁", "Transform", "ers", "?", "▁We", "▁sure", "▁do", "."]
We'll get back to the meaning of those ``"▁"`` when we look at :ref:`SentencePiece <sentencepiece>`. As one can see,
the rare word ``"Transformers"`` has been split into the more frequent subwords ``"Transform"`` and ``"ers"``.
Let's now look at how the different subword tokenization algorithms work. Note that all of those tokenization
algorithms rely on some form of training which is usually done on the corpus the corresponding model will be trained
on.
.. _byte-pair-encoding:
Byte-Pair Encoding (BPE)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Byte-Pair Encoding (BPE) was introduced in `Neural Machine Translation of Rare Words with Subword Units (Sennrich et
al., 2015) <https://arxiv.org/abs/1508.07909>`__. BPE relies on a pre-tokenizer that splits the training data into
words. Pretokenization can be as simple as space tokenization, e.g. :doc:`GPT-2 <model_doc/gpt2>`, :doc:`Roberta
<model_doc/roberta>`. More advanced pre-tokenization include rule-based tokenization, e.g. :doc:`XLM <model_doc/xlm>`,
:doc:`FlauBERT <model_doc/flaubert>` which uses Moses for most languages, or :doc:`GPT <model_doc/gpt>` which uses
Spacy and ftfy, to count the frequency of each word in the training corpus.
After pre-tokenization, a set of unique words has been created and the frequency of each word it occurred in the
training data has been determined. Next, BPE creates a base vocabulary consisting of all symbols that occur in the set
of unique words and learns merge rules to form a new symbol from two symbols of the base vocabulary. It does so until
the vocabulary has attained the desired vocabulary size. Note that the desired vocabulary size is a hyperparameter to
define before training the tokenizer.
As an example, let's assume that after pre-tokenization, the following set of words including their frequency has been
determined:
.. code-block::
("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5)
Consequently, the base vocabulary is ``["b", "g", "h", "n", "p", "s", "u"]``. Splitting all words into symbols of the
base vocabulary, we obtain:
.. code-block::
("h" "u" "g", 10), ("p" "u" "g", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "u" "g" "s", 5)
BPE then counts the frequency of each possible symbol pair and picks the symbol pair that occurs most frequently. In
the example above ``"h"`` followed by ``"u"`` is present `10 + 5 = 15` times (10 times in the 10 occurrences of
``"hug"``, 5 times in the 5 occurrences of "hugs"). However, the most frequent symbol pair is ``"u"`` followed by "g",
occurring `10 + 5 + 5 = 20` times in total. Thus, the first merge rule the tokenizer learns is to group all ``"u"``
symbols followed by a ``"g"`` symbol together. Next, "ug" is added to the vocabulary. The set of words then becomes
.. code-block::
("h" "ug", 10), ("p" "ug", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "ug" "s", 5)
BPE then identifies the next most common symbol pair. It's ``"u"`` followed by ``"n"``, which occurs 16 times. ``"u"``,
``"n"`` is merged to ``"un"`` and added to the vocabulary. The next most frequent symbol pair is ``"h"`` followed by
``"ug"``, occurring 15 times. Again the pair is merged and ``"hug"`` can be added to the vocabulary.
At this stage, the vocabulary is ``["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"]`` and our set of unique words
is represented as
.. code-block::
("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5)
Assuming, that the Byte-Pair Encoding training would stop at this point, the learned merge rules would then be applied
to new words (as long as those new words do not include symbols that were not in the base vocabulary). For instance,
the word ``"bug"`` would be tokenized to ``["b", "ug"]`` but ``"mug"`` would be tokenized as ``["<unk>", "ug"]`` since
the symbol ``"m"`` is not in the base vocabulary. In general, single letters such as ``"m"`` are not replaced by the
``"<unk>"`` symbol because the training data usually includes at least one occurrence of each letter, but it is likely
to happen for very special characters like emojis.
As mentioned earlier, the vocabulary size, *i.e.* the base vocabulary size + the number of merges, is a hyperparameter
to choose. For instance :doc:`GPT <model_doc/gpt>` has a vocabulary size of 40,478 since they have 478 base characters
and chose to stop training after 40,000 merges.
Byte-level BPE
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A base vocabulary that includes all possible base characters can be quite large if *e.g.* all unicode characters are
considered as base characters. To have a better base vocabulary, `GPT-2
<https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf>`__ uses bytes
as the base vocabulary, which is a clever trick to force the base vocabulary to be of size 256 while ensuring that
every base character is included in the vocabulary. With some additional rules to deal with punctuation, the GPT2's
tokenizer can tokenize every text without the need for the <unk> symbol. :doc:`GPT-2 <model_doc/gpt2>` has a vocabulary
size of 50,257, which corresponds to the 256 bytes base tokens, a special end-of-text token and the symbols learned
with 50,000 merges.
.. _wordpiece:
WordPiece
=======================================================================================================================
WordPiece is the subword tokenization algorithm used for :doc:`BERT <model_doc/bert>`, :doc:`DistilBERT
<model_doc/distilbert>`, and :doc:`Electra <model_doc/electra>`. The algorithm was outlined in `Japanese and Korean
Voice Search (Schuster et al., 2012)
<https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf>`__ and is very similar to
BPE. WordPiece first initializes the vocabulary to include every character present in the training data and
progressively learns a given number of merge rules. In contrast to BPE, WordPiece does not choose the most frequent
symbol pair, but the one that maximizes the likelihood of the training data once added to the vocabulary.
So what does this mean exactly? Referring to the previous example, maximizing the likelihood of the training data is
equivalent to finding the symbol pair, whose probability divided by the probabilities of its first symbol followed by
its second symbol is the greatest among all symbol pairs. *E.g.* ``"u"``, followed by ``"g"`` would have only been
merged if the probability of ``"ug"`` divided by ``"u"``, ``"g"`` would have been greater than for any other symbol
pair. Intuitively, WordPiece is slightly different to BPE in that it evaluates what it `loses` by merging two symbols
to ensure it's `worth it`.
.. _unigram:
Unigram
=======================================================================================================================
Unigram is a subword tokenization algorithm introduced in `Subword Regularization: Improving Neural Network Translation
Models with Multiple Subword Candidates (Kudo, 2018) <https://arxiv.org/pdf/1804.10959.pdf>`__. In contrast to BPE or
WordPiece, Unigram initializes its base vocabulary to a large number of symbols and progressively trims down each
symbol to obtain a smaller vocabulary. The base vocabulary could for instance correspond to all pre-tokenized words and
the most common substrings. Unigram is not used directly for any of the models in the transformers, but it's used in
conjunction with :ref:`SentencePiece <sentencepiece>`.
At each training step, the Unigram algorithm defines a loss (often defined as the log-likelihood) over the training
data given the current vocabulary and a unigram language model. Then, for each symbol in the vocabulary, the algorithm
computes how much the overall loss would increase if the symbol was to be removed from the vocabulary. Unigram then
removes p (with p usually being 10% or 20%) percent of the symbols whose loss increase is the lowest, *i.e.* those
symbols that least affect the overall loss over the training data. This process is repeated until the vocabulary has
reached the desired size. The Unigram algorithm always keeps the base characters so that any word can be tokenized.
Because Unigram is not based on merge rules (in contrast to BPE and WordPiece), the algorithm has several ways of
tokenizing new text after training. As an example, if a trained Unigram tokenizer exhibits the vocabulary:
.. code-block::
["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"],
``"hugs"`` could be tokenized both as ``["hug", "s"]``, ``["h", "ug", "s"]`` or ``["h", "u", "g", "s"]``. So which one
to choose? Unigram saves the probability of each token in the training corpus on top of saving the vocabulary so that
the probability of each possible tokenization can be computed after training. The algorithm simply picks the most
likely tokenization in practice, but also offers the possibility to sample a possible tokenization according to their
probabilities.
Those probabilities are defined by the loss the tokenizer is trained on. Assuming that the training data consists of
the words :math:`x_{1}, \dots, x_{N}` and that the set of all possible tokenizations for a word :math:`x_{i}` is
defined as :math:`S(x_{i})`, then the overall loss is defined as
.. math::
\mathcal{L} = -\sum_{i=1}^{N} \log \left ( \sum_{x \in S(x_{i})} p(x) \right )
.. _sentencepiece:
SentencePiece
=======================================================================================================================
All tokenization algorithms described so far have the same problem: It is assumed that the input text uses spaces to
separate words. However, not all languages use spaces to separate words. One possible solution is to use language
specific pre-tokenizers, *e.g.* :doc:`XLM <model_doc/xlm>` uses a specific Chinese, Japanese, and Thai pre-tokenizer.
To solve this problem more generally, `SentencePiece: A simple and language independent subword tokenizer and
detokenizer for Neural Text Processing (Kudo et al., 2018) <https://arxiv.org/pdf/1808.06226.pdf>`__ treats the input
as a raw input stream, thus including the space in the set of characters to use. It then uses the BPE or unigram
algorithm to construct the appropriate vocabulary.
The :class:`~transformers.XLNetTokenizer` uses SentencePiece for example, which is also why in the example earlier the
``"▁"`` character was included in the vocabulary. Decoding with SentencePiece is very easy since all tokens can just be
concatenated and ``"▁"`` is replaced by a space.
All transformers models in the library that use SentencePiece use it in combination with unigram. Examples of models
using SentencePiece are :doc:`ALBERT <model_doc/albert>`, :doc:`XLNet <model_doc/xlnet>`, :doc:`Marian
<model_doc/marian>`, and :doc:`T5 <model_doc/t5>`.
|
AdaMix/docs/source/tokenizer_summary.rst/0
|
{
"file_path": "AdaMix/docs/source/tokenizer_summary.rst",
"repo_id": "AdaMix",
"token_count": 5084
}
| 34 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, CTRL, BERT, RoBERTa, XLNet).
GPT, GPT-2 and CTRL are fine-tuned using a causal language modeling (CLM) loss. BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss. XLNet is fine-tuned using a permutation language modeling (PLM) loss.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    # Checkpoint to initialize weights from; None means train a model from scratch.
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
        },
    )
    # Only consulted when training from scratch (model_name_or_path is None) -- see main().
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    # Falls back to model_name_or_path when unset (see main()).
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    # Falls back to model_name_or_path when unset (see main()).
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    # Download/cache directory for pretrained artifacts.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    # Single training text file; mutually exclusive with train_data_files.
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    # Glob pattern expanding to several training files (concatenated in get_dataset()).
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": "The input training data files (multiple files in glob format). "
            "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    # Reference files only make sense with line_by_line + whole_word_mask + mlm (see get_dataset()).
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    # Fixed help-text typo ("Whether ot not" -> "Whether or not").
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    # Used only by the XLNet permutation-LM collator (see main()).
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": "Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling."
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    # <= 0 means "use the tokenizer's maximum length" (resolved in main()).
    block_size: int = field(
        default=-1,
        metadata={
            "help": "Optional input sequence length after tokenization."
            "The training dataset will be truncated in block of this size for training."
            "Default to the model max input length for single sentence inputs (take into account special tokens)."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the training or evaluation dataset described by ``args``.

    Args:
        args: Data arguments (file paths, line-by-line flag, block size, ...).
        tokenizer: Tokenizer used to encode the text.
        evaluate: When True, build the evaluation dataset instead of the training one.
        cache_dir: Optional directory for the cached ``TextDataset`` features.

    Returns:
        A torch dataset; multiple training files (glob) are merged with ``ConcatDataset``.

    Raises:
        ValueError: if a reference file is given without whole-word-mask MLM enabled.
    """

    def _dataset(file_path, ref_path=None):
        # One dataset per input file; dispatch on line-by-line vs. contiguous-block mode.
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    # Fixed message wording (was "world whole masking").
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        # Glob pattern: build one dataset per matching file and concatenate them.
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    """Fine-tune (or train from scratch) a language model with a CLM, MLM, WWM or PLM objective.

    Returns:
        dict: evaluation results (``{"perplexity": ...}``) when ``--do_eval`` is set, else empty.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )
    # Setup logging (non-main ranks only log warnings to keep output readable).
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            # Bug fix: the implicit concatenation lacked a separating space ("save it,and load it").
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it, "
            "and load it from here, using --tokenizer_name"
        )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            # Bug fix: the implicit concatenation lacked a separating space ("using the--mlm flag").
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )
    # NOTE(review): ``tokenizer.max_len`` is deprecated in newer transformers in favor of
    # ``model_max_length`` -- presumably fine for the version pinned here; confirm before upgrading.
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    # Pick the collator matching the training objective (PLM for XLNet, WWM/MLM/CLM otherwise).
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
    # Training
    if training_args.do_train:
        # Resume from the checkpoint directory only when one was actually given.
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        # Only the main process writes the results file.
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info(" %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
        results.update(result)
    return results
def _mp_fn(index):
    """TPU entry point used by ``xla_spawn.py``; ``index`` is the spawned process ordinal (unused)."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Standard CLI entry point.
    main()
|
AdaMix/examples/legacy/run_language_modeling.py/0
|
{
"file_path": "AdaMix/examples/legacy/run_language_modeling.py",
"repo_id": "AdaMix",
"token_count": 5485
}
| 35 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# as due to their complexity multi-gpu tests could impact other tests, and to aid debug we have those in a separate module.
import os
import sys
from transformers.testing_utils import TestCasePlus, execute_subprocess_async, get_gpu_count, require_torch_gpu, slow
from .utils import load_json
class TestSummarizationDistillerMultiGPU(TestCasePlus):
    """Distributed-evaluation test for the seq2seq scripts.

    Lives in its own module because multi-GPU tests can interfere with other tests
    (see the module-level comment above).
    """

    @classmethod
    def setUpClass(cls):
        # No shared fixtures are needed; the return value is ignored by unittest.
        return cls
    @slow
    @require_torch_gpu
    def test_distributed_eval(self):
        """Launch ``run_distributed_eval.py`` under ``torch.distributed`` and check the BLEU score."""
        output_dir = self.get_auto_remove_tmp_dir()
        # CLI arguments forwarded to run_distributed_eval.py (whitespace-split, so
        # the f-string's indentation is irrelevant).
        args = f"""
            --model_name Helsinki-NLP/opus-mt-en-ro
            --save_dir {output_dir}
            --data_dir {self.test_file_dir_str}/test_data/wmt_en_ro
            --num_beams 2
            --task translation
        """.split()
        # we want this test to run even if there is only one GPU, but if there are more we use them all
        n_gpu = get_gpu_count()
        distributed_args = f"""
            -m torch.distributed.launch
            --nproc_per_node={n_gpu}
            {self.test_file_dir}/run_distributed_eval.py
        """.split()
        cmd = [sys.executable] + distributed_args + args
        # Runs the child processes to completion; raises if any of them fail.
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, "test_bleu.json")
        metrics = load_json(metrics_save_path)
        # print(metrics)
        self.assertGreaterEqual(metrics["bleu"], 25)
|
AdaMix/examples/legacy/seq2seq/old_test_seq2seq_examples_multi_gpu.py/0
|
{
"file_path": "AdaMix/examples/legacy/seq2seq/old_test_seq2seq_examples_multi_gpu.py",
"repo_id": "AdaMix",
"token_count": 771
}
| 36 |
# Fetch a CONLL2003 split unless the destination file already exists.
# $1 = split label (used in the message), $2 = destination file, $3 = source URL
fetch_conll_split() {
  if ! [ -f "$2" ]; then
    echo "Downloading CONLL2003 $1 dataset...."
    curl -L -o "$2" "$3"
  fi
}

CONLL_BASE='https://github.com/davidsbatista/NER-datasets/raw/master/CONLL2003'
fetch_conll_split dev ./dev.txt "$CONLL_BASE/valid.txt"
fetch_conll_split test ./test.txt "$CONLL_BASE/test.txt"
fetch_conll_split train ./train.txt "$CONLL_BASE/train.txt"

# Hyper-parameters for BERT-based chunking fine-tuning.
export MAX_LENGTH=200
export BERT_MODEL=bert-base-uncased
export OUTPUT_DIR=chunker-model
export BATCH_SIZE=32
export NUM_EPOCHS=3
export SAVE_STEPS=750
export SEED=1

# Train, evaluate and predict with the token-classification script on the Chunk task.
python3 run_ner.py \
--task_type Chunk \
--data_dir . \
--model_name_or_path $BERT_MODEL \
--output_dir $OUTPUT_DIR \
--max_seq_length $MAX_LENGTH \
--num_train_epochs $NUM_EPOCHS \
--per_gpu_train_batch_size $BATCH_SIZE \
--save_steps $SAVE_STEPS \
--seed $SEED \
--do_train \
--do_eval \
--do_predict
|
AdaMix/examples/legacy/token-classification/run_chunk.sh/0
|
{
"file_path": "AdaMix/examples/legacy/token-classification/run_chunk.sh",
"repo_id": "AdaMix",
"token_count": 414
}
| 37 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for question-answering."""
import logging
import os
from dataclasses import dataclass, field
from typing import Optional
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
HfArgumentParser,
TFAutoModelForQuestionAnswering,
TFTrainer,
TFTrainingArguments,
squad_convert_examples_to_features,
)
from transformers.data.processors.squad import SquadV1Processor, SquadV2Processor
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # Required: checkpoint path or hub identifier to fine-tune from.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # Falls back to model_name_or_path when unset (see main()).
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    # Falls back to model_name_or_path when unset (see main()).
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    # Download/cache directory for pretrained artifacts.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    # Local SQuAD .json directory; ignored when use_tfds is True (see main()).
    data_dir: Optional[str] = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    use_tfds: Optional[bool] = field(default=True, metadata={"help": "If TFDS should be used or not."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": "The maximum number of tokens for the question. Questions longer than this will "
            "be truncated to this length."
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": "The maximum length of an answer that can be generated. This is needed because the start "
            "and end predictions are not conditioned on one another."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    # Bug fix: the help text was a copy-paste of null_score_diff_threshold's message.
    n_best_size: int = field(
        default=20,
        metadata={"help": "The total number of n-best predictions to generate in the nbest_predictions.json output file."},
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": "language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
        },
    )
def main():
    """Fine-tune a TF question-answering model on SQuAD (v1 via TFDS, or v1/v2 from local files)."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        "n_replicas: %s, distributed training: %s, 16-bits training: %s",
        training_args.n_replicas,
        bool(training_args.n_replicas > 1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)
    # Prepare Question-Answering task
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForQuestionAnswering.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    # Get datasets
    if data_args.use_tfds:
        if data_args.version_2_with_negative:
            # logger.warn is deprecated; logger.warning is the supported spelling.
            logger.warning("tensorflow_datasets does not handle version 2 of SQuAD. Switch to version 1 automatically")
        try:
            import tensorflow_datasets as tfds
        except ImportError:
            raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")
        tfds_examples = tfds.load("squad", data_dir=data_args.data_dir)
        train_examples = (
            SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=False)
            if training_args.do_train
            else None
        )
        eval_examples = (
            SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=True)
            if training_args.do_eval
            else None
        )
    else:
        processor = SquadV2Processor() if data_args.version_2_with_negative else SquadV1Processor()
        train_examples = processor.get_train_examples(data_args.data_dir) if training_args.do_train else None
        eval_examples = processor.get_dev_examples(data_args.data_dir) if training_args.do_eval else None
    train_dataset = (
        squad_convert_examples_to_features(
            examples=train_examples,
            tokenizer=tokenizer,
            max_seq_length=data_args.max_seq_length,
            doc_stride=data_args.doc_stride,
            max_query_length=data_args.max_query_length,
            is_training=True,
            return_dataset="tf",
        )
        if training_args.do_train
        else None
    )
    # Bug fix: attach the cardinality assertion only when a dataset was built --
    # previously this line crashed with AttributeError when --do_train was absent.
    if train_dataset is not None:
        train_dataset = train_dataset.apply(tf.data.experimental.assert_cardinality(len(train_examples)))
    eval_dataset = (
        squad_convert_examples_to_features(
            examples=eval_examples,
            tokenizer=tokenizer,
            max_seq_length=data_args.max_seq_length,
            doc_stride=data_args.doc_stride,
            max_query_length=data_args.max_query_length,
            is_training=False,
            return_dataset="tf",
        )
        if training_args.do_eval
        else None
    )
    # Same guard for the eval side (crashed without --do_eval before).
    if eval_dataset is not None:
        eval_dataset = eval_dataset.apply(tf.data.experimental.assert_cardinality(len(eval_examples)))
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
if __name__ == "__main__":
    # Standard CLI entry point.
    main()
|
AdaMix/examples/question-answering/run_tf_squad.py/0
|
{
"file_path": "AdaMix/examples/question-answering/run_tf_squad.py",
"repo_id": "AdaMix",
"token_count": 3694
}
| 38 |
{
"activation": "gelu",
"attention_dropout": 0.1,
"dim": 768,
"dropout": 0.1,
"hidden_dim": 3072,
"initializer_range": 0.02,
"max_position_embeddings": 512,
"n_heads": 12,
"n_layers": 6,
"sinusoidal_pos_embds": true,
"tie_weights_": true,
"vocab_size": 28996
}
|
AdaMix/examples/research_projects/distillation/training_configs/distilbert-base-cased.json/0
|
{
"file_path": "AdaMix/examples/research_projects/distillation/training_configs/distilbert-base-cased.json",
"repo_id": "AdaMix",
"token_count": 134
}
| 39 |
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal, Huggingface team :)
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.import copy
"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import numpy as np
from PIL import Image
from tqdm.auto import tqdm
import cv2
import requests
import wget
from filelock import FileLock
from yaml import Loader, dump, load
try:
import torch
_torch_available = True
except ImportError:
_torch_available = False
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Read the Visual Genome object and attribute label files.

    Each line is expected to look like ``label, synonym, ...``; only the first
    comma-separated entry is kept, lower-cased and stripped of whitespace.

    Returns:
        tuple[list[str], list[str]]: ``(object_labels, attribute_labels)``.
    """

    def _first_column(path):
        # One normalized label per line of the file.
        with open(path) as handle:
            return [line.split(",")[0].lower().strip() for line in handle.readlines()]

    return _first_column(objs), _first_column(attrs)
def load_checkpoint(ckp):
    """Load a Detectron2-style pickled checkpoint into an ``OrderedDict`` of tensors.

    Args:
        ckp: Path to a pickle file whose top-level ``"model"`` entry maps
            parameter names to ``numpy.ndarray`` or ``torch.Tensor`` values.

    Returns:
        OrderedDict[str, torch.Tensor]: parameter name -> tensor, preserving
        the checkpoint's key order.

    Raises:
        AssertionError: if a value is neither an ndarray nor a torch.Tensor.
    """
    r = OrderedDict()
    # NOTE: pickle can execute arbitrary code -- only load trusted checkpoint files.
    with open(ckp, "rb") as f:
        weights = pkl.load(f)["model"]
    # list() snapshots the keys so popping while iterating is safe
    # (the original deepcopy of the key list was unnecessary).
    for k in list(weights.keys()):
        v = weights.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # Bug fix: ``torch.tensor`` is a factory function, not a type --
            # isinstance() needs the ``torch.Tensor`` class here.
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    """Attribute-style wrapper around a nested configuration dict (loaded from YAML).

    Nested dicts become nested ``Config`` objects, so ``cfg.model.dim`` works.
    Every attribute is also mirrored under its upper-cased name (see
    ``__setattr__``), and dotted keys like ``"a.b"`` are pushed down into the
    nested child configs.
    """

    # Class-level default; replaced by a per-instance dict at the end of __init__.
    _pointer = {}
    def __init__(self, dictionary: dict, name: str = "root", level=0):
        # ``name``/``level`` record where this node sits in the tree (used by __str__).
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            # Deep-copy so this Config never aliases the caller's dict.
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d
    def __repr__(self):
        # Show only the top-level keys.
        return str(list((self._pointer.keys())))
    def __setattr__(self, key, val):
        # Mirror every attribute under both its original and upper-cased name.
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            # Dotted key: forward the assignment into the nested Config(s).
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                # NOTE(review): ``l`` is a str and ``last_level`` an int, so this
                # comparison is always False and the else-branch always runs --
                # looks like ``i == last_level`` was intended; confirm upstream.
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        # Shallow view: nested values remain Config objects.
        return self._pointer
    def dump_yaml(self, data, file_name):
        # Serialize ``data`` (not self) to a YAML file.
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)
    def dump_json(self, data, file_name):
        # Serialize ``data`` (not self) to a JSON file.
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)
    @staticmethod
    def load_yaml(config):
        """Parse the YAML file at path ``config`` and return the raw dict."""
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data
    def __str__(self):
        # Pretty-print the tree, indenting by ``t`` per nesting level.
        t = " "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                # Child Config renders itself recursively.
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        # Drop the trailing newline.
        return r[:-1]
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        """Build a Config from a local dir/file, URL, or model identifier."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)
    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        """Resolve ``pretrained_model_name_or_path`` to a local YAML file and parse it.

        Accepts a local directory (containing ``config.yaml``), a direct file
        path or URL, or a model identifier resolved via the HF bucket.
        Returns ``(config_dict, remaining_kwargs)``.
        """
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")
        return Config.load_yaml(resolved_config_file), kwargs
# quick compare tensors
def compare(in_tensor):
    """Debug helper: compare ``in_tensor`` against the tensor saved in ``dump.pt``.

    Prints shapes and the first few entries of both tensors, asserts they are
    element-wise close (rtol=0.01, atol=0.1), then ALWAYS raises so a debugging
    session cannot silently continue past this checkpoint.

    Raises:
        AssertionError: with the percentage of mismatched elements, if the
            tensors differ beyond tolerance.
        Exception: "tensors are all good" when the comparison succeeds.
    """
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    # NOTE(review): assumes the dumped tensor carries a leading batch dim that
    # ``in_tensor`` lacks -- confirm against the code that writes dump.pt.
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(
        n1, n2, rtol=0.01, atol=0.1
    ), f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x == False])/len(n1.flatten())*100:.4f} % element-wise mismatch"
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    """Return True when ``url_or_filename`` is an http(s) URL rather than a local path."""
    scheme = urlparse(url_or_filename).scheme
    return scheme in {"http", "https"}
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    """Build the download URL for ``filename`` belonging to model ``model_id``.

    Bug fix: ``filename`` was never interpolated into the returned URL (the
    f-strings contained a literal placeholder instead of ``{filename}``).

    Args:
        model_id: Model identifier -- either a legacy flat name like
            ``"bert-base-uncased"`` or a namespaced ``"org/model"``.
        filename: File within the model repo (e.g. a weights or config file).
        use_cdn: Serve from the CloudFront CDN when True, otherwise straight
            from the S3 bucket.

    Returns:
        str: the fully qualified URL.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        # Legacy models live flat in the bucket as "<model_id>-<filename>".
        return f"{endpoint}/{model_id}-{filename}"
    else:
        # Namespaced models live under "<org>/<model>/<filename>".
        return f"{endpoint}/{model_id}/{filename}"
def http_get(
    url,
    temp_file,
    proxies=None,
    resume_size=0,
    user_agent=None,
):
    """Stream ``url`` into the open binary file object ``temp_file`` with a progress bar.

    Supports resuming a partial download: ``resume_size`` is sent as an HTTP
    Range header and a 416 (Range Not Satisfiable) response returns silently.

    Args:
        url: Source URL.
        temp_file: Writable binary file object the payload is appended to.
        proxies: Optional proxies mapping passed to ``requests``.
        resume_size: Number of bytes already downloaded.
        user_agent: Extra user-agent info -- a dict of key/value pairs or a plain string.
    """
    # Advertise python (and torch, when available) versions plus caller-supplied UA info.
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416: # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    # Content-Length counts only the remaining bytes, so add what we already have.
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """Download *url* into *cache_dir* (default TRANSFORMERS_CACHE) and return the local path.

    The cache filename is derived from the URL and the server-reported ETag
    (via `url_to_filename`), so a new upstream version creates a new cache
    entry.  When offline (no ETag obtainable) falls back to the most recent
    previously cached copy, or returns None if there is none.

    Raises:
        ValueError: if local_files_only=True and no matching cached file exists.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            # HEAD request only: we just need the ETag to build the cache key.
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            # Fall back to any previously cached version of this URL (any ETag),
            # skipping the .json metadata and .lock files that sit next to it.
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"
            @contextmanager
            def _resumable_file_manager():
                # Append mode so bytes from a partial download are preserved.
                with open(incomplete_path, "a+b") as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            # NOTE(review): logging-style "%s" args passed to print — the template
            # and values are printed separately; presumably a converted logger call.
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )
        # Atomic rename: the cache entry only appears once fully downloaded.
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    """Map a URL (plus optional ETag) to a deterministic, filesystem-safe cache name.

    The name is the sha256 hex digest of the URL; when an ETag is given its
    digest is appended after a dot, so new upstream versions get new names.
    A ".h5" suffix is preserved so downstream format detection keeps working.
    """
    name = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        name = name + "." + sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        name += ".h5"
    return name
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """Resolve *url_or_filename* to a local filesystem path.

    Remote http(s) URLs are downloaded into the cache via `get_from_cache`;
    existing local paths are returned as-is.  With extract_compressed_file=True,
    zip/tar archives are unpacked next to themselves into a "-extracted"
    directory and that directory is returned instead.

    Raises:
        EnvironmentError: local path does not exist, or archive format unknown.
        ValueError: the string is neither a URL nor a plausible local path.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        # Non-archives pass through untouched.
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        # Reuse a previous non-empty extraction unless force_extract is set.
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            # Start from a clean directory so stale files never linger.
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def get_data(query, delim=","):
    """Load python-literal data from a local file path or a remote URL.

    Local files are read and evaluated as a Python literal/expression.  URLs
    are fetched with requests; the body is parsed as JSON, then as a Python
    expression, falling back to a list of lines.

    Args:
        query: file path or URL to load.
        delim: unused; kept for backward compatibility with existing callers.

    Returns:
        The evaluated object, or the decoded text split into lines.
    """
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            # NOTE(security): eval runs arbitrary code — only load trusted files.
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            # BUG FIX: previously called requests.json() on the *module*
            # (always AttributeError), so JSON bodies never parsed as JSON.
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            # NOTE(security): eval runs arbitrary code — only query trusted URLs.
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    """Fetch an image over HTTP and return it as a numpy array (H, W, C)."""
    raw = requests.get(url).content
    return np.array(Image.open(BytesIO(raw)))
# to load legacy frcnn checkpoint from detectron
def load_frcnn_pkl_from_url(url):
    """Fetch a legacy detectron Faster-RCNN pickle checkpoint and convert it.

    Downloads the file into the current working directory if not already
    present, unpickles it, and returns its "model" weights as torch tensors.
    BatchNorm "running_var" entries additionally get a zero
    "num_batches_tracked" companion so the state dict loads into torch.
    """
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    # NOTE(security): pickle.load on a downloaded file executes arbitrary
    # code during unpickling — only use trusted URLs.
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    converted = {}
    for name, arr in model.items():
        converted[name] = torch.from_numpy(arr)
        if "running_var" in name:
            tracked_key = name.replace("running_var", "num_batches_tracked")
            converted[tracked_key] = torch.Tensor([0])
    return converted
def get_demo_path():
    """Print the absolute path of the demo notebook in this package's parent dir."""
    demo_dir = os.path.abspath(os.path.join(PATH, os.pardir))
    print(f"{demo_dir}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    """Load an image from a local path or URL into a numpy array.

    Local files are read with OpenCV; anything else is treated as a URL.
    """
    assert isinstance(im, str)
    img = cv2.imread(im) if os.path.isfile(im) else get_image_from_url(im)
    assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # NOTE(review): reversing channels again after BGR2RGB yields BGR when
    # input_format == "RGB" — presumably what the downstream frcnn expects;
    # confirm against the consumer before changing.
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    """Yield successive slices of *images*, each of length at most *batch*."""
    total = len(images)
    return (images[start : start + batch] for start in range(0, total, batch))
|
AdaMix/examples/research_projects/lxmert/utils.py/0
|
{
"file_path": "AdaMix/examples/research_projects/lxmert/utils.py",
"repo_id": "AdaMix",
"token_count": 8332
}
| 40 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.