| text (stringlengths 5-22M) | id (stringlengths 12-177) | metadata (dict) | __index_level_0__ (int64 0-1.37k) |
---|---|---|---|
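Each row below pairs a UFO `.glif` XML document (the `text` column) with its repository path (`id`), a small metadata dict, and the row index. As a minimal sketch of how one such record could be consumed (assuming only Python's standard library; the helper name `parse_glif` is illustrative, not part of the dataset), the snippet extracts the glyph name, advance width, code point, and contour points from a `text` cell:

```python
import xml.etree.ElementTree as ET

def parse_glif(glif_xml: str) -> dict:
    """Extract glyph name, advance width, code point, and contours from a .glif string."""
    # fromstring() rejects str input that carries an encoding declaration,
    # so pass the document as bytes instead.
    root = ET.fromstring(glif_xml.encode("utf-8"))
    unicode_elem = root.find("unicode")
    contours = [
        [(int(p.get("x")), int(p.get("y")), p.get("type")) for p in contour.iter("point")]
        for contour in root.iter("contour")
    ]
    return {
        "name": root.get("name"),                        # e.g. "square-medium-outline"
        "advance_width": int(root.find("advance").get("width")),
        "unicode_hex": unicode_elem.get("hex") if unicode_elem is not None else None,
        "contours": contours,                            # offcurve points carry type None
    }
```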
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="square-medium-outline" format="2">
<advance width="1200"/>
<unicode hex="F0A14"/>
<note>
square-medium-outline
</note>
<outline>
<contour>
<point x="307" y="1003" type="line"/>
<point x="893" y="1003" type="line"/>
<point x="893" y="417" type="line"/>
<point x="307" y="417" type="line"/>
</contour>
<contour>
<point x="1180" y="130" type="line"/>
<point x="1180" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="130" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/square-medium-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/square-medium-outline.glif",
"repo_id": "cascadia-code",
"token_count": 285
}
| 700 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="star-check-outline" format="2">
<advance width="1200"/>
<unicode hex="F156A"/>
<note>
star-check-outline
</note>
<outline>
<contour>
<point x="588" y="389" type="line"/>
<point x="588" y="423"/>
<point x="612" y="479" type="qcurve"/>
<point x="583" y="495" type="line"/>
<point x="369" y="365" type="line"/>
<point x="424" y="608" type="line"/>
<point x="239" y="772" type="line"/>
<point x="488" y="796" type="line"/>
<point x="583" y="1020" type="line"/>
<point x="678" y="796" type="line"/>
<point x="926" y="772" type="line"/>
<point x="802" y="664" type="line"/>
<point x="855" y="688"/>
<point x="921" y="688" type="qcurve" smooth="yes"/>
<point x="966" y="688" type="line"/>
<point x="1146" y="846" type="line"/>
<point x="741" y="880" type="line"/>
<point x="583" y="1250" type="line"/>
<point x="424" y="880" type="line"/>
<point x="20" y="846" type="line"/>
<point x="324" y="574" type="line"/>
<point x="234" y="180" type="line"/>
</contour>
<contour>
<point x="1180" y="439" type="line"/>
<point x="1114" y="518" type="line"/>
<point x="910" y="315" type="line"/>
<point x="821" y="405" type="line"/>
<point x="752" y="339" type="line"/>
<point x="910" y="170" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star-check-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star-check-outline.glif",
"repo_id": "cascadia-code",
"token_count": 691
}
| 701 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="star-off" format="2">
<advance width="1200"/>
<unicode hex="F04D1"/>
<note>
star-off
</note>
<outline>
<contour>
<point x="1180" y="200" type="line"/>
<point x="92" y="1290" type="line"/>
<point x="20" y="1218" type="line"/>
<point x="341" y="896" type="line"/>
<point x="72" y="876" type="line"/>
<point x="370" y="609" type="line"/>
<point x="282" y="223" type="line"/>
<point x="623" y="428" type="line"/>
<point x="965" y="223" type="line"/>
<point x="950" y="291" type="line"/>
<point x="1110" y="130" type="line"/>
</contour>
<contour>
<point x="895" y="627" type="line"/>
<point x="1175" y="876" type="line"/>
<point x="779" y="909" type="line"/>
<point x="623" y="1272" type="line"/>
<point x="512" y="1008" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star-off.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star-off.glif",
"repo_id": "cascadia-code",
"token_count": 444
}
| 702 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="steering" format="2">
<advance width="1200"/>
<unicode hex="F04D4"/>
<note>
steering
</note>
<outline>
<contour>
<point x="657" y="424" type="line"/>
<point x="744" y="443"/>
<point x="867" y="566"/>
<point x="886" y="653" type="qcurve"/>
<point x="1060" y="653" type="line"/>
<point x="1041" y="495"/>
<point x="815" y="269"/>
<point x="657" y="250" type="qcurve"/>
</contour>
<contour>
<point x="309" y="767" type="line"/>
<point x="140" y="767" type="line"/>
<point x="162" y="939"/>
<point x="426" y="1176"/>
<point x="774" y="1176"/>
<point x="1038" y="939"/>
<point x="1060" y="767" type="qcurve"/>
<point x="891" y="767" type="line"/>
<point x="717" y="941" type="line"/>
<point x="483" y="941" type="line"/>
</contour>
<contour>
<point x="385" y="272"/>
<point x="159" y="498"/>
<point x="140" y="653" type="qcurve"/>
<point x="314" y="653" type="line"/>
<point x="333" y="566"/>
<point x="456" y="443"/>
<point x="543" y="424" type="qcurve"/>
<point x="543" y="250" type="line"/>
</contour>
<contour>
<point x="363" y="1290"/>
<point x="20" y="947"/>
<point x="20" y="473"/>
<point x="363" y="130"/>
<point x="837" y="130"/>
<point x="1180" y="473"/>
<point x="1180" y="947"/>
<point x="837" y="1290"/>
<point x="600" y="1290" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/steering.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/steering.glif",
"repo_id": "cascadia-code",
"token_count": 806
}
| 703 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="sticker-plus" format="2">
<advance width="1200"/>
<unicode hex="F136C"/>
<note>
sticker-plus
</note>
<outline>
<contour>
<point x="1060" y="1290"/>
<point x="978" y="1290" type="qcurve" smooth="yes"/>
<point x="222" y="1290" type="line" smooth="yes"/>
<point x="140" y="1290"/>
<point x="20" y="1170"/>
<point x="20" y="1088" type="qcurve" smooth="yes"/>
<point x="20" y="332" type="line" smooth="yes"/>
<point x="20" y="250"/>
<point x="140" y="130"/>
<point x="222" y="130" type="qcurve" smooth="yes"/>
<point x="831" y="130" type="line"/>
<point x="1180" y="479" type="line"/>
<point x="1180" y="1088" type="line" smooth="yes"/>
<point x="1180" y="1170"/>
</contour>
<contour>
<point x="657" y="653" type="line"/>
<point x="657" y="479" type="line"/>
<point x="543" y="479" type="line"/>
<point x="543" y="653" type="line"/>
<point x="369" y="653" type="line"/>
<point x="369" y="767" type="line"/>
<point x="543" y="767" type="line"/>
<point x="543" y="941" type="line"/>
<point x="657" y="941" type="line"/>
<point x="657" y="767" type="line"/>
<point x="831" y="767" type="line"/>
<point x="831" y="653" type="line"/>
</contour>
<contour>
<point x="1066" y="536" type="line"/>
<point x="774" y="244" type="line"/>
<point x="774" y="332" type="line" smooth="yes"/>
<point x="774" y="416"/>
<point x="894" y="536"/>
<point x="978" y="536" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/sticker-plus.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/sticker-plus.glif",
"repo_id": "cascadia-code",
"token_count": 801
}
| 704 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="store-alert-outline" format="2">
<advance width="1200"/>
<unicode hex="F18C2"/>
<note>
store-alert-outline
</note>
<outline>
<contour>
<point x="916" y="974" type="line"/>
<point x="74" y="974" type="line"/>
<point x="20" y="710" type="line"/>
<point x="20" y="604" type="line"/>
<point x="74" y="604" type="line"/>
<point x="74" y="288" type="line"/>
<point x="600" y="288" type="line"/>
<point x="600" y="604" type="line"/>
<point x="810" y="604" type="line"/>
<point x="810" y="288" type="line"/>
<point x="916" y="288" type="line"/>
<point x="916" y="604" type="line"/>
<point x="968" y="604" type="line"/>
<point x="968" y="710" type="line"/>
</contour>
<contour>
<point x="494" y="604" type="line"/>
<point x="494" y="394" type="line"/>
<point x="178" y="394" type="line"/>
<point x="178" y="604" type="line"/>
</contour>
<contour>
<point x="862" y="710" type="line"/>
<point x="129" y="710" type="line"/>
<point x="161" y="868" type="line"/>
<point x="830" y="868" type="line"/>
</contour>
<contour>
<point x="916" y="1132" type="line"/>
<point x="74" y="1132" type="line"/>
<point x="74" y="1026" type="line"/>
<point x="916" y="1026" type="line"/>
</contour>
<contour>
<point x="1074" y="974" type="line"/>
<point x="1074" y="658" type="line"/>
<point x="1180" y="658" type="line"/>
<point x="1180" y="974" type="line"/>
</contour>
<contour>
<point x="1074" y="446" type="line"/>
<point x="1180" y="446" type="line"/>
<point x="1180" y="552" type="line"/>
<point x="1074" y="552" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/store-alert-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/store-alert-outline.glif",
"repo_id": "cascadia-code",
"token_count": 882
}
| 705 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="store-outline" format="2">
<advance width="1200"/>
<unicode hex="F1361"/>
<note>
store-outline
</note>
<outline>
<contour>
<point x="189" y="903" type="line"/>
<point x="1011" y="903" type="line"/>
<point x="1047" y="710" type="line"/>
<point x="153" y="710" type="line"/>
</contour>
<contour>
<point x="1117" y="1097" type="line"/>
<point x="1117" y="1227" type="line"/>
<point x="83" y="1227" type="line"/>
<point x="83" y="1097" type="line"/>
</contour>
<contour>
<point x="1180" y="710" type="line"/>
<point x="1117" y="1033" type="line"/>
<point x="83" y="1033" type="line"/>
<point x="20" y="710" type="line"/>
<point x="20" y="580" type="line"/>
<point x="83" y="580" type="line"/>
<point x="83" y="193" type="line"/>
<point x="730" y="193" type="line"/>
<point x="730" y="580" type="line"/>
<point x="987" y="580" type="line"/>
<point x="987" y="193" type="line"/>
<point x="1117" y="193" type="line"/>
<point x="1117" y="580" type="line"/>
<point x="1180" y="580" type="line"/>
</contour>
<contour>
<point x="600" y="323" type="line"/>
<point x="213" y="323" type="line"/>
<point x="213" y="580" type="line"/>
<point x="600" y="580" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/store-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/store-outline.glif",
"repo_id": "cascadia-code",
"token_count": 683
}
| 706 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="table-furniture" format="2">
<advance width="1200"/>
<unicode hex="F05BC"/>
<note>
table-furniture
</note>
<outline>
<contour>
<point x="20" y="884" type="line"/>
<point x="134" y="884" type="line"/>
<point x="77" y="361" type="line"/>
<point x="222" y="361" type="line"/>
<point x="254" y="650" type="line"/>
<point x="946" y="650" type="line"/>
<point x="978" y="361" type="line"/>
<point x="1123" y="361" type="line"/>
<point x="1066" y="884" type="line"/>
<point x="1180" y="884" type="line"/>
<point x="1180" y="1059" type="line"/>
<point x="20" y="1059" type="line"/>
</contour>
<contour>
<point x="932" y="767" type="line"/>
<point x="268" y="767" type="line"/>
<point x="281" y="884" type="line"/>
<point x="919" y="884" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/table-furniture.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/table-furniture.glif",
"repo_id": "cascadia-code",
"token_count": 445
}
| 707 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="tablet-android" format="2">
<advance width="1200"/>
<unicode hex="F04F7"/>
<note>
tablet-android
</note>
<outline>
<contour>
<point x="1030" y="1243" type="line"/>
<point x="1030" y="297" type="line"/>
<point x="170" y="297" type="line"/>
<point x="170" y="1243" type="line"/>
</contour>
<contour>
<point x="719" y="178" type="line"/>
<point x="719" y="119" type="line"/>
<point x="481" y="119" type="line"/>
<point x="481" y="178" type="line"/>
</contour>
<contour>
<point x="1030" y="1420"/>
<point x="955" y="1420" type="qcurve" smooth="yes"/>
<point x="245" y="1420" type="line" smooth="yes"/>
<point x="170" y="1420"/>
<point x="68" y="1317"/>
<point x="68" y="1243" type="qcurve" smooth="yes"/>
<point x="68" y="178" type="line" smooth="yes"/>
<point x="68" y="103"/>
<point x="170" y="0"/>
<point x="245" y="0" type="qcurve" smooth="yes"/>
<point x="955" y="0" type="line" smooth="yes"/>
<point x="1030" y="0"/>
<point x="1133" y="103"/>
<point x="1133" y="178" type="qcurve" smooth="yes"/>
<point x="1133" y="1243" type="line" smooth="yes"/>
<point x="1133" y="1317"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tablet-android.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tablet-android.glif",
"repo_id": "cascadia-code",
"token_count": 641
}
| 708 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="text-shadow" format="2">
<advance width="1200"/>
<unicode hex="F0669"/>
<note>
text-shadow
</note>
<outline>
<contour>
<point x="20" y="1119" type="line"/>
<point x="362" y="1119" type="line"/>
<point x="362" y="301" type="line"/>
<point x="566" y="301" type="line"/>
<point x="566" y="1119" type="line"/>
<point x="905" y="1119" type="line"/>
<point x="905" y="1324" type="line"/>
<point x="20" y="1324" type="line"/>
</contour>
<contour>
<point x="634" y="915" type="line"/>
<point x="771" y="915" type="line"/>
<point x="771" y="1052" type="line"/>
<point x="634" y="1052" type="line"/>
</contour>
<contour>
<point x="838" y="915" type="line"/>
<point x="975" y="915" type="line"/>
<point x="975" y="1052" type="line"/>
<point x="838" y="1052" type="line"/>
</contour>
<contour>
<point x="1043" y="915" type="line"/>
<point x="1180" y="915" type="line"/>
<point x="1180" y="1052" type="line"/>
<point x="1043" y="1052" type="line"/>
</contour>
<contour>
<point x="634" y="710" type="line"/>
<point x="771" y="710" type="line"/>
<point x="771" y="847" type="line"/>
<point x="634" y="847" type="line"/>
</contour>
<contour>
<point x="634" y="505" type="line"/>
<point x="771" y="505" type="line"/>
<point x="771" y="643" type="line"/>
<point x="634" y="643" type="line"/>
</contour>
<contour>
<point x="634" y="301" type="line"/>
<point x="771" y="301" type="line"/>
<point x="771" y="438" type="line"/>
<point x="634" y="438" type="line"/>
</contour>
<contour>
<point x="634" y="96" type="line"/>
<point x="771" y="96" type="line"/>
<point x="771" y="234" type="line"/>
<point x="634" y="234" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/text-shadow.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/text-shadow.glif",
"repo_id": "cascadia-code",
"token_count": 960
}
| 709 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="timeline-clock" format="2">
<advance width="1200"/>
<unicode hex="F11FB"/>
<note>
timeline-clock
</note>
<outline>
<contour>
<point x="72" y="1213" type="line"/>
<point x="72" y="911" type="line"/>
<point x="171" y="911" type="line"/>
<point x="171" y="1213" type="line"/>
</contour>
<contour>
<point x="171" y="207" type="line"/>
<point x="171" y="509" type="line"/>
<point x="72" y="509" type="line"/>
<point x="72" y="207" type="line"/>
</contour>
<contour>
<point x="223" y="753"/>
<point x="164" y="812"/>
<point x="79" y="812"/>
<point x="20" y="753"/>
<point x="20" y="667"/>
<point x="79" y="608"/>
<point x="164" y="608"/>
<point x="223" y="667"/>
<point x="223" y="710" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="641" y="1114"/>
<point x="431" y="953"/>
<point x="391" y="828" type="qcurve"/>
<point x="273" y="710" type="line"/>
<point x="391" y="592" type="line"/>
<point x="431" y="467"/>
<point x="641" y="306"/>
<point x="776" y="306" type="qcurve" smooth="yes"/>
<point x="887" y="306"/>
<point x="1071" y="415"/>
<point x="1180" y="601"/>
<point x="1180" y="819"/>
<point x="1071" y="1005"/>
<point x="887" y="1114"/>
<point x="776" y="1114" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="726" y="963" type="line"/>
<point x="802" y="963" type="line"/>
<point x="802" y="701" type="line"/>
<point x="996" y="585" type="line"/>
<point x="956" y="519" type="line"/>
<point x="726" y="660" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/timeline-clock.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/timeline-clock.glif",
"repo_id": "cascadia-code",
"token_count": 888
}
| 710 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="timer-sand-complete" format="2">
<advance width="1200"/>
<unicode hex="F199F"/>
<note>
timer-sand-complete
</note>
<outline>
<contour>
<point x="1027" y="427" type="line"/>
<point x="743" y="710" type="line"/>
<point x="1027" y="993" type="line"/>
<point x="1027" y="1420" type="line"/>
<point x="173" y="1420" type="line"/>
<point x="173" y="993" type="line"/>
<point x="457" y="710" type="line"/>
<point x="173" y="427" type="line"/>
<point x="173" y="0" type="line"/>
<point x="1027" y="0" type="line"/>
</contour>
<contour>
<point x="317" y="1280" type="line"/>
<point x="883" y="1280" type="line"/>
<point x="883" y="1030" type="line"/>
<point x="600" y="747" type="line"/>
<point x="317" y="1030" type="line"/>
</contour>
<contour>
<point x="883" y="390" type="line"/>
<point x="883" y="140" type="line"/>
<point x="317" y="140" type="line"/>
<point x="317" y="390" type="line"/>
<point x="600" y="673" type="line"/>
</contour>
<contour>
<point x="743" y="340" type="line"/>
<point x="600" y="483" type="line"/>
<point x="457" y="340" type="line"/>
<point x="457" y="283" type="line"/>
<point x="743" y="283" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/timer-sand-complete.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/timer-sand-complete.glif",
"repo_id": "cascadia-code",
"token_count": 657
}
| 711 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="tooltip-check" format="2">
<advance width="1200"/>
<unicode hex="F155C"/>
<note>
tooltip-check
</note>
<outline>
<contour>
<point x="1112" y="1290"/>
<point x="1066" y="1290" type="qcurve" smooth="yes"/>
<point x="134" y="1290" type="line" smooth="yes"/>
<point x="88" y="1290"/>
<point x="20" y="1222"/>
<point x="20" y="1176" type="qcurve" smooth="yes"/>
<point x="20" y="479" type="line" smooth="yes"/>
<point x="20" y="430"/>
<point x="88" y="361"/>
<point x="134" y="361" type="qcurve" smooth="yes"/>
<point x="369" y="361" type="line"/>
<point x="600" y="130" type="line"/>
<point x="831" y="361" type="line"/>
<point x="1066" y="361" type="line" smooth="yes"/>
<point x="1112" y="361"/>
<point x="1180" y="430"/>
<point x="1180" y="479" type="qcurve" smooth="yes"/>
<point x="1180" y="1176" type="line" smooth="yes"/>
<point x="1180" y="1222"/>
</contour>
<contour>
<point x="894" y="977" type="line"/>
<point x="510" y="593" type="line"/>
<point x="306" y="797" type="line"/>
<point x="390" y="879" type="line"/>
<point x="510" y="759" type="line"/>
<point x="812" y="1059" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tooltip-check.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tooltip-check.glif",
"repo_id": "cascadia-code",
"token_count": 644
}
| 712 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="tooth" format="2">
<advance width="1200"/>
<unicode hex="F08C3"/>
<note>
tooth
</note>
<outline>
<contour>
<point x="374" y="1290"/>
<point x="309" y="1290" type="qcurve" smooth="yes"/>
<point x="227" y="1290"/>
<point x="156" y="1238" type="qcurve" smooth="yes"/>
<point x="94" y="1192"/>
<point x="20" y="1031"/>
<point x="20" y="852"/>
<point x="91" y="639"/>
<point x="134" y="593" type="qcurve"/>
<point x="151" y="579"/>
<point x="167" y="530" type="qcurve" smooth="yes"/>
<point x="178" y="500"/>
<point x="202" y="424" type="qcurve" smooth="yes"/>
<point x="246" y="282"/>
<point x="273" y="225" type="qcurve" smooth="yes"/>
<point x="320" y="130"/>
<point x="369" y="130" type="qcurve" smooth="yes"/>
<point x="442" y="130"/>
<point x="483" y="171" type="qcurve" smooth="yes"/>
<point x="516" y="206"/>
<point x="529" y="272" type="qcurve" smooth="yes"/>
<point x="537" y="312"/>
<point x="543" y="394" type="qcurve" smooth="yes"/>
<point x="548" y="468"/>
<point x="556" y="495" type="qcurve" smooth="yes"/>
<point x="570" y="536"/>
<point x="630" y="536"/>
<point x="644" y="495" type="qcurve" smooth="yes"/>
<point x="652" y="468"/>
<point x="657" y="394" type="qcurve" smooth="yes"/>
<point x="663" y="312"/>
<point x="671" y="272" type="qcurve" smooth="yes"/>
<point x="684" y="206"/>
<point x="717" y="171" type="qcurve" smooth="yes"/>
<point x="758" y="130"/>
<point x="831" y="130" type="qcurve" smooth="yes"/>
<point x="880" y="130"/>
<point x="927" y="225" type="qcurve" smooth="yes"/>
<point x="954" y="282"/>
<point x="998" y="424" type="qcurve" smooth="yes"/>
<point x="1022" y="500"/>
<point x="1033" y="530" type="qcurve" smooth="yes"/>
<point x="1049" y="579"/>
<point x="1066" y="593" type="qcurve"/>
<point x="1109" y="639"/>
<point x="1180" y="852"/>
<point x="1180" y="1031"/>
<point x="1106" y="1192"/>
<point x="1044" y="1238" type="qcurve" smooth="yes"/>
<point x="973" y="1290"/>
<point x="891" y="1290" type="qcurve" smooth="yes"/>
<point x="826" y="1290"/>
<point x="780" y="1279" type="qcurve" smooth="yes"/>
<point x="752" y="1274"/>
<point x="712" y="1257" type="qcurve" smooth="yes"/>
<point x="682" y="1244"/>
<point x="665" y="1241" type="qcurve" smooth="yes"/>
<point x="635" y="1233"/>
<point x="565" y="1233"/>
<point x="535" y="1241" type="qcurve" smooth="yes"/>
<point x="518" y="1244"/>
<point x="488" y="1257" type="qcurve" smooth="yes"/>
<point x="448" y="1274"/>
<point x="420" y="1279" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tooth.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tooth.glif",
"repo_id": "cascadia-code",
"token_count": 1435
}
| 713 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="triangle-outline" format="2">
<advance width="1200"/>
<unicode hex="F0537"/>
<note>
triangle-outline
</note>
<outline>
<contour>
<point x="1180" y="210" type="line"/>
<point x="600" y="1210" type="line"/>
<point x="20" y="210" type="line"/>
</contour>
<contour>
<point x="203" y="316" type="line"/>
<point x="600" y="1000" type="line"/>
<point x="997" y="316" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/triangle-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/triangle-outline.glif",
"repo_id": "cascadia-code",
"token_count": 238
}
| 714 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="unity" format="2">
<advance width="1200"/>
<unicode hex="F06AF"/>
<note>
unity
</note>
<outline>
<contour>
<point x="608" y="228" type="line"/>
<point x="1056" y="113" type="line"/>
<point x="1180" y="561" type="line"/>
<point x="1093" y="710" type="line"/>
<point x="1180" y="859" type="line"/>
<point x="1056" y="1307" type="line"/>
<point x="608" y="1192" type="line"/>
<point x="521" y="1043" type="line"/>
<point x="347" y="1043" type="line"/>
<point x="20" y="710" type="line"/>
<point x="347" y="377" type="line"/>
<point x="521" y="377" type="line"/>
</contour>
<contour>
<point x="281" y="645" type="line"/>
<point x="673" y="645" type="line"/>
<point x="869" y="303" type="line"/>
<point x="530" y="396" type="line"/>
</contour>
<contour>
<point x="788" y="710" type="line"/>
<point x="984" y="1049" type="line"/>
<point x="1077" y="710" type="line"/>
<point x="984" y="371" type="line"/>
</contour>
<contour>
<point x="869" y="1117" type="line"/>
<point x="673" y="775" type="line"/>
<point x="281" y="775" type="line"/>
<point x="530" y="1024" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/unity.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/unity.glif",
"repo_id": "cascadia-code",
"token_count": 640
}
| 715 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="vector-polyline-remove" format="2">
<advance width="1200"/>
<unicode hex="F1228"/>
<note>
vector-polyline-remove
</note>
<outline>
<contour>
<point x="1180" y="474" type="line"/>
<point x="1100" y="553" type="line"/>
<point x="981" y="437" type="line"/>
<point x="864" y="553" type="line"/>
<point x="784" y="474" type="line"/>
<point x="901" y="357" type="line"/>
<point x="784" y="238" type="line"/>
<point x="864" y="158" type="line"/>
<point x="981" y="277" type="line"/>
<point x="1100" y="158" type="line"/>
<point x="1180" y="238" type="line"/>
<point x="1061" y="357" type="line"/>
</contour>
<contour>
<point x="1151" y="1148" type="line"/>
<point x="811" y="1148" type="line"/>
<point x="811" y="888" type="line"/>
<point x="506" y="583" type="line"/>
<point x="421" y="583" type="line"/>
<point x="307" y="922" type="line"/>
<point x="360" y="922" type="line"/>
<point x="360" y="1262" type="line"/>
<point x="20" y="1262" type="line"/>
<point x="20" y="922" type="line"/>
<point x="190" y="922" type="line"/>
<point x="301" y="583" type="line"/>
<point x="246" y="583" type="line"/>
<point x="246" y="243" type="line"/>
<point x="585" y="243" type="line"/>
<point x="585" y="503" type="line"/>
<point x="891" y="808" type="line"/>
<point x="1151" y="808" type="line"/>
</contour>
<contour>
<point x="246" y="1148" type="line"/>
<point x="246" y="1036" type="line"/>
<point x="131" y="1036" type="line"/>
<point x="131" y="1148" type="line"/>
</contour>
<contour>
<point x="471" y="468" type="line"/>
<point x="471" y="357" type="line"/>
<point x="360" y="357" type="line"/>
<point x="360" y="468" type="line"/>
</contour>
<contour>
<point x="1039" y="1036" type="line"/>
<point x="1039" y="922" type="line"/>
<point x="925" y="922" type="line"/>
<point x="925" y="1036" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/vector-polyline-remove.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/vector-polyline-remove.glif",
"repo_id": "cascadia-code",
"token_count": 1034
}
| 716 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="view-dashboard-outline" format="2">
<advance width="1200"/>
<unicode hex="F0A1D"/>
<note>
view-dashboard-outline
</note>
<outline>
<contour>
<point x="793" y="1160" type="line"/>
<point x="1050" y="1160" type="line"/>
<point x="1050" y="1033" type="line"/>
<point x="793" y="1033" type="line"/>
</contour>
<contour>
<point x="150" y="1160" type="line"/>
<point x="407" y="1160" type="line"/>
<point x="407" y="773" type="line"/>
<point x="150" y="773" type="line"/>
</contour>
<contour>
<point x="793" y="647" type="line"/>
<point x="1050" y="647" type="line"/>
<point x="1050" y="260" type="line"/>
<point x="793" y="260" type="line"/>
</contour>
<contour>
<point x="150" y="387" type="line"/>
<point x="407" y="387" type="line"/>
<point x="407" y="260" type="line"/>
<point x="150" y="260" type="line"/>
</contour>
<contour>
<point x="1180" y="903" type="line"/>
<point x="1180" y="1290" type="line"/>
<point x="663" y="1290" type="line"/>
<point x="663" y="903" type="line"/>
</contour>
<contour>
<point x="537" y="647" type="line"/>
<point x="537" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="647" type="line"/>
</contour>
<contour>
<point x="1180" y="130" type="line"/>
<point x="1180" y="773" type="line"/>
<point x="663" y="773" type="line"/>
<point x="663" y="130" type="line"/>
</contour>
<contour>
<point x="537" y="130" type="line"/>
<point x="537" y="517" type="line"/>
<point x="20" y="517" type="line"/>
<point x="20" y="130" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-dashboard-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-dashboard-outline.glif",
"repo_id": "cascadia-code",
"token_count": 878
}
| 717 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="view-module" format="2">
<advance width="1200"/>
<unicode hex="F0573"/>
<note>
view-module
</note>
<outline>
<contour>
<point x="1180" y="1153" type="line"/>
<point x="838" y="1153" type="line"/>
<point x="838" y="744" type="line"/>
<point x="1180" y="744" type="line"/>
</contour>
<contour>
<point x="429" y="1153" type="line"/>
<point x="429" y="744" type="line"/>
<point x="771" y="744" type="line"/>
<point x="771" y="1153" type="line"/>
</contour>
<contour>
<point x="838" y="676" type="line"/>
<point x="838" y="267" type="line"/>
<point x="1180" y="267" type="line"/>
<point x="1180" y="676" type="line"/>
</contour>
<contour>
<point x="429" y="676" type="line"/>
<point x="429" y="267" type="line"/>
<point x="771" y="267" type="line"/>
<point x="771" y="676" type="line"/>
</contour>
<contour>
<point x="20" y="676" type="line"/>
<point x="20" y="267" type="line"/>
<point x="362" y="267" type="line"/>
<point x="362" y="676" type="line"/>
</contour>
<contour>
<point x="20" y="1153" type="line"/>
<point x="20" y="744" type="line"/>
<point x="362" y="744" type="line"/>
<point x="362" y="1153" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-module.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-module.glif",
"repo_id": "cascadia-code",
"token_count": 676
}
| 718 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="volume-source" format="2">
<advance width="1200"/>
<unicode hex="F1120"/>
<note>
volume-source
</note>
<outline>
<contour>
<point x="292" y="915" type="line"/>
<point x="20" y="915" type="line"/>
<point x="20" y="505" type="line"/>
<point x="292" y="505" type="line"/>
<point x="634" y="164" type="line"/>
<point x="634" y="1256" type="line"/>
</contour>
<contour>
<point x="905" y="915" type="line"/>
<point x="771" y="915" type="line"/>
<point x="771" y="505" type="line"/>
<point x="905" y="505" type="line"/>
</contour>
<contour>
<point x="1180" y="1186" type="line"/>
<point x="1043" y="1186" type="line"/>
<point x="1043" y="234" type="line"/>
<point x="1180" y="234" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/volume-source.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/volume-source.glif",
"repo_id": "cascadia-code",
"token_count": 417
}
| 719 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="warehouse" format="2">
<advance width="1200"/>
<unicode hex="F0F81"/>
<note>
warehouse
</note>
<outline>
<contour>
<point x="251" y="187" type="line"/>
<point x="369" y="187" type="line"/>
<point x="369" y="304" type="line"/>
<point x="251" y="304" type="line"/>
</contour>
<contour>
<point x="1180" y="941" type="line"/>
<point x="600" y="1233" type="line"/>
<point x="20" y="941" type="line"/>
<point x="20" y="187" type="line"/>
<point x="134" y="187" type="line"/>
<point x="134" y="653" type="line"/>
<point x="1066" y="653" type="line"/>
<point x="1066" y="187" type="line"/>
<point x="1180" y="187" type="line"/>
</contour>
<contour>
<point x="369" y="884" type="line"/>
<point x="369" y="767" type="line"/>
<point x="134" y="767" type="line"/>
<point x="134" y="884" type="line"/>
</contour>
<contour>
<point x="717" y="884" type="line"/>
<point x="717" y="767" type="line"/>
<point x="483" y="767" type="line"/>
<point x="483" y="884" type="line"/>
</contour>
<contour>
<point x="1066" y="884" type="line"/>
<point x="1066" y="767" type="line"/>
<point x="831" y="767" type="line"/>
<point x="831" y="884" type="line"/>
</contour>
<contour>
<point x="251" y="419" type="line"/>
<point x="369" y="419" type="line"/>
<point x="369" y="536" type="line"/>
<point x="251" y="536" type="line"/>
</contour>
<contour>
<point x="483" y="419" type="line"/>
<point x="600" y="419" type="line"/>
<point x="600" y="536" type="line"/>
<point x="483" y="536" type="line"/>
</contour>
<contour>
<point x="483" y="187" type="line"/>
<point x="600" y="187" type="line"/>
<point x="600" y="304" type="line"/>
<point x="483" y="304" type="line"/>
</contour>
<contour>
<point x="717" y="187" type="line"/>
<point x="831" y="187" type="line"/>
<point x="831" y="304" type="line"/>
<point x="717" y="304" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/warehouse.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/warehouse.glif",
"repo_id": "cascadia-code",
"token_count": 1060
}
| 720 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="water-boiler" format="2">
<advance width="1200"/>
<unicode hex="F0F92"/>
<note>
water-boiler
</note>
<outline>
<contour>
<point x="883" y="1420" type="qcurve" smooth="yes"/>
<point x="317" y="1420" type="line" smooth="yes"/>
<point x="257" y="1420"/>
<point x="173" y="1337"/>
<point x="173" y="1280" type="qcurve" smooth="yes"/>
<point x="173" y="427" type="line" smooth="yes"/>
<point x="173" y="367"/>
<point x="257" y="283"/>
<point x="317" y="283" type="qcurve" smooth="yes"/>
<point x="387" y="283" type="line"/>
<point x="387" y="140" type="line"/>
<point x="173" y="140" type="line"/>
<point x="173" y="0" type="line"/>
<point x="387" y="0" type="line" smooth="yes"/>
<point x="447" y="0"/>
<point x="530" y="83"/>
<point x="530" y="140" type="qcurve" smooth="yes"/>
<point x="530" y="283" type="line"/>
<point x="670" y="283" type="line"/>
<point x="670" y="140" type="line" smooth="yes"/>
<point x="670" y="83"/>
<point x="753" y="0"/>
<point x="813" y="0" type="qcurve" smooth="yes"/>
<point x="1027" y="0" type="line"/>
<point x="1027" y="140" type="line"/>
<point x="813" y="140" type="line"/>
<point x="813" y="283" type="line"/>
<point x="883" y="283" type="line" smooth="yes"/>
<point x="943" y="283"/>
<point x="1027" y="367"/>
<point x="1027" y="427" type="qcurve" smooth="yes"/>
<point x="1027" y="1280" type="line" smooth="yes"/>
<point x="1027" y="1337"/>
<point x="943" y="1420"/>
</contour>
<contour>
<point x="540" y="1210"/>
<point x="600" y="1210" type="qcurve" smooth="yes"/>
<point x="660" y="1210"/>
<point x="743" y="1127"/>
<point x="743" y="1010"/>
<point x="660" y="927"/>
<point x="540" y="927"/>
<point x="457" y="1010"/>
<point x="457" y="1127"/>
</contour>
<contour>
<point x="457" y="427" type="line"/>
<point x="457" y="533" type="line"/>
<point x="743" y="533" type="line"/>
<point x="743" y="427" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/water-boiler.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/water-boiler.glif",
"repo_id": "cascadia-code",
"token_count": 1083
}
| 721 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="xml" format="2">
<advance width="1200"/>
<unicode hex="F05C0"/>
<note>
xml
</note>
<outline>
<contour>
<point x="441" y="232" type="line"/>
<point x="550" y="208" type="line"/>
<point x="759" y="1188" type="line"/>
<point x="650" y="1212" type="line"/>
</contour>
<contour>
<point x="822" y="511" type="line"/>
<point x="822" y="352" type="line"/>
<point x="1180" y="710" type="line"/>
<point x="822" y="1068" type="line"/>
<point x="822" y="911" type="line"/>
<point x="1023" y="710" type="line"/>
</contour>
<contour>
<point x="378" y="352" type="line"/>
<point x="378" y="511" type="line"/>
<point x="177" y="710" type="line"/>
<point x="378" y="911" type="line"/>
<point x="378" y="1068" type="line"/>
<point x="20" y="710" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/xml.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/xml.glif",
"repo_id": "cascadia-code",
"token_count": 455
}
| 722 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="POWER ON SYMBOL" format="2">
<advance width="1200"/>
<unicode hex="23FD"/>
<note>
POWER ON SYMBOL
</note>
<outline>
<contour>
<point x="598" y="1420" type="line" smooth="yes"/>
<point x="532" y="1419"/>
<point x="479" y="1365"/>
<point x="479" y="1299" type="curve" smooth="yes"/>
<point x="479" y="1298"/>
<point x="479" y="1298"/>
<point x="479" y="1297" type="curve" smooth="yes"/>
<point x="479" y="123" type="line" smooth="yes"/>
<point x="479" y="123"/>
<point x="479" y="122"/>
<point x="479" y="121" type="curve" smooth="yes"/>
<point x="479" y="54"/>
<point x="533" y="0"/>
<point x="600" y="0" type="curve" smooth="yes"/>
<point x="667" y="0"/>
<point x="721" y="54"/>
<point x="721" y="121" type="curve" smooth="yes"/>
<point x="721" y="122"/>
<point x="721" y="123"/>
<point x="721" y="123" type="curve" smooth="yes"/>
<point x="721" y="1297" type="line"/>
<point x="721" y="1299" type="line" smooth="yes"/>
<point x="721" y="1366"/>
<point x="667" y="1420"/>
<point x="600" y="1420" type="curve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/Unicode_IEC_symbol_font.ufo/glyphs/P_O_W_E_R_ O_N_ S_Y_M_B_O_L_.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/Unicode_IEC_symbol_font.ufo/glyphs/P_O_W_E_R_ O_N_ S_Y_M_B_O_L_.glif",
"repo_id": "cascadia-code",
"token_count": 613
}
| 723 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="arrow-left" format="2">
<advance width="1200"/>
<unicode hex="EA9B"/>
<note>
arrow-left
</note>
<outline>
<contour>
<point x="572" y="1161" type="line"/>
<point x="499" y="1228" type="line"/>
<point x="20" y="744" type="line"/>
<point x="20" y="676" type="line"/>
<point x="499" y="192" type="line"/>
<point x="572" y="264" type="line"/>
<point x="170" y="661" type="line"/>
<point x="1180" y="661" type="line"/>
<point x="1180" y="759" type="line"/>
<point x="170" y="759" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/arrow-left.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/arrow-left.glif",
"repo_id": "cascadia-code",
"token_count": 301
}
| 724 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="bookmark" format="2">
<advance width="1200"/>
<unicode hex="EAA5"/>
<note>
bookmark
</note>
<outline>
<contour>
<point x="1115" y="1371" type="line"/>
<point x="1061" y="1420" type="line"/>
<point x="139" y="1420" type="line"/>
<point x="85" y="1371" type="line"/>
<point x="85" y="33" type="line"/>
<point x="178" y="0" type="line"/>
<point x="600" y="472" type="line"/>
<point x="1022" y="0" type="line"/>
<point x="1115" y="33" type="line"/>
</contour>
<contour>
<point x="1011" y="1321" type="line"/>
<point x="1011" y="170" type="line"/>
<point x="638" y="581" type="line"/>
<point x="562" y="581" type="line"/>
<point x="189" y="170" type="line"/>
<point x="189" y="1321" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/bookmark.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/bookmark.glif",
"repo_id": "cascadia-code",
"token_count": 423
}
| 725 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="chevron-right" format="2">
<advance width="1200"/>
<unicode hex="EAB6"/>
<note>
chevron-right
</note>
<outline>
<contour>
<point x="293" y="204" type="line"/>
<point x="367" y="130" type="line"/>
<point x="907" y="676" type="line"/>
<point x="907" y="744" type="line"/>
<point x="367" y="1290" type="line"/>
<point x="293" y="1216" type="line"/>
<point x="802" y="713" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/chevron-right.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/chevron-right.glif",
"repo_id": "cascadia-code",
"token_count": 243
}
| 726 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="debug-continue" format="2">
<advance width="1200"/>
<unicode hex="EACF"/>
<note>
debug-continue
</note>
<outline>
<contour>
<point x="20" y="127" type="line"/>
<point x="165" y="127" type="line"/>
<point x="165" y="1293" type="line"/>
<point x="20" y="1293" type="line"/>
</contour>
<contour>
<point x="1180" y="764" type="line"/>
<point x="496" y="1251" type="line"/>
<point x="383" y="1194" type="line"/>
<point x="383" y="221" type="line"/>
<point x="496" y="164" type="line"/>
<point x="1180" y="650" type="line"/>
</contour>
<contour>
<point x="528" y="1049" type="line"/>
<point x="1009" y="707" type="line"/>
<point x="528" y="366" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/debug-continue.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/debug-continue.glif",
"repo_id": "cascadia-code",
"token_count": 397
}
| 727 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="ellipsis" format="2">
<advance width="1200"/>
<unicode hex="EA7C"/>
<note>
ellipsis
</note>
<outline>
<contour>
<point x="211" y="751"/>
<point x="154" y="808"/>
<point x="72" y="808"/>
<point x="20" y="751"/>
<point x="20" y="669"/>
<point x="72" y="612"/>
<point x="154" y="612"/>
<point x="211" y="669"/>
<point x="211" y="710" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="695" y="751"/>
<point x="639" y="808"/>
<point x="556" y="808"/>
<point x="499" y="751"/>
<point x="499" y="669"/>
<point x="556" y="612"/>
<point x="639" y="612"/>
<point x="695" y="669"/>
<point x="695" y="710" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="1180" y="751"/>
<point x="1123" y="808"/>
<point x="1041" y="808"/>
<point x="984" y="751"/>
<point x="984" y="669"/>
<point x="1041" y="612"/>
<point x="1123" y="612"/>
<point x="1180" y="669"/>
<point x="1180" y="710" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/ellipsis.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/ellipsis.glif",
"repo_id": "cascadia-code",
"token_count": 601
}
| 728 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="file-symlink-file" format="2">
<advance width="1200"/>
<unicode hex="EAEE"/>
<note>
file-symlink-file
</note>
<outline>
<contour>
<point x="842" y="1333" type="line"/>
<point x="153" y="1333" type="line"/>
<point x="110" y="1290" type="line"/>
<point x="110" y="891" type="line"/>
<point x="196" y="891" type="line"/>
<point x="196" y="1247" type="line"/>
<point x="733" y="1247" type="line"/>
<point x="733" y="933" type="line"/>
<point x="776" y="891" type="line"/>
<point x="1090" y="891" type="line"/>
<point x="1090" y="178" type="line"/>
<point x="823" y="178" type="line"/>
<point x="823" y="87" type="line"/>
<point x="1132" y="87" type="line"/>
<point x="1180" y="130" type="line"/>
<point x="1180" y="995" type="line"/>
<point x="1166" y="1029" type="line"/>
<point x="871" y="1323" type="line"/>
</contour>
<contour>
<point x="823" y="1247" type="line"/>
<point x="1090" y="976" type="line"/>
<point x="823" y="976" type="line"/>
</contour>
<contour>
<point x="733" y="753" type="line"/>
<point x="686" y="800" type="line"/>
<point x="63" y="800" type="line"/>
<point x="20" y="753" type="line"/>
<point x="20" y="130" type="line"/>
<point x="63" y="87" type="line"/>
<point x="686" y="87" type="line"/>
<point x="733" y="130" type="line"/>
</contour>
<contour>
<point x="643" y="710" type="line"/>
<point x="643" y="178" type="line"/>
<point x="110" y="178" type="line"/>
<point x="110" y="710" type="line"/>
</contour>
<contour>
<point x="510" y="620" type="line"/>
<point x="243" y="620" type="line"/>
<point x="243" y="534" type="line"/>
<point x="400" y="534" type="line"/>
<point x="167" y="296" type="line"/>
<point x="229" y="235" type="line"/>
<point x="467" y="468" type="line"/>
<point x="467" y="311" type="line"/>
<point x="552" y="311" type="line"/>
<point x="552" y="577" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/file-symlink-file.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/file-symlink-file.glif",
"repo_id": "cascadia-code",
"token_count": 1057
}
| 729 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="graph-line" format="2">
<advance width="1200"/>
<unicode hex="EBE2"/>
<note>
graph-line
</note>
<outline>
<contour>
<point x="102" y="223" type="line"/>
<point x="102" y="1275" type="line"/>
<point x="20" y="1275" type="line"/>
<point x="20" y="184" type="line"/>
<point x="59" y="145" type="line"/>
<point x="1150" y="145" type="line"/>
<point x="1150" y="223" type="line"/>
</contour>
<contour>
<point x="1124" y="882" type="line"/>
<point x="1180" y="939" type="line"/>
<point x="1016" y="1102" type="line"/>
<point x="960" y="1102" type="line"/>
<point x="546" y="684" type="line"/>
<point x="412" y="818" type="line"/>
<point x="352" y="818" type="line"/>
<point x="29" y="494" type="line"/>
<point x="89" y="438" type="line"/>
<point x="382" y="732" type="line"/>
<point x="516" y="598" type="line"/>
<point x="572" y="598" type="line"/>
<point x="990" y="1016" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/graph-line.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/graph-line.glif",
"repo_id": "cascadia-code",
"token_count": 524
}
| 730 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="list-flat" format="2">
<advance width="1200"/>
<unicode hex="EB84"/>
<note>
list-flat
</note>
<outline>
<contour>
<point x="1180" y="519" type="line"/>
<point x="1180" y="612" type="line"/>
<point x="20" y="612" type="line"/>
<point x="20" y="519" type="line"/>
</contour>
<contour>
<point x="20" y="808" type="line"/>
<point x="1180" y="808" type="line"/>
<point x="1180" y="906" type="line"/>
<point x="20" y="906" type="line"/>
</contour>
<contour>
<point x="20" y="1195" type="line"/>
<point x="20" y="1097" type="line"/>
<point x="1180" y="1097" type="line"/>
<point x="1180" y="1195" type="line"/>
</contour>
<contour>
<point x="1180" y="323" type="line"/>
<point x="20" y="323" type="line"/>
<point x="20" y="225" type="line"/>
<point x="1180" y="225" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/list-flat.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/list-flat.glif",
"repo_id": "cascadia-code",
"token_count": 477
}
| 731 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="preserve-case" format="2">
<advance width="1200"/>
<unicode hex="EB2E"/>
<note>
preserve-case
</note>
<outline>
<contour>
<point x="376" y="1061" type="line"/>
<point x="293" y="1061" type="line"/>
<point x="20" y="359" type="line"/>
<point x="113" y="359" type="line"/>
<point x="181" y="554" type="line"/>
<point x="483" y="554" type="line"/>
<point x="556" y="359" type="line"/>
<point x="649" y="359" type="line"/>
</contour>
<contour>
<point x="210" y="627" type="line"/>
<point x="317" y="929" type="line"/>
<point x="327" y="944"/>
<point x="332" y="978" type="qcurve"/>
<point x="332" y="978" type="line"/>
<point x="337" y="944"/>
<point x="342" y="929" type="qcurve" smooth="yes"/>
<point x="454" y="627" type="line"/>
</contour>
<contour>
<point x="951" y="359" type="line" smooth="yes"/>
<point x="1048" y="359"/>
<point x="1180" y="471"/>
<point x="1180" y="559" type="qcurve" smooth="yes"/>
<point x="1180" y="632"/>
<point x="1092" y="725"/>
<point x="1024" y="729" type="qcurve"/>
<point x="1024" y="734" type="line"/>
<point x="1078" y="754"/>
<point x="1146" y="842"/>
<point x="1146" y="900" type="qcurve" smooth="yes"/>
<point x="1146" y="973"/>
<point x="1039" y="1061"/>
<point x="946" y="1061" type="qcurve" smooth="yes"/>
<point x="746" y="1061" type="line"/>
<point x="746" y="359" type="line"/>
</contour>
<contour>
<point x="922" y="988" type="line" smooth="yes"/>
<point x="1058" y="988"/>
<point x="1058" y="885" type="qcurve" smooth="yes"/>
<point x="1058" y="827"/>
<point x="980" y="759"/>
<point x="912" y="759" type="qcurve" smooth="yes"/>
<point x="829" y="759" type="line"/>
<point x="829" y="988" type="line"/>
</contour>
<contour>
<point x="922" y="686" type="line" smooth="yes"/>
<point x="1092" y="686"/>
<point x="1092" y="559" type="qcurve" smooth="yes"/>
<point x="1092" y="500"/>
<point x="1014" y="432"/>
<point x="941" y="432" type="qcurve" smooth="yes"/>
<point x="829" y="432" type="line"/>
<point x="829" y="686" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/preserve-case.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/preserve-case.glif",
"repo_id": "cascadia-code",
"token_count": 1167
}
| 732 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="remote" format="2">
<advance width="1200"/>
<unicode hex="EB3A"/>
<note>
remote
</note>
<outline>
<contour>
<point x="1058" y="447" type="line"/>
<point x="1126" y="515" type="line"/>
<point x="702" y="933" type="line"/>
<point x="1126" y="1358" type="line"/>
<point x="1058" y="1420" type="line"/>
<point x="606" y="967" type="line"/>
<point x="606" y="900" type="line"/>
</contour>
<contour>
<point x="142" y="996" type="line"/>
<point x="74" y="933" type="line"/>
<point x="509" y="498" type="line"/>
<point x="74" y="68" type="line"/>
<point x="142" y="0" type="line"/>
<point x="606" y="470" type="line"/>
<point x="606" y="532" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/remote.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/remote.glif",
"repo_id": "cascadia-code",
"token_count": 398
}
| 733 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="symbol-boolean" format="2">
<advance width="1200"/>
<unicode hex="EA8F"/>
<note>
symbol-boolean
</note>
<outline>
<contour>
<point x="20" y="338" type="line"/>
<point x="60" y="294" type="line"/>
<point x="1140" y="294" type="line"/>
<point x="1180" y="338" type="line"/>
<point x="1180" y="1082" type="line"/>
<point x="1140" y="1126" type="line"/>
<point x="60" y="1126" type="line"/>
<point x="20" y="1082" type="line"/>
</contour>
<contour>
<point x="1100" y="378" type="line"/>
<point x="600" y="378" type="line"/>
<point x="600" y="666" type="line"/>
<point x="923" y="666" type="line"/>
<point x="751" y="493" type="line"/>
<point x="808" y="435" type="line"/>
<point x="1052" y="679" type="line"/>
<point x="1052" y="741" type="line"/>
<point x="808" y="985" type="line"/>
<point x="751" y="927" type="line"/>
<point x="923" y="750" type="line"/>
<point x="600" y="750" type="line"/>
<point x="600" y="670" type="line"/>
<point x="259" y="670" type="line"/>
<point x="436" y="493" type="line"/>
<point x="379" y="435" type="line"/>
<point x="131" y="679" type="line"/>
<point x="131" y="741" type="line"/>
<point x="379" y="985" type="line"/>
<point x="436" y="927" type="line"/>
<point x="264" y="754" type="line"/>
<point x="600" y="754" type="line"/>
<point x="600" y="1042" type="line"/>
<point x="1100" y="1042" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/symbol-boolean.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/symbol-boolean.glif",
"repo_id": "cascadia-code",
"token_count": 774
}
| 734 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="uniE6AD" format="2">
<advance width="1200"/>
<unicode hex="E7AD"/>
<note>
uniE6AD
</note>
<outline>
<contour>
<point x="336" y="469" type="line"/>
<point x="192" y="518" type="line"/>
<point x="192" y="306" type="line"/>
<point x="336" y="255" type="line"/>
</contour>
<contour>
<point x="363" y="469" type="line"/>
<point x="363" y="255" type="line"/>
<point x="507" y="306" type="line"/>
<point x="507" y="518" type="line"/>
</contour>
<contour>
<point x="353" y="485" type="line"/>
<point x="487" y="530" type="line"/>
<point x="353" y="575" type="line"/>
<point x="206" y="533" type="line"/>
</contour>
<contour>
<point x="164" y="764" type="line"/>
<point x="20" y="813" type="line"/>
<point x="20" y="602" type="line"/>
<point x="164" y="550" type="line"/>
</contour>
<contour>
<point x="191" y="764" type="line"/>
<point x="191" y="550" type="line"/>
<point x="335" y="602" type="line"/>
<point x="335" y="813" type="line"/>
</contour>
<contour>
<point x="181" y="781" type="line"/>
<point x="317" y="827" type="line"/>
<point x="181" y="870" type="line"/>
<point x="35" y="828" type="line"/>
</contour>
<contour>
<point x="501" y="764" type="line"/>
<point x="357" y="813" type="line"/>
<point x="357" y="602" type="line"/>
<point x="501" y="550" type="line"/>
</contour>
<contour>
<point x="528" y="764" type="line"/>
<point x="528" y="550" type="line"/>
<point x="672" y="602" type="line"/>
<point x="672" y="813" type="line"/>
</contour>
<contour>
<point x="517" y="781" type="line"/>
<point x="652" y="827" type="line"/>
<point x="517" y="870" type="line"/>
<point x="371" y="828" type="line"/>
</contour>
<contour>
<point x="672" y="1059" type="line"/>
<point x="528" y="1108" type="line"/>
<point x="528" y="897" type="line"/>
<point x="672" y="845" type="line"/>
</contour>
<contour>
<point x="699" y="1059" type="line"/>
<point x="699" y="845" type="line"/>
<point x="843" y="897" type="line"/>
<point x="843" y="1108" type="line"/>
</contour>
<contour>
<point x="689" y="1076" type="line"/>
<point x="825" y="1122" type="line"/>
<point x="689" y="1165" type="line"/>
<point x="543" y="1123" type="line"/>
</contour>
<contour>
<point x="1009" y="1059" type="line"/>
<point x="865" y="1108" type="line"/>
<point x="865" y="897" type="line"/>
<point x="1009" y="845" type="line"/>
</contour>
<contour>
<point x="1036" y="1059" type="line"/>
<point x="1036" y="845" type="line"/>
<point x="1180" y="897" type="line"/>
<point x="1180" y="1108" type="line"/>
</contour>
<contour>
<point x="1026" y="1076" type="line"/>
<point x="1160" y="1122" type="line"/>
<point x="1026" y="1165" type="line"/>
<point x="879" y="1123" type="line"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:42:44 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/devicons.ufo/glyphs/uniE_6A_D_.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/devicons.ufo/glyphs/uniE_6A_D_.glif",
"repo_id": "cascadia-code",
"token_count": 1649
}
| 735 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="disco" format="2">
<advance width="1200"/>
<unicode hex="E271"/>
<note>
disco
</note>
<outline>
<contour>
<point x="1180" y="950"/>
<point x="840" y="1288"/>
<point x="360" y="1288"/>
<point x="20" y="950"/>
<point x="20" y="470"/>
<point x="360" y="132"/>
<point x="840" y="132"/>
<point x="1180" y="470"/>
</contour>
<contour>
<point x="840" y="611"/>
<point x="699" y="470"/>
<point x="501" y="470"/>
<point x="360" y="611"/>
<point x="360" y="809"/>
<point x="501" y="950"/>
<point x="699" y="950"/>
<point x="840" y="809"/>
</contour>
<contour>
<point x="638" y="800"/>
<point x="562" y="800"/>
<point x="510" y="748"/>
<point x="510" y="672"/>
<point x="562" y="620"/>
<point x="638" y="620"/>
<point x="690" y="672"/>
<point x="690" y="748"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:45:38 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/font-awesome-extension.ufo/glyphs/disco.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-awesome-extension.ufo/glyphs/disco.glif",
"repo_id": "cascadia-code",
"token_count": 593
}
| 736 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="refrigerator" format="2">
<advance width="1200"/>
<unicode hex="E23B"/>
<note>
refrigerator
</note>
<outline>
<contour>
<point x="888" y="29" type="qcurve"/>
<point x="876" y="29" type="line"/>
<point x="324" y="29" type="line"/>
<point x="312" y="29" type="line"/>
<point x="321" y="0"/>
<point x="353" y="0" type="qcurve" smooth="yes"/>
<point x="847" y="0" type="line" smooth="yes"/>
<point x="879" y="0"/>
</contour>
<contour>
<point x="280" y="1376" type="qcurve" smooth="yes"/>
<point x="280" y="1056" type="line"/>
<point x="297" y="1071"/>
<point x="324" y="1071" type="qcurve" smooth="yes"/>
<point x="876" y="1071" type="line" smooth="yes"/>
<point x="903" y="1071"/>
<point x="920" y="1056" type="qcurve"/>
<point x="920" y="1376" type="line" smooth="yes"/>
<point x="920" y="1394"/>
<point x="894" y="1420"/>
<point x="876" y="1420" type="qcurve" smooth="yes"/>
<point x="324" y="1420" type="line" smooth="yes"/>
<point x="306" y="1420"/>
<point x="280" y="1394"/>
</contour>
<contour>
<point x="338" y="1152" type="qcurve" smooth="yes"/>
<point x="338" y="1341" type="line" smooth="yes"/>
<point x="338" y="1362"/>
<point x="361" y="1362" type="qcurve" smooth="yes"/>
<point x="376" y="1362" type="line" smooth="yes"/>
<point x="396" y="1362"/>
<point x="396" y="1341" type="qcurve" smooth="yes"/>
<point x="396" y="1152" type="line" smooth="yes"/>
<point x="396" y="1129"/>
<point x="376" y="1129" type="qcurve" smooth="yes"/>
<point x="361" y="1129" type="line" smooth="yes"/>
<point x="338" y="1129"/>
</contour>
<contour>
<point x="920" y="102" type="qcurve" smooth="yes"/>
<point x="920" y="914" type="line"/>
<point x="920" y="1001" type="line" smooth="yes"/>
<point x="920" y="1018"/>
<point x="894" y="1045"/>
<point x="876" y="1045" type="qcurve" smooth="yes"/>
<point x="324" y="1045" type="line" smooth="yes"/>
<point x="306" y="1045"/>
<point x="280" y="1018"/>
<point x="280" y="1001" type="qcurve" smooth="yes"/>
<point x="280" y="102" type="line" smooth="yes"/>
<point x="280" y="84"/>
<point x="306" y="58"/>
<point x="324" y="58" type="qcurve" smooth="yes"/>
<point x="876" y="58" type="line" smooth="yes"/>
<point x="894" y="58"/>
<point x="920" y="84"/>
</contour>
<contour>
<point x="396" y="233"/>
<point x="373" y="233" type="qcurve" smooth="yes"/>
<point x="361" y="233" type="line" smooth="yes"/>
<point x="338" y="233"/>
<point x="338" y="256" type="qcurve" smooth="yes"/>
<point x="338" y="905" type="line" smooth="yes"/>
<point x="338" y="928"/>
<point x="361" y="928" type="qcurve" smooth="yes"/>
<point x="373" y="928" type="line" smooth="yes"/>
<point x="396" y="928"/>
<point x="396" y="905" type="qcurve" smooth="yes"/>
<point x="396" y="256" type="line" smooth="yes"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:45:38 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/font-awesome-extension.ufo/glyphs/refrigerator.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-awesome-extension.ufo/glyphs/refrigerator.glif",
"repo_id": "cascadia-code",
"token_count": 1612
}
| 737 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="grabber" format="2">
<advance width="1200"/>
<unicode hex="F4A6"/>
<note>
grabber
</note>
<outline>
<contour>
<point x="1026" y="83"/>
<point x="1026" y="201"/>
<point x="943" y="284"/>
<point x="825" y="284"/>
<point x="742" y="201"/>
<point x="742" y="83"/>
<point x="825" y="0"/>
<point x="943" y="0"/>
</contour>
<contour>
<point x="1026" y="651"/>
<point x="1026" y="769"/>
<point x="943" y="852"/>
<point x="825" y="852"/>
<point x="742" y="769"/>
<point x="742" y="651"/>
<point x="825" y="568"/>
<point x="943" y="568"/>
</contour>
<contour>
<point x="458" y="83"/>
<point x="458" y="201"/>
<point x="375" y="284"/>
<point x="257" y="284"/>
<point x="174" y="201"/>
<point x="174" y="83"/>
<point x="257" y="0"/>
<point x="375" y="0"/>
</contour>
<contour>
<point x="943" y="1420"/>
<point x="825" y="1420"/>
<point x="742" y="1337"/>
<point x="742" y="1219"/>
<point x="825" y="1136"/>
<point x="943" y="1136"/>
<point x="1026" y="1219"/>
<point x="1026" y="1337"/>
</contour>
<contour>
<point x="375" y="852"/>
<point x="257" y="852"/>
<point x="174" y="769"/>
<point x="174" y="651"/>
<point x="257" y="568"/>
<point x="375" y="568"/>
<point x="458" y="651"/>
<point x="458" y="769"/>
</contour>
<contour>
<point x="458" y="1219"/>
<point x="458" y="1337"/>
<point x="375" y="1420"/>
<point x="257" y="1420"/>
<point x="174" y="1337"/>
<point x="174" y="1219"/>
<point x="257" y="1136"/>
<point x="375" y="1136"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:49:53 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/grabber.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/grabber.glif",
"repo_id": "cascadia-code",
"token_count": 1061
}
| 738 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>ascender</key>
<integer>819</integer>
<key>capHeight</key>
<integer>0</integer>
<key>copyright</key>
<string>Nerd Fonts</string>
<key>descender</key>
<integer>-205</integer>
<key>familyName</key>
<string>Nerd Font File Types</string>
<key>guidelines</key>
<array/>
<key>italicAngle</key>
<integer>0</integer>
<key>openTypeHeadCreated</key>
<string>2023/11/22 16:13:24</string>
<key>openTypeHheaAscender</key>
<integer>900</integer>
<key>openTypeHheaDescender</key>
<integer>0</integer>
<key>openTypeNameManufacturerURL</key>
<string>https://github.com/ryanoasis/nerd-fonts</string>
<key>openTypeNamePreferredSubfamilyName</key>
<string>Regular</string>
<key>openTypeOS2Panose</key>
<array>
<integer>2</integer>
<integer>0</integer>
<integer>5</integer>
<integer>3</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
</array>
<key>openTypeOS2StrikeoutPosition</key>
<integer>265</integer>
<key>openTypeOS2StrikeoutSize</key>
<integer>51</integer>
<key>openTypeOS2Type</key>
<array/>
<key>openTypeOS2TypoAscender</key>
<integer>819</integer>
<key>openTypeOS2TypoDescender</key>
<integer>-205</integer>
<key>openTypeOS2TypoLineGap</key>
<integer>92</integer>
<key>openTypeOS2UnicodeRanges</key>
<array>
<integer>0</integer>
<integer>60</integer>
</array>
<key>openTypeOS2VendorID</key>
<string>PfEd</string>
<key>openTypeOS2WeightClass</key>
<integer>400</integer>
<key>openTypeOS2WidthClass</key>
<integer>5</integer>
<key>openTypeOS2WinAscent</key>
<integer>900</integer>
<key>openTypeOS2WinDescent</key>
<integer>0</integer>
<key>postscriptBlueShift</key>
<integer>12</integer>
<key>postscriptBlueValues</key>
<array/>
<key>postscriptFamilyBlues</key>
<array/>
<key>postscriptFamilyOtherBlues</key>
<array/>
<key>postscriptFontName</key>
<string>NerdFontFileTypes-Regular</string>
<key>postscriptOtherBlues</key>
<array/>
<key>postscriptStemSnapH</key>
<array/>
<key>postscriptStemSnapV</key>
<array/>
<key>postscriptUnderlinePosition</key>
<integer>-102</integer>
<key>postscriptUnderlineThickness</key>
<integer>51</integer>
<key>styleMapStyleName</key>
<string>regular</string>
<key>styleName</key>
<string>Regular</string>
<key>unitsPerEm</key>
<integer>2048</integer>
<key>versionMajor</key>
<integer>3</integer>
<key>versionMinor</key>
<integer>100</integer>
<key>xHeight</key>
<integer>0</integer>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/fontinfo.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/fontinfo.plist",
"repo_id": "cascadia-code",
"token_count": 1331
}
| 739 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="i_custom_windows" format="2">
<advance width="1200"/>
<unicode hex="E62A"/>
<note>
i_custom_windows
</note>
<outline>
<contour>
<point x="620" y="690" type="line"/>
<point x="620" y="410" type="line"/>
<point x="620" y="130" type="line"/>
<point x="1180" y="130" type="line"/>
<point x="1180" y="690" type="line"/>
</contour>
<contour>
<point x="20" y="690" type="line"/>
<point x="20" y="410" type="line"/>
<point x="20" y="130" type="line"/>
<point x="580" y="130" type="line"/>
<point x="580" y="690" type="line"/>
</contour>
<contour>
<point x="620" y="1290" type="line"/>
<point x="620" y="1010" type="line"/>
<point x="620" y="730" type="line"/>
<point x="1180" y="730" type="line"/>
<point x="1180" y="1290" type="line"/>
</contour>
<contour>
<point x="20" y="1290" type="line"/>
<point x="20" y="1010" type="line"/>
<point x="20" y="730" type="line"/>
<point x="580" y="730" type="line"/>
<point x="580" y="1290" type="line"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:42:08 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_custom_windows.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_custom_windows.glif",
"repo_id": "cascadia-code",
"token_count": 634
}
| 740 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="i_seti_default" format="2">
<advance width="1200"/>
<unicode hex="E64E"/>
<note>
i_seti_default
</note>
<outline>
<contour>
<point x="1180" y="604" type="line"/>
<point x="20" y="604" type="line"/>
<point x="20" y="449" type="line"/>
<point x="1180" y="449" type="line"/>
</contour>
<contour>
<point x="668" y="971" type="line"/>
<point x="20" y="971" type="line"/>
<point x="20" y="817" type="line"/>
<point x="668" y="817" type="line"/>
</contour>
<contour>
<point x="1180" y="1338" type="line"/>
<point x="20" y="1338" type="line"/>
<point x="20" y="1184" type="line"/>
<point x="1180" y="1184" type="line"/>
</contour>
<contour>
<point x="882" y="236" type="line"/>
<point x="20" y="236" type="line"/>
<point x="20" y="82" type="line"/>
<point x="882" y="82" type="line"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:42:08 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_default.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_default.glif",
"repo_id": "cascadia-code",
"token_count": 558
}
| 741 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="i_seti_hex" format="2">
<advance width="1200"/>
<unicode hex="E667"/>
<note>
i_seti_hex
</note>
<outline>
<contour>
<point x="310" y="208" type="line"/>
<point x="890" y="208" type="line"/>
<point x="1180" y="710" type="line"/>
<point x="890" y="1212" type="line"/>
<point x="310" y="1212" type="line"/>
<point x="20" y="710" type="line"/>
</contour>
<contour>
<point x="894" y="710" type="line"/>
<point x="747" y="456" type="line"/>
<point x="453" y="456" type="line"/>
<point x="306" y="710" type="line"/>
<point x="453" y="964" type="line"/>
<point x="747" y="964" type="line"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:42:08 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_hex.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_hex.glif",
"repo_id": "cascadia-code",
"token_count": 441
}
| 742 |
# Support
## How to file issues and get help
This project uses GitHub Issues to track bugs and feature requests. Please search the existing
issues before filing new issues to avoid duplicates. For new issues, file your bug or
feature request as a new Issue.
## Microsoft Support Policy
Support for this project is limited to the resources listed above.
|
causica/SUPPORT.md/0
|
{
"file_path": "causica/SUPPORT.md",
"repo_id": "causica",
"token_count": 85
}
| 743 |
from typing import Tuple
import torch
import torchsde
from scotch.sdes.sdes_core import SDE
from tensordict import TensorDict
from torch import Tensor
def generate_and_return_trajectories(
sde_class: SDE,
z0: Tensor,
num_time_points: int,
t_max: float,
device: str = "cuda",
normalize: bool = False,
dt: float = 1e-3,
return_raw: bool = False,
**kwargs,
) -> Tuple[Tensor, TensorDict, torchsde.BrownianInterval]:
"""Generate synthetic trajectories.
z0 = torch.full(size=(n, state_size), fill_value=0.0, device=device)
Args:
sde_class: SDE class to generate trajectories for.
z0: Tensor of shape (n, state_size) of initial points.
num_time_points: Number of time points to generate for each trajectory.
t_max: Maximum time point to generate for each trajectory.
return_bm: Whether to return the Brownian motion used to generate the trajectories.
device: Device to generate trajectories on.
normalize: Whether to normalize the trajectories per variable.
dt: Time step to use for SDE integration.
return_raw: Whether to return the raw trajectories or TensorDict version.
**kwargs: Any additional arguments to pass to the SDE class.
Returns:
ts: Time points of generated trajectories; shape (num_time_points,).
zs_td: TensorDict of generated trajectories.
bm: Brownian motion used to generate trajectories; returned if return_bm.
"""
n, state_size = z0.shape
ts = torch.linspace(0, t_max, num_time_points)
bm = torchsde.BrownianInterval(
t0=0.0, t1=t_max, size=(n, state_size), levy_area_approximation="space-time", device=device
)
zs = torchsde.sdeint(sde_class(**kwargs), z0, ts, bm=bm, dt=dt, method="euler") # (t_size, batch_size, state_size)
zs = zs.permute(1, 0, 2) # reshape into format (batch_size, t_size, state_size)
print(zs.shape)
zs = (zs - zs.mean(dim=(0, 1))) / zs.std(dim=(0, 1)) if normalize else zs
zs_td = TensorDict(
{f"x{i}": zs[:, :, i].unsqueeze(dim=2) for i in range(state_size)},
batch_size=[n],
)
if return_raw:
return ts, zs, bm
return ts, zs_td, bm
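# --- Usage sketch (not part of the original module) -------------------------
# A minimal, hedged example of calling generate_and_return_trajectories.
# _ToyOU below is a hypothetical Ornstein-Uhlenbeck SDE written against the
# torchsde convention (f/g methods plus noise_type/sde_type attributes); it
# assumes the scotch SDE base class can be subclassed without required
# constructor arguments and is only here to illustrate the call signature.
if __name__ == "__main__":
    class _ToyOU(SDE):
        noise_type = "diagonal"
        sde_type = "ito"
        def __init__(self, theta: float = 1.0, sigma: float = 0.5):
            super().__init__()
            self.theta = theta
            self.sigma = sigma
        def f(self, t, y):
            # Drift pulling the state back towards zero.
            return -self.theta * y
        def g(self, t, y):
            # Constant diagonal diffusion.
            return self.sigma * torch.ones_like(y)
    z0 = torch.zeros(8, 2)  # 8 trajectories with a 2-dimensional state
    ts, zs_td, bm = generate_and_return_trajectories(
        _ToyOU, z0, num_time_points=50, t_max=1.0, device="cpu", theta=1.0, sigma=0.5
    )
    print(ts.shape, zs_td)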
|
causica/research_experiments/scotch/src/scotch/dataset_generation/generate_trajectories.py/0
|
{
"file_path": "causica/research_experiments/scotch/src/scotch/dataset_generation/generate_trajectories.py",
"repo_id": "causica",
"token_count": 914
}
| 744 |
from dataclasses import dataclass, field
import torch
from tensordict import TensorDictBase
@dataclass
class InterventionData:
"""
Dataclass to hold the data associated with an intervention
This represents one intervention and many samples from the intervened distribution
    The class also stores `sampled_nodes`, i.e. the nodes that are neither intervened on nor conditioned on
Args:
intervention_data: A `TensorDict` with all the nodes (including intervened/conditioned)
intervention_values: A dictionary of node names to 1D numpy arrays of the intervened values
condition_values: A dictionary of node names to 1D numpy arrays of the conditioned values
"""
intervention_data: TensorDictBase
intervention_values: TensorDictBase
condition_values: TensorDictBase
    sampled_nodes: set[str] = field(init=False)  # the nodes that are neither intervened on nor conditioned on
def __post_init__(self):
assert self.intervention_values.batch_size == torch.Size()
assert self.condition_values.batch_size == torch.Size()
self.sampled_nodes = (
set(self.intervention_data.keys())
- set(self.intervention_values.keys())
- set(self.condition_values.keys())
)
@dataclass
class CounterfactualData:
"""
Dataclass to hold the data associated with a counterfactual
This represents one intervention and reference and many samples from the intervened and reference
distributions
    The class also stores `sampled_nodes`, i.e. the nodes that are neither intervened on nor conditioned on
Args:
counterfactual_data: A `TensorDict` with all of the node values (including intervened) of counterfactual data
factual_data: A `TensorDict` with all of the node values of the base observations used for the counterfactuals data.
This refers to the observations in "What would have happened (CFs) if I would have done (intervention) given
I observed (base observation).
intervention_values: A dictionary of node names to 1D numpy arrays of the intervened values
"""
counterfactual_data: TensorDictBase
intervention_values: TensorDictBase
factual_data: TensorDictBase
sampled_nodes: set[str] = field(init=False)
def __post_init__(self):
assert list(self.counterfactual_data.keys()) == list(self.factual_data.keys())
assert self.counterfactual_data.batch_size == self.factual_data.batch_size
assert self.intervention_values.batch_size == torch.Size()
self.sampled_nodes = set(self.counterfactual_data.keys()) - set(self.intervention_values.keys())
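# --- Usage sketch (not part of the original module) -------------------------
# A minimal, hedged example of constructing InterventionData. The node names
# ("x0", "x1", "x2") and values are illustrative only; the structural
# requirements are that intervention_values and condition_values have an empty
# batch size and that their keys appear in intervention_data.
if __name__ == "__main__":
    from tensordict import TensorDict
    n_samples = 4
    samples = TensorDict(
        {
            "x0": torch.randn(n_samples, 1),
            "x1": torch.full((n_samples, 1), 2.0),  # the intervened node
            "x2": torch.randn(n_samples, 1),
        },
        batch_size=[n_samples],
    )
    intervention = InterventionData(
        intervention_data=samples,
        intervention_values=TensorDict({"x1": torch.tensor([2.0])}, batch_size=[]),
        condition_values=TensorDict({}, batch_size=[]),
    )
    print(intervention.sampled_nodes)  # expected: {"x0", "x2"}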
|
causica/src/causica/datasets/interventional_data.py/0
|
{
"file_path": "causica/src/causica/datasets/interventional_data.py",
"repo_id": "causica",
"token_count": 877
}
| 745 |
from typing import Generic, TypeVar
from torch import distributions as td
from torch import nn
DistributionType_co = TypeVar("DistributionType_co", bound=td.Distribution, covariant=True)
class DistributionModule(Generic[DistributionType_co], nn.Module):
"""Baseclass for modules returning distributions.
Useful e.g. to create variational approximations of distributions.
Subclasses are expected to implement a `forward` method that returns a concrete `td.Distribution` and should usually
    inherit from a concrete version of this class, i.e. `DistributionModule[<td.Distribution subclass>]`.
"""
def __call__(self, *args, **kwargs) -> DistributionType_co:
"""Return a td.Distribution."""
return super().__call__(*args, **kwargs)
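# --- Usage sketch (not part of the original module) -------------------------
# A minimal, hedged example of a concrete DistributionModule: a learnable
# diagonal Gaussian whose forward returns a td.Normal. The subclass below is
# illustrative only and not part of causica's public API.
if __name__ == "__main__":
    import torch
    class _DiagonalNormalModule(DistributionModule[td.Normal]):
        def __init__(self, dim: int):
            super().__init__()
            self.loc = nn.Parameter(torch.zeros(dim))
            self.log_scale = nn.Parameter(torch.zeros(dim))
        def forward(self) -> td.Normal:
            return td.Normal(self.loc, self.log_scale.exp())
    module = _DiagonalNormalModule(dim=3)
    dist = module()  # __call__ is typed to return the concrete distribution
    print(dist.sample().shape)  # torch.Size([3])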
|
causica/src/causica/distributions/distribution_module.py/0
|
{
"file_path": "causica/src/causica/distributions/distribution_module.py",
"repo_id": "causica",
"token_count": 227
}
| 746 |
from typing import Generic, Mapping, TypeVar
import torch
import torch.distributions as td
from tensordict import TensorDictBase
from torch import nn
from causica.distributions.transforms.base import TransformModule, TypedTransform
class JointTransform(TypedTransform[TensorDictBase, TensorDictBase]):
"""A joint transform that applies a different transform to each key in the TensorDict.
Keys in the input that are not found in the transform are left unchanged.
This is heavily inspired by the `torch.distributions.transforms.StackTransform` class.
See https://pytorch.org/docs/stable/distributions.html#torch.distributions.transforms.StackTransform
"""
def __init__(self, transformations: Mapping[str, td.Transform], cache_size: int = 0):
"""
Args:
transformations: A dictionary of transforms, where the keys are the keys in the TensorDict
cache_size: Size of cache. If zero, no caching is done. If one, the latest single value is cached.
Only 0 and 1 are supported.
"""
bad_transformation_types = {type(t) for t in transformations.values() if not isinstance(t, td.Transform)}
if bad_transformation_types:
raise TypeError(
"All transformations must be subtypes of `torch.distributions.Transform`, but the "
f"following are not: {bad_transformation_types} are not."
)
if cache_size:
transformations = {key: t.with_cache(cache_size) for key, t in transformations.items()}
super().__init__(cache_size=cache_size)
self.transformations = transformations
def _call(self, x: TensorDictBase) -> TensorDictBase:
return x.clone().update(
{key: transform(x[key]) for key, transform in self.transformations.items() if key in x.keys()}
)
def _inverse(self, y: TensorDictBase) -> TensorDictBase:
# We cannot use ._inv as pylint complains with E202: _inv is hidden because of `self._inv = None`
# in td.Transform.__init__
return y.clone().update(
{key: transform.inv(y[key]) for key, transform in self.transformations.items() if key in y.keys()}
)
def log_abs_det_jacobian(self, x: TensorDictBase, y: TensorDictBase) -> TensorDictBase:
if set(x.keys()) != set(y.keys()):
raise ValueError("x and y must have the same keys.")
if not set(self.transformations.keys()) <= set(x.keys()):
raise ValueError("All keys in transformations must be in x and y.")
return x.clone().update(
{
key: self.transformations[key].log_abs_det_jacobian(x[key], y[key])
if key in self.transformations
else torch.zeros_like(x[key])
for key in x.keys()
}
)
@property
def bijective(self):
return all(t.bijective for t in self.transformations.values())
@property
def domain(self):
return {key: t.domain for key, t in self.transformations.items()}
@property
def codomain(self):
return {key: t.codomain for key, t in self.transformations.items()}
T_co = TypeVar("T_co", bound=nn.Module, covariant=True)
class _TypedModuleDict(Generic[T_co], nn.ModuleDict, Mapping[str, T_co]):
"""Allow a ModuleDict to be interpreted as a mapping."""
def __hash__(self) -> int:
return nn.ModuleDict.__hash__(self)
class JointTransformModule(JointTransform, TransformModule[TensorDictBase, TensorDictBase]):
"""Joint transform with TransformModule transformations applied per key to a TensorDict."""
def __init__(self, transformations: Mapping[str, TransformModule], *args, **kwargs):
"""
Args:
transformations: A mapping of transforms, where the keys are the keys in the TensorDict.
*args, **kwargs: Passed to the JointTransform.
"""
super().__init__(transformations, *args, **kwargs)
self.transformations = _TypedModuleDict[TransformModule](transformations)
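# --- Usage sketch (not part of the original module) -------------------------
# A minimal, hedged example of JointTransform applied per key of a TensorDict.
# td.ExpTransform and td.AffineTransform are standard torch transforms; the key
# names ("a", "b", "c") are illustrative only. Keys without a registered
# transform ("c") pass through unchanged.
if __name__ == "__main__":
    from tensordict import TensorDict
    joint = JointTransform({"a": td.ExpTransform(), "b": td.AffineTransform(loc=1.0, scale=2.0)})
    x = TensorDict({"a": torch.zeros(4, 1), "b": torch.ones(4, 1), "c": torch.ones(4, 1)}, batch_size=[4])
    y = joint(x)  # y["a"] == exp(0) == 1, y["b"] == 1 + 2 * 1 == 3, y["c"] is unchanged
    x_back = joint.inv(y)  # round-trips the transformed keys back to their originals
    print(y["a"].mean().item(), y["b"].mean().item(), x_back["a"].mean().item())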
|
causica/src/causica/distributions/transforms/joint.py/0
|
{
"file_path": "causica/src/causica/distributions/transforms/joint.py",
"repo_id": "causica",
"token_count": 1587
}
| 747 |
import abc
import pytorch_lightning as pl
import torch
from tensordict import TensorDict
from causica.datasets.variable_types import VariableTypeEnum
class DECIDataModule(pl.LightningDataModule, abc.ABC):
"""An Abstract Data Module containing the methods required by a `DECIModule`."""
@property
@abc.abstractmethod
def dataset_name(self) -> str:
"""The name of this dataset"""
@property
@abc.abstractmethod
def dataset_train(self) -> TensorDict:
"""The training dataset"""
@property
@abc.abstractmethod
def dataset_test(self) -> TensorDict:
"""The testing dataset"""
@property
@abc.abstractmethod
def variable_shapes(self) -> dict[str, torch.Size]:
"""Get the shape of each variable in the dataset."""
@property
@abc.abstractmethod
def variable_types(self) -> dict[str, VariableTypeEnum]:
"""Get the type of each variable in the dataset."""
@property
@abc.abstractmethod
def column_names(self) -> dict[str, list[str]]:
"""Get a map of the node names and the corresponding columns of the original dataset."""
|
causica/src/causica/lightning/data_modules/deci_data_module.py/0
|
{
"file_path": "causica/src/causica/lightning/data_modules/deci_data_module.py",
"repo_id": "causica",
"token_count": 403
}
| 748 |
import networkx as nx
import torch
from causica.distributions import ErdosRenyiDAGDistribution
def test_samples_dags():
"""Test that all samples are DAGs"""
p = torch.tensor([[0.7, 0.4]])
n = 5
sample_shape = torch.Size([3, 4])
dist = ErdosRenyiDAGDistribution(num_nodes=n, probs=p)
samples = dist.sample(sample_shape).numpy()
assert samples.shape == torch.Size(sample_shape + p.shape + (n, n))
flat_samples = samples.reshape((-1, n, n))
for dag in flat_samples:
assert nx.is_directed_acyclic_graph(nx.from_numpy_array(dag, create_using=nx.DiGraph))
def test_mode():
"""Test the mode either returns a lower triangle or a matrix of zeros."""
p = torch.tensor(0.6)
mode = ErdosRenyiDAGDistribution(num_nodes=5, probs=p).mode
torch.testing.assert_close(mode, torch.tril(torch.ones_like(mode), diagonal=-1))
p = torch.tensor(0.2)
mode = ErdosRenyiDAGDistribution(num_nodes=5, probs=p).mode
torch.testing.assert_close(mode, torch.zeros_like(mode))
def test_extreme_sample():
"""Test that extreme probabilities give rise to expected graphs"""
n = 6
p = torch.tensor(1.0)
sample = ErdosRenyiDAGDistribution(num_nodes=n, probs=p).sample()
torch.testing.assert_close(sample.sum(dim=(-2, -1)).item(), n * (n - 1) / 2.0)
p = torch.tensor(0.0)
sample = ErdosRenyiDAGDistribution(num_nodes=n, probs=p).sample()
torch.testing.assert_close(sample, torch.zeros_like(sample))
def test_num_edges():
num_edges = 16
samples = ErdosRenyiDAGDistribution(num_nodes=8, num_edges=torch.tensor(num_edges)).sample(torch.Size([100]))
assert samples.shape == torch.Size([100, 8, 8])
torch.testing.assert_close(
samples.sum(dim=(-2, -1)).mean(), torch.tensor(num_edges, dtype=torch.float32), atol=2.0, rtol=0.1
)
samples = ErdosRenyiDAGDistribution(num_nodes=2, num_edges=torch.tensor(2)).sample(torch.Size([100]))
assert samples.shape == torch.Size([100, 2, 2])
torch.testing.assert_close(samples.sum(dim=(-2, -1)).mean(), torch.tensor(1, dtype=torch.float32))
|
causica/test/distributions/adjacency/test_directed_acyclic.py/0
|
{
"file_path": "causica/test/distributions/adjacency/test_directed_acyclic.py",
"repo_id": "causica",
"token_count": 860
}
| 749 |
"""Tests for the different TransformModules to make sure tensors are properly registered."""
import io
import itertools
from typing import Any, TypeVar
import pytest
import torch
from tensordict import TensorDictBase, make_tensordict
from causica.distributions.transforms import SequentialTransformModule
from causica.distributions.transforms.base import TransformModule
from causica.distributions.transforms.joint import JointTransformModule
class _OffsetTransformModule(TransformModule[torch.Tensor, torch.Tensor]):
"""Dummy transform module that adds a constant to the input tensor.
Used for testing the registration of transform modules."""
def __init__(self, offset: torch.Tensor):
super().__init__(cache_size=0)
self.offset: torch.Tensor
self.register_buffer("offset", offset)
def _call(self, x: torch.Tensor) -> torch.Tensor:
return x + self.offset
def _inverse(self, y: torch.Tensor) -> torch.Tensor:
return y - self.offset
def _test_triplets():
"""Generate test triplets of (data, transform, expected_result)."""
data = torch.randn((3, 1), dtype=torch.float32)
offset = torch.full((3, 1), 7.5, dtype=torch.float32)
transform = _OffsetTransformModule(offset)
return [
(data, transform, data + offset),
(data, SequentialTransformModule[torch.Tensor, torch.Tensor](transform, transform.inv), data),
(data, SequentialTransformModule[torch.Tensor, torch.Tensor](transform, transform), data + 2 * offset),
(make_tensordict({"a": data}), JointTransformModule({"a": transform}), make_tensordict({"a": data + offset})),
]
X = TypeVar("X", torch.Tensor, TensorDictBase)
Y = TypeVar("Y", torch.Tensor, TensorDictBase)
@pytest.mark.parametrize("data,transform,expected_result", _test_triplets())
def test_transform_module_output(data: X, transform: TransformModule[X, Y], expected_result: Y) -> None:
output = transform(data)
torch.testing.assert_close(output, expected_result)
inverse = transform.inv
assert inverse.inv is transform
torch.testing.assert_close(inverse(output), data)
@pytest.mark.parametrize("data,transform,_", _test_triplets())
@pytest.mark.parametrize("to_kwargs", [{"dtype": torch.float16}])
def test_registration(data: X, transform: TransformModule[X, Y], _, to_kwargs: dict[str, Any]) -> None:
"""Test that registration is working by testing that the state can be moved and loaded."""
transform_modified: TransformModule[X, Y] = transform.to(**to_kwargs)
# Collect parameters and buffers as tensors
tensors = dict(itertools.chain(transform.named_buffers(), transform.named_parameters()))
tensors_modified = dict(itertools.chain(transform_modified.named_buffers(), transform_modified.named_parameters()))
# Check that the tensors are equivalent
assert set(tensors) == set(tensors_modified)
for name in tensors:
torch.testing.assert_close(tensors[name].to(**to_kwargs), tensors_modified[name])
# Check that the state dict is consistent and picklable
state_dict = transform_modified.state_dict()
with io.BytesIO() as f:
torch.save(state_dict, f)
f.seek(0)
state_dict = torch.load(f)
for name in tensors:
torch.testing.assert_close(tensors[name].to(**to_kwargs), state_dict[name])
# Produce the output for x
if isinstance(data, TensorDictBase):
x_modified = data.apply(lambda x_: x_.to(**to_kwargs))
else:
x_modified = data.to(**to_kwargs)
y_modified = transform_modified(x_modified)
y = transform(data)
# Check that the output remains correct, i.e. the transformation is approx equivariant w.r.t. the `to` operator.
if isinstance(y, TensorDictBase):
assert isinstance(y_modified, TensorDictBase) # plays nicer with mypy than checking type equality
for key in y.keys():
torch.testing.assert_close(y_modified.get(key), y.get(key).to(**to_kwargs), atol=2e-2, rtol=1e-2)
else:
assert isinstance(y_modified, torch.Tensor) # plays nicer with mypy than checking type equality
torch.testing.assert_close(y_modified, y.to(**to_kwargs), atol=2e-2, rtol=1e-2)
def test_transform_module_registration_buffers() -> None:
# Check that z is in buffers
offset = torch.randn((5, 1))
transform = _OffsetTransformModule(offset)
buffers = dict(transform.named_buffers())
torch.testing.assert_close(buffers["offset"], offset)
def test_sequential_transform_module_inner_buffers() -> None:
offset = torch.randn((5, 1))
transform = _OffsetTransformModule(offset)
seq_transform = SequentialTransformModule[torch.Tensor, torch.Tensor](transform, transform.inv)
# Check that buffers are stored for the inner transformation
seq_buffers = dict(seq_transform.named_buffers())
for name, buffer in transform.named_buffers():
torch.testing.assert_close(seq_buffers[f"0.{name}"], buffer)
def test_joint_transform_module_inner_buffers() -> None:
offset = torch.randn((2,))
transform = _OffsetTransformModule(offset)
joint_transform = JointTransformModule({"a": transform})
# Check that buffers are stored for the inner transformation
joint_buffers = dict(joint_transform.named_buffers())
for name, buffer in transform.named_buffers():
torch.testing.assert_close(joint_buffers[f"transformations.a.{name}"], buffer)
|
causica/test/distributions/transforms/test_transform_modules.py/0
|
{
"file_path": "causica/test/distributions/transforms/test_transform_modules.py",
"repo_id": "causica",
"token_count": 1910
}
| 750 |
import pytest
import torch
from causica.nn import DECIEmbedNN
PROCESSED_DIM = 6
NODE_NUM = 4
GROUP_MASK = torch.tensor(
[
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1],
],
dtype=torch.float32,
)
assert GROUP_MASK.shape == (NODE_NUM, PROCESSED_DIM)
GRAPH_SHAPES = [tuple(), (5,), (2, 3)]
SAMPLE_SHAPES = [tuple(), (3,), (1, 2)]
@pytest.mark.parametrize("graph_shape", GRAPH_SHAPES)
@pytest.mark.parametrize("sample_shape", SAMPLE_SHAPES)
def test_fgnni_broadcast(graph_shape, sample_shape):
graph_tensor = torch.randint(0, 2, (*graph_shape, NODE_NUM, NODE_NUM), dtype=torch.float32)
sample_tensor = torch.randn((*sample_shape, *graph_shape, PROCESSED_DIM))
fgnni = DECIEmbedNN(group_mask=GROUP_MASK, embedding_size=32, out_dim_g=32, num_layers_g=2, num_layers_zeta=2)
out = fgnni(sample_tensor, graph_tensor)
assert out.shape == sample_shape + graph_shape + (PROCESSED_DIM,)
|
causica/test/nn/test_deci_embed_nn.py/0
|
{
"file_path": "causica/test/nn/test_deci_embed_nn.py",
"repo_id": "causica",
"token_count": 465
}
| 751 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
from typing import Union
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.modules.utils import _pair, _single, _triple
from ...cliffordkernels import (
get_1d_clifford_kernel,
get_2d_clifford_kernel,
get_2d_clifford_rotation_kernel,
get_3d_clifford_kernel,
)
from ...signature import CliffordSignature
from ..functional.utils import clifford_convnd
class _CliffordConvNd(nn.Module):
"""Base class for all Clifford convolution modules."""
def __init__(
self,
g: Union[tuple, list, torch.Tensor],
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int,
padding: int,
dilation: int,
groups: int,
bias: bool,
padding_mode: str,
rotation: bool = False,
) -> None:
super().__init__()
sig = CliffordSignature(g)
# register as buffer as we want the tensor to be moved to the same device as the module
self.register_buffer("g", sig.g)
self.dim = sig.dim
self.n_blades = sig.n_blades
if rotation:
assert (
self.dim == 2
), "2d rotational Clifford layers are only available for g = [-1, -1]. Make sure you have the right signature."
if self.dim == 1:
self._get_kernel = get_1d_clifford_kernel
elif self.dim == 2 and rotation:
self._get_kernel = get_2d_clifford_rotation_kernel
elif self.dim == 2:
self._get_kernel = get_2d_clifford_kernel
elif self.dim == 3:
self._get_kernel = get_3d_clifford_kernel
else:
raise NotImplementedError(
f"Clifford convolution not implemented for {self.dim} dimensions. Wrong Clifford signature."
)
if padding_mode != "zeros":
raise NotImplementedError(f"Padding mode {padding_mode} not implemented.")
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.padding_mode = padding_mode
self.rotation = rotation
self.weight = nn.ParameterList(
[nn.Parameter(torch.empty(out_channels, in_channels // groups, *kernel_size)) for _ in range(self.n_blades)]
)
if bias:
self.bias = nn.Parameter(torch.empty(self.n_blades, out_channels))
else:
self.register_parameter("bias", None)
if rotation:
self.scale_param = nn.Parameter(torch.Tensor(self.weight[0].shape))
self.zero_kernel = nn.Parameter(torch.zeros(self.weight[0].shape), requires_grad=False)
self.weight.append(self.scale_param)
self.weight.append(self.zero_kernel)
self.reset_parameters()
def reset_parameters(self):
"""Initialization of the Clifford convolution weight and bias tensors.
        The number of blades is taken into account when calculating the bounds of the Kaiming uniform initialization.
"""
for blade, w in enumerate(self.weight):
# Weight initialization for Clifford weights.
if blade < self.n_blades:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(
torch.Tensor(
self.out_channels, int(self.in_channels * self.n_blades / self.groups), *self.kernel_size
)
)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(w, -bound, bound)
# Extra weights for 2d Clifford rotation layer.
elif blade == self.n_blades:
assert self.rotation is True
# Default channel_in / channel_out initialization for scaling params.
nn.init.kaiming_uniform_(w, a=math.sqrt(5))
elif blade == self.n_blades + 1:
# Nothing to be done for zero kernel.
pass
else:
raise ValueError(
f"Wrong number of Clifford weights. Expected {self.n_blades} weight tensors, and 2 extra tensors for rotational kernels."
)
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(
torch.Tensor(self.out_channels, int(self.in_channels * self.n_blades / self.groups), *self.kernel_size)
)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, x: torch.Tensor, conv_fn: callable) -> torch.Tensor:
if self.bias is not None:
b = self.bias.view(-1)
else:
b = None
output_blades, w = self._get_kernel(self.weight, self.g)
return clifford_convnd(
conv_fn,
x,
output_blades,
w,
b,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
def extra_repr(self):
s = "{in_channels}, {out_channels}, kernel_size={kernel_size}" ", stride={stride}"
if self.padding != (0,) * len(self.padding):
s += ", padding={padding}"
if self.dilation != (1,) * len(self.dilation):
s += ", dilation={dilation}"
if self.groups != 1:
s += ", groups={groups}"
if self.bias is None:
s += ", bias=False"
return s.format(**self.__dict__)
class CliffordConv1d(_CliffordConvNd):
"""1d Clifford convolution.
Args:
g (Union[tuple, list, torch.Tensor]): Clifford signature.
in_channels (int): Number of channels in the input tensor.
out_channels (int): Number of channels produced by the convolution.
kernel_size (int): Size of the convolving kernel.
stride (int): Stride of the convolution.
padding (int): padding added to both sides of the input.
dilation (int): Spacing between kernel elements.
groups (int): Number of blocked connections from input channels to output channels.
bias (bool): If True, adds a learnable bias to the output.
padding_mode (str): Padding to use.
"""
def __init__(
self,
g: Union[tuple, list, torch.Tensor],
in_channels: int,
out_channels: int,
kernel_size: int = 3,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros",
) -> None:
kernel_size_ = _single(kernel_size)
stride_ = _single(stride)
padding_ = _single(padding)
dilation_ = _single(dilation)
super().__init__(
g,
in_channels,
out_channels,
kernel_size_,
stride_,
padding_,
dilation_,
groups,
bias,
padding_mode,
)
if not self.dim == 1:
raise NotImplementedError("Wrong Clifford signature for CliffordConv1d.")
def forward(self, x: torch.Tensor) -> torch.Tensor:
*_, I = x.shape
if not (I == self.n_blades):
raise ValueError(f"Input has {I} blades, but Clifford layer expects {self.n_blades}.")
return super().forward(x, F.conv1d)
class CliffordConv2d(_CliffordConvNd):
"""2d Clifford convolution.
Args:
g (Union[tuple, list, torch.Tensor]): Clifford signature.
in_channels (int): Number of channels in the input tensor.
out_channels (int): Number of channels produced by the convolution.
kernel_size (Union[int, Tuple[int, int]]): Size of the convolving kernel.
stride (Union[int, Tuple[int, int]]): Stride of the convolution.
padding (Union[int, Tuple[int, int]]): padding added to both sides of the input.
dilation (Union[int, Tuple[int, int]]): Spacing between kernel elements.
groups (int): Number of blocked connections from input channels to output channels.
bias (bool): If True, adds a learnable bias to the output.
padding_mode (str): Padding to use.
rotation (bool): If True, enables the rotation kernel for Clifford convolution.
"""
def __init__(
self,
g: Union[tuple, list, torch.Tensor],
in_channels: int,
out_channels: int,
kernel_size: int = 3,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros",
rotation: bool = False,
):
kernel_size_ = _pair(kernel_size)
stride_ = _pair(stride)
padding_ = padding if isinstance(padding, str) else _pair(padding)
dilation_ = _pair(dilation)
super().__init__(
g,
in_channels,
out_channels,
kernel_size_,
stride_,
padding_,
dilation_,
groups,
bias,
padding_mode,
rotation,
)
if not self.dim == 2:
raise NotImplementedError("Wrong Clifford signature for CliffordConv2d.")
def forward(self, x: torch.Tensor) -> torch.Tensor:
*_, I = x.shape
if not (I == self.n_blades):
raise ValueError(f"Input has {I} blades, but Clifford layer expects {self.n_blades}.")
return super().forward(x, F.conv2d)
class CliffordConv3d(_CliffordConvNd):
"""3d Clifford convolution.
Args:
g (Union[tuple, list, torch.Tensor]): Clifford signature.
in_channels (int): Number of channels in the input tensor.
out_channels (int): Number of channels produced by the convolution.
kernel_size (Union[int, Tuple[int, int, int]]): Size of the convolving kernel.
stride (Union[int, Tuple[int, int, int]]): Stride of the convolution.
padding (Union[int, Tuple[int, int, int]]): padding added to all sides of the input.
dilation (Union[int, Tuple[int, int, int]]): Spacing between kernel elements.
groups (int): Number of blocked connections from input channels to output channels.
bias (bool): If True, adds a learnable bias to the output.
padding_mode (str): Padding to use.
"""
def __init__(
self,
g: Union[tuple, list, torch.Tensor],
in_channels: int,
out_channels: int,
kernel_size: int = 3,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros",
):
kernel_size_ = _triple(kernel_size)
stride_ = _triple(stride)
padding_ = padding if isinstance(padding, str) else _triple(padding)
dilation_ = _triple(dilation)
super().__init__(
g,
in_channels,
out_channels,
kernel_size_,
stride_,
padding_,
dilation_,
groups,
bias,
padding_mode,
)
if not self.dim == 3:
raise NotImplementedError("Wrong Clifford signature for CliffordConv3d.")
def forward(self, x: torch.Tensor) -> torch.Tensor:
*_, I = x.shape
if not (I == self.n_blades):
raise ValueError(f"Input has {I} blades, but Clifford layer expects {self.n_blades}.")
return super().forward(x, F.conv3d)
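# --- Usage sketch (not part of the original module) -------------------------
# A minimal, hedged example of CliffordConv2d. For the signature g = [-1, -1]
# the algebra has 2^2 = 4 blades, so inputs carry a trailing blade dimension of
# size 4: (batch, in_channels, height, width, 4). Shapes are illustrative only.
if __name__ == "__main__":
    conv = CliffordConv2d(g=[-1, -1], in_channels=8, out_channels=16, kernel_size=3, padding=1)
    x = torch.randn(2, 8, 32, 32, 4)  # trailing dimension must equal the number of blades
    y = conv(x)
    print(y.shape)  # expected: torch.Size([2, 16, 32, 32, 4])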
|
cliffordlayers/cliffordlayers/nn/modules/cliffordconv.py/0
|
{
"file_path": "cliffordlayers/cliffordlayers/nn/modules/cliffordconv.py",
"repo_id": "cliffordlayers",
"token_count": 5423
}
| 752 |
# Installation Guide
```bash
pip install cliffordlayers
```
## For development
```bash title="clone the repo"
git clone https://github.com/microsoft/cliffordlayers
```
=== "`conda`"
```bash title="create and activate env"
cd cliffordlayers
conda env create --file docker/environment.yml
conda activate cliffordlayers
```
```bash title="make an editable install"
pip install -e .
```
=== "`docker`"
```bash title="build docker container"
cd cliffordlayers/docker
docker build -t cliffordlayers .
cd ..
```
```bash title="run docker container"
docker run --gpus all -it --rm --user $(id -u):$(id -g) \
-v $(pwd):/code --workdir /code -e PYTHONPATH=/code \
cliffordlayers:latest
```
|
cliffordlayers/docs/install.md/0
|
{
"file_path": "cliffordlayers/docs/install.md",
"repo_id": "cliffordlayers",
"token_count": 300
}
| 753 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from cliffordlayers.nn.functional.batchnorm import (
clifford_batch_norm,
complex_batch_norm,
)
from cliffordlayers.nn.modules.batchnorm import (
CliffordBatchNorm1d,
CliffordBatchNorm2d,
CliffordBatchNorm3d,
ComplexBatchNorm1d,
)
from cliffordlayers.signature import CliffordSignature
def test_clifford_batchnorm1d_vs_complex_batchnorm():
"""Test Clifford1d batchnorm function against complex batchnorm function using g = [-1]."""
x = torch.randn(4, 16, 8, 2)
x_norm_clifford = clifford_batch_norm(
x,
CliffordSignature(
[
-1,
]
).n_blades,
)
x_norm_complex = complex_batch_norm(torch.view_as_complex(x))
torch.testing.assert_close(x_norm_clifford, torch.view_as_real(x_norm_complex))
def test_clifford_batchnorm1d_vs_complex_batchnorm_scaled():
"""Test Clifford1d batchnorm function against complex batchnorm function using g = [-1],
where an affine transformation is applied.
"""
x = torch.randn(4, 16, 8, 2)
w = torch.randn(2, 2, 16)
b = torch.randn(2, 16)
x_norm_clifford = clifford_batch_norm(
x,
CliffordSignature(
[
-1,
]
).n_blades,
weight=w,
bias=b,
)
x_norm_complex = complex_batch_norm(
torch.view_as_complex(x),
weight=w,
bias=b,
)
torch.testing.assert_close(x_norm_clifford, torch.view_as_real(x_norm_complex))
def test_clifford_batchnorm1d_vs_complex_batchnorm_scaled_validation():
"""Test Clifford1d batchnorm function against complex batchnorm function in the validation setting using g = [-1],
where an affine transformation is applied.
"""
x = torch.randn(4, 16, 8, 2)
w = torch.randn(2, 2, 16)
b = torch.randn(2, 16)
x_norm_clifford = clifford_batch_norm(
x,
CliffordSignature(
[
-1,
]
).n_blades,
weight=w,
bias=b,
training=False,
)
x_norm_complex = complex_batch_norm(
torch.view_as_complex(x),
weight=w,
bias=b,
training=False,
)
torch.testing.assert_close(x_norm_clifford, torch.view_as_real(x_norm_complex))
def test_clifford_batchnorm1d_vs_complex_batchnorm_running_mean():
"""Test Clifford1d batchnorm function against complex batchnorm function using g = [-1],
    where a running mean and running covariance are provided.
"""
x = torch.randn(4, 16, 8, 2)
mean = torch.randn(2, 16)
# For the running covariance matrix, we need a positive definite form.
X = torch.randn(16, 2, 2)
cov = X @ X.mT
cov = cov.add_(torch.eye(2)).permute(1, 2, 0)
x_norm_clifford = clifford_batch_norm(
x,
CliffordSignature(
[
-1,
]
).n_blades,
running_mean=mean,
running_cov=cov,
training=True,
)
x_norm_complex = complex_batch_norm(
torch.view_as_complex(x),
running_mean=mean,
running_cov=cov,
training=True,
)
torch.testing.assert_close(x_norm_clifford, torch.view_as_real(x_norm_complex))
def test_modules_clifford_batchnorm1d_vs_complex_batchnorm1d():
"""Test Clifford1d batchnorm module against complex batchnorm module using g = [-1]."""
x = torch.randn(4, 16, 8, 2)
complex_norm = ComplexBatchNorm1d(
channels=16,
)
x_norm_complex = complex_norm(torch.view_as_complex(x))
clifford_norm = CliffordBatchNorm1d(
[
-1,
],
channels=16,
)
x_norm_clifford = clifford_norm(x)
torch.testing.assert_close(x_norm_clifford, torch.view_as_real(x_norm_complex))
def test_clifford_batchnorm2d():
"""Test Clifford2d batchnorm function for correct outputs using g = [1, 1]."""
x = torch.randn(4, 16, 8, 4)
x_norm_clifford = clifford_batch_norm(
x,
CliffordSignature([1, 1]).n_blades,
)
assert x_norm_clifford.shape == x.shape
def test_clifford_batchnorm2d_scaled():
"""Test Clifford2d batchnorm function for correct outputs using g = [1, 1],
where an affine transformation is applied.
"""
x = torch.randn(4, 16, 8, 4)
w = torch.randn(4, 4, 16)
b = torch.randn(4, 16)
x_norm_clifford = clifford_batch_norm(
x,
CliffordSignature([1, 1]).n_blades,
weight=w,
bias=b,
)
assert x_norm_clifford.shape == x.shape
def test_clifford_batchnorm3d():
"""Test Clifford3d batchnorm function for correct outputs using g = [1, 1, 1]."""
x = torch.randn(4, 16, 32, 32, 32, 8)
x_norm_clifford = clifford_batch_norm(
x,
CliffordSignature([1, 1, 1]).n_blades,
)
assert x_norm_clifford.shape == x.shape
def test_clifford_batchnorm3d_scaled():
"""Test Clifford3d batchnorm function for correct outputs using g = [1, 1, 1],
where an affine transformation is applied.
"""
x = torch.randn(4, 16, 32, 32, 32, 8)
w = torch.randn(8, 8, 16)
b = torch.randn(8, 16)
x_norm_clifford = clifford_batch_norm(
x,
CliffordSignature([1, 1, 1]).n_blades,
weight=w,
bias=b,
)
assert x_norm_clifford.shape == x.shape
def test_module_clifford_batchnorm2d():
"""Test Clifford2d batchnorm module for correct outputs using g = [1, 1]."""
x = torch.randn(4, 16, 64, 64, 4)
clifford_norm = CliffordBatchNorm2d(
[-1, -1],
channels=16,
)
x_norm_clifford = clifford_norm(x)
assert x.shape == x_norm_clifford.shape
def test_module_clifford_batchnorm3d():
"""Test Clifford3d batchnorm module for correct outputs using g = [1, 1]."""
x = torch.randn(4, 16, 64, 64, 64, 8)
clifford_norm = CliffordBatchNorm3d(
[-1, -1, -1],
channels=16,
)
x_norm_clifford = clifford_norm(x)
assert x.shape == x_norm_clifford.shape
|
cliffordlayers/tests/test_clifford_batchnorm.py/0
|
{
"file_path": "cliffordlayers/tests/test_clifford_batchnorm.py",
"repo_id": "cliffordlayers",
"token_count": 2735
}
| 754 |
#!/bin/bash
# download and install miniconda
# please update the link below according to the platform you are using (https://conda.io/miniconda.html)
# e.g. for Mac, change to https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
bash ./Miniconda3-latest-Linux-x86_64.sh -b -p $HOME/miniconda
export PATH="$HOME/miniconda/bin:$PATH"
# create a new environment named cgvae
conda create --name cgvae python=3.5 pip
source activate cgvae
# install cython
pip install Cython --install-option="--no-cython-compile"
# install rdkit
conda install -c rdkit rdkit
# install tensorflow 1.3
pip install --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.3.0-cp35-cp35m-linux_x86_64.whl
# install other requirements
pip install -r requirements.txt
# remove conda bash
rm ./Miniconda3-latest-Linux-x86_64.sh
|
constrained-graph-variational-autoencoder/install.sh/0
|
{
"file_path": "constrained-graph-variational-autoencoder/install.sh",
"repo_id": "constrained-graph-variational-autoencoder",
"token_count": 344
}
| 755 |
{
"project_name": "spaCy FastAPI Azure Cognitive Skill",
"project_slug": "{{ cookiecutter.project_name.lower().replace(' ', '_').replace('-', '_') }}",
"short_description": "spaCy FastAPI for Custom Cognitive Skills in Azure Search",
"spacy_model": "This must be one of spaCy's default models. See https://spacy.io/usage for a supported list."
}
|
cookiecutter-spacy-fastapi/cookiecutter.json/0
|
{
"file_path": "cookiecutter-spacy-fastapi/cookiecutter.json",
"repo_id": "cookiecutter-spacy-fastapi",
"token_count": 118
}
| 756 |
const axios = require('axios')
const fs = require('fs')
const pjson = require('../package.json')
const { convertData } = require('../src/float-utils-node.js')
const CollaborativeTrainer64 = artifacts.require("./CollaborativeTrainer64")
const DataHandler64 = artifacts.require("./data/DataHandler64")
const NearestCentroidClassifier = artifacts.require("./classification/NearestCentroidClassifier")
const Stakeable64 = artifacts.require("./incentive/Stakeable64")
module.exports = function (deployer) {
if (deployer.network === 'skipMigrations') {
return
}
// Information to persist to the database.
const name = "VPA Classifier"
const description = "Supports multiple domains."
const encoder = 'universal sentence encoder'
const modelInfo = {
name,
description,
accuracy: '0.88',
modelType: 'Classifier64',
encoder,
}
const toFloat = 1E9
// Low default times for testing.
const refundTimeS = 15
const anyAddressClaimWaitTimeS = 20
const ownerClaimWaitTimeS = 20
// Weight for deposit cost in wei.
const costWeight = 1E15
// Model
const classifications = []
const centroids = []
const dataCounts = []
let model = fs.readFileSync('./src/ml-models/vpa/vpa-classifier-centroids.json', 'utf8')
model = JSON.parse(model)
for (let [classification, centroidInfo] of Object.entries(model.intents)) {
classifications.push(classification)
centroids.push(convertData(centroidInfo.centroid, web3, toFloat))
dataCounts.push(centroidInfo.dataCount)
}
console.log(`Deploying DataHandler.`)
return deployer.deploy(DataHandler64).then(dataHandler => {
console.log(` Deployed data handler to ${dataHandler.address}.`)
return deployer.deploy(Stakeable64,
refundTimeS,
ownerClaimWaitTimeS,
anyAddressClaimWaitTimeS,
costWeight
).then(incentiveMechanism => {
console.log(` Deployed incentive mechanism to ${incentiveMechanism.address}.`)
return deployer.deploy(NearestCentroidClassifier,
[classifications[0]], [centroids[0]], [dataCounts[0]],
        // Block gas limit used by most miners as of May 2019.
{ gas: 8.8E6 }
).then(classifier => {
// Add classes separately to avoid hitting gasLimit.
const addClassPromises = []
for (let i = 1; i < classifications.length; ++i) {
addClassPromises.push(classifier.addClass(
centroids[i], classifications[i], dataCounts[i]
))
}
console.log(`Deploying main entry point.`)
return deployer.deploy(CollaborativeTrainer64,
name, description, encoder,
dataHandler.address,
incentiveMechanism.address,
classifier.address
).then(instance => {
console.log(` Deployed VPA collaborative classifier to ${instance.address}.`)
return Promise.all([
dataHandler.transferOwnership(instance.address),
incentiveMechanism.transferOwnership(instance.address),
classifier.transferOwnership(instance.address),
].concat(addClassPromises)).then(() => {
modelInfo.address = instance.address
return axios.post(`${pjson.proxy}api/models`, modelInfo).then(() => {
console.log("Added model to the database.")
}).catch(err => {
if (process.env.CI !== "true" && process.env.REACT_APP_ENABLE_SERVICE_DATA_STORE === 'true') {
console.error("Error adding model to the database.")
console.error(err)
throw err
}
})
})
})
})
})
})
}
|
0xDeCA10B/demo/client/migrations/3_deploy_VPA_classifier.js/0
|
{
"file_path": "0xDeCA10B/demo/client/migrations/3_deploy_VPA_classifier.js",
"repo_id": "0xDeCA10B",
"token_count": 1250
}
| 0 |
pragma solidity ^0.6;
import "../../lib/SafeMath.sol";
import {Classifier64} from "./classification/Classifier.sol";
import {DataHandler64} from "./data/DataHandler.sol";
import {IncentiveMechanism64} from "./incentive/IncentiveMechanism.sol";
/**
* The main interface to sharing updatable models on the blockchain.
*/
contract CollaborativeTrainer {
string public name;
string public description;
string public encoder;
constructor (
string memory _name,
string memory _description,
string memory _encoder
) public {
name = _name;
description = _description;
encoder = _encoder;
}
}
/**
* The main interface to Decentralized & Collaborative AI on the Blockchain.
* For classifiers that use data with 64-bit values.
*/
// Using IoC (inversion of control) even though it's more expensive, because it's easier to work with.
// Those wishing to optimize can change the code to use inheritance and do other optimizations before deploying.
// We can also make a script that generates the required files based on several parameters.
contract CollaborativeTrainer64 is CollaborativeTrainer {
using SafeMath for uint256;
/** Data has been added. */
event AddData(
/**
* The data stored.
*/
int64[] d,
/**
* The classification for the data.
*/
uint64 c,
/**
* The time it was added.
*/
uint t,
/**
* The address that added the data.
*/
address indexed sender,
uint cost
);
DataHandler64 public dataHandler;
IncentiveMechanism64 public incentiveMechanism;
Classifier64 public classifier;
constructor(
string memory _name,
string memory _description,
string memory _encoder,
DataHandler64 _dataHandler,
IncentiveMechanism64 _incentiveMechanism,
Classifier64 _classifier
) CollaborativeTrainer(_name, _description, _encoder) public {
dataHandler = _dataHandler;
incentiveMechanism = _incentiveMechanism;
classifier = _classifier;
}
/**
* Update the model.
*
* @param data A single sample of training data for the model.
* @param classification The label for `data`.
*/
function addData(int64[] memory data, uint64 classification) public payable {
uint cost = incentiveMechanism.handleAddData(msg.value, data, classification);
uint time = dataHandler.handleAddData(msg.sender, cost, data, classification);
classifier.update(data, classification);
// Safe subtraction because cost <= msg.value.
uint remaining = msg.value - cost;
if (remaining > 0) {
msg.sender.transfer(remaining);
}
// Emit here so that it's easier to catch.
emit AddData(data, classification, time, msg.sender, cost);
}
/**
* Attempt a refund for the deposit given with submitted data.
* Must be called by the address that originally submitted the data.
*
* @param data The data for which to attempt a refund.
* @param classification The label originally submitted with `data`.
* @param addedTime The time when the data was added.
*/
function refund(int64[] memory data, uint64 classification, uint addedTime) public {
(uint claimableAmount, bool claimedBySubmitter, uint numClaims) = dataHandler.handleRefund(
msg.sender, data, classification, addedTime);
uint64 prediction = classifier.predict(data);
uint refundAmount = incentiveMechanism.handleRefund(msg.sender,
data, classification, addedTime,
claimableAmount, claimedBySubmitter,
prediction,
numClaims);
msg.sender.transfer(refundAmount);
}
/**
* Report bad or old data and attempt to get a reward.
*
* @param data The data to report.
* @param classification The label originally submitted with `data`.
* @param addedTime The time when the data was added.
* @param originalAuthor The address that originally added the data.
*/
function report(int64[] memory data, uint64 classification, uint addedTime, address originalAuthor)
public {
(uint initialDeposit, uint claimableAmount, bool claimedByReporter, uint numClaims, bytes32 dataKey) = dataHandler.handleReport(
msg.sender, data, classification, addedTime, originalAuthor);
uint64 prediction = classifier.predict(data);
uint rewardAmount = incentiveMechanism.handleReport(msg.sender,
data, classification, addedTime, originalAuthor,
initialDeposit, claimableAmount, claimedByReporter,
prediction,
numClaims);
dataHandler.updateClaimableAmount(dataKey, rewardAmount);
msg.sender.transfer(rewardAmount);
}
}
|
0xDeCA10B/demo/client/src/contracts/CollaborativeTrainer.sol/0
|
{
"file_path": "0xDeCA10B/demo/client/src/contracts/CollaborativeTrainer.sol",
"repo_id": "0xDeCA10B",
"token_count": 1770
}
| 1 |
const _toFloat = 1E9
exports.convertNum = function (num, web3, toFloat = _toFloat) {
return web3.utils.toBN(Math.round(num * toFloat))
}
exports.convertData = function (data, web3, toFloat = _toFloat) {
return data.map(num => exports.convertNum(num, web3, toFloat))
}
|
0xDeCA10B/demo/client/src/float-utils-node.js/0
|
{
"file_path": "0xDeCA10B/demo/client/src/float-utils-node.js",
"repo_id": "0xDeCA10B",
"token_count": 104
}
| 2 |
import axios from 'axios'
import { DataStore, DataStoreHealthStatus, ModelInformation, ModelsResponse, OriginalData, RemoveResponse } from './data-store'
export class ServiceDataStore implements DataStore {
url = ''
constructor(url?: string) {
if (url !== undefined) {
this.url = url
} else if (process.env.NODE_ENV === 'production' && process.env.BACK_END_URL) {
this.url = process.env.BACK_END_URL
}
}
async health(): Promise<DataStoreHealthStatus> {
if (process.env.REACT_APP_ENABLE_SERVICE_DATA_STORE === undefined
|| process.env.REACT_APP_ENABLE_SERVICE_DATA_STORE.toLocaleLowerCase() === 'true') {
return axios.get(this.url + '/api/health', { timeout: 1000 }).then(response => {
const { healthy } = response.data
return new DataStoreHealthStatus(healthy, { url: this.url })
}).catch(err => {
return new DataStoreHealthStatus(false, { err })
})
} else {
return new DataStoreHealthStatus(false, { reason: "Disabled" })
}
}
saveOriginalData(transactionHash: string, originalData: OriginalData): Promise<any> {
return axios.post(this.url + '/api/data', {
transactionHash,
originalData,
})
}
getOriginalData(transactionHash: string): Promise<OriginalData> {
return axios.get(`${this.url}/api/data/${transactionHash}`).then(response => {
const { originalData } = response.data
const { text } = originalData
return new OriginalData(text)
})
}
saveModelInformation(modelInformation: ModelInformation): Promise<any> {
return axios.post(this.url + '/api/models', modelInformation)
}
getModels(afterAddress?: string, limit?: number): Promise<ModelsResponse> {
const params = []
if (afterAddress != null) {
params.push(`afterAddress=${afterAddress}`)
}
if (limit != null) {
params.push(`limit=${limit}`)
}
const url = `${this.url}/api/models?${params.join('&')}`
return axios.get(url).then(response => {
const models = response.data.models.map((model: any) => new ModelInformation(model))
const { remaining } = response.data
return new ModelsResponse(models, remaining)
})
}
getModel(modelId?: number, address?: string): Promise<ModelInformation> {
const params = []
if (modelId != null) {
params.push(`modelId=${modelId}`)
}
if (address != null) {
params.push(`address=${address}`)
}
return axios.get(`${this.url}/api/model?${params.join('&')}`).then(response => {
const { model } = response.data
if (address !== null && address !== undefined && model.address !== address) {
throw new Error("Could not find a model with the matching address.")
}
return new ModelInformation(model)
})
}
removeModel(_modelInformation: ModelInformation): Promise<RemoveResponse> {
// Requires permission validation from the server.
throw new Error("Not implemented")
}
}
|
0xDeCA10B/demo/client/src/storage/service-data-store.ts/0
|
{
"file_path": "0xDeCA10B/demo/client/src/storage/service-data-store.ts",
"repo_id": "0xDeCA10B",
"token_count": 983
}
| 3 |
// Uses the chai-style assert API (assert, assert.closeTo, assert.equal).
// In the original Truffle test environment `assert` is typically injected as a
// global; requiring it explicitly here keeps the module self-contained.
const { assert } = require('chai')
exports.assertCloseToNumbers = function (actual, expected, delta,
web3,
messagePrefix) {
messagePrefix = messagePrefix ? messagePrefix + ": " : ""
if (web3.utils.isBN(actual)) {
if (web3.utils.isBN(expected)) {
const message = `${messagePrefix}actual (BN): ${actual} (${typeof actual})\nexpected (BN): ${expected} (${typeof expected})\ndelta: ${delta}`
assert(actual.sub(expected).abs().lte(web3.utils.toBN(delta)), message)
} else {
const expectedBN = web3.utils.toBN(expected)
const message = `${messagePrefix}actual (BN): ${actual} (${typeof actual})\nexpected: ${expected} (${typeof expected}) => BN: ${expectedBN}\ndelta: ${delta}`
assert(actual.sub(expectedBN).abs().lte(web3.utils.toBN(delta)), message)
}
} else if (web3.utils.isBN(expected)) {
const actualBN = web3.utils.toBN(actual)
const message = `${messagePrefix}actual: ${actual} (${typeof actual}) => BN: ${actualBN}\nexpected (BN): ${expected} (${typeof expected})\ndelta: ${delta}`
assert(actualBN.sub(expected).abs().lte(web3.utils.toBN(delta)), message)
} else {
if (typeof actual === 'string') {
actual = parseInt(actual)
}
assert.closeTo(actual, expected, delta, messagePrefix)
}
}
exports.assertEqualNumbers = function (actual, expected,
web3,
messagePrefix) {
messagePrefix = messagePrefix ? messagePrefix + ": " : ""
if (web3.utils.isBN(actual)) {
if (web3.utils.isBN(expected)) {
const message = `${messagePrefix}actual (BN): ${actual} (${typeof actual})\nexpected: ${expected} (${typeof expected})`
assert(actual.eq(expected), message)
} else {
const expectedBN = web3.utils.toBN(expected)
const message = `${messagePrefix}actual (BN): ${actual} (${typeof actual})\nexpected: ${expected} (${typeof expected}) => BN: ${expectedBN}`
assert(actual.eq(expectedBN), message)
}
} else if (web3.utils.isBN(expected)) {
const actualBN = web3.utils.toBN(actual)
const message = `${messagePrefix}actual: ${actual} (${typeof actual}) => BN: ${actualBN}\nexpected (BN): ${expected} (${typeof expected})`
assert(actualBN.eq(expected), message)
} else {
if (typeof actual === 'string') {
actual = parseInt(actual)
}
assert.equal(actual, expected, messagePrefix)
}
}
|
0xDeCA10B/demo/client/test/float-test-utils-node.js/0
|
{
"file_path": "0xDeCA10B/demo/client/test/float-test-utils-node.js",
"repo_id": "0xDeCA10B",
"token_count": 811
}
| 4 |
from abc import ABC, abstractmethod
from injector import Module, inject, singleton
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.data.data_handler import DataHandler
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.objects import Msg, SmartContract
class CollaborativeTrainer(ABC, SmartContract):
"""
Base class for the main interface to create simulations of a training model in a smart contract.
"""
def __init__(self,
balances: Balances,
data_handler: DataHandler,
incentive_mechanism: IncentiveMechanism,
model: Classifier,
):
super().__init__()
self.data_handler = data_handler
self.im = incentive_mechanism
self.model = model
self._balances = balances
@abstractmethod
def add_data(self, msg: Msg, data, label):
"""
Update the model with one data sample.
:param msg: Standard message to pass to any method of a smart contract.
:param data: A single sample of training data for the model.
:param label: The label for `data`.
"""
pass
@abstractmethod
def predict(self, msg: Msg, data):
"""
:param msg: Standard message to pass to any method of a smart contract.
:param data:
:return: The predicted classification/label for `data`.
"""
pass
@abstractmethod
def refund(self, msg: Msg, data, classification, added_time: int):
"""
Attempt a refund for the deposit given with submitted data.
Must be called by the address that originally submitted the data.
:param msg: Standard message to pass to any method of a smart contract.
:param data: The data for which to attempt a refund.
:param classification: The label originally submitted with `data`.
:param added_time :The time when the data was added.
"""
pass
@abstractmethod
def report(self, msg: Msg, data, classification, added_time: int, original_author: str):
"""
Report bad or old data and attempt to get a reward.
:param msg: Standard message to pass to any method of a smart contract.
:param data: The data to report.
:param classification: The label originally submitted with `data`.
:param added_time :The time when the data was added.
:param original_author: The address that originally added the data.
"""
pass
@singleton
class DefaultCollaborativeTrainer(CollaborativeTrainer):
"""
Default implementation of the main interface.
"""
@inject
def __init__(self,
balances: Balances,
data_handler: DataHandler,
incentive_mechanism: IncentiveMechanism,
model: Classifier,
):
kwargs = dict(locals())
del kwargs['self']
del kwargs['__class__']
super().__init__(**kwargs)
self.data_handler.owner = self.address
self.im.owner = self.address
self.model.owner = self.address
def predict(self, msg: Msg, data):
self.im.distribute_payment_for_prediction(msg.sender, msg.value)
return self.model.predict(data)
# FUNCTIONS FOR HANDLING DATA
def add_data(self, msg: Msg, data, classification):
# Consider making sure duplicate data isn't added until it's been claimed.
cost, update_model = self.im.handle_add_data(msg.sender, msg.value, data, classification)
self.data_handler.handle_add_data(msg.sender, cost, data, classification)
if update_model:
self.model.update(data, classification)
# In Solidity the message's value gets taken automatically.
# Here we do this at the end in case something failed while trying to add data.
self._balances.send(msg.sender, self.address, cost)
def refund(self, msg: Msg, data, classification, added_time: int):
(claimable_amount, claimed_by_submitter, stored_data) = \
self.data_handler.handle_refund(msg.sender, data, classification, added_time)
prediction = self.model.predict(data)
refund_amount = self.im.handle_refund(msg.sender, stored_data,
claimable_amount, claimed_by_submitter, prediction)
self._balances.send(self.address, msg.sender, refund_amount)
# The Solidity version doesn't need this extra function call because if there is an error earlier,
# then the changes automatically get reverted.
self.data_handler.update_claimable_amount(msg.sender, stored_data, refund_amount)
def report(self, msg: Msg, data, classification, added_time: int, original_author: str):
claimed_by_reporter, stored_data = \
self.data_handler.handle_report(msg.sender, data, classification, added_time, original_author)
prediction = lambda: self.model.predict(data)
reward_amount = self.im.handle_report(msg.sender, stored_data, claimed_by_reporter, prediction)
self.data_handler.update_claimable_amount(msg.sender, stored_data, reward_amount)
self._balances.send(self.address, msg.sender, reward_amount)
class DefaultCollaborativeTrainerModule(Module):
def configure(self, binder):
binder.bind(CollaborativeTrainer, to=DefaultCollaborativeTrainer)
|
0xDeCA10B/simulation/decai/simulation/contract/collab_trainer.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/contract/collab_trainer.py",
"repo_id": "0xDeCA10B",
"token_count": 2134
}
| 5 |
import mmh3
from injector import Module
from decai.simulation.data.featuremapping.hashing.token_hash import TokenHash
class MurmurHash3(TokenHash):
def hash(self, text: str) -> int:
# Made to be equivalent to the JavaScript demo code.
return mmh3.hash(text, signed=False)
class MurmurHash3Module(Module):
def configure(self, binder):
binder.bind(TokenHash, to=MurmurHash3)
|
0xDeCA10B/simulation/decai/simulation/data/featuremapping/hashing/murmurhash3.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/data/featuremapping/hashing/murmurhash3.py",
"repo_id": "0xDeCA10B",
"token_count": 148
}
| 6 |
import os
from dataclasses import dataclass, field
from logging import Logger
from typing import List
import numpy as np
import pandas as pd
from injector import inject, Module
from sklearn.utils import shuffle
from decai.simulation.data.data_loader import DataLoader
@inject
@dataclass
class TitanicDataLoader(DataLoader):
"""
Load data for Titanic survivors.
https://www.kaggle.com/c/titanic/data
"""
_logger: Logger
_seed: int = field(default=231, init=False)
_train_split: float = field(default=0.7, init=False)
def classifications(self) -> List[str]:
return ["DIED", "SURVIVED"]
def _get_features(self, data: pd.DataFrame):
"""
Map the data to numbers.
Also uses some ideas from https://triangleinequality.wordpress.com/2013/09/08/basic-feature-engineering-with-the-titanic-data/
:param data: The data without labels.
:return: The data mapped to numbers.
"""
data.drop(columns=['PassengerId', 'Ticket'], inplace=True)
# , 'Name', 'Ticket', 'Cabin', 'Embarked'
title_tuples = (
(' Mr. ', ' Sir. ', ' Don. ', ' Major. ', ' Capt. ', ' Jonkheer. ', ' Rev. ', ' Col. '),
(' Mrs. ', ' Countess. ', ' Mme. ', ' Lady. '),
(' Miss. ', ' Mlle. ', ' Ms. '),
(' Master. ',),
(' Dr. ',),
)
title_to_num = {
' Mr. ': 0,
' Mrs. ': 1,
' Miss. ': 2,
' Master. ': 3,
}
def _get_title(row):
result = None
name = row['Name']
for index, titles in enumerate(title_tuples):
for t in titles:
if t in name:
result = titles[0]
if result == ' Dr. ':
if row['Sex'] == 'male':
result = ' Mr. '
else:
result = ' Mrs. '
assert result is not None, f"No title found in {row}."
result = title_to_num[result]
return result
def _get_cabin(row):
result = -1
cabin = row['Cabin']
if isinstance(cabin, str):
for c in 'ABCDEFGT':
if c in cabin:
result = ord(c) - ord('A')
break
return result
result = []
for index, row in data.iterrows():
if row['Sex'] == 'male':
sex = 0
else:
sex = 1
family_size = row['SibSp'] + row['Parch']
datum = [
row['Pclass'],
sex,
_get_title(row),
family_size,
# These features did not help:
# _get_cabin(row),
# row['Age'],
# row['Parch'],
# row['SibSp'],
# row['Fare'],
# row['Fare'] / (family_size + 1),
]
result.append(datum)
return result
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
self._logger.info("Loading data.")
        data_folder_path = os.path.abspath(os.path.join(__file__, '../../../../training_data/titanic'))
if not os.path.exists(data_folder_path):
# TODO Attempt to download the data.
raise Exception(f"Could not find Titanic dataset at \"{data_folder_path}\"."
"\nYou must download it from https://www.kaggle.com/c/titanic/data.")
x_train = pd.read_csv(os.path.join(data_folder_path, 'train.csv'))
y_train = np.array(x_train['Survived'], np.int8)
x_train.drop(columns=['Survived'], inplace=True)
x_train = self._get_features(x_train)
x_train = np.array(x_train)
x_train, y_train = shuffle(x_train, y_train, random_state=self._seed)
train_split = int(len(x_train) * self._train_split)
x_test, y_test = x_train[train_split:], y_train[train_split:]
x_train, y_train = x_train[:train_split], y_train[:train_split]
if train_size is not None:
x_train, y_train = x_train[:train_size], y_train[:train_size]
if test_size is not None:
x_test, y_test = x_test[:test_size], y_test[:test_size]
self._logger.info("Done loading data.")
return (x_train, y_train), (x_test, y_test)
@dataclass
class TitanicDataModule(Module):
def configure(self, binder):
binder.bind(DataLoader, to=TitanicDataLoader)
|
0xDeCA10B/simulation/decai/simulation/data/titanic_data_loader.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/data/titanic_data_loader.py",
"repo_id": "0xDeCA10B",
"token_count": 2322
}
| 7 |
# Lab 9 - 强化学习系统练习(RLlib的实践与应用)
## 实验目的
1. 通过快速上手RLlib
2. 理解分布式强化学习系统的各模块的构成
3. 理解强化学习的分布式算法及其性能
## 实验环境
* Linux集群(至少两台Linux机器)
* Python==3.7.6
* ray
* rllib
* PyTorch==1.5.0
## 实验原理
RLlib是由UC Berkeley发起的一个开源的强化学习(Reinforcement Learning,简称RL)框架, 提供了高度可扩展的API, 可以让用户在其框架上实现不同的RL算法,或者将已有的算法跑在分布式平台上。RLlib既可以支持多种多样不同的RL算法(例如DQN, policy gradient, SAC, DDPG等),也支持连接各种不同的环境(例如gym, MuJoCo等), 同时也支持把不同的分布式RL算法(例如apex-dqn,IMPALA等)跑在集群上。RLlib支持PyTorch和TensorFlow/TensorFlow Eager等不同的深度学习框架。

**注:** 上图出自https://docs.ray.io/en/latest/rllib.html
本实验通过不同的配置, 理解不同的分布式强化学习算法在不同并行条件下的不同环境的表现。
## 实验内容
### 实验流程图

### 具体步骤
1. 安装环境依赖包 `ray` 和 `rllib` ,并测试是否安装成功。
```
pip install -U ray
pip install ray[rllib]
```
2. 配置分布式RLlib环境, 并检测分布式环境是否成功
1. 参考如下命令,配置主节点(master节点)
```
ray start --head --redis-port=6666
```
注:
a. 该port为ray预留的可以被其他机器访问的端口
b. 可以通过ssh 访问机器,或直接登录到机器进行配置
2. 参考如下命令,配置工作节点(worker节点)
```
ray start --address=<master_address>
```
**注:** master_address指的是主节点的IP地址
3. 配置不同的脚本,测试不同算法对应不同并行条件/不同环境下的收敛速度。至少挑选一种分布式算法,并测试其worker并行数目为4,8,16的情况下在至少两个Atari环境下的收敛情况,提交配置文件和对应的启动脚本文件。
1. 在算法为apex-dqn,并行条件为worker数目为2,4,16的情况下,测试在pong的环境下的收敛情况。
    2. 在算法为apex-ddpg,并行条件为worker数目为2,4,16的情况下,测试在pendulum的环境下的收敛情况。
3. 在算法为impala,并行条件为worker数目为2,4,16的情况下,测试在cartpole的环境下的收敛情况。
4. 收敛结果的分析,包括不同并行条件/环境下的不同算法的收敛的time和reward。总结成表格,并画出对应的学习曲线。
## 实验报告
### 实验环境
||||
|--------|--------------|--------------------------|
|硬件环境|CPU(vCPU数目)| |
||GPU(型号,数目)||
|软件环境|OS版本||
||深度学习框架<br>python包名称及版本||
||CUDA版本||
||||
### 实验结果
1. 提交不同算法、环境和并行条件(worker数目)下,配置文件和启动脚本。
<br />
<br />
<br />
<br />
<br />
2. 收敛结果的分析
1. 提交不同config的运行输出文件
<br />
<br />
<br />
<br />
<br />
2. 填写不同的算法在不同并行条件/环境下,收敛所需要的time和reward表格
||||||
|---|---|---|---|---|
| 算法 | 环境 | 并行条件 | Time | Reward |
| apex-dqn | pong | 2 |||
||| 4 |||
||| 16 |||
| apex-ddpg | pendulum | 2 |||
||| 4 |||
||| 16 |||
| IMPALA | cartpole | 2 |||
||| 4 |||
||| 16 |||
||||||
3. 根据b的表格生成不同的学习曲线
<br />
<br />
<br />
<br />
<br />
## 参考代码
### 安装依赖包
```
pip install -U ray
pip install ray[rllib]
```
### 检测依赖包是否安装成功
1. 测试ray
```
git clone https://github.com/ray-project/ray.git
cd ray
python -m pytest -v python/ray/tests/test_mini.py
```
2. 测试rllib
```
rllib train --run=PPO --env=CartPole-v0
```
### 检测分布式的rllib的环境是否配置成功
1. 配置主节点,ssh到主节点进行配置:
```
ray start --head --redis-port=6666
```
该`port`为 ray 预留的可以被其他机器访问的端口
2. 配置工作节点,登录到每一台其他节点上进行配置:
```
ray start --address=<master_address>
```
`master_address` 指的是主节点的IP地址
### 参考的不同分布式算法对应不同环境/并行条件的配置
代码位置:`Lab9/config`
参考命令:
```
cd Lab9
rllib train -f config/xxx-xxx.yaml
```
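除了通过 `rllib train -f` 加载 YAML 配置,也可以直接用 Python 调用 Ray Tune 启动分布式训练。下面是一个示意性的代码草图(基于较早版本的 Ray/RLlib API;其中环境名 `PongNoFrameskip-v4`、worker 数目与停止条件均为假设的示例参数,请以所用版本的官方文档为准):
```
import ray
from ray import tune
# 连接到已通过 `ray start` 启动的集群;单机调试时可改为 ray.init()
ray.init(address="auto")
tune.run(
    "APEX",                              # Ape-X DQN 在 RLlib 中的注册名
    config={
        "env": "PongNoFrameskip-v4",     # 示例环境,可替换为其他 Atari 环境
        "num_workers": 4,                # 并行 worker 数目,可改为 8、16 做对比
    },
    stop={"timesteps_total": 1_000_000}, # 示例停止条件
)
```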
## 参考资料
* Ray GitHub仓库:https://github.com/ray-project/ray
* Ray和RLlib的官方文档:https://docs.ray.io/en/latest/index.html
* RLlib编写config参考链接: https://docs.ray.io/en/master/rllib-training.html
|
AI-System/Labs/AdvancedLabs/Lab9/README.md/0
|
{
"file_path": "AI-System/Labs/AdvancedLabs/Lab9/README.md",
"repo_id": "AI-System",
"token_count": 3417
}
| 8 |
from setuptools import setup, Extension
from torch.utils import cpp_extension
setup(name='mylinear_cpp',
ext_modules=[cpp_extension.CppExtension('mylinear_cpp', ['mylinear.cpp'])],
cmdclass={'build_ext': cpp_extension.BuildExtension})
|
AI-System/Labs/BasicLabs/Lab2/mylinear_cpp_extension/setup.py/0
|
{
"file_path": "AI-System/Labs/BasicLabs/Lab2/mylinear_cpp_extension/setup.py",
"repo_id": "AI-System",
"token_count": 87
}
| 9 |
# MIT License
# Copyright (c) Microsoft Corporation.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
FROM ubuntu:18.04
ENV PYTHONUNBUFFERED TRUE
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
fakeroot \
ca-certificates \
dpkg-dev \
g++ \
python3-dev \
openjdk-11-jdk \
curl \
vim \
&& rm -rf /var/lib/apt/lists/* \
&& cd /tmp \
&& curl -O https://bootstrap.pypa.io/get-pip.py \
&& python3 get-pip.py
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1
RUN update-alternatives --install /usr/local/bin/pip pip /usr/local/bin/pip3 1
RUN pip install --no-cache-dir psutil
RUN pip install --no-cache-dir torch
RUN pip install --no-cache-dir torchvision
RUN pip install --no-cache-dir torchtext
RUN pip install --no-cache-dir captum
ADD serve serve
RUN pip install ../serve/
RUN useradd -m model-server \
&& mkdir -p /home/model-server/tmp
COPY dockerd-entrypoint.sh /usr/local/bin/dockerd-entrypoint.sh
RUN chmod +x /usr/local/bin/dockerd-entrypoint.sh \
&& chown -R model-server /home/model-server
COPY config.properties /home/model-server/config.properties
RUN mkdir /home/model-server/model-store && chown -R model-server /home/model-server/model-store
EXPOSE 8080 8081
WORKDIR /home/model-server
ENV TEMP=/home/model-server/tmp
ENTRYPOINT ["/usr/local/bin/dockerd-entrypoint.sh"]
CMD ["serve"]
|
AI-System/Labs/BasicLabs/Lab5/Dockerfile.infer.cpu/0
|
{
"file_path": "AI-System/Labs/BasicLabs/Lab5/Dockerfile.infer.cpu",
"repo_id": "AI-System",
"token_count": 818
}
| 10 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 人工智能系统 教材
本教材的中文名称设定为 **人工智能系统**,主要讲解支持人工智能的计算机系统设计,对应的中英文课程名称为 **人工智能系统** **System for AI**。本课程中将交替使用以下词汇:**人工智能系统**,**AI-System** 和 **System for AI**。
本教材为[微软人工智能教育与共建社区](https://github.com/microsoft/ai-edu)中规划的人工智能相关教材之一,在[A-基础教程](https://github.com/microsoft/ai-edu/tree/master/A-%E5%9F%BA%E7%A1%80%E6%95%99%E7%A8%8B)模块下,课程编号和名称为 *A6-人工智能系统*。
欢迎访问[微软人工智能教育与共建社区](https://github.com/microsoft/ai-edu)的[A-基础教程](https://github.com/microsoft/ai-edu/tree/master/A-%E5%9F%BA%E7%A1%80%E6%95%99%E7%A8%8B)模块访问更多相关内容。
- [人工智能系统 教材](#人工智能系统-教材)
- [写在前面](#写在前面)
- [如何浏览本系列教程](#如何浏览本系列教程)
- [人工智能系统教材设立背景](#人工智能系统教材设立背景)
- [人工智能系统教材设立目的](#人工智能系统教材设立目的)
- [人工智能系统教材的设计与特点](#人工智能系统教材的设计与特点)
- [人工智能系统教材目录与大纲](#人工智能系统教材目录与大纲)
- [附录](#附录)
- [反馈与贡献](#反馈与贡献)
- [写在后面](#写在后面)
## 写在前面
如果您觉得教材对您有帮助,请不要忘记给本站加星(点击网页顶部的Star标签),星越多说明本教材越对大家有帮助,我们就越会努力完善本站。
## 如何浏览本系列教程
1. 如果使用浏览器在线观看的话,可以使用 Edge 或 Chrome 浏览器,[加这个Math展示控件](https://chrome.google.com/webstore/detail/mathjax-plugin-for-github/ioemnmodlmafdkllaclgeombjnmnbima)
2. 也可以clone全部内容到本地,然后用VSCode浏览,但VSCode中需要安装能读取Markdown格式的扩展,比如Markdown AllInOne插件。
3. 本教程提供数据包:在“SourceCode”文件夹中下载“DataDownload.py”并运行,输入本地目录后即可开始下载数据包,并自动解压至本地。
## 人工智能系统教材设立背景
近年来人工智能特别是深度学习技术得到了飞速发展,这背后离不开计算机硬件和软件系统的不断进步。在可见的未来,人工智能技术的发展仍将依赖于计算机系统和人工智能相结合的共同创新模式。需要注意的是,计算机系统现在正以更大的规模和更高的复杂性来赋能于人工智能,这背后不仅需要更多的系统上的创新,更需要系统性的思维和方法论。与此同时,人工智能也反过来为设计复杂系统提供支持。
我们注意到,现在的大部分人工智能相关的教材,特别是深度学习和机器学习相关课程主要集中在相关理论、算法或者应用,与系统相关的教材并不多见。我们希望人工智能系统教材能让人工智能相关教育变得更加体系化和普适化,以共同促进人工智能与系统交叉人才的培养。
## 人工智能系统教材设立目的
本教材主要为本科生高年级和研究生设计,帮助学生:
1. 完整的了解支持深度学习的计算机系统架构,并通过实际的问题,来学习深度学习完整生命周期下的系统设计。
2. 介绍前沿的系统和人工智能相结合的研究工作,包括AI for Systems and Systems for AI,以帮助高年级的本科生和研究生更好的寻找和定义有意义的研究问题。
3. 从系统研究的角度出发设计实验课程。通过操作和应用主流和最新的框架、平台和工具来鼓励学生动手实现和优化系统模块,以提高解决实际问题的能力,而不仅仅是了解工具使用。
**先修课程与教材:** C/C++/Python, 计算机体系结构,算法导论,操作系统,编译原理,计算机网络
## 人工智能系统教材的设计与特点
教材主要包括以下特点:
1. 体系化: 本书内容设计围绕深度学习系统全栈进行阐述,同时涵盖深度学习系统的设计原则,工作综述和方法学。
2. 深入浅出: 以易于理解的文字和内容呈现方式,简化的实例,抽象出关键系统问题。同时兼顾当前前沿的研究工作,有一定深度。
3. 启发式思考:系统问题的抽象与定义优先于解决方法与优化的介绍。兼顾人工智能系统领域的经典问题和代表性解决方法。启发读者思考,展开新工作。
4. 兼收并蓄:本教材的设计不仅会借助微软和微软亚洲研究院在人工智能和系统交叉领域的研究成果和经验,其中包括微软及微软亚洲研究院开发的系统,平台和工具,也会参考和介绍业界主流经典的人工智能系统工作。教材也鼓励其他学校和老师根据自己的需求添加和调整更多的高级内容,或者设计新的实验。
## 人工智能系统教材目录与大纲
- [1. 人工智能系统概述](第1章-人工智能系统概述/1-前言.md)
- [1.1 深度学习的历史,现状与发展](第1章-人工智能系统概述/1.1-深度学习的历史,现状与发展.md)
- 1.1.1 深度学习的广泛应用
- 1.1.2 深度学习方法
- 1.1.3 神经网络基本理论的奠定
- 1.1.4 深度学习算法,模型的现状和趋势
- [1.2 算法,框架,体系结构与算力的进步](第1章-人工智能系统概述/1.2-算法,框架,体系结构与算力的进步.md)
- 1.2.1 大数据和分布式系统
- 1.2.2 深度学习算法的进步
- 1.2.3 计算机体系结构和计算能力的进步
- 1.2.4 计算框架的进步
- [1.3 深度学习系统组成与生态](第1章-人工智能系统概述/1.3-深度学习系统组成与生态.md)
- 1.3.1 深度学习系统的设计目标
- 1.3.2 深度学习系统的大致组成
- 1.3.3 深度学习系统生态
- [1.4 深度学习样例背后的系统问题](第1章-人工智能系统概述/1.4-深度学习样例背后的系统问题.md)
- 1.4.1 一个深度学习样例与其中的系统问题
- 1.4.2 模型算子实现中的系统问题
- 1.4.3 框架执行深度学习模型的生命周期
- 1.4.4 更广泛的人工智能系统生态
- 1.4.5 深度学习框架及工具入门实验
- [1.5 影响深度学习系统设计的理论,原则与假设](第1章-人工智能系统概述/1.5-影响深度学习系统设计的理论,原则与假设.md)
- 1.5.1 抽象-层次化表示与解释
- 1.5.2 摩尔定律与算力发展趋势
- 1.5.3 局部性原则与内存层次结构
- 1.5.4 深度学习负载的线性代数计算与缺陷容忍特性
- 1.5.5 并行加速与阿姆达尔定律优化上限
- 1.5.6 冗余与可靠性
- [2. 深度神经网络基础](第2章-神经网络基础/2-前言.md)
- [2.1 神经网络基本概念](第2章-神经网络基础/2.1-神经网络基本概念.md)
- 2.1.1 神经元细胞的数学模型
- 2.1.2 神经网络的主要功能
- 2.1.3 为什么需要激活函数
- [2.2 神经网络的训练](第2章-神经网络基础/2.2-神经网络的训练.md)
- 2.2.1 基本训练流程
- 2.2.2 损失函数
- 2.2.3 梯度下降
- 2.2.4 反向传播
- [2.3 用神经网络解决回归问题](第2章-神经网络基础/2.3-解决回归问题.md)
- 2.3.1 提出问题
- 2.3.2 万能近似定理
- 2.3.3 定义神经网络结构
- 2.3.4 前向计算
- 2.3.5 反向传播
- [2.4 用神经网络解决分类问题](第2章-神经网络基础/2.4-解决分类问题.md)
- 2.4.1 提出问题
- 2.4.2 定义神经网络结构
- 2.4.3 前向计算
- 2.4.4 反向传播
- 2.4.5 运行结果
- [2.5 深度神经网络基础知识](第2章-神经网络基础/2.5-深度神经网络.md)
- 2.5.1 抽象与设计
- 2.5.2 权重矩阵初始化
- 2.5.3 批量归一化
- 2.5.4 过拟合
- [2.6 梯度下降的优化算法](第2章-神经网络基础/2.6-梯度下降的优化算法.md)
- 2.6.1 随机梯度下降,动量等算法
- [2.7 卷积神经网络基础知识](第2章-神经网络基础/2.7-卷积神经网络.md)
- 2.7.1 卷积神经网络的能力
- 2.7.2 卷积神经网络的典型结构
- 2.7.3 卷积核的作用
- 2.7.4 卷积后续的运算
- 2.7.5 卷积神经网络的特性
- 2.7.6 卷积类型
- 2.7.7 计算卷积核梯度的实例说明
- [2.8 循环神经网络基础知识](第2章-神经网络基础/2.8-循环神经网络.md)
- 2.8.1 循环神经网络的发展简史
- 2.8.2 循环神经网络的结构和典型用途
- 2.8.3 深度循环神经网络
- 2.8.4 双向循环神经网络
- [2.9 注意力机制与Transformer](第2章-神经网络基础/2.9-注意力机制和Transformer.md)
- 2.9.1 序列到序列模型
- 2.9.2 注意力机制
- 2.9.3 Transformer
<!-- - 2.1 神经网络模型
- 2.1.1 张量计算的抽象
- 2.1.2 模型结构的现状与趋势
- 2.2 深度学习系统基础
- 2.2.1 深度学习运算的表示
- 2.2.2 编译框架与中间表达
- 2.2.3 运行态和硬件
- 2.2.4 分布式执行
- 2.2.5 深度学习系统性能优化 -->
- [3. 深度学习框架基础](第3章-深度学习框架基础/3-前言.md)
- [3.1 基于数据流图的深度学习框架](第3章-深度学习框架基础/3.1-基于数据流图的深度学习框架.md)
- 3.1.1 深度学习框架发展概述
- 3.1.2 编程范式:声明式和命令式
- 3.1.3 自动微分基础
- 3.1.4 基于数据流图的深度学习框架
- 3.1.5 计算图调度与执行
- 3.1.6 小结与讨论
- [3.2 神经网络计算中的控制流](第3章-深度学习框架基础/3.2-神经网络计算中的控制流.md)
- 3.2.1 背景
- 3.2.2 静态图:向数据流图中添加控制流原语
- 3.2.3 动态图:复用宿主语言控制流语句
- 3.2.4 动态图转换为静态图
- 3.2.5 小结与讨论
- [4. 矩阵运算与计算机体系结构]()
- 4.1 深度学习常见模型结构
- 4.1.1 全连接层映射到矩阵运算
- 4.1.2 卷积层映射到矩阵运算
- 4.1.3 循环网络层映射到矩阵运算
- 4.1.4 注意力层映射到矩阵运算
- 4.2 计算机体系结构与矩阵运算
- 4.2.1 CPU体系结构
- 4.2.2 CPU实现高效计算矩阵乘
- 4.3 GPU体系结构与矩阵计算
- 4.3.1 GPU体系结构
- 4.3.2 GPU编程模型
- 4.3.3 GPU实现一个简单的计算
<!-- - 4.4 专用芯片与矩阵计算
- 4.4.1 张量处理单元
- 4.4.2 低精度量化
- 4.4.3 复杂指令集
- 4.4.4 矩阵处理单元
- 4.4.5 脉动阵列
- 4.4.6 代表性神经网络芯片 -->
- [5. 深度学习框架的编译与优化](第5章-深度学习框架的编译与优化/5-前言.md)
- [5.1 深度神经网络编译器](第5章-深度学习框架的编译与优化/5.1-深度神经网络编译器.md)
- 5.1.1 前端
- 5.1.2 后端
- 5.1.3 中间表达
- 5.1.4 优化过程
- [5.2 计算图优化](第5章-深度学习框架的编译与优化/5.2-计算图优化.md)
- 5.2.1 计算图与图优化
- 5.2.2 算术表达式化简
- 5.2.3 公共子表达式消除
- 5.2.4 常数传播
- 5.2.5 通用矩阵乘自动融合
- 5.2.6 算子融合
- 5.2.7 子图替换
- 5.2.8 随机子图替换
- [5.3 内存优化](第5章-深度学习框架的编译与优化/5.3-内存优化.md)
- 5.3.1 深度学习模型内存分析与预估
- 5.3.2 基于拓扑序的最小内存分配
- 5.3.3 根据整数线性规划求解最优内存放置
- 5.3.4 张量换入换出与张量重计算
- [5.4 内核优化](第5章-深度学习框架的编译与优化/5.4-内核优化.md)
- 5.4.1 算子表达式
- 5.4.2 算子表示与调度逻辑的分离
- 5.4.3 自动调度搜索与代码生成
- [5.5 算子调度优化](第5章-深度学习框架的编译与优化/5.5-算子调度优化.md)
- 5.5.1 操作符融合
- 5.5.2 编译时调度
<!-- - 5.6 前沿人工智能编程语言与编译器
- 5.6.1 语言与编程接口
- 5.6.2 中间表达
- 5.6.3 编译器
- 5.6.4 工具链: 代价模型,类型系统 -->
- [6. 分布式训练算法与系统](第6章-分布式训练算法与系统/6-前言.md)
- [6.1 分布式计算简介](第6章-分布式训练算法与系统/6.1-分布式计算简介.md)
- 6.1.1 串行到并行计算
- 6.1.2 并行计算加速定律
- [6.2 分布式深度学习的意义](第6章-分布式训练算法与系统/6.2-分布式深度学习的意义.md)
- 6.2.1 算子内并行
- 6.2.2 算子间并行
- [6.3 分布式训练算法分类](第6章-分布式训练算法与系统/6.3-分布式训练算法分类.md)
- 6.3.1 数据并行
- 6.3.2 模型并行
- 6.3.3 流水并行
- [6.4 深度学习并行训练同步方式](第6章-分布式训练算法与系统/6.4-深度学习并行训练同步方式.md)
- 6.4.1 同步并行
- 6.4.2 异步并行
- 6.4.3 半同步并行
- [6.5 分布式训练系统简介](第6章-分布式训练算法与系统/6.5-分布式训练系统简介.md)
- 6.5.1 用户接口
- 6.5.2 单节点执行单元
- 6.5.3 通信与协调
- [6.6 分布式训练的通信协调](第6章-分布式训练算法与系统/6.6-分布式训练的通信协调.md)
- 6.6.1 通信协调的硬件
- 6.6.2 通信协调的软件
- [7. 异构计算集群调度与资源管理系统](第7章-异构计算集群调度与资源管理系统/7-前言.md)
- [7.1 异构计算集群管理系统简介](第7章-异构计算集群调度与资源管理系统/7.1-异构计算集群管理系统简介.md)
- 7.1.1 多租环境运行的训练作业
- 7.1.2 作业生命周期
- 7.1.3 集群管理系统架构
- [7.2 训练作业,镜像与容器](第7章-异构计算集群调度与资源管理系统/7.2-训练作业,镜像与容器.md)
- 7.2.1 深度学习作业依赖与规格
- 7.2.2 环境依赖:镜像
- 7.2.3 运行时资源隔离:容器
- 7.2.4 从操作系统视角看GPU技术栈
- 7.2.5 人工智能作业开发体验
- [7.3 调度](第7章-异构计算集群调度与资源管理系统/7.3-调度.md)
- 7.3.1 调度问题优化目标
- 7.3.2 群调度
- 7.3.3 DRF调度
- 7.3.4 容量调度
- 7.3.5 虚拟集群
- 7.3.6 抢占式调度
- 7.3.7 深度学习调度算法实验与模拟研究
- [7.4 面向深度学习的集群管理系统](第7章-异构计算集群调度与资源管理系统/7.4-面向深度学习的集群管理系统.md)
- 7.4.1 深度学习工作负载的需求
- 7.4.2 异构硬件的多样性
- 7.4.3 深度学习平台的管理与运维需求
- 7.4.4 深度学习负载与异构硬件下的调度设计
- 7.4.5 代表性异构集群管理系统
- [7.5 存储](第7章-异构计算集群调度与资源管理系统/7.5-存储.md)
- 7.5.1 沿用大数据平台存储路线
- 7.5.2 沿用高性能计算平台存储路线
- 7.5.3 面向深度学习的存储
- [7.6 开发与运维](第7章-异构计算集群调度与资源管理系统/7.6-开发与运维.md)
- 7.6.1 平台功能模块与敏捷开发
- 7.6.2 监控体系构建
- 7.6.3 测试
- 7.6.4 平台部署与DevOps
- 7.6.5 平台运维
- 7.6.6 部署异构资源集群管理系统实验
- [8. 深度学习推理系统](第8章-深度学习推理系统/8-前言.md)
- [8.1 推理系统简介](第8章-深度学习推理系统/8.1-推理系统简介.md)
- 8.1.1 对比推理与训练过程
- 8.1.2 推理系统的优化目标与约束
- [8.2 模型推理的离线优化](第8章-深度学习推理系统/8.2-模型推理的离线优化.md)
- 8.2.1 通过程序理解推理优化动机
- 8.2.2 推理延迟
- 8.2.3 层间与张量融合
- 8.2.4 目标后端自动调优
- 8.2.5 模型压缩
- 8.2.6 低精度推理
- [8.3 部署](第8章-深度学习推理系统/8.3-部署.md)
- 8.3.1 可靠性和可扩展性
- 8.3.2 部署灵活性
- 8.3.3 模型转换与开放协议
- 8.3.4 移动端部署
- 8.3.5 推理系统简介
- 8.3.6 配置镜像与容器进行云上训练,推理与压测实验
- [8.4 推理系统的运行期优化](第8章-深度学习推理系统/8.4-推理系统的运行期优化.md)
- 8.4.1 推理系统的吞吐量
- 8.4.2 加速器模型并发执行
- 8.4.3 动态批尺寸
- 8.4.4 多模型装箱
- 8.4.5 内存分配策略调优
- 8.4.6 深度学习模型内存分配算法实验与模拟研究
- [8.5 开发、训练与部署的全生命周期管理-MLOps](第8章-深度学习推理系统/8.5-开发、训练与部署的全生命周期管理-MLOps.md)
- 8.5.1 MLOps的生命周期
- 8.5.2 MLOps工具链
- 8.5.3 线上发布与回滚策略
- 8.6.4 MLOps持续集成,持续交付(CI/CD)
- 8.6.5 MLOps工具与服务
- [8.6 推理专有芯片](第8章-深度学习推理系统/8.6-推理专有芯片.md)
- 8.6.1 推理芯片架构对比
- 8.6.2 神经网络推理芯片的动机和由来
- 8.6.3 数据中心推理芯片
- 8.6.4 边缘推理芯片
- 8.6.5 芯片模拟器
- [9. 自动化机器学习系统](第9章-自动化机器学习系统/9-%E5%89%8D%E8%A8%80.md)
- [9.1 自动化机器学习](第9章-自动化机器学习系统/9.1-自动化机器学习.md)
- 9.1.1 超参数搜索
- 9.1.2 神经网络结构搜索
- 9.1.3 自动特征工程
- [9.2 自动化机器学习系统与工具设计](第9章-自动化机器学习系统/9.2-自动化机器学习系统与工具设计.md)
- 9.2.1 自动化机器学习工具概览
- 9.2.2 探索式训练过程
- 9.2.3 自动化机器学习系统编程范式和系统优化前沿
- [10. 强化学习系统](第10章-强化学习系统/10-前言.md)
- [10.1 强化学习基本概念](第10章-强化学习系统/10.1-强化学习的基本概念.md)
- 10.2 分布式强化学习系统
- [10.2.1 分布式强化学习算法](第10章-强化学习系统/10.2.1-分布式强化学习算法.md)
- [10.2.2 分布式强化学习对框架的需求和挑战](第10章-强化学习系统/10.2.2-分布式强化学习对框架的需求和挑战.md)
- [10.2.3 分布式强化学习框架与应用](第10章-强化学习系统/10.2.3-分布式强化学习框架和应用.md)
- [11. 模型压缩与加速](第11章-模型压缩与加速/11-前言.md)
- [11.1 模型压缩简介](第11章-模型压缩与加速/11.1-模型压缩简介.md)
- 11.1.1 模型大小持续增长
- 11.1.2 硬件算力增速放缓
- 11.1.3 模型压缩方法
- [11.2 基于稀疏化的模型压缩](第11章-模型压缩与加速/11.2-基于稀疏化的模型压缩.md)
- 11.2.1 人工智能系统与稀疏性
- 11.2.2 深度神经网络的稀疏化与剪枝
- [11.3 模型压缩与硬件加速](第11章-模型压缩与加速/11.3-模型压缩与硬件加速.md)
- 11.3.1 稀疏模型硬件加速
- 11.3.2 量化模型硬件加速
- [12. 人工智能安全与隐私](第12章-人工智能安全与隐私/12-前言.md)
- [12.1 人工智能内在安全与隐私](第12章-人工智能安全与隐私/12.1-人工智能内在安全与隐私.md)
- 12.1.1 内在安全问题
- 12.1.2 内在隐私问题
- [12.2 人工智能训练安全与隐私](第12章-人工智能安全与隐私/12.2-人工智能训练安全与隐私.md)
- 12.2.1 训练时安全
- 12.2.2 训练时隐私
- 12.2.3 联邦学习
- [12.3 人工智能服务安全与隐私](第12章-人工智能安全与隐私/12.3-人工智能服务安全与隐私.md)
- 12.3.1 服务时安全
- 12.3.2 服务时的用户隐私
- 12.3.3 服务时的模型隐私
- [13. 人工智能优化计算机系统](第13章-人工智能优化计算机系统/13-前言.md)
- [13.1 简介与趋势](第13章-人工智能优化计算机系统/13.1-简介与趋势.md)
- 13.1.1 系统设计的范式转移
- [13.2 学习增强系统的应用](第13章-人工智能优化计算机系统/13.2-学习增强系统的应用.md)
- 13.2.1 流媒体系统
- 13.2.1 数据库索引
- 13.2.3 系统性能和参数调优
- 13.2.4 芯片设计
- 13.2.5 预测性资源调度
- [13.3 学习增强系统的落地挑战](第13章-人工智能优化计算机系统/13.3-学习增强系统的落地挑战.md)
- 13.3.1 系统数据
- 13.3.2 系统模型
- 13.3.3 系统动态性
- 13.3.4 系统正确性
## 附录
- [术语表](术语表.md)
## 反馈与贡献
1. 反馈
如果您对本模块内容有任何反馈,欢迎在 GitHub [Issues](https://github.com/microsoft/AI-System/issues)模块中留言,我们会积极了解您的反馈,并尽量满足您的要求。
2. 贡献
如果您想向本模块提供任何有价值的教程内容,请fork本仓库到您自己的账号,编辑内容并提交Pull Request,我们会及时审阅并处理。
请参考如下流程:
- (1) [创建分支,书写内容,提交Pull Request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request)。
- (2) [抄送审阅者](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/requesting-a-pull-request-review)。
- (3) [合并并删除分支](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/about-pull-request-merges#squash-and-merge-your-pull-request-commits)。
欢迎向本模块贡献有价值的内容。
## 写在后面
加星点赞是一种良好的Open Source的程序员素养,作者的目标是得到10000颗星!星越多,我们的创作团队越努力!
送人玫瑰,手有余香,传播给你的朋友,让大家一起进步!
|
AI-System/Textbook/README.md/0
|
{
"file_path": "AI-System/Textbook/README.md",
"repo_id": "AI-System",
"token_count": 16997
}
| 11 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
## 2.4 解决分类问题
本小节主要围绕解决分类问题中的提出问题,定义神经网络结构,前向计算,反向传播展开介绍。
- [2.4 解决分类问题](#24-解决分类问题)
- [2.4.1 提出问题](#241-提出问题)
- [2.4.2 定义神经网络结构](#242-定义神经网络结构)
- [2.4.3 前向计算](#243-前向计算)
- [第一层](#第一层)
- [第二层](#第二层)
- [损失函数](#损失函数)
- [2.4.4 反向传播](#244-反向传播)
- [2.4.5 运行结果](#245-运行结果)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
### 2.4.1 提出问题
我们有如表 2.4.1 所示的1000个样本和标签。
表 2.4.1 多分类问题数据样本
|样本|$x_1$|$x_2$|$y$|
|---|---|---|---|
|1|0.22825111|-0.34587097|2|
|2|0.20982606|0.43388447|3|
|...|...|...|...|
|1000|0.38230143|-0.16455377|2|
还好这个数据只有两个特征,所以我们可以用可视化的方法展示,如图 2.4.1。
<img src="./img/data.png" width="500" />
图 2.4.1 可视化样本数据
一共有3个类别:
1. 蓝色方点
2. 红色叉点
3. 绿色圆点
样本组成了一个貌似铜钱的形状,我们就把这个问题叫做“铜钱孔形分类”问题吧。
三种颜色的点有规律地占据了一个单位平面内$(-0.5,0.5)$的不同区域,从图中可以明显看出,这不是线性可分问题,而单层神经网络只能做线性分类,如果想做非线性分类,需要至少两层神经网络来完成。
红绿两色是圆形边界分割,红蓝两色是个矩形边界,都是有规律的。但是,学习神经网络,要忘记“规律”这个词,对于神经网络来说,数学上的“有规律”或者“无规律”是没有意义的,对于它来说一概都是无规律,训练难度是一模一样的。
另外,边界也是无意义的,要用概率来理解:没有一条非0即1的分界线来告诉我们哪些点应该属于哪个区域,我们可以得到的是处于某个位置的点属于三个类别的概率有多大,然后我们从中取概率最大的那个类别作为最终判断结果。
### 2.4.2 定义神经网络结构
先设计出能完成非线性多分类的网络结构,如图 2.4.2 所示。
<img src="./img/nn2.png" width="500"/>
图 2.4.2 非线性多分类的神经网络结构图
- 输入层两个特征值$x_1, x_2$
$$
x=
\begin{pmatrix}
x_1 & x_2
\end{pmatrix}
$$
- 隐层$2\times 3$的权重矩阵$W1$
$$
W1=
\begin{pmatrix}
w1_{11} & w1_{12} & w1_{13} \\
w1_{21} & w1_{22} & w1_{23}
\end{pmatrix}
$$
- 隐层$1\times 3$的偏移矩阵$B1$
$$
B1=\begin{pmatrix}
b1_1 & b1_2 & b1_3
\end{pmatrix}
$$
- 隐层由3个神经元构成
- 输出层$3\times 3$的权重矩阵$W2$
$$
W2=\begin{pmatrix}
w2_{11} & w2_{12} & w2_{13} \\
w2_{21} & w2_{22} & w2_{23} \\
w2_{31} & w2_{32} & w2_{33}
\end{pmatrix}
$$
- 输出层$1\times 3$的偏移矩阵$B2$
$$
B2=\begin{pmatrix}
b2_1 & b2_2 & b2_3
\end{pmatrix}
$$
- 输出层有3个神经元使用Softmax函数进行分类
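为帮助理解上述各矩阵的维度,下面给出一个用 NumPy 按相应形状初始化参数的示意片段(随机初始化的尺度等细节仅为示例):
```
import numpy as np
rng = np.random.default_rng(1)
W1 = rng.standard_normal((2, 3)) * 0.5   # 输入 2 个特征 -> 隐层 3 个神经元
B1 = np.zeros((1, 3))
W2 = rng.standard_normal((3, 3)) * 0.5   # 隐层 3 个神经元 -> 输出 3 个类别
B2 = np.zeros((1, 3))
```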
### 2.4.3 前向计算
根据网络结构,可以绘制前向计算图,如图 2.4.3 所示。
<img src="./img/multiple_forward.png" />
图 2.4.3 前向计算图
#### 第一层
- 线性计算
$$
z1_1 = x_1 w1_{11} + x_2 w1_{21} + b1_1
\\
z1_2 = x_1 w1_{12} + x_2 w1_{22} + b1_2
\\
z1_3 = x_1 w1_{13} + x_2 w1_{23} + b1_3
\\
Z1 = X \cdot W1 + B1
$$
- 激活函数
$$
a1_1 = Sigmoid(z1_1)
\\
a1_2 = Sigmoid(z1_2)
\\
a1_3 = Sigmoid(z1_3)
\\
A1 = Sigmoid(Z1)
$$
#### 第二层
- 线性计算
$$
z2_1 = a1_1 w2_{11} + a1_2 w2_{21} + a1_3 w2_{31} + b2_1
\\
z2_2 = a1_1 w2_{12} + a1_2 w2_{22} + a1_3 w2_{32} + b2_2
\\
z2_3 = a1_1 w2_{13} + a1_2 w2_{23} + a1_3 w2_{33} + b2_3
\\
Z2 = A1 \cdot W2 + B2
$$
- 分类函数
$$
a2_1 = \frac{e^{z2_1}}{e^{z2_1} + e^{z2_2} + e^{z2_3}}
\\
a2_2 = \frac{e^{z2_2}}{e^{z2_1} + e^{z2_2} + e^{z2_3}}
\\
a2_3 = \frac{e^{z2_3}}{e^{z2_1} + e^{z2_2} + e^{z2_3}}
\\
A2 = Softmax(Z2)
$$
#### 损失函数
使用多分类交叉熵损失函数:
$$
loss = -(y_1 \ln a2_1 + y_2 \ln a2_2 + y_3 \ln a2_3)
\\
J(w,b) = -\frac{1}{m} \sum^m_{i=1} \sum^n_{j=1} y_{ij} \ln (a2_{ij})
$$
$m$为样本数,$n$为类别数。
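下面是该损失函数的一个 NumPy 示意实现(假设 `A2` 为 Softmax 输出、`Y` 为 one-hot 形式的标签矩阵,加入极小量仅为数值稳定):
```
import numpy as np
def cross_entropy(A2, Y):
    m = Y.shape[0]
    return -np.sum(Y * np.log(A2 + 1e-12)) / m
```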
### 2.4.4 反向传播
根据前向计算图,可以绘制出反向传播的路径如图 2.4.4。
<img src="./img/multiple_backward.png" />
图 2.4.4 反向传播图
Softmax与多分类交叉熵配合时的反向传播推导过程,最后是一个很简单的减法:
$$
\frac{\partial loss}{\partial Z2}=A2-y \rightarrow dZ2
$$
从Z2开始再向前推:
$$
\begin{aligned}
\frac{\partial loss}{\partial W2} &= A1^{\top} \cdot dZ2 \rightarrow dW2
\\
\frac{\partial{loss}}{\partial{B2}} &= dZ2 \rightarrow dB2
\\
\frac{\partial A1}{\partial Z1} &= A1 \odot (1-A1) \rightarrow dA1
\\
\frac{\partial loss}{\partial Z1} &= dZ2 \cdot W2^{\top} \odot dA1 \rightarrow dZ1
\\
dW1 &= X^{\top} \cdot dZ1
\\
dB1 &= dZ1
\end{aligned}
$$
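下面给出与上述前向计算和反向传播公式一一对应的 NumPy 示意实现(仅为帮助理解公式的草图,函数与变量命名为假设示例,`Y` 为 one-hot 标签矩阵):
```
import numpy as np
def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)
def forward(X, W1, B1, W2, B2):
    Z1 = X @ W1 + B1
    A1 = 1.0 / (1.0 + np.exp(-Z1))        # Sigmoid
    Z2 = A1 @ W2 + B2
    A2 = softmax(Z2)
    return Z1, A1, Z2, A2
def backward(X, Y, A1, A2, W2):
    m = X.shape[0]
    dZ2 = (A2 - Y) / m                    # Softmax 与交叉熵配合后的梯度
    dW2 = A1.T @ dZ2
    dB2 = dZ2.sum(axis=0, keepdims=True)
    dZ1 = (dZ2 @ W2.T) * (A1 * (1 - A1))  # 乘上 Sigmoid 的导数
    dW1 = X.T @ dZ1
    dB1 = dZ1.sum(axis=0, keepdims=True)
    return dW1, dB1, dW2, dB2
```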
### 2.4.5 运行结果
训练过程如图 2.4.5 所示。
<img src="./img/loss.png" />
图 2.4.5 训练过程中的损失函数值和准确率值的变化
迭代了5000次,没有到达损失函数小于0.1的条件。
分类结果如图 2.4.6 所示。
<img src="./img/result.png" ch="500" />
图 2.4.6 分类效果图
因为没达到精度要求,所以分类效果一般。从分类结果图上看,外圈圆形差不多拟合住了,但是内圈的方形还差很多,最后的测试分类准确率为0.952。如果在第一层增加神经元的数量(目前是 3,可以尝试 8),是可以得到比较满意的结果的。
## 小结与讨论
本小节主要介绍了解决分类问题中的提出问题,定义神经网络结构,前向计算,反向传播。
请读者通过PyTorch实现一个模型解决一个简单的分类问题。
## 参考文献
1. 《智能之门》,胡晓武等著,高等教育出版社
2. Duchi, J., Hazan, E., & Singer, Y. (2011). Adaptive subgradient methods for online learning and stochastic optimization. Journal of Machine Learning Research, 12(Jul), 2121-2159.
3. Zeiler, M. D. (2012). ADADELTA: an adaptive learning rate method. arXiv preprint arXiv:1212.5701.
4. Tieleman, T., & Hinton, G. (2012). Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural networks for machine learning, 4(2), 26-31.
5. Kingma, D. P., & Ba, J. (2014). Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.
6. 周志华老师的西瓜书《机器学习》
7. Chawla N V, Bowyer K W, Hall L O, et al. SMOTE: synthetic minority over-sampling technique[J]. Journal of Artificial Intelligence Research, 2002, 16(1):321-357.
8. Inoue H. Data Augmentation by Pairing Samples for Images Classification[J]. 2018.
9. Zhang H, Cisse M, Dauphin Y N, et al. mixup: Beyond Empirical Risk Minimization[J]. 2017.
10. 《深度学习》- 伊恩·古德费洛
11. Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun, Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks. Link: https://arxiv.org/pdf/1506.01497v3.pdf
|
AI-System/Textbook/第2章-神经网络基础/2.4-解决分类问题.md/0
|
{
"file_path": "AI-System/Textbook/第2章-神经网络基础/2.4-解决分类问题.md",
"repo_id": "AI-System",
"token_count": 4675
}
| 12 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 5.4 内核优化与生成
- [5.4 内核优化与生成](#54-内核优化与生成)
- [5.4.1 算子表达式](#541-算子表达式)
- [5.4.2 算子表示与调度逻辑的分离](#542-算子表示与调度逻辑的分离)
- [5.4.3 自动调度搜索与代码生成](#543-自动调度搜索与代码生成)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
前面的编译优化基本都是在计算图的上进行的,当一个计算图被优化过后,就需要继续向下编译。其中一个最主要的问题就是如果对计算图中的每一个算子生成相应的代码。在计算框架中,每个算子都是预先实现并注册到框架中的,这样计算图在执行时只需要调用相应的代码即可。然而,计算框架的缺点是无法快速适配到一个新的硬件上,其需要为每一种硬件都实现一套算子代码,这不仅需要大量人力和时间成本,并且算子实现的性能也无法得到保证,因为,在对每个后端平台针对每个算子实现内核代码的时候都需要考虑不同的编程模型、数据排布、线程模型、缓存大小等等因素。
为了解决这个问题,就有了张量编译(或算子编译)的研究工作以及张量编译器。算子编译的核心思想是首先为通用算子找到一种能够描述算子与硬件无关的计算逻辑的表示,然后由编译器根据这种逻辑描述再结合具体的硬件生成相应的内核代码。近年来,有较多的研究工作都在围绕这个问题出现,例如TVM, Halide, TACO, Tensor Comprehension, FlexTensor等。在本书中将以TVM为例,来讲述算子编译的基本思想,更深入的技术细节可以参考相关文献。
## 5.4.1 算子表达式
对深度学习中的大多数算子,其计算逻辑都可以描述成针对输出张量中的每一个元素的独立同构计算。以矩阵乘算子为例(如图5-4-1所示),矩阵C中的每一个元素(如坐标为[i,j])的值都可以通过对应的一行(第i行)和一列(第j列)的内积来计算得出。也就是说,大多数算子的计算逻辑都可以通过描述其中每个元素的计算逻辑来表示,这就是算子表达式的作用。
<center> <img src="./img/5-4-1-matmul.png" /></center>
<center>图5-4-1. 矩阵乘算子</center>
一个算子表达式主要包括以下几个部分:1)所有输入和输出张量,2)输出张量的计算形状,3)输出张量中每一个元素的计算表达式,其中包括元素的在张量中的位置参数,一般以lambda表达式的形式描述为坐标参数的匿名函数。如下面表中每一行为上述矩阵乘算子在TVM中的算子表达式。
<center>
| 算子 | 算子表达式 |
| :-----| ----: |
| 矩阵乘 | ```C = t.compute((m, n), lambda i, j: t.sum(A[i, k] * B[k, j], axis=k))``` |
| 仿射变换 | ```C = t.compute((m, n), lambda i, j: C[i, j] + bias[i])```|
| 卷积 | ```C = t.compute((c, h, w), lambda i, x, y: t.sum(data[kc, x+kx, y+ky] * w[i, kx, ky], axis=[kx, ky, kc]))``` |
| ReLU | ```C = t.compute((m, n), lambda i, j: t.max(0, A[i, j]))``` |
</center>
<center>表5-2-1. 一些常见的算子表达式</center>
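为了更直观地说明“以输出张量中每个元素的计算逻辑来描述算子”的含义,下面用 NumPy 写出矩阵乘算子表达式对应的逐元素计算(仅用于说明语义的参考草图,并非高效实现):
```
import numpy as np
def matmul_by_expression(A, B):
    m, K = A.shape
    _, n = B.shape
    C = np.empty((m, n), dtype=A.dtype)
    for i in range(m):
        for j in range(n):
            # 对应 lambda i, j: sum(A[i, k] * B[k, j]),k 为归约轴
            C[i, j] = sum(A[i, k] * B[k, j] for k in range(K))
    return C
```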
## 5.4.2 算子表示与调度逻辑的分离
有了算子表达式之后,我们就得到了一个算子的计算逻辑。为了生成硬件上的最终代码,我们需要把算子表达式的逻辑计算变化成符合硬件编程模型的代码,并考虑硬件特性进行代码优化,这个过程就叫作表达式的调度(Schedule)。
通常来说,一个最简单的调度方案就是通过生成多重循环来遍历一个算子表达式中输出张量中的每一个元素,然后调用其提供的lambda函数,即可完成一个简单的内核代码的生成。图5-4-2展示了一个简单的张量加算子的表达式,以及为其在TVM中创建一个默认调度的示例(上半部分),同时调度后产生出的内核代码(下半部分)。
```
# 在TVM中创建一个默认调度的示例
C = tvm.compute((n,), lambda i: A[i] + B[i])
s = tvm.create_schedule(C.op)
```
```
// 调度后产生出的内核代码
for (int i= 0; i < n; ++i)
{
C[i] = A[i] + B[i];
}
```
<center>图5-4-2. 一个张量加算子的调度示例</center>
可以看到,上面生成的内核代码只是一个简单的循环,实际中这样的代码往往性能不好。我们希望对上述循环进行一系列的变换,如把一个循环拆分成两重循环、把两个循环合并成一个循环、或者把两个循环的顺序颠倒等等。为了方便这些优化,算子编译器也提供了一些相应的调度操作接口,如下图中的split操作即可将上述循环按照32为因子拆分成两重循环,如图5-4-3所示。
```
# 在TVM中创建一个默认调度的示例
C = tvm.compute((n,), lambda i: A[i] + B[i])
s = tvm.create_schedule(C.op)
# 在TVM中按照32为因子拆分成两重循环
xo, xi = s[C].split(s[C].axis[0], factor = 32)
```
```
// 调度后产生出的内核代码
for (int xo = 0; xo < ceil(n /32); ++xo)
{
for (int xi = 0; xi < 32; ++xi)
{
int i = xo * 32 + xi;
if (i < n)
C[i] = A[i] + B[i];
}
}
```
<center>图5-4-3. 一个张量加算子的调度优化示例</center>
除了优化,我们还希望一个算子表达式能生成特定硬件上符合其编程模型的代码。这就需要我们能针对这些硬件提供一些调度操作。例如,当我们想让上述代码能在CUDA GPU上执行,我们就需要把一些循环绑定到CUDA编程模型中的threadIdx或blockIdx上,同样,我们可以使用算子编译器中的bind接口来完成,如图5-4-4所示,最终我们就可以得到一个简单的可以GPU执行的内核代码。
```
# 在TVM中创建一个默认调度的示例
C = tvm.compute((n,), lambda i: A[i] + B[i])
s = tvm.create_schedule(C.op)
# 在TVM中按照32为因子拆分成两重循环
xo, xi = s[C].split(s[C].axis[0], factor = 32)
# 使用bind接口来完成和threadIdx或blockIdx的绑定
s[C].reorder(xi, xo)
s[C].bind(xo, tvm.thread_axis("blockIdx.x"))
s[C].bind(xi, tvm.thread_axis("threadIdx.x"))
```
```
// 调度后产生出的内核代码
int i = threadIdx.x * 32 + blockIdx.x;
if (i < n)
{
C[i] = A[i] + B[i];
}
```
<center>图5-4-4. 一个张量加算子调度到GPU上的示例</center>
## 5.4.3 自动调度搜索与代码生成
有了算子表达式和对表达式的调度机制,我们就可以较容易地在一个新的硬件设备上生成一个算子的内核代码了。然而,我们可以看到,在调度的时候,有非常多种决定需要抉择,而且这些决定都会根据硬件的不同而产生不一样的性能影响,这些都需要经验非常丰富的专家才能给出一个较好的调度方案。为了进一步克服这个问题,一类利用机器学习进行自动调度搜索的方法被广泛应用。
<center> <img src="./img/5-4-5-search.png" /></center>
<center>图5-4-5. 自动调度搜索与代码生成</center>
如图5-4-5所示,给定一个算子表达式,我们首先需要针对该表达式自动生成出一个调度的代码模板,模板中可以预留出大量的可配置的参数。生成的模板需要能够尽可能包括各种代码的可能性,也就是保证足够大的搜索空间。给定了代码模板后,剩下的事情就是决定哪一个配置可以生成最优的代码,实际中,一个代码模板可能有成千上万种可选配置,因此,一般的编译器会采用机器学习的方法通过不断尝试,生成代码、测量性能、反馈给机器学习模型、再生成下一个(一批)代码的方式不断迭代搜索,直到搜索到一定的步数后找到一个较优的代码配置,并生成最终代码。通过机器学习的方法的好处是可以针对特别的问题输入和硬件,利用黑盒的方式找到一个较好的专用代码,但其缺点也很明显,在编译的过程中需要大量的编译和尝试,需要花费较长的编译时间和较多的算力。
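下面用一个极简的 Python 草图刻画“生成候选配置→生成代码→实测性能→选择更优配置”的搜索循环(其中 `schedule_template`、`measure` 等接口均为假设的示例,实际系统通常以学习到的代价模型替代这里的随机采样):
```
import random
def search_best_config(schedule_template, candidate_configs, measure, n_trials=100):
    best_cfg, best_time = None, float("inf")
    for _ in range(n_trials):
        cfg = random.choice(candidate_configs)   # 提出一个候选配置
        kernel = schedule_template(cfg)          # 按模板实例化出内核代码
        t = measure(kernel)                      # 编译并在目标硬件上实测耗时
        if t < best_time:
            best_cfg, best_time = cfg, t
    return best_cfg, best_time
```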
## 小结与讨论
本章我们主要围绕内核优化与生成展开,包含算子表达式,算子表示与调度逻辑的分离,自动调度搜索与代码生成等内容。
在传统的编译器程序生成中,我们很少看到利用机器学习来自动生成程序的方法,请读者思考这种方法的好处与主要缺点,还有自动代码生成还能被用到哪些场景中呢?
## 参考文献
1. XLA. https://www.tensorflow.org/xla
2. TVM: An automated end to-end optimizing compiler for deep learn
3. Learning to optimize tensor programs
4. Halide: A language and compiler for optimizing parallelism, locality, and recomputation in image processing pipelines.
5. Triton: An Intermediate Language and Compiler for Tiled Neural Network Computations
6. Tensor comprehensions: Framework-agnostic high-performance machine learning abstractions.
7. Akg: Automatic kernel generation for neural processing units using polyhedral transformations.
8. Ansor: Generating high-performance tensor programs for deep learning.
9. Flextensor: An automatic schedule exploration and optimization framework for tensor computation on heterogeneous system.
|
AI-System/Textbook/第5章-深度学习框架的编译与优化/5.4-内核优化.md/0
|
{
"file_path": "AI-System/Textbook/第5章-深度学习框架的编译与优化/5.4-内核优化.md",
"repo_id": "AI-System",
"token_count": 6043
}
| 13 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 6.5 分布式训练的通信协调
- [6.5 分布式训练的通信协调](#65-分布式训练的通信协调)
- [6.5.1 通信协调的硬件](#651-通信协调的硬件)
- [6.5.2 通信协调的软件](#652-通信协调的软件)
- [6.5.3 课后实验:AllReduce的实现和优化](#653-课后实验allreduce的实现和优化)
- [小结与讨论](#小结与讨论)
- [思考题:为什么模型训练通常需要分布式进行,而分布式模型预测并不常见?](#思考题为什么模型训练通常需要分布式进行而分布式模型预测并不常见)
- [参考文献](#参考文献)
通信协调在分布式训练的整体性能中起到了举足轻重的作用。众多软硬件技术在深度学习的发展过程中被提出和应用。本节以
GPU为例,介绍目前深度学习中所采用的主流通信技术。
按照方式,通信可分为:机器内通信和机器间通信。前者包含:共享内存、GPUDirect
P2P over PCIe、GPUDirect P2P over NVLink [<sup>[8]</sup>](#ref8),而后者包含:TCP/IP网络、
RDMA网络和GPUDirect RDMA网络。
## 6.5.1 通信协调的硬件
<center><img src="./img/image34.png" width="600" height="" /></center>
<center>图6-5-1: 常见的加速设备形式 左:HGX卡;右:标准双槽PCIe卡 (<a href=https://www.nvidia.com/en-us/data-center/a100>图片来源</a>) </center>
<center><img src="./img/image35.png" width="600" height="" /></center>
<center>图6-5-2: 多设备通过不同的方式互联 左:HGX 8 GPU互联;右:标准PCIe卡堆叠 (<a href=https://nvidia.com>图片来源</a>) </center>
图示了两种常见的GPU硬件形式(上)以及连接方式(下):NVLink (300GB/s) vs. PCIe 4.0 (32GB/s)[<sup>[1]</sup>](#ref1)。二者的链路带宽差距高达约10倍。众多实际训练表明,高带宽链路极大地提高了并行训练的总体性能。因此,我们可以看到无论是节点内的多设备以及节点间的网络,链路带宽近些年都取得了大幅提升。
<center><img src="./img/image36.png" width="700" height="" /></center>
<center>图6-5-3: 常见设备互联的带宽 (<a href=https://www.olcf.ornl.gov/wp-content/uploads/2019/12/Summit-NCCL.pdf>图片来源</a>,<a href=https://www.microway.com/hpc-tech-tips/dgx-a100-review-throughput-and-hardware-summary/>A100 NVLink性能数据来源</a>, <a href=https://techcommunity.microsoft.com/t5/azure-global/performance-considerations-for-large-scale-deep-learning/ba-p/2693834>A100 4节点网络性能数据(187 GB/s)来源</a>)</center>
除了NVIDIA之外,其它加速器硬件厂商也提出了类似的高速数据链路。下图分别是AMD和燧原科技[<sup>[6]</sup>](#ref6)设计的加速器互联硬件。
<center><img src="./img/image37.png" width="600" height="" /></center>
<center>图6-5-4: 常见的一些PCIe设备互联硬件背板 左:OCP Summit (<a href=https://146a55aca6f00848c565-a7635525d40ac1c70300198708936b4e.ssl.cf1.rackcdn.com/images/442f418201b7eb32089aa12895ee78977d03bea1.pdf>图片来源</a>), 右:Enflame T10 (<a href=https://www.enflame-tech.com/support>图片来源</a>)</center>
而依据GPU的硬件互联结构,可以绘制出互联拓扑。目前的互联结构存在多种不同的拓扑。如下图所示,最为常见的 PCI only 连结仅使用标准的PCI/PCIe接口将加速卡与系统的其它部分连接起来。受限于PCIe的带宽限制(例如PCIe 4.0 x16 单向传输带宽为 31.508 GB/s)以及树形的连接拓扑,PCIe在设备互联上具有天然的障碍。因此,在GPU高性能计算中常配备专用高速链路实现高带宽的卡间互联,包括DGX-1/P9中的卡间直连,以及DGX-2/3中采用交换机形式的NVSwitch。
<center><img src="./img/image38.png" width="600" height="" /></center>
<center>图6-5-5: 常见的加速设备硬件互联拓扑 (<a href=https://www.olcf.ornl.gov/wp-content/uploads/2019/12/Summit-NCCL.pdf>图片来源</a>)</center>
除了通信拓扑,通信的协议也在不断迭代。如下图的**GPUDirect
P2P**[<sup>[7]</sup>](#ref7),GPU可以直接访问另一GPU的显存,无需CPU介入或系统内存中转,从而实现“零拷贝(zero-copy)”。
开启这项功能对于GPU以及GPU之间的连接方式等硬件条件均有要求:GPU属于Tesla / Quadro 专业级别,并且GPU之间通过NVLink互联或者属于同一PCIe root(例如,不允许跨NUMA node)。
<center><img src="./img/image39.png" width="600" height="" /></center>
<center>图6-5-6: 传统通过PCIe和CPU内存进行的设备间通信 (<a href=http://developer.download.nvidia.com/compute/cuda/4_0/CUDA_Toolkit_4.0_Overview.pdf>图片来源</a>) </center>
<center><img src="./img/image40.jpeg" width="600" height="" /></center>
<center>图6-5-7: 通过PCIe直接进行设备间通信 (<a href=http://developer.download.nvidia.com/compute/cuda/4_0/CUDA_Toolkit_4.0_Overview.pdf>图片来源</a>)</center>
而在跨节点网络中也有类似的协议**GPUDirect
RDMA** [<sup>[8]</sup>](#ref8),实现了GPU中的数据通过网络直接发送,无需系统内存中转,也实现了“零拷贝(zero-copy)”。但这里网络操作仍需CPU发起,因此与GPUDirect
P2P的纯GPU操作有所区别。
开启这项功能的条件,除了满足GPUDirect的基本条件之外,还需满足RDMA网卡与GPU也属于同一PCIe root。
<center><img src="./img/image41.png" width="600" height="" /></center>
<center>图6-5-8: GPUDirect RDMA 通信 (<a href=https://developer.nvidia.com/gpudirect>图片来源</a>)</center>
## 6.5.2 通信协调的软件
**分布式训练系统 通信库**
为了更好地服务深度学习等GPU任务,NVIDIA提出了针对其GPU等硬件产品的通信库 **NCCL: NVIDIA Collective Communication Library**[<sup>[12]</sup>](#ref12)。
<center><img src="./img/image42.png" width="600" height="" /></center>
<center>图6-5-9: GPU通信库的系统定位 (<a href=https://www.olcf.ornl.gov/wp-content/uploads/2019/12/Summit-NCCL.pdf>图片来源</a>) </center>
NCCL提供类似MPI的通信接口,包含集合式通信(collective communication)all-gather、 all-reduce、 broadcast、 reduce、reduce-scatter 以及点对点(point-to-point)通信send 和receive。
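作为参考,下面给出一个通过 PyTorch 分布式接口调用 NCCL 后端执行 all-reduce 的最小示意(假设每块 GPU 对应一个进程并由 torchrun / torch.distributed.launch 等工具启动,接口细节随版本可能有所不同):
```
import torch
import torch.distributed as dist
dist.init_process_group(backend="nccl")            # 使用 NCCL 作为通信后端
rank = dist.get_rank()
torch.cuda.set_device(rank % torch.cuda.device_count())
x = torch.ones(4, device="cuda") * (rank + 1)      # 每个进程持有不同的数据
dist.all_reduce(x, op=dist.ReduceOp.SUM)           # 求和结果返回到每个进程
print(f"rank {rank}: {x}")
```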
**拓扑感知的通信** NCCL这样的通信库中目前能够提供的通信算法主要针对已有的标准硬件,相对比较有限的,而有研究工作(例如: [SCCL](<https://github.com/microsoft/sccl>) )根据连接拓扑和带宽延迟等信息,可以综合设计性能更为优化的通信算法。
<center><img src="./img/image43.png" width="800" height="" /></center>
<center>图6-5-10: 常见的GPU互联结构下的通信拓扑 (<a href=https://www.olcf.ornl.gov/wp-content/uploads/2019/12/Summit-NCCL.pdf>图片来源</a>)</center>
除了NVIDIA之外,其它的厂商也发布了针对自身产品的高效通信库,例如AMD的[RCCL](<https://github.com/ROCmSoftwarePlatform/rccl>)以及intel的[OneCCL](<https://oneapi-src.github.io/oneCCL/>)。
随着硬件的快速发展,带来了更高的性能和更大的优化机遇,因此软件研究方面的迭代,尤其是支持分布式深度学习训练的算法硬件协同设计的研究,依然存在着巨大的潜力。
---------------------
## 6.5.3 课后实验:AllReduce的实现和优化
<!-- 本章的内容学习之后可以参考[实验7](../../Labs/AdvancedLabs/Lab7/README.md)进行对应的练习以加深理解。 -->
**实验目的**
1. 理解并行训练的原理和实现
2. 定制一个新的并行训练的通信压缩算法
实验环境(参考)
* Ubuntu 18.04
* PyTorch==1.5.0 (务必安装CPU版本)
* OpenMPI
* Horovod==0.19.4
实验原理:深度学习中,分布式训练算法和分布式训练系统的基本知识
**实验内容**
实验流程图:
<!--  -->
<center><img src="./img/Lab4-flow.png" width="200" height="" /></center>
<center>图6-5-11: AllReduce的实现和优化 实验流程图 </center>
具体步骤:
1. 安装依赖支持:OpenMPI, Horovod
2. 编写程序,使用Horovod库,增加数据并行训练支持
1. 参照Horovod with PyTorch参考文档,修改 `mnist_basic.py` 文件, 另存为 `pytorch_mnist_horovod.py`,使用Horovod库实现数据并行
- mnist_basic.py原始文件地址:https://github.com/pytorch/examples/blob/master/mnist/main.py
- Horovod with PyTorch文档地址:https://github.com/horovod/horovod/blob/master/docs/pytorch.rst
2. 记录每个step的运行时间和正确率(accuracy)
3. 理解Horovod的执行逻辑,利用Numpy实现float8(8bit), float16(16bit)编码方案的压缩/解压缩
1. 克隆GitHub上Horovod库
   2. 修改 `/horovod/torch/compression.py` 文件,增加Bit8Compressor和Bit16Compressor类,实现compress和decompress函数。(提示:torch.Tensor没有8-bit float类型支持,所以Bit8Compressor还需实现float32和float8类型的相互转化;具体步骤列表之后附有一个16-bit压缩器的示意代码草图,可供参考)
4. 修改Horovod库中代码,增加对float8(8bit), float16(16bit)格式的压缩
1. 修改 `/horovod/torch/mpi_ops.py` 文件,利用Horovod内嵌的AllGather通信和压缩接口,增加对float8(8bit), float16(16bit)格式的压缩代码的调用。
2. 重新build Horovod库。
5. 修改MNIST样例代码,增加压缩功能。
6. 测试代码正确性,比较原始代码、数据并行、加入压缩算法三者的性能差别。
7. [选做项目] 利用C++/CUDA API实现更为高效的压缩/解压缩编码
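针对上面第 3、4 步,下面给出一个 16-bit 压缩器的示意代码草图(按照 Horovod 0.19 中 `horovod.torch.compression.Compressor` 的接口形式编写;类名与实现细节均为示例,8-bit 情形还需自行实现 float32 与 8-bit 表示之间的转换):
```
import torch
from horovod.torch.compression import Compressor
class Bit16Compressor(Compressor):
    """示意:把 float32 梯度转成 float16 传输,解压时再恢复原始类型。"""
    @staticmethod
    def compress(tensor):
        ctx = tensor.dtype
        if tensor.dtype == torch.float32:
            tensor = tensor.type(torch.float16)   # 压缩:降低传输位宽
        return tensor, ctx
    @staticmethod
    def decompress(tensor, ctx):
        if ctx == torch.float32:
            tensor = tensor.type(torch.float32)   # 解压:恢复原始精度类型
        return tensor
```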
**实验报告**
实验环境:
<style>table{margin: auto;}</style>
||||
|--------|--------------|--------------------------|
|硬件环境|服务器数目| |
||网卡型号、数目||
||GPU型号、数目||
||GPU连接方式||
|软件环境|OS版本||
||GPU driver、(opt. NIC driver)||
||深度学习框架<br>python包名称及版本||
||CUDA版本||
||||
<center>表6-6-1: 实验环境记录</center>
实验结果:
比较原始串行训练,用Horovod并行训练,加入压缩算法三者,在同样epoch条件下的训练时间和结果正确率。
Epoch size: ___________
|||||
|-----|-----|-----|-----|
| 训练算法 || 训练时间 | 结果正确率 |
|串行训练||||
| 用Horovod并行 | Device# == 2 |||
||Device# == 4|||
| float8(8bit)压缩 | Device# == 2 |||
|| Device# == 4 |||
| float16(16bit)压缩 | Device# == 2 |||
|| Device# == 4 |||
|||||
<center>表6-6-2: 压缩通信性能比较</center>
**参考代码**
1. 安装Horovod
安装OpenMPI:`sudo apt install openmpi-bin`
安装Horovod:`python3 -m pip install horovod==0.19.4 --user`
2. 利用Horovod并行化pytorch MNIST模型训练
2.1. Device# == 1
运行命令:`python3 pytorch_mnist_horovod.py`
2.2. Device# == N (e.g., N == 2, 4, 6, 8)
运行命令:`horovodrun -n 2 python3 pytorch_mnist_horovod.py –hvd True `
参考代码: https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py
**基于Horovod(v0.19.4)库增加bit-16和bit-8的并行训练的通信压缩算法**
1. Build Horovod
运行命令:`HOROVOD_WITHOUT_MXNET=1 HOROVOD_WITHOUT_GLOO=1 HOROVOD_WITHOUT_TENSORFLOW=1 HOROVOD_WITH_PYTORCH=1 python setup.py build`
2. 在horovod库中需要修改的文件和代码片段: bit8,bit16.git_diff
3. 执行压缩算法进行训练
```
mpirun -n 2 python pytorch_mnist_compress.py --bit8-allreduce
mpirun -n 2 python pytorch_mnist_compress.py --bit16-allreduce
```
---------------------
## 小结与讨论
### 思考题:为什么模型训练通常需要分布式进行,而分布式模型预测并不常见?
* 计算模式不同:预测任务占用存储更小,更容易放在单个设备中
* 训练需要各个工作节点(Worker)保持通信,从而协调统一地**更新**模型参数;
* 预测中的模型参数是**固定**的,各个工作节点分别使用只读副本,无需相互通信协调
## 参考文献
<div id="ref1"></div>
1. [NVIDIA A100 GPU](https://www.nvidia.com/en-us/data-center/a100)
<div id="ref2"></div>
2. [Sylvain Jeaugey, NVIDIA, DISTRIBUTED DEEP NEURAL NETWORK TRAINING: NCCL ON SUMMIT](https://www.olcf.ornl.gov/wp-content/uploads/2019/12/Summit-NCCL.pdf)
<div id="ref3"></div>
3. [DGX A100 review: Throughput and Hardware Summary](https://www.microway.com/hpc-tech-tips/dgx-a100-review-throughput-and-hardware-summary/)
<div id="ref4"></div>
4. [Performance considerations for large scale deep learning training on Azure NDv4 (A100) series](https://techcommunity.microsoft.com/t5/azure-global/performance-considerations-for-large-scale-deep-learning/ba-p/2693834)
<div id="ref5"></div>
5. [An Open Accelerator Infrastructure Project for OCP Accelerator Module (OAM)](https://146a55aca6f00848c565-a7635525d40ac1c70300198708936b4e.ssl.cf1.rackcdn.com/images/442f418201b7eb32089aa12895ee78977d03bea1.pdf)
<div id="ref6"></div>
6. [Enflame T10 Manual](https://www.enflame-tech.com/support)
<div id="ref7"></div>
7. [CUDA 4.0 Overview](http://developer.download.nvidia.com/compute/cuda/4_0/CUDA_Toolkit_4.0_Overview.pdf)
<div id="ref8"></div>
8. [NVIDIA GPUDirect: Enhancing Data Movement and Access for GPUs](https://developer.nvidia.com/gpudirect)
<div id="ref9"></div>
9. [SCCL: Synthesizing optimal collective communication algorithms](https://github.com/microsoft/sccl)
<div id="ref10"></div>
10. [RCCL: ROCm Communication Collectives Library](https://github.com/ROCmSoftwarePlatform/rccl)
<div id="ref11"></div>
11. [OneCCL: Intel oneAPI Collective Communications Library](https://oneapi-src.github.io/oneCCL/)
<div id="ref12"></div>
12. [NCCL: The NVIDIA Collective Communication Library](https://developer.nvidia.com/nccl)
<div id="ref13"></div>
13. [Horovod with PyTorch 文档](https://github.com/horovod/horovod/blob/master/docs/pytorch.rst)
<div id="ref14"></div>
14. [Horovod MNIST并行训练参考代码](https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py)
|
AI-System/Textbook/第6章-分布式训练算法与系统/6.5-分布式训练的通信协调.md/0
|
{
"file_path": "AI-System/Textbook/第6章-分布式训练算法与系统/6.5-分布式训练的通信协调.md",
"repo_id": "AI-System",
"token_count": 8329
}
| 14 |
parameters:
Agent: Hosted Ubuntu 1604
Demands: "python3"
stageName: 'defaultStageName'
jobDisplayName: 'defaultDisplayName'
jobTimeoutInMinutes: 180
TridentWorkloadTypeShort: #
DeployLocation: #
TestPostfix: # "" | "-release" | "-preview"
Deploy_Location_Short: #
DefaultWorkingDirectory: #
Template: #
aksimagename: 'myimage'
ProjectLocation: #
PythonPath: #
cluster_name: #
flighting_release: false
flighting_preview: false
doCleanup: True
sub_vars: ../vars/agce_devops_sub_vars.yml
workload_vars: #
sp_appid: #
sp_password: #
stages:
- stage: ${{parameters.stageName}}
dependsOn: []
jobs:
- job: deploy_notebook_steps
displayName: ${{parameters.jobDisplayName}}
pool:
name: ${{parameters.Agent}}
demands: ${{parameters.Demands}}
container: "rocker/tidyverse:latest"
timeoutInMinutes: ${{parameters.jobTimeoutInMinutes}}
workspace:
clean: all
variables:
- template: ${{parameters.sub_vars}}
- template: ${{parameters.workload_vars}}
steps:
- template: ../steps/deploy_container_steps_v2.yml
parameters:
template: ${{variables.Template}}
azureSubscription: ${{variables.azureSubscription}}
azure_subscription: ${{variables.azure_subscription}}
azureresourcegroup: ${{variables.TridentWorkloadTypeShort}}-${{variables.DeployLocation}}${{parameters.TestPostfix}}
workspacename: ${{variables.TridentWorkloadTypeShort}}-${{variables.DeployLocation}}
azureregion: ${{variables.DeployLocation}}
aksimagename: ${{parameters.aksimagename}}
aks_name: ${{variables.TridentWorkloadTypeShort}}${{parameters.TestPostfix}}
location: ${{variables.ProjectLocation}}
python_path: ${{parameters.DefaultWorkingDirectory}}${{variables.PythonPath}}
cluster_name: ${{variables.TridentWorkloadTypeShort}}${{parameters.TestPostfix}}
flighting_release: ${{parameters.flighting_release}}
flighting_preview: ${{parameters.flighting_preview}}
sp_appid: ${{parameters.sp_appid}}
sp_password: ${{parameters.sp_password}}
doCleanup: ${{parameters.doCleanup}}
|
AI/.ci/stage/deploy_container_stage_v2.yml/0
|
{
"file_path": "AI/.ci/stage/deploy_container_stage_v2.yml",
"repo_id": "AI",
"token_count": 851
}
| 15 |
parameters:
azureSubscription: 'x'
azure_subscription: 'x'
location: '.'
azureresourcegroup: 'x'
workspacename: 'x'
azureregion: westus2
aksimagename: 'x'
aks_name: 'x'
aks_service_name: 'x'
conda: 'ado-ml-batch-train'
doCleanup: true
python_path: 'x'
max_total_runs: 1
flighting_release: false
flighting_preview: false
sql_server_name: "x"
sql_database_name: "x"
sql_username: "x"
sql_password: "x"
data_prep: true
train: true
post_cleanup: true
container_name: "x"
account_name: "x"
account_key: "x"
datastore_rg: "x"
steps:
- template: config_conda.yml
parameters:
conda_location: ${{parameters.location}}
azureSubscription: ${{parameters.azureSubscription}}
conda: ${{parameters.conda}}
flighting_release: ${{parameters.flighting_release}}
flighting_preview: ${{parameters.flighting_preview}}
- template: azpapermill_iterator.yml
parameters:
notebooks: '00_AMLConfiguration.ipynb 01_AutoML_Local.ipynb 03_Train_Impact_Score_Model.ipynb'
location: ${{parameters.location}}
azureSubscription: ${{parameters.azureSubscription}}
conda: ${{parameters.conda}}
azure_subscription: ${{parameters.azure_subscription}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
workspacename: ${{parameters.workspacename}}
azureregion: ${{parameters.azureregion}}
sql_server_name: ${{parameters.sql_server_name}}
sql_database_name: ${{parameters.sql_database_name}}
sql_username: ${{parameters.sql_username}}
sql_password: ${{parameters.sql_password}}
container_name: ${{parameters.container_name}}
account_name: ${{parameters.account_name}}
account_key: ${{parameters.account_key}}
datastore_rg: ${{parameters.datastore_rg}}
- ${{ if eq(parameters.data_prep, 'true') }}:
- template: azpapermill.yml
parameters:
notebook: 01_Training_Script.ipynb
conda: ${{parameters.conda}}
azureSubscription: ${{parameters.azureSubscription}}
location: ${{parameters.location}}
- template: azpapermill.yml
parameters:
notebook: 02_Testing_Script.ipynb
conda: ${{parameters.conda}}
azureSubscription: ${{parameters.azureSubscription}}
location: ${{parameters.location}}
- template: azpapermill.yml
parameters:
notebook: 03_Run_Locally.ipynb
conda: ${{parameters.conda}}
location: ${{parameters.location}}
azureSubscription: ${{parameters.azureSubscription}}
azure_subscription: ${{parameters.azure_subscription}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
workspacename: ${{parameters.workspacename}}
azureregion: ${{parameters.azureregion}}
- ${{ if eq(parameters.train, 'true') }}:
- template: azpapermill.yml
parameters:
notebook: 04_Hyperparameter_Random_Search.ipynb
conda: ${{parameters.conda}}
location: ${{parameters.location}}
azureSubscription: ${{parameters.azureSubscription}}
max_total_runs: ${{parameters.max_total_runs}}
- template: azpapermill.yml
parameters:
notebook: 07_Train_With_AML_Pipeline.ipynb
conda: ${{parameters.conda}}
location: ${{parameters.location}}
azureSubscription: ${{parameters.azureSubscription}}
max_total_runs: ${{parameters.max_total_runs}}
- template: cleanuptask.yml
parameters:
azureSubscription: ${{parameters.azureSubscription}}
conda: deployment_aml
location: ${{parameters.location}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
doCleanup: ${{parameters.post_cleanup}}
|
AI/.ci/steps/ADOTrainDeployAMLJob.yml/0
|
{
"file_path": "AI/.ci/steps/ADOTrainDeployAMLJob.yml",
"repo_id": "AI",
"token_count": 1392
}
| 16 |
parameters:
notebook: # defaults for any parameters that aren't specified
location: "."
azureSubscription: 'x'
azure_subscription: 'x'
timeoutInMinutes: 90
steps:
- bash: |
cd ${{parameters.location}}
echo Execute ${{parameters.notebook}}
Rscript ./${{parameters.notebook}}
timeoutInMinutes: ${{parameters.timeoutInMinutes}}
displayName: ${{parameters.notebook}}
|
AI/.ci/steps/bash_r.yml/0
|
{
"file_path": "AI/.ci/steps/bash_r.yml",
"repo_id": "AI",
"token_count": 134
}
| 17 |
parameters:
deployment_name: ''
template: ''
azureSubscription: ''
azure_subscription: ''
azureresourcegroup: ''
workspacename: ''
azureregion: ''
aksimagename: 'myimage'
environment: 'tridant-ai'
doCleanup: False
alias: '-'
project: '-'
expires : "2019-08-01"
agent: 'AI-GPU'
ENVIRONMENT_PREFIX: "ml-rts-"
deploymentguidtag: "ABC123"
aks_name: "mlaks"
location: "" #Root Dir of Project
python_path: "" #Root Dir of Python Env
python_secret_root: "./"
steps:
- template: cleanuptask.yml
parameters:
azureSubscription: ${{parameters.azureSubscription}}
conda: ${{parameters.conda}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
doCleanup: True
- template: createResourceGroupTemplate.yml
parameters:
azureSubscription: ${{parameters.azureSubscription}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
location: ${{parameters.azureregion}}
alias : ${{parameters.alias}}
project : ${{parameters.project}}
expires : ${{parameters.expires}}
- template: deploy_notebook_steps.yml
parameters:
template: ${{parameters.template}}
azureSubscription: ${{parameters.azureSubscription}}
azure_subscription: ${{parameters.azure_subscription}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
workspacename: ${{parameters.workspacename}}
azureregion: ${{parameters.azureregion}}
doCleanup: ${{parameters.doCleanup}}
alias : ${{parameters.alias}}
project : ${{parameters.project}}
expires : ${{parameters.expires}}
aks_name: ${{parameters.aks_name}}
location: ${{parameters.location}}
python_path: ${{parameters.python_path}}
conda: deployment_aml
- task: AzureCLI@1
inputs:
azureSubscription: ${{parameters.azureSubscription}}
scriptLocation: inlineScript
inlineScript: |
source activate ${{parameters.conda}}
pip install -U azure azure-cli==2.0.75 azure-keyvault==1.1.0 python-dotenv
python ${{parameters.python_secret_root}}.ci/scripts/set_secret.py -n "${{parameters.ENVIRONMENT_PREFIX}}-key"
- task: AzurePowerShell@4
inputs:
azureSubscription: ${{parameters.azureSubscription}}
ScriptType: 'FilePath'
ScriptPath: '${{parameters.python_secret_root}}.ci/scripts/SetResource.ps1'
ScriptArguments: '-resourceGroupName ''${{parameters.azureresourcegroup}}'' -tagId ''deployment-id'' -deploymentId ''${{parameters.deploymentguidtag}}'''
azurePowerShellVersion: 'LatestVersion'
displayName: 'Tag All Resources'
|
AI/.ci/steps/deploy_steps.yml/0
|
{
"file_path": "AI/.ci/steps/deploy_steps.yml",
"repo_id": "AI",
"token_count": 937
}
| 18 |
variables:
TridentWorkloadTypeShort: aidlbat
DeployLocation: eastus
ProjectLocation: "notebooks/"
PythonPath: "."
Template: DLBatchDeployAMLJob.yml
|
AI/.ci/vars/dl_batch_scoring.yml/0
|
{
"file_path": "AI/.ci/vars/dl_batch_scoring.yml",
"repo_id": "AI",
"token_count": 52
}
| 19 |
<jupyter_start><jupyter_code>import sys
sys.path += ['../utils']
import csv
from tqdm import tqdm
import collections
import gzip
import pickle
import numpy as np
import faiss
import os
import pytrec_eval
import json
from msmarco_eval import quality_checks_qids, compute_metrics, load_reference<jupyter_output><empty_output><jupyter_text>Define params below<jupyter_code>checkpoint_path = ""  # location of the dumped query and passage/document embeddings (the training output_dir); fill in before running
checkpoint = 0  # checkpoint step the embeddings were dumped from (e.g. 200000)
data_type = 0 # 0 for document, 1 for passage
test_set = 1 # 0 for dev_set, 1 for eval_set
raw_data_dir = ""  # directory containing the raw MS MARCO files; fill in before running
processed_data_dir = ""  # directory containing the preprocessed data; fill in before running<jupyter_output><empty_output><jupyter_text>Load Qrel<jupyter_code>if data_type == 0:
topN = 100
else:
topN = 1000
dev_query_positive_id = {}
query_positive_id_path = os.path.join(processed_data_dir, "dev-qrel.tsv")
with open(query_positive_id_path, 'r', encoding='utf8') as f:
tsvreader = csv.reader(f, delimiter="\t")
for [topicid, docid, rel] in tsvreader:
topicid = int(topicid)
docid = int(docid)
if topicid not in dev_query_positive_id:
dev_query_positive_id[topicid] = {}
dev_query_positive_id[topicid][docid] = int(rel)<jupyter_output><empty_output><jupyter_text>Prepare rerank data<jupyter_code>qidmap_path = processed_data_dir+"/qid2offset.pickle"
pidmap_path = processed_data_dir+"/pid2offset.pickle"
if data_type == 0:
if test_set == 1:
query_path = raw_data_dir+"/msmarco-test2019-queries.tsv"
passage_path = raw_data_dir+"/msmarco-doctest2019-top100"
else:
query_path = raw_data_dir+"/msmarco-docdev-queries.tsv"
passage_path = raw_data_dir+"/msmarco-docdev-top100"
else:
if test_set == 1:
query_path = raw_data_dir+"/msmarco-test2019-queries.tsv"
passage_path = raw_data_dir+"/msmarco-passagetest2019-top1000.tsv"
else:
query_path = raw_data_dir+"/queries.dev.small.tsv"
passage_path = raw_data_dir+"/top1000.dev"
with open(qidmap_path, 'rb') as handle:
qidmap = pickle.load(handle)
with open(pidmap_path, 'rb') as handle:
pidmap = pickle.load(handle)
qset = set()
with gzip.open(query_path, 'rt', encoding='utf-8') if query_path[-2:] == "gz" else open(query_path, 'rt', encoding='utf-8') as f:
tsvreader = csv.reader(f, delimiter="\t")
for [qid, query] in tsvreader:
qset.add(qid)
bm25 = collections.defaultdict(set)
with gzip.open(passage_path, 'rt', encoding='utf-8') if passage_path[-2:] == "gz" else open(passage_path, 'rt', encoding='utf-8') as f:
for line in tqdm(f):
if data_type == 0:
[qid, Q0, pid, rank, score, runstring] = line.split(' ')
pid = pid[1:]
else:
[qid, pid, query, passage] = line.split("\t")
if qid in qset and int(qid) in qidmap:
bm25[qidmap[int(qid)]].add(pidmap[int(pid)])
print("number of queries with " +str(topN) + " BM25 passages:", len(bm25))<jupyter_output><empty_output><jupyter_text>Calculate Metrics<jupyter_code>def convert_to_string_id(result_dict):
string_id_dict = {}
# format [string, dict[string, val]]
for k, v in result_dict.items():
_temp_v = {}
for inner_k, inner_v in v.items():
_temp_v[str(inner_k)] = inner_v
string_id_dict[str(k)] = _temp_v
return string_id_dict
def EvalDevQuery(query_embedding2id, passage_embedding2id, dev_query_positive_id, I_nearest_neighbor,topN):
prediction = {} #[qid][docid] = docscore, here we use -rank as score, so the higher the rank (1 > 2), the higher the score (-1 > -2)
total = 0
labeled = 0
Atotal = 0
Alabeled = 0
qids_to_ranked_candidate_passages = {}
for query_idx in range(len(I_nearest_neighbor)):
seen_pid = set()
query_id = query_embedding2id[query_idx]
prediction[query_id] = {}
top_ann_pid = I_nearest_neighbor[query_idx].copy()
selected_ann_idx = top_ann_pid[:topN]
rank = 0
if query_id in qids_to_ranked_candidate_passages:
pass
else:
# By default, all PIDs in the list of 1000 are 0. Only override those that are given
tmp = [0] * 1000
qids_to_ranked_candidate_passages[query_id] = tmp
for idx in selected_ann_idx:
pred_pid = passage_embedding2id[idx]
if not pred_pid in seen_pid:
# this check handles multiple vector per document
qids_to_ranked_candidate_passages[query_id][rank]=pred_pid
Atotal += 1
if pred_pid not in dev_query_positive_id[query_id]:
Alabeled += 1
if rank < 10:
total += 1
if pred_pid not in dev_query_positive_id[query_id]:
labeled += 1
rank += 1
prediction[query_id][pred_pid] = -rank
seen_pid.add(pred_pid)
# use out of the box evaluation script
evaluator = pytrec_eval.RelevanceEvaluator(
convert_to_string_id(dev_query_positive_id), {'map_cut', 'ndcg_cut', 'recip_rank','recall'})
eval_query_cnt = 0
result = evaluator.evaluate(convert_to_string_id(prediction))
qids_to_relevant_passageids = {}
for qid in dev_query_positive_id:
qid = int(qid)
if qid in qids_to_relevant_passageids:
pass
else:
qids_to_relevant_passageids[qid] = []
for pid in dev_query_positive_id[qid]:
if pid>0:
qids_to_relevant_passageids[qid].append(pid)
ms_mrr = compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
ndcg = 0
Map = 0
mrr = 0
recall = 0
recall_1000 = 0
for k in result.keys():
eval_query_cnt += 1
ndcg += result[k]["ndcg_cut_10"]
Map += result[k]["map_cut_10"]
mrr += result[k]["recip_rank"]
recall += result[k]["recall_"+str(topN)]
final_ndcg = ndcg / eval_query_cnt
final_Map = Map / eval_query_cnt
final_mrr = mrr / eval_query_cnt
final_recall = recall / eval_query_cnt
hole_rate = labeled/total
Ahole_rate = Alabeled/Atotal
return final_ndcg, eval_query_cnt, final_Map, final_mrr, final_recall, hole_rate, ms_mrr, Ahole_rate, result, prediction
dev_query_embedding = []
dev_query_embedding2id = []
passage_embedding = []
passage_embedding2id = []
for i in range(8):
try:
with open(checkpoint_path + "dev_query_"+str(checkpoint)+"__emb_p__data_obj_"+str(i)+".pb", 'rb') as handle:
dev_query_embedding.append(pickle.load(handle))
with open(checkpoint_path + "dev_query_"+str(checkpoint)+"__embid_p__data_obj_"+str(i)+".pb", 'rb') as handle:
dev_query_embedding2id.append(pickle.load(handle))
with open(checkpoint_path + "passage_"+str(checkpoint)+"__emb_p__data_obj_"+str(i)+".pb", 'rb') as handle:
passage_embedding.append(pickle.load(handle))
with open(checkpoint_path + "passage_"+str(checkpoint)+"__embid_p__data_obj_"+str(i)+".pb", 'rb') as handle:
passage_embedding2id.append(pickle.load(handle))
except:
break
if (not dev_query_embedding) or (not dev_query_embedding2id) or (not passage_embedding) or not (passage_embedding2id):
print("No data found for checkpoint: ",checkpoint)
dev_query_embedding = np.concatenate(dev_query_embedding, axis=0)
dev_query_embedding2id = np.concatenate(dev_query_embedding2id, axis=0)
passage_embedding = np.concatenate(passage_embedding, axis=0)
passage_embedding2id = np.concatenate(passage_embedding2id, axis=0)<jupyter_output><empty_output><jupyter_text>reranking metrics<jupyter_code>pidmap = collections.defaultdict(list)
for i in range(len(passage_embedding2id)):
    pidmap[passage_embedding2id[i]].append(i)  # map passage offset id (key) to its row indices in the embedding array (values)
if len(bm25) == 0:
print("Rerank data set is empty. Check if your data prepration is done on the same data set. Rerank metrics is skipped.")
else:
rerank_data = {}
all_dev_I = []
for i,qid in enumerate(dev_query_embedding2id):
p_set = []
p_set_map = {}
if qid not in bm25:
print(qid,"not in bm25")
else:
count = 0
for k,pid in enumerate(bm25[qid]):
if pid in pidmap:
for val in pidmap[pid]:
p_set.append(passage_embedding[val])
p_set_map[count] = val # new rele pos(key) to old rele pos(val)
count += 1
else:
print(pid,"not in passages")
dim = passage_embedding.shape[1]
faiss.omp_set_num_threads(16)
cpu_index = faiss.IndexFlatIP(dim)
p_set = np.asarray(p_set)
cpu_index.add(p_set)
_, dev_I = cpu_index.search(dev_query_embedding[i:i+1], len(p_set))
for j in range(len(dev_I[0])):
dev_I[0][j] = p_set_map[dev_I[0][j]]
all_dev_I.append(dev_I[0])
result = EvalDevQuery(dev_query_embedding2id, passage_embedding2id, dev_query_positive_id, all_dev_I, topN)
final_ndcg, eval_query_cnt, final_Map, final_mrr, final_recall, hole_rate, ms_mrr, Ahole_rate, metrics, prediction = result
print("Reranking Results for checkpoint "+str(checkpoint))
print("Reranking NDCG@10:" + str(final_ndcg))
print("Reranking map@10:" + str(final_Map))
print("Reranking pytrec_mrr:" + str(final_mrr))
print("Reranking recall@"+str(topN)+":" + str(final_recall))
print("Reranking hole rate@10:" + str(hole_rate))
print("Reranking hole rate:" + str(Ahole_rate))
print("Reranking ms_mrr:" + str(ms_mrr))<jupyter_output><empty_output><jupyter_text>full ranking metrics<jupyter_code>dim = passage_embedding.shape[1]
faiss.omp_set_num_threads(16)
cpu_index = faiss.IndexFlatIP(dim)
cpu_index.add(passage_embedding)
_, dev_I = cpu_index.search(dev_query_embedding, topN)
result = EvalDevQuery(dev_query_embedding2id, passage_embedding2id, dev_query_positive_id, dev_I, topN)
final_ndcg, eval_query_cnt, final_Map, final_mrr, final_recall, hole_rate, ms_mrr, Ahole_rate, metrics, prediction = result
print("Results for checkpoint "+str(checkpoint))
print("NDCG@10:" + str(final_ndcg))
print("map@10:" + str(final_Map))
print("pytrec_mrr:" + str(final_mrr))
print("recall@"+str(topN)+":" + str(final_recall))
print("hole rate@10:" + str(hole_rate))
print("hole rate:" + str(Ahole_rate))
print("ms_mrr:" + str(ms_mrr))<jupyter_output><empty_output>
|
ANCE/evaluation/Calculate Metrics.ipynb/0
|
{
"file_path": "ANCE/evaluation/Calculate Metrics.ipynb",
"repo_id": "ANCE",
"token_count": 4926
}
| 20 |
"""Lamb optimizer."""
import collections
import math
import torch
from tensorboardX import SummaryWriter
from torch.optim import Optimizer
def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int):
"""Log a histogram of trust ratio scalars in across layers."""
results = collections.defaultdict(list)
for group in optimizer.param_groups:
for p in group['params']:
state = optimizer.state[p]
for i in ('weight_norm', 'adam_norm', 'trust_ratio'):
if i in state:
results[i].append(state[i])
for k, v in results.items():
event_writer.add_histogram(f'lamb/{k}', torch.tensor(v), token_count)
class Lamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super(Lamb, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Paper v3 does not use debiasing.
# Apply bias to lr to avoid broadcast.
step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
adam_step.add_(group['weight_decay'], p.data)
adam_norm = adam_step.pow(2).sum().sqrt()
if weight_norm == 0 or adam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / adam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = adam_norm
state['trust_ratio'] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(-step_size * trust_ratio, adam_step)
return loss
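
# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes a toy model; all hyper-parameters below are placeholders.
if __name__ == "__main__":
    model = torch.nn.Linear(10, 2)
    optimizer = Lamb(model.parameters(), lr=1e-3, weight_decay=0.01)
    writer = SummaryWriter()
    loss = model(torch.randn(4, 10)).sum()
    loss.backward()
    optimizer.step()
    log_lamb_rs(optimizer, writer, token_count=0)  # log trust-ratio histograms to TensorBoard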
|
ANCE/utils/lamb.py/0
|
{
"file_path": "ANCE/utils/lamb.py",
"repo_id": "ANCE",
"token_count": 2325
}
| 21 |
"""
Code for self-training with weak supervision.
Author: Giannis Karamanolakis ([email protected])
"""
import os
import math
import random
import numpy as np
from numpy.random import seed
import tensorflow.keras as K
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Embedding, Input, Dropout, Dense, Lambda
from tensorflow.keras.models import Model
from scipy.special import softmax
from bert import bert_tokenization
import bert
from bert.loader import load_stock_weights
class DefaultModelTrainer:
"""
Student Trainer based on default model architectures for equal comparison with previous approaches
The Trainer considers pre-computed contextualized embeddings that are already provided with previous benchmarks
It has to implement: __init__, train, evaluate, save, load
"""
def __init__(self, args, logger=None):
self.args = args
self.dataset = args.dataset
self.name = '{}_CLF'.format(self.dataset)
self.logger = logger
self.manual_seed = args.seed
self.max_seq_length = args.max_seq_length
self.datapath = args.datapath
self.lower_case = True
self.model_dir = args.logdir
self.tokenizer=None
self.learning_rate = args.learning_rate
self.finetuning_rate = args.finetuning_rate
self.num_supervised_trials = args.num_supervised_trials
self.sup_batch_size = args.train_batch_size
self.sup_epochs = args.num_epochs
self.unsup_epochs = args.num_unsup_epochs
self.num_labels = args.num_labels
self.model = None
self.gpus = None
def init(self):
self.model = construct_model(self.max_seq_length, self.num_labels, dataset=self.dataset)
self.model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")])
return
def preprocess(self, texts, preprocessed_texts=None):
return texts
def train(self, train_texts, train_labels, dev_texts=None, dev_labels=None, eval_fn=None,
preprocessed_train_texts=None, preprocessed_dev_texts=None):
self.logger.info("Class labels: {}".format(self.num_labels))
x_train = np.array(self.preprocess(train_texts, preprocessed_train_texts))
y_train = np.array(train_labels)
x_dev = np.array(self.preprocess(dev_texts, preprocessed_dev_texts))
y_dev = np.array(dev_labels)
self.logger.info("X Train Shape " + str(x_train.shape) + ' ' + str(y_train.shape))
self.logger.info("X Dev Shape " + str(x_dev.shape) + ' ' + str(y_dev.shape))
model_file = os.path.join(self.model_dir, "supervised_model.h5")
distributed_res = self.distributed_train(x_train, y_train, x_dev, y_dev, model_file)
self.model = distributed_res['model']
if not os.path.exists(model_file):
self.model.save_weights(model_file)
print("Supervised model file saved to {}".format(model_file))
res = {}
res['dev_loss'] = distributed_res['dev_loss']
return res
def train_pseudo(self, train_texts, train_labels, train_weights, dev_texts=None, dev_labels=None, eval_fn=None,
preprocessed_train_texts=None, preprocessed_dev_texts=None):
x_train = np.array(self.preprocess(train_texts, preprocessed_train_texts))
y_train = np.array(train_labels)
x_weight = np.array(train_weights) if train_weights is not None else None
x_dev = np.array(self.preprocess(dev_texts, preprocessed_dev_texts))
y_dev = np.array(dev_labels)
if self.gpus is None:
self.strategy = tf.distribute.MirroredStrategy()
gpus = self.strategy.num_replicas_in_sync
self.gpus = gpus
if self.model is None:
self.init()
with self.strategy.scope():
if y_train.ndim == 2:
# support soft labels
self.model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.CategoricalAccuracy(name="acc")])
y_dev = to_categorical(y_dev)
else:
self.model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")])
self.model.fit(
x=[x_train, np.zeros((len(x_train), self.max_seq_length))],
y=y_train,
validation_data=([x_dev, np.zeros((len(x_dev), self.max_seq_length))], y_dev),
batch_size=32 * self.gpus,
shuffle=True,
sample_weight=x_weight,
epochs=self.unsup_epochs,
callbacks=[
create_learning_rate_scheduler(max_learn_rate=self.learning_rate, end_learn_rate=1e-7,
warmup_epoch_count=3, total_epoch_count=self.unsup_epochs),
K.callbacks.EarlyStopping(patience=5, restore_best_weights=True)]
)
res = {}
return res
def finetune(self, train_texts, train_labels, dev_texts=None, dev_labels=None, eval_fn=None,
preprocessed_train_texts=None, preprocessed_dev_texts=None):
# Similar to training but with smaller learning rate
x_train = np.array(self.preprocess(train_texts, preprocessed_train_texts))
y_train = np.array(train_labels)
x_dev = np.array(self.preprocess(dev_texts, preprocessed_dev_texts))
y_dev = np.array(dev_labels)
if self.gpus is None:
self.strategy = tf.distribute.MirroredStrategy()
gpus = self.strategy.num_replicas_in_sync
self.gpus = gpus
with self.strategy.scope():
if y_train.ndim == 2:
# support soft labels
self.model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.CategoricalAccuracy(name="acc")])
y_dev = to_categorical(y_dev)
else:
self.model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")])
self.model.fit(
x=[x_train, np.zeros((len(x_train), self.max_seq_length))],
y=y_train,
validation_data=([x_dev, np.zeros((len(x_dev), self.max_seq_length))], y_dev),
batch_size=self.sup_batch_size * self.gpus,
shuffle=True,
epochs=self.unsup_epochs,
callbacks=[
create_learning_rate_scheduler(max_learn_rate=self.finetuning_rate, end_learn_rate=1e-7,
warmup_epoch_count=3, total_epoch_count=self.unsup_epochs),
K.callbacks.EarlyStopping(patience=5, restore_best_weights=True)]
)
res = {}
return res
def distributed_train(self, x_train, y_train, x_dev, y_dev, model_file):
N_base = self.num_supervised_trials
self.strategy = tf.distribute.MirroredStrategy()
gpus = self.strategy.num_replicas_in_sync
self.gpus = gpus
print('Number of devices: {}'.format(gpus))
best_base_model = None
best_validation_loss = np.inf
for counter in range(N_base):
with self.strategy.scope():
strong_model = construct_model(self.max_seq_length, self.num_labels, dataset=self.dataset)
strong_model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")])
if os.path.exists(model_file):
strong_model.load_weights(model_file)
best_base_model = strong_model
print("No Training... Pre-trained supervised model loaded from {}".format(model_file))
break
if counter == 0:
print(strong_model.summary())
print("training supervised model {}/{}".format(counter, N_base))
strong_model.fit(
x=[x_train, np.zeros((len(x_train), self.max_seq_length))],
y=y_train,
batch_size=self.sup_batch_size * gpus,
shuffle=True,
epochs=self.sup_epochs,
callbacks=[
create_learning_rate_scheduler(max_learn_rate=self.learning_rate, end_learn_rate=1e-7, warmup_epoch_count=20,
total_epoch_count=self.sup_epochs),
K.callbacks.EarlyStopping(patience=20, restore_best_weights=True)
],
validation_data=([x_dev, np.zeros((len(x_dev), self.max_seq_length))], y_dev))
val_loss = strong_model.evaluate([x_dev, np.zeros((len(x_dev), self.max_seq_length))], y_dev)
print("Validation loss for run {} : {}".format(counter, val_loss))
if val_loss[0] < best_validation_loss:
best_base_model = strong_model
best_validation_loss = val_loss[0]
strong_model = best_base_model
res = strong_model.evaluate([x_dev, np.zeros((len(x_dev), self.max_seq_length))], y_dev)
print("Best validation loss for base model {}: {}".format(best_validation_loss, res))
return {
'dev_loss': best_validation_loss,
'model': strong_model
}
def predict(self, texts, batch_size=256, preprocessed_texts=None, prefix=""):
x_train = np.array(self.preprocess(texts, preprocessed_texts))
self.logger.info("Predicting labels for {} texts".format(len(texts)))
y_pred = self.model.predict(
[x_train, np.zeros((len(x_train), self.max_seq_length))],
batch_size=batch_size
)
# Get student's features
layer_name = 'first' #last
desiredOutputs = [self.model.get_layer(layer_name).output]
newModel = tf.keras.Model(self.model.inputs, desiredOutputs)
features = newModel([x_train, np.zeros((len(x_train), self.max_seq_length))])
preds = np.argmax(y_pred, axis=-1).flatten()
soft_proba = softmax(y_pred, axis=-1)
return {
'preds': preds,
'proba': soft_proba,
'features': features.numpy()
}
def load(self, savefolder):
self.logger.info("loading student from {}".format(savefolder))
raise (BaseException('not implemented'))
def save(self, savefolder):
model_file = os.path.join(savefolder, "final_model.h5")
self.logger.info("Saving model at {}".format(model_file))
self.model.save_weights(model_file)
return
def create_learning_rate_scheduler(max_learn_rate=5e-5,
end_learn_rate=1e-7,
warmup_epoch_count=10,
total_epoch_count=90):
def lr_scheduler(epoch):
if epoch < warmup_epoch_count:
res = (max_learn_rate / warmup_epoch_count) * (epoch + 1)
else:
res = max_learn_rate * math.exp(
math.log(end_learn_rate / max_learn_rate) * (epoch - warmup_epoch_count + 1) / (
total_epoch_count - warmup_epoch_count + 1))
return float(res)
learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)
return learning_rate_scheduler
def construct_model(max_seq_length, num_labels, dense_dropout=0.5, dataset='trec'):
# Constructing default model architectures for equal comparison with previous approaches
if dataset == 'trec':
emb_size = 1024
hidden_size = 512
num_layers = 2
elif dataset == 'youtube':
emb_size = 16634
hidden_size = 512
num_layers = 0
elif dataset == 'sms':
emb_size = 1024
hidden_size = 512
num_layers = 2
elif dataset == 'census':
emb_size = 105
hidden_size = 256
num_layers = 2
elif dataset == 'mitr':
emb_size = 1024
hidden_size = 512
num_layers = 2
elif dataset in ['spouse']:
emb_size = 768
hidden_size = 512
num_layers = 5
else:
raise(BaseException("Default model not available for {}".format(dataset)))
features = Input(shape=(emb_size,), name="first")
hidden = Dropout(dense_dropout)(features)
for i in range(num_layers):
name = 'dense{}'.format(i) if i != num_layers - 1 else 'last'
hidden = Dense(units=hidden_size, activation="relu", name=name)(hidden)
hidden = Dropout(dense_dropout)(hidden)
logits = hidden
outputs = Dense(units=num_labels, activation="softmax", name="output_1")(logits)
model = tf.keras.Model(inputs=features, outputs=outputs)
return model
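
# --- Usage sketch (illustrative, not part of the original module) ---
# Builds the default 'trec' student model (1024-dim precomputed embeddings -> two 512-unit
# dense layers -> softmax); the label count below is a placeholder.
if __name__ == "__main__":
    demo_model = construct_model(max_seq_length=128, num_labels=6, dataset='trec')
    demo_model.summary()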
|
ASTRA/astra/model/default_model.py/0
|
{
"file_path": "ASTRA/astra/model/default_model.py",
"repo_id": "ASTRA",
"token_count": 6561
}
| 22 |
# ------------------------------------------------------------------------------------------
# Copyright (c). All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
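# Usage sketch (illustrative): python format_converting_e2e.py <input_file> <output_jsonl>
# Each input line is expected to look like "<context>||<completion>".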
import sys
import io
import json
with open(sys.argv[1], 'r', encoding='utf8') as reader, \
open(sys.argv[2], 'w', encoding='utf8') as writer :
for line in reader:
items = line.strip().split('||')
context = items[0]
completion = items[1].strip('\n')
x = {}
x['context'] = context #+ '||'
x['completion'] = completion
writer.write(json.dumps(x)+'\n')
|
AdaMix/NLG/src/format_converting_e2e.py/0
|
{
"file_path": "AdaMix/NLG/src/format_converting_e2e.py",
"repo_id": "AdaMix",
"token_count": 192
}
| 23 |
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Examples
This folder contains actively maintained examples of use of 🤗 Transformers organized along NLP tasks. If you are looking for an example that used to
be in this folder, it may have moved to our [research projects](https://github.com/huggingface/transformers/tree/master/examples/research_projects) subfolder (which contains frozen snapshots of research projects).
## Important note
**Important**
To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
pip install .
```
Then cd into the example folder of your choice and run
```bash
pip install -r requirements.txt
```
To browse the examples corresponding to released versions of 🤗 Transformers, click on the line below and then on your desired version of the library:
<details>
<summary>Examples for older versions of 🤗 Transformers</summary>
- [v4.3.3](https://github.com/huggingface/transformers/tree/v4.3.3/examples)
- [v4.2.2](https://github.com/huggingface/transformers/tree/v4.2.2/examples)
- [v4.1.1](https://github.com/huggingface/transformers/tree/v4.1.1/examples)
- [v4.0.1](https://github.com/huggingface/transformers/tree/v4.0.1/examples)
- [v3.5.1](https://github.com/huggingface/transformers/tree/v3.5.1/examples)
- [v3.4.0](https://github.com/huggingface/transformers/tree/v3.4.0/examples)
- [v3.3.1](https://github.com/huggingface/transformers/tree/v3.3.1/examples)
- [v3.2.0](https://github.com/huggingface/transformers/tree/v3.2.0/examples)
- [v3.1.0](https://github.com/huggingface/transformers/tree/v3.1.0/examples)
- [v3.0.2](https://github.com/huggingface/transformers/tree/v3.0.2/examples)
- [v2.11.0](https://github.com/huggingface/transformers/tree/v2.11.0/examples)
- [v2.10.0](https://github.com/huggingface/transformers/tree/v2.10.0/examples)
- [v2.9.1](https://github.com/huggingface/transformers/tree/v2.9.1/examples)
- [v2.8.0](https://github.com/huggingface/transformers/tree/v2.8.0/examples)
- [v2.7.0](https://github.com/huggingface/transformers/tree/v2.7.0/examples)
- [v2.6.0](https://github.com/huggingface/transformers/tree/v2.6.0/examples)
- [v2.5.1](https://github.com/huggingface/transformers/tree/v2.5.1/examples)
- [v2.4.0](https://github.com/huggingface/transformers/tree/v2.4.0/examples)
- [v2.3.0](https://github.com/huggingface/transformers/tree/v2.3.0/examples)
- [v2.2.0](https://github.com/huggingface/transformers/tree/v2.2.0/examples)
- [v2.1.1](https://github.com/huggingface/transformers/tree/v2.1.0/examples)
- [v2.0.0](https://github.com/huggingface/transformers/tree/v2.0.0/examples)
- [v1.2.0](https://github.com/huggingface/transformers/tree/v1.2.0/examples)
- [v1.1.0](https://github.com/huggingface/transformers/tree/v1.1.0/examples)
- [v1.0.0](https://github.com/huggingface/transformers/tree/v1.0.0/examples)
</details>
Alternatively, you can switch your cloned 🤗 Transformers to a specific version (for instance v3.5.1) with
```bash
git checkout tags/v3.5.1
```
and run the example command as usual afterward.
## The Big Table of Tasks
Here is the list of all our examples:
- with information on whether they are **built on top of `Trainer`/`TFTrainer`** (if not, they still work, they might
just lack some features),
- whether or not they leverage the [🤗 Datasets](https://github.com/huggingface/datasets) library.
- links to **Colab notebooks** to walk through the scripts and run them easily,
<!--
Coming soon!
- links to **Cloud deployments** to be able to deploy large-scale trainings in the Cloud with little to no setup.
-->
| Task | Example datasets | Trainer support | TFTrainer support | 🤗 Datasets | Colab
|---|---|:---:|:---:|:---:|:---:|
| [**`language-modeling`**](https://github.com/huggingface/transformers/tree/master/examples/language-modeling) | Raw text | ✅ | - | ✅ | [](https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/01_how_to_train.ipynb)
| [**`multiple-choice`**](https://github.com/huggingface/transformers/tree/master/examples/multiple-choice) | SWAG, RACE, ARC | ✅ | ✅ | ✅ | [](https://colab.research.google.com/github/ViktorAlm/notebooks/blob/master/MPC_GPU_Demo_for_TF_and_PT.ipynb)
| [**`question-answering`**](https://github.com/huggingface/transformers/tree/master/examples/question-answering) | SQuAD | ✅ | ✅ | ✅ | [](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb)
| [**`summarization`**](https://github.com/huggingface/transformers/tree/master/examples/seq2seq) | CNN/Daily Mail | ✅ | - | - | -
| [**`text-classification`**](https://github.com/huggingface/transformers/tree/master/examples/text-classification) | GLUE, XNLI | ✅ | ✅ | ✅ | [](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb)
| [**`text-generation`**](https://github.com/huggingface/transformers/tree/master/examples/text-generation) | - | n/a | n/a | - | [](https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/02_how_to_generate.ipynb)
| [**`token-classification`**](https://github.com/huggingface/transformers/tree/master/examples/token-classification) | CoNLL NER | ✅ | ✅ | ✅ | [](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification.ipynb)
| [**`translation`**](https://github.com/huggingface/transformers/tree/master/examples/seq2seq) | WMT | ✅ | - | - | -
## Distributed training and mixed precision
All the PyTorch scripts mentioned above work out of the box with distributed training and mixed precision, thanks to
the [Trainer API](https://huggingface.co/transformers/main_classes/trainer.html). To launch one of them on _n_ GPUS,
use the following command:
```bash
python -m torch.distributed.launch \
--nproc_per_node number_of_gpu_you_have path_to_script.py \
--all_arguments_of_the_script
```
As an example, here is how you would fine-tune the BERT large model (with whole word masking) on the text
classification MNLI task using the `run_glue` script, with 8 GPUs:
```bash
python -m torch.distributed.launch \
--nproc_per_node 8 text-classification/run_glue.py \
--model_name_or_path bert-large-uncased-whole-word-masking \
--task_name mnli \
--do_train \
--do_eval \
--max_seq_length 128 \
--per_device_train_batch_size 8 \
--learning_rate 2e-5 \
--num_train_epochs 3.0 \
--output_dir /tmp/mnli_output/
```
If you have a GPU with mixed precision capabilities (architecture Pascal or more recent), you can use mixed precision
training with PyTorch 1.6.0 or later, or by installing the [Apex](https://github.com/NVIDIA/apex) library for previous
versions. Just add the flag `--fp16` to your command launching one of the scripts mentioned above!
Using mixed precision training usually results in a 2x speedup for training with the same final results (as shown in
[this table](https://github.com/huggingface/transformers/tree/master/examples/text-classification#mixed-precision-training)
for text classification).
## Running on TPUs
When using Tensorflow, TPUs are supported out of the box as a `tf.distribute.Strategy`.
When using PyTorch, we support TPUs thanks to `pytorch/xla`. For more context and information on how to set up your TPU environment, refer to Google's documentation and to the
very detailed [pytorch/xla README](https://github.com/pytorch/xla/blob/master/README.md).
In this repo, we provide a very simple launcher script named
[xla_spawn.py](https://github.com/huggingface/transformers/tree/master/examples/xla_spawn.py) that lets you run our
example scripts on multiple TPU cores without any boilerplate. Just pass a `--num_cores` flag to this script, then your
regular training script with its arguments (this is similar to the `torch.distributed.launch` helper for
`torch.distributed`):
```bash
python xla_spawn.py --num_cores num_tpu_you_have \
path_to_script.py \
--all_arguments_of_the_script
```
As an example, here is how you would fine-tune the BERT large model (with whole word masking) on the text
classification MNLI task using the `run_glue` script, with 8 TPUs:
```bash
python xla_spawn.py --num_cores 8 \
text-classification/run_glue.py \
--model_name_or_path bert-large-uncased-whole-word-masking \
--task_name mnli \
--do_train \
--do_eval \
--max_seq_length 128 \
--per_device_train_batch_size 8 \
--learning_rate 2e-5 \
--num_train_epochs 3.0 \
--output_dir /tmp/mnli_output/
```
## Logging & Experiment tracking
You can easily log and monitor your training runs. The following integrations are currently supported:
* [TensorBoard](https://www.tensorflow.org/tensorboard)
* [Weights & Biases](https://docs.wandb.ai/integrations/huggingface)
* [Comet ML](https://www.comet.ml/docs/python-sdk/huggingface/)
### Weights & Biases
To use Weights & Biases, install the wandb package with:
```bash
pip install wandb
```
Then log in from the command line:
```bash
wandb login
```
If you are in Jupyter or Colab, you should log in with:
```python
import wandb
wandb.login()
```
To enable logging to W&B, include `"wandb"` in the `report_to` of your `TrainingArguments` or script. Or just pass along `--report_to all` if you have `wandb` installed.
Whenever you use the `Trainer` or `TFTrainer` classes, your losses, evaluation metrics, model topology, and gradients (for `Trainer` only) will automatically be logged.
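For instance, here is a minimal sketch (the output directory and run name below are illustrative placeholders):

```python
from transformers import TrainingArguments

# Illustrative values only; pass these arguments to your Trainer as usual.
training_args = TrainingArguments(
    output_dir="./results",      # placeholder output path
    report_to=["wandb"],         # enable Weights & Biases logging
    run_name="my-experiment",    # name shown in the W&B dashboard
    logging_steps=50,
)
```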
Advanced configuration is possible by setting environment variables:
<table>
<thead>
<tr>
<th style="text-align:left">Environment Variables</th>
<th style="text-align:left">Options</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align:left">WANDB_LOG_MODEL</td>
<td style="text-align:left">Log the model as artifact at the end of training (<b>false</b> by default)</td>
</tr>
<tr>
<td style="text-align:left">WANDB_WATCH</td>
<td style="text-align:left">
<ul>
<li><b>gradients</b> (default): Log histograms of the gradients</li>
<li><b>all</b>: Log histograms of gradients and parameters</li>
<li><b>false</b>: No gradient or parameter logging</li>
</ul>
</td>
</tr>
<tr>
<td style="text-align:left">WANDB_PROJECT</td>
<td style="text-align:left">Organize runs by project</td>
</tr>
</tbody>
</table>
Set run names with `run_name` argument present in scripts or as part of `TrainingArguments`.
Additional configuration options are available through generic [wandb environment variables](https://docs.wandb.com/library/environment-variables).
Refer to related [documentation & examples](https://docs.wandb.ai/integrations/huggingface).
### Comet.ml
To use `comet_ml`, install the Python package with:
```bash
pip install comet_ml
```
or if in a Conda environment:
```bash
conda install -c comet_ml -c anaconda -c conda-forge comet_ml
```
|
AdaMix/docs/source/examples.md/0
|
{
"file_path": "AdaMix/docs/source/examples.md",
"repo_id": "AdaMix",
"token_count": 4478
}
| 24 |
..
Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
General Utilities
-----------------------------------------------------------------------------------------------------------------------
This page lists all of Transformers general utility functions that are found in the file ``file_utils.py``.
Most of those are only useful if you are studying the general code in the library.
Enums and namedtuples
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.file_utils.ExplicitEnum
.. autoclass:: transformers.file_utils.PaddingStrategy
.. autoclass:: transformers.file_utils.TensorType
Special Decorators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: transformers.file_utils.add_start_docstrings
.. autofunction:: transformers.file_utils.add_start_docstrings_to_model_forward
.. autofunction:: transformers.file_utils.add_end_docstrings
.. autofunction:: transformers.file_utils.add_code_sample_docstrings
.. autofunction:: transformers.file_utils.replace_return_docstrings
Special Properties
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.file_utils.cached_property
Other Utilities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.file_utils._BaseLazyModule
|
AdaMix/docs/source/internal/file_utils.rst/0
|
{
"file_path": "AdaMix/docs/source/internal/file_utils.rst",
"repo_id": "AdaMix",
"token_count": 477
}
| 25 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Trainer
-----------------------------------------------------------------------------------------------------------------------
The :class:`~transformers.Trainer` and :class:`~transformers.TFTrainer` classes provide an API for feature-complete
training in most standard use cases. It's used in most of the :doc:`example scripts <../examples>`.
Before instantiating your :class:`~transformers.Trainer`/:class:`~transformers.TFTrainer`, create a
:class:`~transformers.TrainingArguments`/:class:`~transformers.TFTrainingArguments` to access all the points of
customization during training.
The API supports distributed training on multiple GPUs/TPUs, mixed precision through `NVIDIA Apex
<https://github.com/NVIDIA/apex>`__ and Native AMP for PyTorch and :obj:`tf.keras.mixed_precision` for TensorFlow.
Both :class:`~transformers.Trainer` and :class:`~transformers.TFTrainer` contain the basic training loop which supports
the above features. To inject custom behavior you can subclass them and override the following methods:
- **get_train_dataloader**/**get_train_tfdataset** -- Creates the training DataLoader (PyTorch) or TF Dataset.
- **get_eval_dataloader**/**get_eval_tfdataset** -- Creates the evaluation DataLoader (PyTorch) or TF Dataset.
- **get_test_dataloader**/**get_test_tfdataset** -- Creates the test DataLoader (PyTorch) or TF Dataset.
- **log** -- Logs information on the various objects watching training.
- **create_optimizer_and_scheduler** -- Sets up the optimizer and learning rate scheduler if they were not passed at
init.
- **compute_loss** - Computes the loss on a batch of training inputs.
- **training_step** -- Performs a training step.
- **prediction_step** -- Performs an evaluation/test step.
- **run_model** (TensorFlow only) -- Basic pass through the model.
- **evaluate** -- Runs an evaluation loop and returns metrics.
- **predict** -- Returns predictions (with metrics if labels are available) on a test set.
.. warning::
The :class:`~transformers.Trainer` class is optimized for 🤗 Transformers models and can have surprising behaviors
when you use it on other models. When using it on your own model, make sure:
    - your model always returns tuples or subclasses of :class:`~transformers.file_utils.ModelOutput`.
- your model can compute the loss if a :obj:`labels` argument is provided and that loss is returned as the first
element of the tuple (if your model returns tuples)
- your model can accept multiple label arguments (use the :obj:`label_names` in your
:class:`~transformers.TrainingArguments` to indicate their name to the :class:`~transformers.Trainer`) but none
of them should be named :obj:`"label"`.
Here is an example of how to customize :class:`~transformers.Trainer` using a custom loss function for multi-label
classification:
.. code-block:: python
import torch
from transformers import Trainer
class MultilabelTrainer(Trainer):
def compute_loss(self, model, inputs, return_outputs=False):
labels = inputs.pop("labels")
outputs = model(**inputs)
logits = outputs.logits
loss_fct = torch.nn.BCEWithLogitsLoss()
loss = loss_fct(logits.view(-1, self.model.config.num_labels),
labels.float().view(-1, self.model.config.num_labels))
return (loss, outputs) if return_outputs else loss
Another way to customize the training loop behavior for the PyTorch :class:`~transformers.Trainer` is to use
:doc:`callbacks <callback>` that can inspect the training loop state (for progress reporting, logging on TensorBoard or
other ML platforms...) and take decisions (like early stopping).
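For example, here is a minimal sketch of a callback that prints logged metrics (the class name and printing logic
are purely illustrative):

.. code-block:: python

    from transformers import TrainerCallback

    class PrintMetricsCallback(TrainerCallback):
        def on_log(self, args, state, control, logs=None, **kwargs):
            # Called whenever the Trainer logs metrics (loss, learning rate, ...).
            if logs is not None:
                print(f"step {state.global_step}: {logs}")

An instance of such a callback can then be passed to the :obj:`callbacks` argument of :class:`~transformers.Trainer`.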
Trainer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.Trainer
:members:
Seq2SeqTrainer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.Seq2SeqTrainer
:members: evaluate, predict
TFTrainer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFTrainer
:members:
TrainingArguments
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TrainingArguments
:members:
Seq2SeqTrainingArguments
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.Seq2SeqTrainingArguments
:members:
TFTrainingArguments
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFTrainingArguments
:members:
Trainer Integrations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :class:`~transformers.Trainer` has been extended to support libraries that may dramatically improve your training
time and fit much bigger models.
Currently it supports third party solutions, `DeepSpeed <https://github.com/microsoft/DeepSpeed>`__ and `FairScale
<https://github.com/facebookresearch/fairscale/>`__, which implement parts of the paper `ZeRO: Memory Optimizations
Toward Training Trillion Parameter Models, by Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, Yuxiong He
<https://arxiv.org/abs/1910.02054>`__.
This provided support is new and experimental as of this writing.
Installation Notes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
As of this writing, both FairScale and DeepSpeed require compilation of CUDA C++ code before they can be used.
While all installation issues should be dealt with through the corresponding GitHub Issues of `FairScale
<https://github.com/facebookresearch/fairscale/issues>`__ and `Deepspeed
<https://github.com/microsoft/DeepSpeed/issues>`__, there are a few common issues that one may encounter while building
any PyTorch extension that needs to build CUDA extensions.
Therefore, if you encounter a CUDA-related build issue while doing one of the following or both:
.. code-block:: bash
pip install fairscale
pip install deepspeed
please, read the following notes first.
In these notes we give examples for what to do when ``pytorch`` has been built with CUDA ``10.2``. If your situation is
different, remember to adjust the version number to the one you are after.
**Possible problem #1:**
While PyTorch comes with its own CUDA toolkit, to build these two projects you must have an identical version of CUDA
installed system-wide.
For example, if you installed ``pytorch`` with ``cudatoolkit==10.2`` in the Python environment, you also need to have
CUDA ``10.2`` installed system-wide.
The exact location may vary from system to system, but ``/usr/local/cuda-10.2`` is the most common location on many
Unix systems. When CUDA is correctly set up and added to the ``PATH`` environment variable, one can find the
installation location by doing:
.. code-block:: bash
which nvcc
If you don't have CUDA installed system-wide, install it first. You will find the instructions by using your favorite
search engine. For example, if you're on Ubuntu you may want to search for: `ubuntu cuda 10.2 install
<https://www.google.com/search?q=ubuntu+cuda+10.2+install>`__.
**Possible problem #2:**
Another possible common problem is that you may have more than one CUDA toolkit installed system-wide. For example you
may have:
.. code-block:: bash
/usr/local/cuda-10.2
/usr/local/cuda-11.0
Now, in this situation you need to make sure that your ``PATH`` and ``LD_LIBRARY_PATH`` environment variables contain
the correct paths to the desired CUDA version. Typically, package installers will set these to contain whatever the
last version was installed. If you encounter the problem where the package build fails because it can't find the right
CUDA version despite you having it installed system-wide, it means that you need to adjust the two aforementioned
environment variables.
First, you may look at their contents:
.. code-block:: bash
echo $PATH
echo $LD_LIBRARY_PATH
so you get an idea of what is inside.
It's possible that ``LD_LIBRARY_PATH`` is empty.
``PATH`` lists the locations where executables can be found, and ``LD_LIBRARY_PATH`` lists where shared libraries are
looked for. In both cases, earlier entries have priority over the later ones. ``:`` is used to separate multiple
entries.
Now, to tell the build program where to find the specific CUDA toolkit, insert the desired paths to be listed first by
doing:
.. code-block:: bash
export PATH=/usr/local/cuda-10.2/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:$LD_LIBRARY_PATH
Note that we aren't overwriting the existing values, but prepending instead.
Of course, adjust the version number and the full path if need be. Check that the directories you assign actually do
exist. The ``lib64`` sub-directory is where the various CUDA ``.so`` objects, like ``libcudart.so``, reside; it's unlikely
that your system will have it named differently, but if it does, adjust it to reflect your reality.
**Possible problem #3:**
Some older CUDA versions may refuse to build with newer compilers. For example, you may have ``gcc-9`` but it wants
``gcc-7``.
There are various ways to go about it.
If you can install the latest CUDA toolkit it typically should support the newer compiler.
Alternatively, you could install the lower version of the compiler in addition to the one you already have, or you may
already have it but it's not the default one, so the build system can't see it. If you have ``gcc-7`` installed but the
build system complains it can't find it, the following might do the trick:
.. code-block:: bash
sudo ln -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc
sudo ln -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++
Here, we are making a symlink to ``gcc-7`` from ``/usr/local/cuda-10.2/bin/gcc`` and since
``/usr/local/cuda-10.2/bin/`` should be in the ``PATH`` environment variable (see the previous problem's solution), it
should find ``gcc-7`` (and ``g++-7``) and then the build will succeed.
As always make sure to edit the paths in the example to match your situation.
**If still unsuccessful:**
If after addressing these you still encounter build issues, please, proceed with the GitHub Issue of `FairScale
<https://github.com/facebookresearch/fairscale/issues>`__ and `Deepspeed
<https://github.com/microsoft/DeepSpeed/issues>`__, depending on the project you have the problem with.
FairScale
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By integrating `FairScale <https://github.com/facebookresearch/fairscale/>`__ the :class:`~transformers.Trainer`
provides support for the following features from `the ZeRO paper <https://arxiv.org/abs/1910.02054>`__:
1. Optimizer State Sharding
2. Gradient Sharding
3. Model Parameters Sharding (new and very experimental)
4. CPU offload (new and very experimental)
You will need at least two GPUs to use this feature.
To deploy this feature:
1. Install the library via pypi:
.. code-block:: bash
pip install fairscale
or find more details on `the FairScale's GitHub page
<https://github.com/facebookresearch/fairscale/#installation>`__.
2. To use the first version of Sharded data-parallelism, add ``--sharded_ddp simple`` to the command line arguments,
and make sure you have added the distributed launcher ``-m torch.distributed.launch
--nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE`` if you haven't been using it already.
For example here is how you could use it for ``run_translation.py`` with 2 GPUs:
.. code-block:: bash
python -m torch.distributed.launch --nproc_per_node=2 examples/seq2seq/run_translation.py \
--model_name_or_path t5-small --per_device_train_batch_size 1 \
--output_dir output_dir --overwrite_output_dir \
--do_train --max_train_samples 500 --num_train_epochs 1 \
--dataset_name wmt16 --dataset_config "ro-en" \
--source_lang en --target_lang ro \
--fp16 --sharded_ddp simple
Notes:
- This feature requires distributed training (so multiple GPUs).
- It is not implemented for TPUs.
- It works with ``--fp16`` too, to make things even faster.
- One of the main benefits of enabling ``--sharded_ddp simple`` is that it uses a lot less GPU memory, so you should be
able to use significantly larger batch sizes using the same hardware (e.g. 3x and even bigger) which should lead to
significantly shorter training time.
3. To use the second version of Sharded data-parallelism, add ``--sharded_ddp zero_dp_2`` or ``--sharded_ddp zero_dp_3``
to the command line arguments, and make sure you have added the distributed launcher ``-m torch.distributed.launch
--nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE`` if you haven't been using it already.
For example here is how you could use it for ``run_translation.py`` with 2 GPUs:
.. code-block:: bash
python -m torch.distributed.launch --nproc_per_node=2 examples/seq2seq/run_translation.py \
--model_name_or_path t5-small --per_device_train_batch_size 1 \
--output_dir output_dir --overwrite_output_dir \
--do_train --max_train_samples 500 --num_train_epochs 1 \
--dataset_name wmt16 --dataset_config "ro-en" \
--source_lang en --target_lang ro \
--fp16 --sharded_ddp zero_dp_2
:obj:`zero_dp_2` is an optimized version of the simple wrapper, while :obj:`zero_dp_3` fully shards model weights,
gradients and optimizer states.
Both are compatible with adding :obj:`cpu_offload` to enable ZeRO-offload (activate it like this: :obj:`--sharded_ddp
"zero_dp_2 cpu_offload"`).
Notes:
- This feature requires distributed training (so multiple GPUs).
- It is not implemented for TPUs.
- It works with ``--fp16`` too, to make things even faster.
- The ``cpu_offload`` additional option requires ``--fp16``.
- This is an area of active development, so make sure you have a source install of fairscale to use this feature as
some bugs you encounter may have been fixed there already.
Known caveats:
- This feature is incompatible with :obj:`--predict_with_generate` in the `run_translation.py` script.
- Using :obj:`--sharded_ddp zero_dp_3` requires wrapping each layer of the model in the special container
:obj:`FullyShardedDataParallelism` of fairscale. It should be used with the option :obj:`auto_wrap` if you are not
doing this yourself: :obj:`--sharded_ddp "zero_dp_3 auto_wrap"`.
DeepSpeed
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
`DeepSpeed <https://github.com/microsoft/DeepSpeed>`__ implements everything described in the `ZeRO paper
<https://arxiv.org/abs/1910.02054>`__, except ZeRO's stage 3, "Parameter Partitioning (Pos+g+p)". Currently it provides
full support for:
1. Optimizer State Partitioning (ZeRO stage 1)
2. Gradient Partitioning (ZeRO stage 2)
3. Custom fp16 handling
4. A range of fast Cuda-extension-based Optimizers
5. ZeRO-Offload
ZeRO-Offload has its own dedicated paper: `ZeRO-Offload: Democratizing Billion-Scale Model Training
<https://arxiv.org/abs/2101.06840>`__.
DeepSpeed is currently used only for training, as all the currently available features are of no use to inference.
Installation
=======================================================================================================================
Install the library via pypi:
.. code-block:: bash
pip install deepspeed
or find more details on `the DeepSpeed's GitHub page <https://github.com/microsoft/deepspeed#installation>`__.
Deployment with multiple GPUs
=======================================================================================================================
To deploy this feature with multiple GPUs adjust the :class:`~transformers.Trainer` command line arguments as
following:
1. replace ``python -m torch.distributed.launch`` with ``deepspeed``.
2. add a new argument ``--deepspeed ds_config.json``, where ``ds_config.json`` is the DeepSpeed configuration file as
documented `here <https://www.deepspeed.ai/docs/config-json/>`__. The file naming is up to you.
Therefore, if your original command line looked as following:
.. code-block:: bash
python -m torch.distributed.launch --nproc_per_node=2 your_program.py <normal cl args>
Now it should be:
.. code-block:: bash
deepspeed --num_gpus=2 your_program.py <normal cl args> --deepspeed ds_config.json
Unlike ``torch.distributed.launch``, where you have to specify how many GPUs to use with ``--nproc_per_node``, with the
``deepspeed`` launcher you don't have to use the corresponding ``--num_gpus`` if you want all of your GPUs used. The
full details on how to configure various nodes and GPUs can be found `here
<https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node>`__.
In fact, you can continue using ``-m torch.distributed.launch`` with DeepSpeed as long as you don't need to use
``deepspeed`` launcher-specific arguments. Typically if you don't need a multi-node setup you're not required to use
the ``deepspeed`` launcher. But since in the DeepSpeed documentation it'll be used everywhere, for consistency we will
use it here as well.
Here is an example of running ``run_translation.py`` under DeepSpeed deploying all available GPUs:
.. code-block:: bash
deepspeed examples/seq2seq/run_translation.py \
--deepspeed examples/tests/deepspeed/ds_config.json \
--model_name_or_path t5-small --per_device_train_batch_size 1 \
--output_dir output_dir --overwrite_output_dir --fp16 \
--do_train --max_train_samples 500 --num_train_epochs 1 \
--dataset_name wmt16 --dataset_config "ro-en" \
--source_lang en --target_lang ro
Note that in the DeepSpeed documentation you are likely to see ``--deepspeed --deepspeed_config ds_config.json`` - i.e.
two DeepSpeed-related arguments, but for the sake of simplicity, and since there are already so many arguments to deal
with, we combined the two into a single argument.
For some practical usage examples, please, see this `post
<https://github.com/huggingface/transformers/issues/8771#issuecomment-759248400>`__.
Deployment with one GPU
=======================================================================================================================
To deploy DeepSpeed with one GPU adjust the :class:`~transformers.Trainer` command line arguments as follows:
.. code-block:: bash
deepspeed --num_gpus=1 examples/seq2seq/run_translation.py \
--deepspeed examples/tests/deepspeed/ds_config.json \
--model_name_or_path t5-small --per_device_train_batch_size 1 \
--output_dir output_dir --overwrite_output_dir --fp16 \
--do_train --max_train_samples 500 --num_train_epochs 1 \
--dataset_name wmt16 --dataset_config "ro-en" \
--source_lang en --target_lang ro
This is almost the same as with multiple GPUs, but here we tell DeepSpeed explicitly to use just one GPU. By default,
DeepSpeed deploys all GPUs it can see. If you have only 1 GPU to start with, then you don't need this argument. The
following `documentation <https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node>`__ discusses the
launcher options.
Why would you want to use DeepSpeed with just one GPU?
1. It has a ZeRO-offload feature which can delegate some computations and memory to the host's CPU and RAM, and thus
   leave more GPU resources for the model's needs - e.g. a larger batch size, or fitting a very big model which
   normally won't fit.
2. It provides a smart GPU memory management system that minimizes memory fragmentation, which again allows you to fit
   bigger models and data batches.
While we are going to discuss the configuration in detail next, the key to getting a huge improvement on a single GPU
with DeepSpeed is to have at least the following configuration in the configuration file:
.. code-block:: json
{
"zero_optimization": {
"stage": 2,
"allgather_partitions": true,
"allgather_bucket_size": 2e8,
"reduce_scatter": true,
"reduce_bucket_size": 2e8,
"overlap_comm": true,
"contiguous_gradients": true,
"cpu_offload": true
   }
}
which enables ``cpu_offload`` and some other important features. You may experiment with the buffer sizes; you will
find more details in the discussion below.
For a practical usage example of this type of deployment, please, see this `post
<https://github.com/huggingface/transformers/issues/8771#issuecomment-759176685>`__.
Notes:
- if you need to run on a specific GPU, which is different from GPU 0, you can't use ``CUDA_VISIBLE_DEVICES`` to limit
the visible scope of available GPUs. Instead, you have to use the following syntax:
.. code-block:: bash
deepspeed --include localhost:1 examples/seq2seq/run_translation.py ...
In this example, we tell DeepSpeed to use GPU 1 (the second GPU).
Deployment in Notebooks
=======================================================================================================================
The problem with running notebook cells as a script is that there is no normal ``deepspeed`` launcher to rely on, so
under certain setups we have to emulate it.
Here is how you'd have to adjust your training code in the notebook to use DeepSpeed.
.. code-block:: python
# DeepSpeed requires a distributed environment even when only one process is used.
# This emulates a launcher in the notebook
import os
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '9994' # modify if RuntimeError: Address already in use
os.environ['RANK'] = "0"
os.environ['LOCAL_RANK'] = "0"
os.environ['WORLD_SIZE'] = "1"
# Now proceed as normal, plus pass the deepspeed config file
training_args = TrainingArguments(..., deepspeed="ds_config.json")
trainer = Trainer(...)
trainer.train()
Note: ``...`` stands for the normal arguments that you'd pass to the functions.
If you want to create the config file on the fly in the notebook in the current directory, you could have a dedicated
cell with:
.. code-block:: python
%%bash
cat <<'EOT' > ds_config.json
{
"fp16": {
"enabled": true,
"loss_scale": 0,
"loss_scale_window": 1000,
"hysteresis": 2,
"min_loss_scale": 1
},
"zero_optimization": {
"stage": 2,
"allgather_partitions": true,
"allgather_bucket_size": 2e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 2e8,
"contiguous_gradients": true,
"cpu_offload": true
},
"zero_allow_untested_optimizer": true,
"optimizer": {
"type": "AdamW",
"params": {
"lr": 3e-5,
"betas": [0.8, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": 0,
"warmup_max_lr": 3e-5,
"warmup_num_steps": 500
}
},
"steps_per_print": 2000,
"wall_clock_breakdown": false
}
EOT
That said, if the script is not in the notebook cells, you can launch ``deepspeed`` normally via shell from a cell
with:
.. code-block::
!deepspeed examples/seq2seq/run_translation.py ...
or with bash magic, where you can write a multi-line code for the shell to run:
.. code-block::
%%bash
cd /somewhere
deepspeed examples/seq2seq/run_translation.py ...
Configuration
=======================================================================================================================
For the complete guide to the DeepSpeed configuration options that can be used in its configuration file please refer
to the `following documentation <https://www.deepspeed.ai/docs/config-json/>`__.
You can find dozens of DeepSpeed configuration examples that address various practical needs in `the DeepSpeedExamples
repo <https://github.com/microsoft/DeepSpeedExamples>`__:
.. code-block:: bash
git clone https://github.com/microsoft/DeepSpeedExamples
cd DeepSpeedExamples
find . -name '*json'
Continuing with the commands above, let's say you want to configure the Lamb optimizer. You can search through the
example ``.json`` files with:
.. code-block:: bash
grep -i Lamb $(find . -name '*json')
Some more examples are to be found in the `main repo <https://github.com/microsoft/DeepSpeed>`__ as well.
While you always have to supply the DeepSpeed configuration file, you can configure the DeepSpeed integration in
several ways:
1. Supply most of the configuration inside the file, and just use a few required command line arguments. This is the
recommended way as it puts most of the configuration params in one place.
2. Supply just the ZeRO configuration params inside the file, and configure the rest using the normal
:class:`~transformers.Trainer` command line arguments.
3. Any variation of the first two ways.
To get an idea of what a DeepSpeed configuration file looks like, here is one that activates ZeRO stage 2 features,
enables FP16, and uses the AdamW optimizer and the WarmupLR scheduler:
.. code-block:: json
{
"fp16": {
"enabled": true,
"loss_scale": 0,
"loss_scale_window": 1000,
"hysteresis": 2,
"min_loss_scale": 1
},
"zero_optimization": {
"stage": 2,
"allgather_partitions": true,
"allgather_bucket_size": 5e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 5e8,
"contiguous_gradients": true,
"cpu_offload": true
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": 3e-5,
"betas": [ 0.8, 0.999 ],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": 0,
"warmup_max_lr": 3e-5,
"warmup_num_steps": 500
}
}
}
If you already have a command line that you have been using with :class:`transformers.Trainer` args, you can continue
using those and the :class:`~transformers.Trainer` will automatically convert them into the corresponding DeepSpeed
configuration at run time. For example, you could use the following configuration file:
.. code-block:: json
{
"zero_optimization": {
"stage": 2,
"allgather_partitions": true,
"allgather_bucket_size": 5e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 5e8,
"contiguous_gradients": true,
"cpu_offload": true
}
}
and the following command line arguments:
.. code-block:: bash
--learning_rate 3e-5 --warmup_steps 500 --adam_beta1 0.8 --adam_beta2 0.999 --adam_epsilon 1e-8 \
--weight_decay 3e-7 --lr_scheduler_type constant_with_warmup --fp16 --fp16_backend amp
to achieve the same configuration as provided by the longer json file in the first example.
When you execute the program, DeepSpeed will log the configuration it received from the :class:`~transformers.Trainer`
to the console, so you can see exactly what final configuration was passed to it.
Shared Configuration
=======================================================================================================================
Some configuration information is required by both the :class:`~transformers.Trainer` and DeepSpeed to function
correctly, therefore, to prevent conflicting definitions, which could lead to hard-to-detect errors, we chose to
configure those via the :class:`~transformers.Trainer` command line arguments.
Therefore, the following DeepSpeed configuration params shouldn't be used with the :class:`~transformers.Trainer`:
* ``train_batch_size``
* ``train_micro_batch_size_per_gpu``
* ``gradient_accumulation_steps``
as these will be automatically derived from the run time environment and the following 2 command line arguments:
.. code-block:: bash
--per_device_train_batch_size 8 --gradient_accumulation_steps 2
which are always required to be supplied.
Of course, you will need to adjust the values in this example to your situation.
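To make the derivation concrete, here is a rough Python sketch of how those three DeepSpeed values end up being
computed at run time (the variable names are illustrative only, not the actual :class:`~transformers.Trainer`
implementation):
.. code-block:: python
    # illustrative sketch - the real logic lives in the Trainer's DeepSpeed integration
    world_size = 2                      # number of processes/GPUs in the run
    per_device_train_batch_size = 8     # --per_device_train_batch_size
    gradient_accumulation_steps = 2     # --gradient_accumulation_steps
    ds_config = {}
    ds_config["train_micro_batch_size_per_gpu"] = per_device_train_batch_size
    ds_config["gradient_accumulation_steps"] = gradient_accumulation_steps
    # effective global batch size: 8 * 2 * 2 = 32
    ds_config["train_batch_size"] = per_device_train_batch_size * gradient_accumulation_steps * world_size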
ZeRO
=======================================================================================================================
The ``zero_optimization`` section of the configuration file is the most important part (`docs
<https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training>`__), since that is where you define
which ZeRO stages you want to enable and how to configure them.
.. code-block:: json
{
"zero_optimization": {
"stage": 2,
"allgather_partitions": true,
"allgather_bucket_size": 5e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 5e8,
"contiguous_gradients": true,
"cpu_offload": true
}
}
Notes:
- enabling ``cpu_offload`` should reduce GPU RAM usage (it requires ``"stage": 2``)
- ``"overlap_comm": true`` trades off increased GPU RAM usage to lower all-reduce latency. ``overlap_comm`` uses 4.5x
the ``allgather_bucket_size`` and ``reduce_bucket_size`` values. So if they are set to 5e8, this requires a 9GB
footprint (``5e8 x 2Bytes x 2 x 4.5``). Therefore, if you have a GPU with 8GB or less RAM, to avoid getting
OOM-errors you will need to reduce those parameters to about ``2e8``, which would require 3.6GB. You will want to do
the same on larger capacity GPU as well, if you're starting to hit OOM.
- when reducing these buffers you're trading communication speed to avail more GPU RAM. The smaller the buffer size,
the slower the communication, and the more GPU RAM will be available to other tasks. So if a bigger batch size is
important, getting a slightly slower training time could be a good trade.
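Here is a small sketch of that arithmetic (the 2-byte factor assumes fp16 elements; the 4.5 multiplier is the rule of
thumb quoted above):
.. code-block:: python
    # rough overlap_comm communication-buffer footprint estimate, assuming fp16 (2 bytes per element)
    def comm_buffer_footprint_gb(allgather_bucket_size, reduce_bucket_size, bytes_per_elem=2, factor=4.5):
        elems = allgather_bucket_size + reduce_bucket_size
        return elems * bytes_per_elem * factor / 1e9
    print(comm_buffer_footprint_gb(5e8, 5e8))  # ~9.0 GB
    print(comm_buffer_footprint_gb(2e8, 2e8))  # ~3.6 GB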
This section has to be configured exclusively via DeepSpeed configuration - the :class:`~transformers.Trainer` provides
no equivalent command line arguments.
Optimizer
=======================================================================================================================
DeepSpeed's main optimizers are Adam, AdamW, OneBitAdam, and Lamb. These have been thoroughly tested with ZeRO and are
thus recommended to be used. However, it can also import other optimizers from ``torch``. The full documentation is `here
<https://www.deepspeed.ai/docs/config-json/#optimizer-parameters>`__.
If you don't configure the ``optimizer`` entry in the configuration file, the :class:`~transformers.Trainer` will
automatically set it to ``AdamW`` and will use the supplied values or the defaults for the following command line
arguments: ``--learning_rate``, ``--adam_beta1``, ``--adam_beta2``, ``--adam_epsilon`` and ``--weight_decay``.
Here is an example of the pre-configured ``optimizer`` entry for AdamW:
.. code-block:: json
{
"optimizer": {
"type": "AdamW",
"params": {
"lr": 0.001,
"betas": [0.8, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
}
}
If you want to use another optimizer which is not listed above, you will have to add ``"zero_allow_untested_optimizer":
true`` to the top level configuration.
If you want to use one of the officially supported optimizers, configure them explicitly in the configuration file, and
make sure to adjust the values, e.g. if you use Adam you will want ``weight_decay`` around ``0.01``.
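As a sketch, an explicit entry for Adam could look like the following (the learning rate and other values here are
placeholders that you would tune for your own setup):
.. code-block:: json
    {
        "optimizer": {
            "type": "Adam",
            "params": {
                "lr": 1e-5,
                "betas": [0.9, 0.999],
                "eps": 1e-8,
                "weight_decay": 0.01
            }
        }
    }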
Scheduler
=======================================================================================================================
DeepSpeed supports LRRangeTest, OneCycle, WarmupLR and WarmupDecayLR LR schedulers. The full documentation is `here
<https://www.deepspeed.ai/docs/config-json/#scheduler-parameters>`__.
If you don't configure the ``scheduler`` entry in the configuration file, the :class:`~transformers.Trainer` will use
the value of ``--lr_scheduler_type`` to configure it. Currently the :class:`~transformers.Trainer` supports only 2 LR
schedulers that are also supported by DeepSpeed:
* ``WarmupLR`` via ``--lr_scheduler_type constant_with_warmup``
* ``WarmupDecayLR`` via ``--lr_scheduler_type linear``. This is also the default value for ``--lr_scheduler_type``,
  therefore, if you don't configure the scheduler, this is the scheduler that will get configured by default.
In either case, the values of ``--learning_rate`` and ``--warmup_steps`` will be used for the configuration.
In other words, if you don't use the configuration file to set the ``scheduler`` entry, provide either:
.. code-block:: bash
--lr_scheduler_type constant_with_warmup --learning_rate 3e-5 --warmup_steps 500
or
.. code-block:: bash
--lr_scheduler_type linear --learning_rate 3e-5 --warmup_steps 500
with the desired values. If you don't pass these arguments, reasonable default values will be used instead.
In the case of WarmupDecayLR ``total_num_steps`` gets set either via the ``--max_steps`` command line argument, or if
it is not provided, derived automatically at run time based on the environment and the size of the dataset and other
command line arguments.
Here is an example of the pre-configured ``scheduler`` entry for WarmupLR (``constant_with_warmup`` in the
:class:`~transformers.Trainer` API):
.. code-block:: json
{
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": 0,
"warmup_max_lr": 0.001,
"warmup_num_steps": 1000
}
}
}
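For comparison, here is a sketch of what a ``WarmupDecayLR`` entry could look like; the ``total_num_steps`` value is a
placeholder that you would normally let the :class:`~transformers.Trainer` derive for you as described above:
.. code-block:: json
    {
        "scheduler": {
            "type": "WarmupDecayLR",
            "params": {
                "total_num_steps": 10000,
                "warmup_min_lr": 0,
                "warmup_max_lr": 3e-5,
                "warmup_num_steps": 500
            }
        }
    }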
Automatic Mixed Precision
=======================================================================================================================
You can work with FP16 in one of the following ways:
1. PyTorch native amp, as documented `here <https://www.deepspeed.ai/docs/config-json/#fp16-training-options>`__.
2. NVIDIA's apex, as documented `here
<https://www.deepspeed.ai/docs/config-json/#automatic-mixed-precision-amp-training-options>`__.
If you want to use an equivalent of the PyTorch native amp, you can either configure the ``fp16`` entry in the
configuration file, or use the following command line arguments: ``--fp16 --fp16_backend amp``.
Here is an example of the ``fp16`` configuration:
.. code-block:: json
{
"fp16": {
"enabled": true,
"loss_scale": 0,
"loss_scale_window": 1000,
"hysteresis": 2,
"min_loss_scale": 1
    }
}
If you want to use NVIDIA's apex instead, you can either configure the ``amp`` entry in the configuration file, or
use the following command line arguments: ``--fp16 --fp16_backend apex --fp16_opt_level O1``.
Here is an example of the ``amp`` configuration:
.. code-block:: json
{
"amp": {
"enabled": true,
"opt_level": "O1"
}
}
Gradient Accumulation
=======================================================================================================================
While normally DeepSpeed gets gradient accumulation configured with:
.. code-block:: json
{
"gradient_accumulation_steps": 3,
}
in this case, to enable gradient accumulation, pass the command line ``--gradient_accumulation_steps`` argument as normal
and it will get injected into the DeepSpeed configuration.
If you try to add it directly to the configuration file, you will receive an error from the Trainer - this is because
this setting is needed by the Trainer too, and so this approach ensures that there is a single way of setting this
value and thus avoid potential subtle errors.
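For instance, reusing the launcher command from above, you would simply extend the command line (the value ``2`` here
is arbitrary):
.. code-block:: bash
    deepspeed --num_gpus=2 your_program.py <normal cl args> --deepspeed ds_config.json \
    --per_device_train_batch_size 8 --gradient_accumulation_steps 2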
Gradient Clipping
=======================================================================================================================
If you don't configure the ``gradient_clipping`` entry in the configuration file, the :class:`~transformers.Trainer`
will use the value of the ``--max_grad_norm`` command line argument to set it.
Here is an example of the ``gradient_clipping`` configuration:
.. code-block:: json
{
"gradient_clipping": 1.0,
}
Notes
=======================================================================================================================
* DeepSpeed works with the PyTorch :class:`~transformers.Trainer` but not TF :class:`~transformers.TFTrainer`.
* While DeepSpeed has a pip installable PyPI package, it is highly recommended that it gets installed from `source
<https://github.com/microsoft/deepspeed#installation>`__ to best match your hardware and also if you need to enable
certain features, like 1-bit Adam, which aren't available in the pypi distribution.
* You don't have to use the :class:`~transformers.Trainer` to use DeepSpeed with HuggingFace ``transformers`` - you can
  use any model with your own trainer, in which case you will have to adapt the latter according to `the DeepSpeed
  integration instructions <https://www.deepspeed.ai/getting-started/#writing-deepspeed-models>`__, as sketched below.
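A minimal sketch of that manual integration, assuming you already have a ``model``, a ``dataloader`` and a
``ds_config.json`` file (the exact keyword arguments accepted by ``deepspeed.initialize`` may differ between DeepSpeed
versions, so treat this as an outline rather than a drop-in recipe):
.. code-block:: python
    import deepspeed
    # model, dataloader and ds_config.json are assumed to exist already
    model_engine, optimizer, _, lr_scheduler = deepspeed.initialize(
        model=model,
        model_parameters=model.parameters(),
        config="ds_config.json",
    )
    for batch in dataloader:
        loss = model_engine(**batch).loss
        model_engine.backward(loss)
        model_engine.step()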
Main DeepSpeed Resources
=======================================================================================================================
- `Project's github <https://github.com/microsoft/deepspeed>`__
- `Usage docs <https://www.deepspeed.ai/getting-started/>`__
- `API docs <https://deepspeed.readthedocs.io/en/latest/index.html>`__
- `Blog posts <https://www.microsoft.com/en-us/research/search/?q=deepspeed>`__
Papers:
- `ZeRO: Memory Optimizations Toward Training Trillion Parameter Models <https://arxiv.org/abs/1910.02054>`__
- `ZeRO-Offload: Democratizing Billion-Scale Model Training <https://arxiv.org/abs/2101.06840>`__
Finally, please remember that the HuggingFace :class:`~transformers.Trainer` only integrates DeepSpeed, therefore if you
have any problems or questions with regards to DeepSpeed usage, please, file an issue with `DeepSpeed GitHub
<https://github.com/microsoft/DeepSpeed/issues>`__.
|
AdaMix/docs/source/main_classes/trainer.rst/0
|
{
"file_path": "AdaMix/docs/source/main_classes/trainer.rst",
"repo_id": "AdaMix",
"token_count": 12134
}
| 26 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
DeBERTa-v2
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention
<https://arxiv.org/abs/2006.03654>`__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It is based on Google's
BERT model released in 2018 and Facebook's RoBERTa model released in 2019.
It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in
RoBERTa.
The abstract from the paper is the following:
*Recent progress in pre-trained neural language models has significantly improved the performance of many natural
language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with
disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the
disentangled attention mechanism, where each word is represented using two vectors that encode its content and
position, respectively, and the attention weights among words are computed using disentangled matrices on their
contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to
predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency
of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of
the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9%
(90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and
pre-trained models will be made publicly available at https://github.com/microsoft/DeBERTa.*
The following information is visible directly on the `original implementation repository
<https://github.com/microsoft/DeBERTa>`__. DeBERTa v2 is the second version of the DeBERTa model. It includes the 1.5B
model used for the SuperGLUE single-model submission, which achieved 89.9 versus a human baseline of 89.8. You can find
more details about this submission in the authors'
`blog <https://www.microsoft.com/en-us/research/blog/microsoft-deberta-surpasses-human-performance-on-the-superglue-benchmark/>`__.
New in v2:
- **Vocabulary** In v2 the tokenizer is changed to use a new vocabulary of size 128K built from the training data.
  Instead of a GPT2-based tokenizer, the tokenizer is now a
  `sentencepiece-based <https://github.com/google/sentencepiece>`__ tokenizer.
- **nGiE (nGram Induced Input Encoding)** The DeBERTa-v2 model uses an additional convolution layer alongside the first
  transformer layer to better learn the local dependency of input tokens.
- **Sharing position projection matrix with content projection matrix in attention layer** Based on previous
  experiments, this can save parameters without affecting the performance.
- **Apply bucket to encode relative positions** The DeBERTa-v2 model uses a log bucket to encode relative positions,
  similar to T5.
- **900M model & 1.5B model** Two additional model sizes are available: 900M and 1.5B, which significantly improve the
  performance of downstream tasks.
The original code can be found `here <https://github.com/microsoft/DeBERTa>`__.
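As a quick usage sketch (the checkpoint name ``microsoft/deberta-v2-xlarge`` is one of the published v2 checkpoints;
substitute whichever one you need):
.. code-block:: python
    import torch
    from transformers import DebertaV2Tokenizer, DebertaV2Model
    tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
    inputs = tokenizer("DeBERTa-v2 uses disentangled attention.", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)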
DebertaV2Config
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.DebertaV2Config
:members:
DebertaV2Tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.DebertaV2Tokenizer
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
create_token_type_ids_from_sequences, save_vocabulary
DebertaV2Model
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.DebertaV2Model
:members: forward
DebertaV2PreTrainedModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.DebertaV2PreTrainedModel
:members: forward
DebertaV2ForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.DebertaV2ForMaskedLM
:members: forward
DebertaV2ForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.DebertaV2ForSequenceClassification
:members: forward
DebertaV2ForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.DebertaV2ForTokenClassification
:members: forward
DebertaV2ForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.DebertaV2ForQuestionAnswering
:members: forward
|
AdaMix/docs/source/model_doc/deberta_v2.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/deberta_v2.rst",
"repo_id": "AdaMix",
"token_count": 1444
}
| 27 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
LXMERT
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The LXMERT model was proposed in `LXMERT: Learning Cross-Modality Encoder Representations from Transformers
<https://arxiv.org/abs/1908.07490>`__ by Hao Tan & Mohit Bansal. It is a series of bidirectional transformer encoders
(one for the vision modality, one for the language modality, and then one to fuse both modalities) pretrained using a
combination of masked language modeling, visual-language text alignment, ROI-feature regression, masked
visual-attribute modeling, masked visual-object modeling, and visual-question answering objectives. The pretraining
consists of multiple multi-modal datasets: MSCOCO, Visual-Genome + Visual-Genome Question Answering, VQA 2.0, and GQA.
The abstract from the paper is the following:
*Vision-and-language reasoning requires an understanding of visual concepts, language semantics, and, most importantly,
the alignment and relationships between these two modalities. We thus propose the LXMERT (Learning Cross-Modality
Encoder Representations from Transformers) framework to learn these vision-and-language connections. In LXMERT, we
build a large-scale Transformer model that consists of three encoders: an object relationship encoder, a language
encoder, and a cross-modality encoder. Next, to endow our model with the capability of connecting vision and language
semantics, we pre-train the model with large amounts of image-and-sentence pairs, via five diverse representative
pretraining tasks: masked language modeling, masked object prediction (feature regression and label classification),
cross-modality matching, and image question answering. These tasks help in learning both intra-modality and
cross-modality relationships. After fine-tuning from our pretrained parameters, our model achieves the state-of-the-art
results on two visual question answering datasets (i.e., VQA and GQA). We also show the generalizability of our
pretrained cross-modality model by adapting it to a challenging visual-reasoning task, NLVR, and improve the previous
best result by 22% absolute (54% to 76%). Lastly, we demonstrate detailed ablation studies to prove that both our novel
model components and pretraining strategies significantly contribute to our strong results; and also present several
attention visualizations for the different encoders*
Tips:
- It is not necessary to use bounding boxes in the visual feature embeddings; any kind of visual-spatial features
  will work.
- Both the language hidden states and the visual hidden states that LXMERT outputs are passed through the
cross-modality layer, so they contain information from both modalities. To access a modality that only attends to
itself, select the vision/language hidden states from the first input in the tuple.
- The bidirectional cross-modality encoder attention only returns attention values when the language modality is used
as the input and the vision modality is used as the context vector. Further, while the cross-modality encoder
contains self-attention for each respective modality and cross-attention, only the cross attention is returned and
both self attention outputs are disregarded.
The original code can be found `here <https://github.com/airsplay/lxmert>`__.
LxmertConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LxmertConfig
:members:
LxmertTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LxmertTokenizer
:members:
LxmertTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LxmertTokenizerFast
:members:
Lxmert specific outputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.models.lxmert.modeling_lxmert.LxmertModelOutput
:members:
.. autoclass:: transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput
:members:
.. autoclass:: transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput
:members:
.. autoclass:: transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput
:members:
.. autoclass:: transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput
:members:
LxmertModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LxmertModel
:members: forward
LxmertForPreTraining
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LxmertForPreTraining
:members: forward
LxmertForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LxmertForQuestionAnswering
:members: forward
TFLxmertModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFLxmertModel
:members: call
TFLxmertForPreTraining
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFLxmertForPreTraining
:members: call
|
AdaMix/docs/source/model_doc/lxmert.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/lxmert.rst",
"repo_id": "AdaMix",
"token_count": 1506
}
| 28 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
T5
-----------------------------------------------------------------------------------------------------------------------
**DISCLAIMER:** This model is still a work in progress; if you see something strange, file a `GitHub Issue
<https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`__.
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The T5 model was presented in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer
<https://arxiv.org/pdf/1910.10683.pdf>`_ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.
The abstract from the paper is the following:
*Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream
task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning
has given rise to a diversity of approaches, methodology, and practice. In this paper, we explore the landscape of
transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a
text-to-text format. Our systematic study compares pretraining objectives, architectures, unlabeled datasets, transfer
approaches, and other factors on dozens of language understanding tasks. By combining the insights from our exploration
with scale and our new "Colossal Clean Crawled Corpus", we achieve state-of-the-art results on many benchmarks covering
summarization, question answering, text classification, and more. To facilitate future work on transfer learning for
NLP, we release our dataset, pre-trained models, and code.*
Tips:
- T5 is an encoder-decoder model pre-trained on a multi-task mixture of unsupervised and supervised tasks and for which
each task is converted into a text-to-text format. T5 works well on a variety of tasks out-of-the-box by prepending a
different prefix to the input corresponding to each task, e.g., for translation: *translate English to German: ...*,
for summarization: *summarize: ...*.
  For more information about which prefix to use, it is easiest to look into Appendix D of the `paper
  <https://arxiv.org/pdf/1910.10683.pdf>`__.
- For sequence-to-sequence generation, it is recommended to use :obj:`T5ForConditionalGeneration.generate()`. This
  method takes care of feeding the encoded input via cross-attention layers to the decoder and auto-regressively
  generates the decoder output.
- T5 uses relative scalar embeddings. Encoder input padding can be done on the left and on the right.
The original code can be found `here <https://github.com/google-research/text-to-text-transfer-transformer>`__.
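As a quick sketch of the generation path mentioned in the tips above (using the public ``t5-small`` checkpoint; the
prefix and input text are arbitrary examples):
.. code-block:: python
    from transformers import T5Tokenizer, T5ForConditionalGeneration
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    model = T5ForConditionalGeneration.from_pretrained("t5-small")
    input_ids = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt").input_ids
    generated_ids = model.generate(input_ids)
    print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))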
Training
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
T5 is an encoder-decoder model and converts all NLP problems into a text-to-text format. It is trained using teacher
forcing. This means that for training we always need an input sequence and a target sequence. The input sequence is fed
to the model using :obj:`input_ids`. The target sequence is shifted to the right, i.e., prepended by a start-sequence
token and fed to the decoder using the :obj:`decoder_input_ids`. In teacher-forcing style, the target sequence is then
appended with the EOS token and corresponds to the :obj:`labels`. The PAD token is used here as the start-sequence
token. T5 can be trained / fine-tuned both in a supervised and an unsupervised fashion.
- Unsupervised denoising training
In this setup spans of the input sequence are masked by so-called sentinel tokens (*a.k.a* unique mask tokens) and
the output sequence is formed as a concatenation of the same sentinel tokens and the *real* masked tokens. Each
sentinel token represents a unique mask token for this sentence and should start with :obj:`<extra_id_0>`,
:obj:`<extra_id_1>`, ... up to :obj:`<extra_id_99>`. As a default, 100 sentinel tokens are available in
:class:`~transformers.T5Tokenizer`.
For instance, the sentence "The cute dog walks in the park" with the masks put on "cute dog" and "the" should be
processed as follows:
.. code-block::
input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids
labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2>', return_tensors='pt').input_ids
# the forward function automatically creates the correct decoder_input_ids
loss = model(input_ids=input_ids, labels=labels).loss
- Supervised training
In this setup the input sequence and output sequence are standard sequence-to-sequence input output mapping. In
translation, for instance with the input sequence "The house is wonderful." and output sequence "Das Haus ist
wunderbar.", the sentences should be processed as follows:
.. code-block::
input_ids = tokenizer('translate English to German: The house is wonderful.', return_tensors='pt').input_ids
labels = tokenizer('Das Haus ist wunderbar.', return_tensors='pt').input_ids
# the forward function automatically creates the correct decoder_input_ids
loss = model(input_ids=input_ids, labels=labels).loss
T5Config
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.T5Config
:members:
T5Tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.T5Tokenizer
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
create_token_type_ids_from_sequences, save_vocabulary
T5TokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.T5TokenizerFast
:members:
T5Model
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.T5Model
:members: forward, parallelize, deparallelize
T5ForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.T5ForConditionalGeneration
:members: forward, parallelize, deparallelize
T5EncoderModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.T5EncoderModel
:members: forward, parallelize, deparallelize
TFT5Model
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFT5Model
:members: call
TFT5ForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFT5ForConditionalGeneration
:members: call
TFT5EncoderModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFT5EncoderModel
:members: call
|
AdaMix/docs/source/model_doc/t5.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/t5.rst",
"repo_id": "AdaMix",
"token_count": 1963
}
| 29 |
#!/usr/bin/env bash
if ! [ -f ./dev.txt ]; then
echo "Download dev dataset...."
curl -L -o ./dev.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-dev.conllu'
fi
if ! [ -f ./test.txt ]; then
echo "Download test dataset...."
curl -L -o ./test.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-test.conllu'
fi
if ! [ -f ./train.txt ]; then
echo "Download train dataset...."
curl -L -o ./train.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-train.conllu'
fi
export MAX_LENGTH=200
export BERT_MODEL=bert-base-uncased
export OUTPUT_DIR=postagger-model
export BATCH_SIZE=32
export NUM_EPOCHS=3
export SAVE_STEPS=750
export SEED=1
# Add parent directory to python path to access lightning_base.py
export PYTHONPATH="../":"${PYTHONPATH}"
python3 run_ner.py --data_dir ./ \
--task_type POS \
--model_name_or_path $BERT_MODEL \
--output_dir $OUTPUT_DIR \
--max_seq_length $MAX_LENGTH \
--num_train_epochs $NUM_EPOCHS \
--train_batch_size $BATCH_SIZE \
--seed $SEED \
--gpus 1 \
--do_train \
--do_predict
|
AdaMix/examples/legacy/pytorch-lightning/run_pos.sh/0
|
{
"file_path": "AdaMix/examples/legacy/pytorch-lightning/run_pos.sh",
"repo_id": "AdaMix",
"token_count": 440
}
| 30 |
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
"""Write first n lines of each file f in src_dir to dest_dir/f """
src_dir = Path(src_dir)
dest_dir = Path(dest_dir)
dest_dir.mkdir(exist_ok=True)
for path in src_dir.iterdir():
new = [x.rstrip() for x in list(path.open().readlines())][:n]
dest_path = dest_dir.joinpath(path.name)
print(dest_path)
dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
|
AdaMix/examples/legacy/seq2seq/minify_dataset.py/0
|
{
"file_path": "AdaMix/examples/legacy/seq2seq/minify_dataset.py",
"repo_id": "AdaMix",
"token_count": 398
}
| 31 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from filelock import FileLock
try:
import nltk
NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
"""This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
re.sub("<n>", "", x) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(x))
|
AdaMix/examples/legacy/seq2seq/sentence_splitter.py/0
|
{
"file_path": "AdaMix/examples/legacy/seq2seq/sentence_splitter.py",
"repo_id": "AdaMix",
"token_count": 403
}
| 32 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python finetune_trainer.py \
--model_name_or_path=facebook/mbart-large-cc25 \
--data_dir $ENRO_DIR \
--output_dir mbart_cc25_enro --overwrite_output_dir \
--learning_rate=3e-5 \
--warmup_steps 500 \
--fp16 \
--label_smoothing 0.1 \
--adam_eps 1e-06 \
--src_lang en_XX --tgt_lang ro_RO \
--freeze_embeds \
--per_device_train_batch_size=4 --per_device_eval_batch_size=4 \
--max_source_length 128 --max_target_length 128 --val_max_target_length 128 --test_max_target_length 128\
--sortish_sampler \
--num_train_epochs 6 \
--save_steps 25000 --eval_steps 25000 --logging_steps 1000 \
--do_train --do_eval --do_predict \
--evaluation_strategy steps \
--predict_with_generate --logging_first_step \
--task translation \
"$@"
|
AdaMix/examples/legacy/seq2seq/train_mbart_cc25_enro.sh/0
|
{
"file_path": "AdaMix/examples/legacy/seq2seq/train_mbart_cc25_enro.sh",
"repo_id": "AdaMix",
"token_count": 501
}
| 33 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Multiple choice fine-tuning: utilities to work with multiple choice tasks of reading comprehension """
import csv
import glob
import json
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional
import tqdm
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
"""
A single training/test example for multiple choice
Args:
example_id: Unique id for the example.
question: string. The untokenized text of the second sequence (question).
contexts: list of str. The untokenized text of the first sequence (context of corresponding question).
endings: list of str. multiple choice's options. Its length must be equal to contexts' length.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
example_id: str
question: str
contexts: List[str]
endings: List[str]
label: Optional[str]
@dataclass(frozen=True)
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
"""
example_id: str
input_ids: List[List[int]]
attention_mask: Optional[List[List[int]]]
token_type_ids: Optional[List[List[int]]]
label: Optional[int]
class Split(Enum):
train = "train"
dev = "dev"
test = "test"
if is_torch_available():
import torch
from torch.utils.data.dataset import Dataset
class MultipleChoiceDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
task: str,
max_seq_length: Optional[int] = None,
overwrite_cache=False,
mode: Split = Split.train,
):
processor = processors[task]()
cached_features_file = os.path.join(
data_dir,
"cached_{}_{}_{}_{}".format(
mode.value,
tokenizer.__class__.__name__,
str(max_seq_length),
task,
),
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}")
self.features = torch.load(cached_features_file)
else:
logger.info(f"Creating features from dataset file at {data_dir}")
label_list = processor.get_labels()
if mode == Split.dev:
examples = processor.get_dev_examples(data_dir)
elif mode == Split.test:
examples = processor.get_test_examples(data_dir)
else:
examples = processor.get_train_examples(data_dir)
logger.info("Training examples: %s", len(examples))
self.features = convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(self.features, cached_features_file)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFMultipleChoiceDataset:
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
task: str,
max_seq_length: Optional[int] = 128,
overwrite_cache=False,
mode: Split = Split.train,
):
processor = processors[task]()
logger.info(f"Creating features from dataset file at {data_dir}")
label_list = processor.get_labels()
if mode == Split.dev:
examples = processor.get_dev_examples(data_dir)
elif mode == Split.test:
examples = processor.get_test_examples(data_dir)
else:
examples = processor.get_train_examples(data_dir)
logger.info("Training examples: %s", len(examples))
self.features = convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
)
def gen():
for (ex_index, ex) in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
self.dataset = tf.data.Dataset.from_generator(
gen,
(
{
"example_id": tf.int32,
"input_ids": tf.int32,
"attention_mask": tf.int32,
"token_type_ids": tf.int32,
},
tf.int64,
),
(
{
"example_id": tf.TensorShape([]),
"input_ids": tf.TensorShape([None, None]),
"attention_mask": tf.TensorShape([None, None]),
"token_type_ids": tf.TensorShape([None, None]),
},
tf.TensorShape([]),
),
)
def get_dataset(self):
self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
return self.dataset
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
class DataProcessor:
"""Base class for data converters for multiple choice data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the test set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
class RaceProcessor(DataProcessor):
"""Processor for the RACE data set."""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} train".format(data_dir))
high = os.path.join(data_dir, "train/high")
middle = os.path.join(data_dir, "train/middle")
high = self._read_txt(high)
middle = self._read_txt(middle)
return self._create_examples(high + middle, "train")
def get_dev_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} dev".format(data_dir))
high = os.path.join(data_dir, "dev/high")
middle = os.path.join(data_dir, "dev/middle")
high = self._read_txt(high)
middle = self._read_txt(middle)
return self._create_examples(high + middle, "dev")
def get_test_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} test".format(data_dir))
high = os.path.join(data_dir, "test/high")
middle = os.path.join(data_dir, "test/middle")
high = self._read_txt(high)
middle = self._read_txt(middle)
return self._create_examples(high + middle, "test")
def get_labels(self):
"""See base class."""
return ["0", "1", "2", "3"]
def _read_txt(self, input_dir):
lines = []
files = glob.glob(input_dir + "/*txt")
for file in tqdm.tqdm(files, desc="read files"):
with open(file, "r", encoding="utf-8") as fin:
data_raw = json.load(fin)
data_raw["race_id"] = file
lines.append(data_raw)
return lines
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (_, data_raw) in enumerate(lines):
race_id = "%s-%s" % (set_type, data_raw["race_id"])
article = data_raw["article"]
for i in range(len(data_raw["answers"])):
truth = str(ord(data_raw["answers"][i]) - ord("A"))
question = data_raw["questions"][i]
options = data_raw["options"][i]
examples.append(
InputExample(
example_id=race_id,
question=question,
contexts=[article, article, article, article], # this is not efficient but convenient
endings=[options[0], options[1], options[2], options[3]],
label=truth,
)
)
return examples
class SynonymProcessor(DataProcessor):
"""Processor for the Synonym data set."""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} train".format(data_dir))
return self._create_examples(self._read_csv(os.path.join(data_dir, "mctrain.csv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} dev".format(data_dir))
return self._create_examples(self._read_csv(os.path.join(data_dir, "mchp.csv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} dev".format(data_dir))
return self._create_examples(self._read_csv(os.path.join(data_dir, "mctest.csv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1", "2", "3", "4"]
def _read_csv(self, input_file):
with open(input_file, "r", encoding="utf-8") as f:
return list(csv.reader(f))
def _create_examples(self, lines: List[List[str]], type: str):
"""Creates examples for the training and dev sets."""
examples = [
InputExample(
example_id=line[0],
question="", # in the swag dataset, the
# common beginning of each
# choice is stored in "sent2".
contexts=[line[1], line[1], line[1], line[1], line[1]],
endings=[line[2], line[3], line[4], line[5], line[6]],
label=line[7],
)
            for line in lines
]
return examples
class SwagProcessor(DataProcessor):
"""Processor for the SWAG data set."""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} train".format(data_dir))
return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} dev".format(data_dir))
return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} dev".format(data_dir))
raise ValueError(
"For swag testing, the input file does not contain a label column. It can not be tested in current code"
"setting!"
)
return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1", "2", "3"]
def _read_csv(self, input_file):
with open(input_file, "r", encoding="utf-8") as f:
return list(csv.reader(f))
def _create_examples(self, lines: List[List[str]], type: str):
"""Creates examples for the training and dev sets."""
if type == "train" and lines[0][-1] != "label":
raise ValueError("For training, the input file must contain a label column.")
examples = [
InputExample(
example_id=line[2],
question=line[5], # in the swag dataset, the
# common beginning of each
# choice is stored in "sent2".
contexts=[line[4], line[4], line[4], line[4]],
endings=[line[7], line[8], line[9], line[10]],
label=line[11],
)
for line in lines[1:] # we skip the line with the column names
]
return examples
class ArcProcessor(DataProcessor):
"""Processor for the ARC data set (request from allennlp)."""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} train".format(data_dir))
return self._create_examples(self._read_json(os.path.join(data_dir, "train.jsonl")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {} dev".format(data_dir))
return self._create_examples(self._read_json(os.path.join(data_dir, "dev.jsonl")), "dev")
def get_test_examples(self, data_dir):
logger.info("LOOKING AT {} test".format(data_dir))
return self._create_examples(self._read_json(os.path.join(data_dir, "test.jsonl")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1", "2", "3"]
def _read_json(self, input_file):
with open(input_file, "r", encoding="utf-8") as fin:
lines = fin.readlines()
return lines
def _create_examples(self, lines, type):
"""Creates examples for the training and dev sets."""
# There are two types of labels. They should be normalized
def normalize(truth):
if truth in "ABCD":
return ord(truth) - ord("A")
elif truth in "1234":
return int(truth) - 1
else:
logger.info("truth ERROR! %s", str(truth))
return None
examples = []
three_choice = 0
four_choice = 0
five_choice = 0
other_choices = 0
# we deleted example which has more than or less than four choices
for line in tqdm.tqdm(lines, desc="read arc data"):
data_raw = json.loads(line.strip("\n"))
if len(data_raw["question"]["choices"]) == 3:
three_choice += 1
continue
elif len(data_raw["question"]["choices"]) == 5:
five_choice += 1
continue
elif len(data_raw["question"]["choices"]) != 4:
other_choices += 1
continue
four_choice += 1
truth = str(normalize(data_raw["answerKey"]))
assert truth != "None"
question_choices = data_raw["question"]
question = question_choices["stem"]
id = data_raw["id"]
options = question_choices["choices"]
if len(options) == 4:
examples.append(
InputExample(
example_id=id,
question=question,
contexts=[
options[0]["para"].replace("_", ""),
options[1]["para"].replace("_", ""),
options[2]["para"].replace("_", ""),
options[3]["para"].replace("_", ""),
],
endings=[options[0]["text"], options[1]["text"], options[2]["text"], options[3]["text"]],
label=truth,
)
)
if type == "train":
assert len(examples) > 1
assert examples[0].label is not None
logger.info("len examples: %s}", str(len(examples)))
logger.info("Three choices: %s", str(three_choice))
logger.info("Five choices: %s", str(five_choice))
logger.info("Other choices: %s", str(other_choices))
logger.info("four choices: %s", str(four_choice))
return examples
def convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_length: int,
tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
"""
Loads a data file into a list of `InputFeatures`
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
choices_inputs = []
for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)):
text_a = context
if example.question.find("_") != -1:
# this is for cloze question
text_b = example.question.replace("_", ending)
else:
text_b = example.question + " " + ending
inputs = tokenizer(
text_a,
text_b,
add_special_tokens=True,
max_length=max_length,
padding="max_length",
truncation=True,
return_overflowing_tokens=True,
)
if "num_truncated_tokens" in inputs and inputs["num_truncated_tokens"] > 0:
logger.info(
"Attention! you are cropping tokens (swag task is ok). "
"If you are training ARC and RACE and you are poping question + options,"
"you need to try to use a bigger max seq length!"
)
choices_inputs.append(inputs)
label = label_map[example.label]
input_ids = [x["input_ids"] for x in choices_inputs]
attention_mask = (
[x["attention_mask"] for x in choices_inputs] if "attention_mask" in choices_inputs[0] else None
)
token_type_ids = (
[x["token_type_ids"] for x in choices_inputs] if "token_type_ids" in choices_inputs[0] else None
)
features.append(
InputFeatures(
example_id=example.example_id,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label,
)
)
for f in features[:2]:
logger.info("*** Example ***")
logger.info("feature: %s" % f)
return features
processors = {"race": RaceProcessor, "swag": SwagProcessor, "arc": ArcProcessor, "syn": SynonymProcessor}
MULTIPLE_CHOICE_TASKS_NUM_LABELS = {"race": 4, "swag": 4, "arc": 4, "syn": 5}
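# --- Usage sketch (illustrative, not part of the original module) ---------------
# A minimal example of wiring a processor to `convert_examples_to_features`; the
# data directory, tokenizer checkpoint and max_length below are placeholder
# assumptions, not values required by this file.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     processor = SwagProcessor()
#     examples = processor.get_dev_examples("./swag_data")  # expects val.csv inside
#     features = convert_examples_to_features(
#         examples, processor.get_labels(), max_length=128, tokenizer=tokenizer
#     )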
|
AdaMix/examples/multiple-choice/utils_multiple_choice.py/0
|
{
"file_path": "AdaMix/examples/multiple-choice/utils_multiple_choice.py",
"repo_id": "AdaMix",
"token_count": 10039
}
| 34 |
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and Microsoft Corporation.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model with Patience-based Early Exit. """
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
hidden_states = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
"""
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder, the model needs to be initialized with the
:obj:`is_decoder` argument of the configuration set to :obj:`True`; an
:obj:`encoder_hidden_states` is expected as an input to the forward pass.
.. _`Attention is all you need`:
https://arxiv.org/abs/1706.03762
"""
def __init__(self, config):
super().__init__(config)
self.encoder = BertEncoderWithPabee(config)
self.init_weights()
self.patience = 0
self.inference_instances_num = 0
self.inference_layers_num = 0
self.regression_threshold = 0
def set_regression_threshold(self, threshold):
self.regression_threshold = threshold
def set_patience(self, patience):
self.patience = patience
def reset_stats(self):
self.inference_instances_num = 0
self.inference_layers_num = 0
def log_stats(self):
avg_inf_layers = self.inference_layers_num / self.inference_instances_num
message = f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up = {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
print(message)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_dropout=None,
output_layers=None,
regression=False,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
                This output is usually *not* a good summary
                of the semantic content of the input; you're often better off averaging or pooling
                the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = embedding_output
if self.training:
res = []
for i in range(self.config.num_hidden_layers):
encoder_outputs = self.encoder.adaptive_forward(
encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
)
pooled_output = self.pooler(encoder_outputs)
logits = output_layers[i](output_dropout(pooled_output))
res.append(logits)
elif self.patience == 0: # Use all layers for inference
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
pooled_output = self.pooler(encoder_outputs[0])
res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
else:
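            # Patience-based early exit: run the encoder one layer at a time and
            # classify after each layer; stop as soon as `patience` consecutive
            # layers agree (same argmax, or within `regression_threshold` for
            # regression) instead of always running all layers.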
patient_counter = 0
patient_result = None
calculated_layer_num = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
encoder_outputs = self.encoder.adaptive_forward(
encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
)
pooled_output = self.pooler(encoder_outputs)
logits = output_layers[i](pooled_output)
if regression:
labels = logits.detach()
if patient_result is not None:
patient_labels = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
patient_counter = 0
else:
labels = logits.detach().argmax(dim=1)
if patient_result is not None:
patient_labels = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
patient_counter += 1
else:
patient_counter = 0
patient_result = logits
if patient_counter == self.patience:
break
res = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModelWithPabee(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifiers = nn.ModuleList(
[nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForSequenceClassification
from pabee import BertForSequenceClassificationWithPabee
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassificationWithPabee.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
logits = self.bert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_dropout=self.dropout,
output_layers=self.classifiers,
regression=self.num_labels == 1,
)
outputs = (logits[-1],)
if labels is not None:
total_loss = None
total_weights = 0
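            # Every intermediate classifier contributes to the loss, weighted by its
            # depth: the exit after layer i gets weight i + 1, so deeper exits
            # dominate the objective while shallow exits are still trained.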
for ix, logits_item in enumerate(logits):
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits_item.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
if total_loss is None:
total_loss = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
outputs = (total_loss / total_weights,) + outputs
return outputs
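# --- Inference-time usage sketch (illustrative, not part of the original file) ---
# The checkpoint name, patience value and input sentence are placeholder
# assumptions; `set_patience`, `reset_stats` and `log_stats` are the helpers
# defined on BertModelWithPabee above.
#
#     import torch
#     from transformers import BertTokenizer
#
#     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#     model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
#     model.eval()
#     model.bert.set_patience(3)   # exit once 3 consecutive layers agree
#     model.bert.reset_stats()
#     input_ids = torch.tensor(
#         tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)
#     ).unsqueeze(0)
#     with torch.no_grad():
#         logits = model(input_ids=input_ids)[0]
#     model.bert.log_stats()       # prints the average number of layers used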
|
AdaMix/examples/research_projects/bert-loses-patience/pabee/modeling_pabee_bert.py/0
|
{
"file_path": "AdaMix/examples/research_projects/bert-loses-patience/pabee/modeling_pabee_bert.py",
"repo_id": "AdaMix",
"token_count": 6696
}
| 35 |
# DeeBERT: Early Exiting for *BERT
This is the code base for the paper [DeeBERT: Dynamic Early Exiting for Accelerating BERT Inference](https://www.aclweb.org/anthology/2020.acl-main.204/), modified from its [original code base](https://github.com/castorini/deebert).
The original code base also has information for downloading sample models that we have trained in advance.
## Usage
There are three scripts in the folder which can be run directly.
In each script, there are several things to modify before running:
* `PATH_TO_DATA`: path to the GLUE dataset.
* `--output_dir`: path for saving fine-tuned models. Default: `./saved_models`.
* `--plot_data_dir`: path for saving evaluation results. Default: `./results`. Results are printed to stdout and also saved to `npy` files in this directory to facilitate plotting figures and further analyses.
* `MODEL_TYPE`: bert or roberta
* `MODEL_SIZE`: base or large
* `DATASET`: SST-2, MRPC, RTE, QNLI, QQP, or MNLI
#### train_deebert.sh
This is for fine-tuning DeeBERT models.
#### eval_deebert.sh
This is for evaluating each exit layer for fine-tuned DeeBERT models.
#### entropy_eval.sh
This is for evaluating fine-tuned DeeBERT models, given a number of different early exit entropy thresholds.
## Citation
Please cite our paper if you find the resource useful:
```
@inproceedings{xin-etal-2020-deebert,
title = "{D}ee{BERT}: Dynamic Early Exiting for Accelerating {BERT} Inference",
author = "Xin, Ji and
Tang, Raphael and
Lee, Jaejun and
Yu, Yaoliang and
Lin, Jimmy",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.acl-main.204",
pages = "2246--2251",
}
```
|
AdaMix/examples/research_projects/deebert/README.md/0
|
{
"file_path": "AdaMix/examples/research_projects/deebert/README.md",
"repo_id": "AdaMix",
"token_count": 618
}
| 36 |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before distillation.
"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
)
parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
args = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
if args.tokenizer_type == "bert":
tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
bos = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
sep = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
bos = tokenizer.special_tokens_map["cls_token"] # `<s>`
sep = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
bos = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
sep = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}")
with open(args.file_path, "r", encoding="utf8") as fp:
data = fp.readlines()
logger.info("Start encoding")
logger.info(f"{len(data)} examples to process.")
rslt = []
iter = 0
interval = 10000
start = time.time()
for text in data:
text = f"{bos} {text.strip()} {sep}"
token_ids = tokenizer.encode(text, add_special_tokens=False)
rslt.append(token_ids)
iter += 1
if iter % interval == 0:
end = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
start = time.time()
logger.info("Finished binarization")
logger.info(f"{len(data)} examples processed.")
dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
vocab_size = tokenizer.vocab_size
if vocab_size < (1 << 16):
rslt_ = [np.uint16(d) for d in rslt]
else:
rslt_ = [np.int32(d) for d in rslt]
random.shuffle(rslt_)
logger.info(f"Dump to {dp_file}")
with open(dp_file, "wb") as handle:
pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
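# Example invocation (paths are placeholders; the flags are the ones defined in the
# argument parser above):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text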
|
AdaMix/examples/research_projects/distillation/scripts/binarized_data.py/0
|
{
"file_path": "AdaMix/examples/research_projects/distillation/scripts/binarized_data.py",
"repo_id": "AdaMix",
"token_count": 1430
}
| 37 |
<jupyter_start><jupyter_code># %pip install -r requirements.txt
from IPython.display import clear_output, Image, display
import PIL.Image
import io
import json
import torch
import numpy as np
from processing_image import Preprocess
from visualizing_image import SingleImageViz
from modeling_frcnn import GeneralizedRCNN
from utils import Config
import utils
from transformers import LxmertForQuestionAnswering, LxmertTokenizer
import wget
import pickle
import os
# URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg",
URL = "https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg"
OBJ_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt"
ATTR_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt"
GQA_URL = "https://raw.githubusercontent.com/airsplay/lxmert/master/data/gqa/trainval_label2ans.json"
VQA_URL = "https://raw.githubusercontent.com/airsplay/lxmert/master/data/vqa/trainval_label2ans.json"
# for visualizing output
def showarray(a, fmt='jpeg'):
a = np.uint8(np.clip(a, 0, 255))
f = io.BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
# load object, attribute, and answer labels
objids = utils.get_data(OBJ_URL)
attrids = utils.get_data(ATTR_URL)
gqa_answers = utils.get_data(GQA_URL)
vqa_answers = utils.get_data(VQA_URL)
# load models and model components
frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg)
image_preprocess = Preprocess(frcnn_cfg)
lxmert_tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
lxmert_gqa = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-gqa-uncased")
lxmert_vqa = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-vqa-uncased")
#image viz
frcnn_visualizer = SingleImageViz(URL, id2obj=objids, id2attr=attrids)
# run frcnn
images, sizes, scales_yx = image_preprocess(URL)
output_dict = frcnn(
images,
sizes,
scales_yx=scales_yx,
padding="max_detections",
max_detections=frcnn_cfg.max_detections,
return_tensors="pt"
)
# add boxes and labels to the image
frcnn_visualizer.draw_boxes(
output_dict.get("boxes"),
output_dict.pop("obj_ids"),
output_dict.pop("obj_probs"),
output_dict.pop("attr_ids"),
output_dict.pop("attr_probs"),
)
showarray(frcnn_visualizer._get_buffer())
test_questions_for_url1 = [
"Where is this scene?",
"what is the man riding?",
"What is the man wearing?",
"What is the color of the horse?"
]
test_questions_for_url2 = [
"Where is the cat?",
"What is near the disk?",
"What is the color of the table?",
"What is the color of the cat?",
"What is the shape of the monitor?",
]
#Very important that the boxes are normalized
normalized_boxes = output_dict.get("normalized_boxes")
features = output_dict.get("roi_features")
for test_question in test_questions_for_url2:
# run lxmert
test_question = [test_question]
inputs = lxmert_tokenizer(
test_question,
padding="max_length",
max_length=20,
truncation=True,
return_token_type_ids=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors="pt"
)
# run lxmert(s)
output_gqa = lxmert_gqa(
input_ids=inputs.input_ids,
attention_mask=inputs.attention_mask,
visual_feats=features,
visual_pos=normalized_boxes,
token_type_ids=inputs.token_type_ids,
output_attentions=False,
)
output_vqa = lxmert_vqa(
input_ids=inputs.input_ids,
attention_mask=inputs.attention_mask,
visual_feats=features,
visual_pos=normalized_boxes,
token_type_ids=inputs.token_type_ids,
output_attentions=False,
)
# get prediction
pred_vqa = output_vqa["question_answering_score"].argmax(-1)
pred_gqa = output_gqa["question_answering_score"].argmax(-1)
print("Question:", test_question)
print("prediction from LXMERT GQA:", gqa_answers[pred_gqa])
print("prediction from LXMERT VQA:", vqa_answers[pred_vqa])<jupyter_output>Question: ['Where is the cat?']
prediction from LXMERT GQA: desk
prediction from LXMERT VQA: desk
Question: ['What is near the disk?']
prediction from LXMERT GQA: can
prediction from LXMERT VQA: cat
Question: ['What is the color of the table?']
prediction from LXMERT GQA: brown
prediction from LXMERT VQA: brown
Question: ['What is the color of the cat?']
prediction from LXMERT GQA: black
prediction from LXMERT VQA: black and white
Question: ['What is the shape of the monitor?']
prediction from LXMERT GQA: square
prediction from LXMERT VQA: rectangle
|
AdaMix/examples/research_projects/lxmert/demo.ipynb/0
|
{
"file_path": "AdaMix/examples/research_projects/lxmert/demo.ipynb",
"repo_id": "AdaMix",
"token_count": 1981
}
| 38 |
# Copyright 2020-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Once a model has been fine-pruned, the weights that are masked during the forward pass can be pruned once and for all.
For instance, once a model from the :class:`~emmental.MaskedBertForSequenceClassification` is trained, it can be saved (and then loaded)
as a standard :class:`~transformers.BertForSequenceClassification`.
"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
pruning_method = args.pruning_method
threshold = args.threshold
model_name_or_path = args.model_name_or_path.rstrip("/")
target_model_path = args.target_model_path
print(f"Load fine-pruned model from {model_name_or_path}")
model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
pruned_model = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
pruned_model[name] = tensor
print(f"Copied layer {name}")
elif "classifier" in name or "qa_output" in name:
pruned_model[name] = tensor
print(f"Copied layer {name}")
elif "bias" in name:
pruned_model[name] = tensor
print(f"Copied layer {name}")
else:
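            # For the remaining weight tensors, rebuild the binary mask (either from
            # the weight magnitudes or from the companion "<prefix>mask_scores"
            # tensor, where name[:-6] strips the trailing "weight") and store the
            # weights with the mask already applied.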
if pruning_method == "magnitude":
mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
pruned_model[name] = tensor * mask
print(f"Pruned layer {name}")
elif pruning_method == "topK":
if "mask_scores" in name:
continue
prefix_ = name[:-6]
scores = model[f"{prefix_}mask_scores"]
mask = TopKBinarizer.apply(scores, threshold)
pruned_model[name] = tensor * mask
print(f"Pruned layer {name}")
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
prefix_ = name[:-6]
scores = model[f"{prefix_}mask_scores"]
mask = ThresholdBinarizer.apply(scores, threshold, True)
pruned_model[name] = tensor * mask
print(f"Pruned layer {name}")
elif pruning_method == "l0":
if "mask_scores" in name:
continue
prefix_ = name[:-6]
scores = model[f"{prefix_}mask_scores"]
l, r = -0.1, 1.1
s = torch.sigmoid(scores)
s_bar = s * (r - l) + l
mask = s_bar.clamp(min=0.0, max=1.0)
pruned_model[name] = tensor * mask
print(f"Pruned layer {name}")
else:
raise ValueError("Unknown pruning method")
if target_model_path is None:
target_model_path = os.path.join(
os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
)
if not os.path.isdir(target_model_path):
shutil.copytree(model_name_or_path, target_model_path)
print(f"\nCreated folder {target_model_path}")
torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
print("\nPruned model saved! See you later!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help="Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning, sigmoied_threshold = Soft movement pruning)",
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help="For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`",
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
args = parser.parse_args()
main(args)
|
AdaMix/examples/research_projects/movement-pruning/bertarize.py/0
|
{
"file_path": "AdaMix/examples/research_projects/movement-pruning/bertarize.py",
"repo_id": "AdaMix",
"token_count": 2263
}
| 39 |
"""Finetuning script for RAG models. Adapted from examples.seq2seq.finetune.py"""
import argparse
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
import torch.distributed as dist
from pytorch_lightning.accelerators.ddp_accelerator import DDPAccelerator
from pytorch_lightning.cluster_environments import TorchElasticEnvironment
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoTokenizer,
BartForConditionalGeneration,
BatchEncoding,
RagConfig,
RagSequenceForGeneration,
RagTokenForGeneration,
RagTokenizer,
T5ForConditionalGeneration,
)
from transformers import logging as transformers_logging
from transformers.integrations import is_ray_available
if is_ray_available():
import ray
from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever
from callbacks_rag import ( # noqa: E402 # isort:skipq
get_checkpoint_callback,
get_early_stopping_callback,
Seq2SeqLoggingCallback,
)
from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip
from utils_rag import ( # noqa: E402 # isort:skip
calculate_exact_match,
flatten_list,
get_git_info,
is_rag_model,
lmap,
pickle_save,
save_git_info,
save_json,
set_extra_model_params,
Seq2SeqDataset,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
transformers_logging.set_verbosity_info()
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
# In PTL >v1.0, `init_ddp_connection` method in the `LightningModule`
# is no longer used, and is moved into DDPAccelerator instead.
# We override DDPAccelerator to add our custom logic for initializing the
# retriever.
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/tests/backends/test_accelerator_connector.py
class CustomAccel(DDPAccelerator):
def __init__(self, trainer=None, **kwargs):
# Trainer is set later.
super().__init__(trainer, **kwargs)
def init_ddp_connection(self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True):
logger.info("Custom init_ddp_connection.")
module = self.trainer.model
if self.cluster_environment is None:
self.cluster_environment = TorchElasticEnvironment()
self.distributed_port = module.hparams.distributed_port
os.environ["MASTER_PORT"] = str(self.distributed_port)
super().init_ddp_connection(global_rank, world_size, is_slurm_managing_tasks)
if module.is_rag_model:
if module.distributed_retriever == "pytorch":
module.model.rag.retriever.init_retrieval(self.distributed_port)
elif module.distributed_retriever == "ray" and global_rank == 0:
# For the Ray retriever, only initialize it once when global
# rank is 0.
module.model.rag.retriever.init_retrieval()
class GenerativeQAModule(BaseTransformer):
mode = "generative_qa"
loss_names = ["loss"]
metric_names = ["em"]
val_metric = "em"
def __init__(self, hparams, **kwargs):
# when loading from a pytorch lightning checkpoint, hparams are passed as dict
if isinstance(hparams, dict):
hparams = AttrDict(hparams)
if hparams.model_type == "rag_sequence":
self.model_class = RagSequenceForGeneration
elif hparams.model_type == "rag_token":
self.model_class = RagTokenForGeneration
elif hparams.model_type == "bart":
self.model_class = BartForConditionalGeneration
else:
self.model_class = T5ForConditionalGeneration
self.is_rag_model = is_rag_model(hparams.model_type)
config_class = RagConfig if self.is_rag_model else AutoConfig
config = config_class.from_pretrained(hparams.model_name_or_path)
# set retriever parameters
config.index_name = hparams.index_name or config.index_name
config.passages_path = hparams.passages_path or config.passages_path
config.index_path = hparams.index_path or config.index_path
config.use_dummy_dataset = hparams.use_dummy_dataset
# set extra_model_params for generator configs and load_model
extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "attention_dropout", "dropout")
if self.is_rag_model:
if hparams.prefix is not None:
config.generator.prefix = hparams.prefix
config.label_smoothing = hparams.label_smoothing
hparams, config.generator = set_extra_model_params(extra_model_params, hparams, config.generator)
if hparams.distributed_retriever == "pytorch":
retriever = RagPyTorchDistributedRetriever.from_pretrained(hparams.model_name_or_path, config=config)
elif hparams.distributed_retriever == "ray":
# The Ray retriever needs the handles to the retriever actors.
retriever = RagRayDistributedRetriever.from_pretrained(
hparams.model_name_or_path, hparams.actor_handles, config=config
)
model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config, retriever=retriever)
prefix = config.question_encoder.prefix
else:
if hparams.prefix is not None:
config.prefix = hparams.prefix
hparams, config = set_extra_model_params(extra_model_params, hparams, config)
model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config)
prefix = config.prefix
tokenizer = (
RagTokenizer.from_pretrained(hparams.model_name_or_path)
if self.is_rag_model
else AutoTokenizer.from_pretrained(hparams.model_name_or_path)
)
super().__init__(hparams, config=config, tokenizer=tokenizer, model=model)
save_git_info(self.hparams.output_dir)
self.output_dir = Path(self.hparams.output_dir)
self.metrics_save_path = Path(self.output_dir) / "metrics.json"
self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.dataset_kwargs: dict = dict(
data_dir=self.hparams.data_dir,
max_source_length=self.hparams.max_source_length,
prefix=prefix or "",
)
n_observations_per_split = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
self.target_lens = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
self.hparams.git_sha = get_git_info()["repo_sha"]
self.num_workers = hparams.num_workers
self.distributed_port = self.hparams.distributed_port
# For single GPU training, init_ddp_connection is not called.
# So we need to initialize the retrievers here.
if hparams.gpus <= 1:
if hparams.distributed_retriever == "ray":
self.model.retriever.init_retrieval()
elif hparams.distributed_retriever == "pytorch":
self.model.retriever.init_retrieval(self.distributed_port)
self.distributed_retriever = hparams.distributed_retriever
def forward(self, input_ids, **kwargs):
return self.model(input_ids, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(
generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
source_ids, source_mask, target_ids = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"]
rag_kwargs = {}
if isinstance(self.model, T5ForConditionalGeneration):
decoder_input_ids = self.model._shift_right(target_ids)
lm_labels = target_ids
elif isinstance(self.model, BartForConditionalGeneration):
decoder_input_ids = target_ids[:, :-1].contiguous()
lm_labels = target_ids[:, 1:].clone()
else:
assert self.is_rag_model
generator = self.model.rag.generator
if isinstance(generator, T5ForConditionalGeneration):
decoder_start_token_id = generator.config.decoder_start_token_id
decoder_input_ids = (
torch.cat(
[torch.Tensor([[decoder_start_token_id]] * target_ids.shape[0]).to(target_ids), target_ids],
dim=1,
)
if target_ids.shape[0] < self.target_lens["train"]
else generator._shift_right(target_ids)
)
elif isinstance(generator, BartForConditionalGeneration):
decoder_input_ids = target_ids
lm_labels = decoder_input_ids
rag_kwargs["reduce_loss"] = True
assert decoder_input_ids is not None
outputs = self(
source_ids,
attention_mask=source_mask,
decoder_input_ids=decoder_input_ids,
use_cache=False,
labels=lm_labels,
**rag_kwargs,
)
loss = outputs["loss"]
return (loss,)
@property
def pad(self) -> int:
raise NotImplementedError("pad not implemented")
def training_step(self, batch, batch_idx) -> Dict:
loss_tensors = self._step(batch)
logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
# tokens per batch
tgt_pad_token_id = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer, RagTokenizer)
else self.tokenizer.pad_token_id
)
src_pad_token_id = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer, RagTokenizer)
else self.tokenizer.pad_token_id
)
logs["tpb"] = (
batch["input_ids"].ne(src_pad_token_id).sum() + batch["decoder_input_ids"].ne(tgt_pad_token_id).sum()
)
return {"loss": loss_tensors[0], "log": logs}
def validation_step(self, batch, batch_idx) -> Dict:
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix="val") -> Dict:
self.step_count += 1
losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
loss = losses["loss"]
gen_metrics = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
metrics_tensor: torch.FloatTensor = torch.tensor(gen_metrics[self.val_metric]).type_as(loss)
gen_metrics.update({k: v.item() for k, v in losses.items()})
# fix for https://github.com/PyTorchLightning/pytorch-lightning/issues/2424
if dist.is_initialized():
dist.all_reduce(metrics_tensor, op=dist.ReduceOp.SUM)
metrics_tensor = metrics_tensor / dist.get_world_size()
gen_metrics.update({self.val_metric: metrics_tensor.item()})
losses.update(gen_metrics)
metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
metrics["step_count"] = self.step_count
self.save_metrics(metrics, prefix) # writes to self.metrics_save_path
preds = flatten_list([x["preds"] for x in outputs])
return {"log": metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": metrics_tensor}
def save_metrics(self, latest_metrics, type_path) -> None:
self.metrics[type_path].append(latest_metrics)
save_json(self.metrics, self.metrics_save_path)
def calc_generative_metrics(self, preds, target) -> Dict:
return calculate_exact_match(preds, target)
def _generative_step(self, batch: dict) -> dict:
start_time = time.time()
batch = BatchEncoding(batch).to(device=self.model.device)
generated_ids = self.model.generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
do_deduplication=False, # rag specific parameter
use_cache=True,
min_length=1,
max_length=self.target_lens["val"],
)
gen_time = (time.time() - start_time) / batch["input_ids"].shape[0]
preds: List[str] = self.ids_to_clean_text(generated_ids)
target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"])
loss_tensors = self._step(batch)
base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
gen_metrics: Dict = self.calc_generative_metrics(preds, target)
summ_len = np.mean(lmap(len, generated_ids))
base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **gen_metrics)
return base_metrics
def test_step(self, batch, batch_idx):
return self._generative_step(batch)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs, prefix="test")
def get_dataset(self, type_path) -> Seq2SeqDataset:
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
dataset = Seq2SeqDataset(
self.tokenizer,
type_path=type_path,
n_obs=n_obs,
max_target_length=max_target_length,
**self.dataset_kwargs,
)
return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
dataset = self.get_dataset(type_path)
dataloader = DataLoader(
dataset,
batch_size=batch_size,
collate_fn=dataset.collate_fn,
shuffle=shuffle,
num_workers=self.num_workers,
)
return dataloader
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> DataLoader:
return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
save_path = self.output_dir.joinpath("checkpoint{}".format(self.step_count))
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
@staticmethod
def add_model_specific_args(parser, root_dir):
BaseTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument(
"--max_source_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_target_length",
default=25,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--val_max_target_length",
default=25,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--test_max_target_length",
default=25,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
parser.add_argument("--n_val", type=int, default=-1, required=False, help="# examples. -1 means use all.")
parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
parser.add_argument(
"--prefix",
type=str,
default=None,
help="Prefix added at the beginning of each text, typically used with T5-based models.",
)
parser.add_argument(
"--early_stopping_patience",
type=int,
default=-1,
required=False,
help="-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.",
)
parser.add_argument(
"--distributed-port", type=int, default=-1, required=False, help="Port number for distributed training."
)
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token", "bart", "t5"],
type=str,
help="RAG model type: sequence or token, if none specified, the type is inferred from the model_name_or_path",
)
return parser
@staticmethod
def add_retriever_specific_args(parser):
parser.add_argument(
"--index_name",
type=str,
default=None,
help="Name of the index to use: 'hf' for a canonical dataset from the datasets library (default), 'custom' for a local index, or 'legacy' for the orignal one)",
)
parser.add_argument(
"--passages_path",
type=str,
default=None,
help="Path to the dataset of passages for custom index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`",
)
parser.add_argument(
"--index_path",
type=str,
default=None,
help="Path to the faiss index for custom index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`",
)
parser.add_argument(
"--distributed_retriever",
choices=["ray", "pytorch"],
type=str,
default="pytorch",
help="What implementation to use for distributed retriever? If "
"pytorch is selected, the index is loaded on training "
"worker 0, and torch.distributed is used to handle "
"communication between training worker 0, and the other "
"training workers. If ray is selected, the Ray library is "
"used to create load the index on separate processes, "
"and Ray handles the communication between the training "
"workers and the retrieval actors.",
)
parser.add_argument(
"--use_dummy_dataset",
type=bool,
default=False,
help="Whether to use the dummy version of the dataset index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`",
)
return parser
@staticmethod
def add_ray_specific_args(parser):
# Ray cluster address.
parser.add_argument(
"--ray-address",
default="auto",
type=str,
help="The address of the Ray cluster to connect to. If not "
"specified, Ray will attempt to automatically detect the "
"cluster. Has no effect if pytorch is used as the distributed "
"retriever.",
)
parser.add_argument(
"--num_retrieval_workers",
type=int,
default=1,
help="The number of retrieval actors to use when Ray is selected"
"for the distributed retriever. Has no effect when "
"distributed_retriever is set to pytorch.",
)
return parser
def main(args=None, model=None) -> GenerativeQAModule:
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd())
parser = GenerativeQAModule.add_retriever_specific_args(parser)
args = args or parser.parse_args()
Path(args.output_dir).mkdir(exist_ok=True)
named_actors = []
if args.distributed_retriever == "ray" and args.gpus > 1:
if not is_ray_available():
raise RuntimeError("Please install Ray to use the Ray " "distributed retriever.")
# Connect to an existing Ray cluster.
try:
ray.init(address=args.ray_address)
except (ConnectionError, ValueError):
logger.warning(
"Connection to Ray cluster failed. Make sure a Ray"
"cluster is running by either using Ray's cluster "
"launcher (`ray up`) or by manually starting Ray on "
"each node via `ray start --head` for the head node "
"and `ray start --address='<ip address>:6379'` for "
"additional nodes. See "
"https://docs.ray.io/en/master/cluster/index.html "
"for more info."
)
raise
# Create Ray actors only for rank 0.
if ("LOCAL_RANK" not in os.environ or os.environ["LOCAL_RANK"] == 0) and (
"NODE_RANK" not in os.environ or os.environ["NODE_RANK"] == 0
):
remote_cls = ray.remote(RayRetriever)
named_actors = [
remote_cls.options(name="retrieval_worker_{}".format(i)).remote()
for i in range(args.num_retrieval_workers)
]
else:
logger.info(
"Getting named actors for NODE_RANK {}, LOCAL_RANK {}".format(
os.environ["NODE_RANK"], os.environ["LOCAL_RANK"]
)
)
named_actors = [ray.get_actor("retrieval_worker_{}".format(i)) for i in range(args.num_retrieval_workers)]
args.actor_handles = named_actors
assert args.actor_handles == named_actors
if model is None:
model: GenerativeQAModule = GenerativeQAModule(args)
dataset = Path(args.data_dir).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir).startswith("/tmp")
or str(args.output_dir).startswith("/var")
):
training_logger = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
project = os.environ.get("WANDB_PROJECT", dataset)
training_logger = WandbLogger(name=model.output_dir.name, project=project)
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
es_callback = (
get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
if args.early_stopping_patience >= 0
else False
)
trainer: pl.Trainer = generic_train(
model,
args,
logging_callback=Seq2SeqLoggingCallback(),
checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric),
early_stopping_callback=es_callback,
logger=training_logger,
accelerator=CustomAccel() if args.gpus > 1 else None,
profiler=pl.profiler.AdvancedProfiler() if args.profile else None,
)
pickle_save(model.hparams, model.output_dir / "hparams.pkl")
if not args.do_predict:
return model
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd())
parser = GenerativeQAModule.add_retriever_specific_args(parser)
parser = GenerativeQAModule.add_ray_specific_args(parser)
# Pytorch Lightning Profiler
parser.add_argument(
"--profile",
action="store_true",
help="If True, use pytorch_lightning.profiler.AdvancedProfiler to profile the Trainer.",
)
args = parser.parse_args()
main(args)
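# Example invocation (single GPU; paths and the model checkpoint are placeholders,
# and generic flags such as --do_predict come from add_generic_args in lightning_base):
#   python finetune_rag.py --model_name_or_path facebook/rag-token-base \
#       --model_type rag_token --data_dir path/to/qa_data --output_dir outputs/rag \
#       --gpus 1 --distributed_retriever pytorch --do_predict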
|
AdaMix/examples/research_projects/rag/finetune_rag.py/0
|
{
"file_path": "AdaMix/examples/research_projects/rag/finetune_rag.py",
"repo_id": "AdaMix",
"token_count": 11333
}
| 40 |
#!/usr/bin/env python
import os
from pathlib import Path
from typing import Dict, List
import fire
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.utils.logging import get_logger
logger = get_logger(__name__)
def remove_prefix(text: str, prefix: str):
if text.startswith(prefix):
return text[len(prefix) :]
return text # or whatever
def sanitize(sd):
return {remove_prefix(k, "model."): v for k, v in sd.items()}
def average_state_dicts(state_dicts: List[Dict[str, torch.Tensor]]):
new_sd = {}
for k in state_dicts[0].keys():
tensors = [sd[k] for sd in state_dicts]
new_t = sum(tensors) / len(tensors)
assert isinstance(new_t, torch.Tensor)
new_sd[k] = new_t
return new_sd
def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None:
"""Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict.
Silently allows extra pl keys (like teacher.) Puts all ckpt models into CPU RAM at once!
Args:
pl_ckpt_path (:obj:`str`): Path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files.
If a directory is passed, all .ckpt files inside it will be averaged!
hf_src_model_dir (:obj:`str`): Path to a directory containing a correctly shaped checkpoint
save_path (:obj:`str`): Directory to save the new model
"""
hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir)
if os.path.isfile(pl_ckpt_path):
ckpt_files = [pl_ckpt_path]
else:
assert os.path.isdir(pl_ckpt_path)
ckpt_files = list(Path(pl_ckpt_path).glob("*.ckpt"))
assert ckpt_files, f"could not find any ckpt files inside the {pl_ckpt_path} directory"
if len(ckpt_files) > 1:
logger.info(f"averaging the weights of {ckpt_files}")
state_dicts = [sanitize(torch.load(x, map_location="cpu")["state_dict"]) for x in ckpt_files]
state_dict = average_state_dicts(state_dicts)
missing, unexpected = hf_model.load_state_dict(state_dict, strict=False)
assert not missing, f"missing keys: {missing}"
hf_model.save_pretrained(save_path)
try:
tok = AutoTokenizer.from_pretrained(hf_src_model_dir)
tok.save_pretrained(save_path)
except Exception:
pass
        # don't copy the tokenizer if it can't be loaded
if __name__ == "__main__":
fire.Fire(convert_pl_to_hf)
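# Example invocation via python-fire (positional arguments map to the function
# parameters above; paths are placeholders):
#   python convert_pl_checkpoint_to_hf.py path/to/ckpt_dir path/to/hf_src_model_dir path/to/save_dir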
|
AdaMix/examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py/0
|
{
"file_path": "AdaMix/examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py",
"repo_id": "AdaMix",
"token_count": 1017
}
| 41 |
#!/usr/bin/env bash
export PYTHONPATH="../":"${PYTHONPATH}"
export BS=32
export GAS=1
python finetune.py \
--learning_rate=3e-5 \
--fp16 \
--gpus 1 \
--do_train \
--do_predict \
--val_check_interval 0.25 \
--n_val 500 \
--num_train_epochs 2 \
--freeze_encoder --freeze_embeds --data_dir cnn_dm \
--max_target_length 142 --val_max_target_length=142 \
--train_batch_size=$BS --eval_batch_size=$BS --gradient_accumulation_steps=$GAS \
--model_name_or_path sshleifer/student_cnn_12_6 \
--tokenizer_name facebook/bart-large \
--warmup_steps 500 \
--output_dir distilbart-cnn-12-6 \
"$@"
|
AdaMix/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh/0
|
{
"file_path": "AdaMix/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh",
"repo_id": "AdaMix",
"token_count": 292
}
| 42 |