text (string) | id (string) | metadata (dict) | __index_level_0__ (int64)
---|---|---|---|
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="file-submodule" format="2">
<advance width="1200"/>
<unicode hex="EAEC"/>
<note>
file-submodule
</note>
<outline>
<contour>
<point x="104" y="834" type="line"/>
<point x="184" y="834" type="line"/>
<point x="184" y="502" type="line"/>
<point x="104" y="502" type="line"/>
</contour>
<contour>
<point x="104" y="918" type="line"/>
<point x="104" y="1166" type="line"/>
<point x="405" y="1166" type="line"/>
<point x="441" y="1104" type="line"/>
<point x="476" y="1082" type="line"/>
<point x="932" y="1082" type="line"/>
<point x="932" y="918" type="line"/>
<point x="1016" y="918" type="line"/>
<point x="1016" y="1122" type="line"/>
<point x="972" y="1166" type="line"/>
<point x="498" y="1166" type="line"/>
<point x="463" y="1228" type="line"/>
<point x="427" y="1250" type="line"/>
<point x="60" y="1250" type="line"/>
<point x="20" y="1206" type="line"/>
<point x="20" y="458" type="line"/>
<point x="60" y="418" type="line"/>
<point x="184" y="418" type="line"/>
<point x="184" y="210" type="line"/>
<point x="228" y="170" type="line"/>
<point x="1140" y="170" type="line"/>
<point x="1180" y="210" type="line"/>
<point x="1180" y="874" type="line"/>
<point x="1140" y="918" type="line"/>
<point x="666" y="918" type="line"/>
<point x="631" y="980" type="line"/>
<point x="596" y="998" type="line"/>
<point x="228" y="998" type="line"/>
<point x="184" y="958" type="line"/>
<point x="184" y="918" type="line"/>
</contour>
<contour>
<point x="640" y="834" type="line"/>
<point x="1100" y="834" type="line"/>
<point x="1100" y="750" type="line"/>
<point x="600" y="750" type="line"/>
<point x="565" y="728" type="line"/>
<point x="529" y="666" type="line"/>
<point x="268" y="666" type="line"/>
<point x="268" y="918" type="line"/>
<point x="569" y="918" type="line"/>
<point x="604" y="852" type="line"/>
</contour>
<contour>
<point x="591" y="604" type="line"/>
<point x="627" y="666" type="line"/>
<point x="1100" y="666" type="line"/>
<point x="1100" y="254" type="line"/>
<point x="268" y="254" type="line"/>
<point x="268" y="586" type="line"/>
<point x="556" y="586" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/file-submodule.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/file-submodule.glif",
"repo_id": "cascadia-code",
"token_count": 1201
}
| 839 |
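Each `text` cell in this table holds a UFO `.glif` outline as plain XML, like the `file-submodule` glyph above. As a hedged illustration only (not part of this dataset or of the Cascadia Code build tooling), the sketch below shows how such a cell could be parsed with Python's standard `xml.etree.ElementTree`; the `extract_contours` helper and the trimmed sample glyph are assumptions for demonstration.

```python
import xml.etree.ElementTree as ET

def extract_contours(glif_xml: str):
    """Return (glyph name, contours) from a UFO .glif string like the rows in this table."""
    # Encode first so the XML declaration's encoding attribute is honored by the parser.
    root = ET.fromstring(glif_xml.encode("utf-8"))   # root is the <glyph> element
    contours = []
    for contour in root.findall("./outline/contour"):
        # Off-curve points carry no "type" attribute, so get() may return None for them.
        points = [(float(p.get("x")), float(p.get("y")), p.get("type"))
                  for p in contour.findall("point")]
        contours.append(points)
    return root.get("name"), contours

# Trimmed sample in the same format as the "grabber" glyph shown later in this table.
sample = """<?xml version='1.0' encoding='UTF-8'?>
<glyph name="grabber" format="2">
  <advance width="1200"/>
  <outline>
    <contour>
      <point x="1180" y="792" type="line"/>
      <point x="20" y="792" type="line"/>
    </contour>
  </outline>
</glyph>"""

name, contours = extract_contours(sample)
print(name, contours)  # grabber [[(1180.0, 792.0, 'line'), (20.0, 792.0, 'line')]]
```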
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="gear" format="2">
<advance width="1200"/>
<unicode hex="EAF8"/>
<note>
gear
</note>
<outline>
<contour>
<point x="751" y="993" type="line"/>
<point x="914" y="1100" type="line"/>
<point x="981" y="1033" type="line"/>
<point x="875" y="869" type="line"/>
<point x="901" y="803" type="line"/>
<point x="1100" y="759" type="line"/>
<point x="1100" y="661" type="line"/>
<point x="905" y="626" type="line"/>
<point x="883" y="559" type="line"/>
<point x="990" y="396" type="line"/>
<point x="923" y="329" type="line"/>
<point x="759" y="435" type="line"/>
<point x="693" y="413" type="line"/>
<point x="649" y="214" type="line"/>
<point x="551" y="214" type="line"/>
<point x="516" y="405" type="line"/>
<point x="449" y="427" type="line"/>
<point x="286" y="320" type="line"/>
<point x="219" y="387" type="line"/>
<point x="325" y="551" type="line"/>
<point x="303" y="617" type="line"/>
<point x="104" y="661" type="line"/>
<point x="104" y="759" type="line"/>
<point x="303" y="803" type="line"/>
<point x="317" y="861" type="line"/>
<point x="210" y="1024" type="line"/>
<point x="286" y="1091" type="line"/>
<point x="449" y="985" type="line"/>
<point x="507" y="1011" type="line"/>
<point x="551" y="1210" type="line"/>
<point x="649" y="1210" type="line"/>
<point x="693" y="1011" type="line"/>
</contour>
<contour>
<point x="485" y="1290" type="line"/>
<point x="449" y="1091" type="line"/>
<point x="277" y="1210" type="line"/>
<point x="109" y="1042" type="line"/>
<point x="219" y="869" type="line"/>
<point x="20" y="825" type="line"/>
<point x="20" y="595" type="line"/>
<point x="219" y="559" type="line"/>
<point x="104" y="387" type="line"/>
<point x="268" y="219" type="line"/>
<point x="441" y="329" type="line"/>
<point x="485" y="130" type="line"/>
<point x="715" y="130" type="line"/>
<point x="759" y="329" type="line"/>
<point x="932" y="214" type="line"/>
<point x="1100" y="378" type="line"/>
<point x="981" y="551" type="line"/>
<point x="1180" y="595" type="line"/>
<point x="1180" y="825" type="line"/>
<point x="981" y="861" type="line"/>
<point x="1100" y="1033" type="line"/>
<point x="932" y="1201" type="line"/>
<point x="759" y="1091" type="line"/>
<point x="715" y="1290" type="line"/>
</contour>
<contour>
<point x="768" y="776"/>
<point x="666" y="878"/>
<point x="534" y="878"/>
<point x="436" y="776"/>
<point x="436" y="644"/>
<point x="534" y="546"/>
<point x="666" y="546"/>
<point x="768" y="644"/>
<point x="768" y="710" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="565" y="626"/>
<point x="516" y="675"/>
<point x="516" y="745"/>
<point x="565" y="794"/>
<point x="635" y="794"/>
<point x="684" y="745"/>
<point x="684" y="675"/>
<point x="635" y="626"/>
<point x="600" y="626" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/gear.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/gear.glif",
"repo_id": "cascadia-code",
"token_count": 1601
}
| 840 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="grabber" format="2">
<advance width="1200"/>
<unicode hex="EB02"/>
<note>
grabber
</note>
<outline>
<contour>
<point x="1180" y="792" type="line"/>
<point x="1180" y="876" type="line"/>
<point x="20" y="876" type="line"/>
<point x="20" y="792" type="line"/>
</contour>
<contour>
<point x="1180" y="544" type="line"/>
<point x="1180" y="624" type="line"/>
<point x="20" y="624" type="line"/>
<point x="20" y="544" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/grabber.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/grabber.glif",
"repo_id": "cascadia-code",
"token_count": 277
}
| 841 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="megaphone" format="2">
<advance width="1200"/>
<unicode hex="EB1E"/>
<note>
megaphone
</note>
<outline>
<contour>
<point x="20" y="788" type="line"/>
<point x="20" y="621" type="line"/>
<point x="57" y="575" type="line"/>
<point x="233" y="529" type="line"/>
<point x="219" y="478"/>
<point x="246" y="381"/>
<point x="316" y="307"/>
<point x="417" y="284"/>
<point x="515" y="312"/>
<point x="584" y="381"/>
<point x="598" y="432" type="qcurve"/>
<point x="1125" y="293" type="line"/>
<point x="1180" y="339" type="line"/>
<point x="1180" y="1084" type="line"/>
<point x="1125" y="1130" type="line"/>
<point x="57" y="834" type="line"/>
</contour>
<contour>
<point x="510" y="455" type="line"/>
<point x="501" y="418"/>
<point x="436" y="381"/>
<point x="362" y="390"/>
<point x="311" y="446"/>
<point x="311" y="487" type="qcurve" smooth="yes"/>
<point x="311" y="497"/>
<point x="316" y="506" type="qcurve"/>
</contour>
<contour>
<point x="103" y="751" type="line"/>
<point x="1097" y="1033" type="line"/>
<point x="1097" y="395" type="line"/>
<point x="103" y="658" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/megaphone.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/megaphone.glif",
"repo_id": "cascadia-code",
"token_count": 666
}
| 842 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="notebook" format="2">
<advance width="1200"/>
<unicode hex="EBAF"/>
<note>
notebook
</note>
<outline>
<contour>
<point x="20" y="175" type="line"/>
<point x="106" y="85" type="line"/>
<point x="913" y="85" type="line"/>
<point x="1003" y="175" type="line"/>
<point x="1003" y="1249" type="line"/>
<point x="913" y="1335" type="line"/>
<point x="106" y="1335" type="line"/>
<point x="20" y="1249" type="line"/>
</contour>
<contour>
<point x="913" y="1249" type="line"/>
<point x="913" y="175" type="line"/>
<point x="106" y="175" type="line"/>
<point x="106" y="1249" type="line"/>
</contour>
<contour>
<point x="197" y="977" type="line"/>
<point x="287" y="891" type="line"/>
<point x="736" y="891" type="line"/>
<point x="822" y="977" type="line"/>
<point x="822" y="1068" type="line"/>
<point x="736" y="1159" type="line"/>
<point x="287" y="1159" type="line"/>
<point x="197" y="1068" type="line"/>
</contour>
<contour>
<point x="736" y="1068" type="line"/>
<point x="736" y="977" type="line"/>
<point x="287" y="977" type="line"/>
<point x="287" y="1068" type="line"/>
</contour>
<contour>
<point x="1180" y="801" type="line"/>
<point x="1180" y="977" type="line"/>
<point x="1094" y="977" type="line"/>
<point x="1094" y="801" type="line"/>
</contour>
<contour>
<point x="1094" y="533" type="line"/>
<point x="1180" y="533" type="line"/>
<point x="1180" y="710" type="line"/>
<point x="1094" y="710" type="line"/>
</contour>
<contour>
<point x="1180" y="261" type="line"/>
<point x="1180" y="443" type="line"/>
<point x="1094" y="443" type="line"/>
<point x="1094" y="261" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/notebook.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/notebook.glif",
"repo_id": "cascadia-code",
"token_count": 950
}
| 843 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="play" format="2">
<advance width="1200"/>
<unicode hex="EB2C"/>
<note>
play
</note>
<outline>
<contour>
<point x="1142" y="754" type="line"/>
<point x="146" y="1420" type="line"/>
<point x="58" y="1373" type="line"/>
<point x="58" y="47" type="line"/>
<point x="146" y="0" type="line"/>
<point x="1142" y="660" type="line"/>
</contour>
<contour>
<point x="1012" y="707" type="line"/>
<point x="170" y="147" type="line"/>
<point x="170" y="1267" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/play.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/play.glif",
"repo_id": "cascadia-code",
"token_count": 294
}
| 844 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="regex" format="2">
<advance width="1200"/>
<unicode hex="EB38"/>
<note>
regex
</note>
<outline>
<contour>
<point x="793" y="986" type="line"/>
<point x="546" y="1135" type="line"/>
<point x="499" y="1053" type="line"/>
<point x="747" y="903" type="line"/>
<point x="499" y="749" type="line"/>
<point x="546" y="666" type="line"/>
<point x="793" y="816" type="line"/>
<point x="793" y="517" type="line"/>
<point x="886" y="517" type="line"/>
<point x="886" y="816" type="line"/>
<point x="1134" y="666" type="line"/>
<point x="1180" y="749" type="line"/>
<point x="933" y="903" type="line"/>
<point x="1180" y="1053" type="line"/>
<point x="1134" y="1135" type="line"/>
<point x="886" y="986" type="line"/>
<point x="886" y="1290" type="line"/>
<point x="793" y="1290" type="line"/>
</contour>
<contour>
<point x="20" y="130" type="line"/>
<point x="407" y="130" type="line"/>
<point x="407" y="517" type="line"/>
<point x="20" y="517" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/regex.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/regex.glif",
"repo_id": "cascadia-code",
"token_count": 566
}
| 845 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="stop-circle" format="2">
<advance width="1200"/>
<unicode hex="EBA5"/>
<note>
stop-circle
</note>
<outline>
<contour>
<point x="443" y="545" type="line"/>
<point x="778" y="545" type="line"/>
<point x="778" y="880" type="line"/>
<point x="443" y="880" type="line"/>
</contour>
<contour>
<point x="555" y="1304"/>
<point x="363" y="1251"/>
<point x="282" y="1197" type="qcurve" smooth="yes"/>
<point x="113" y="1085"/>
<point x="46" y="880" type="qcurve" smooth="yes"/>
<point x="14" y="786"/>
<point x="28" y="581"/>
<point x="99" y="393"/>
<point x="233" y="241"/>
<point x="323" y="192" type="qcurve" smooth="yes"/>
<point x="497" y="94"/>
<point x="711" y="134" type="qcurve" smooth="yes"/>
<point x="809" y="152"/>
<point x="979" y="255"/>
<point x="1046" y="335" type="qcurve" smooth="yes"/>
<point x="1180" y="509"/>
<point x="1180" y="701" type="qcurve" smooth="yes"/>
<point x="1180" y="947"/>
<point x="1010" y="1130" type="qcurve" smooth="yes"/>
<point x="939" y="1201"/>
<point x="756" y="1291"/>
<point x="658" y="1295" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="617" y="197"/>
<point x="448" y="224"/>
<point x="376" y="268" type="qcurve" smooth="yes"/>
<point x="224" y="353"/>
<point x="77" y="675"/>
<point x="180" y="1023"/>
<point x="323" y="1130" type="qcurve" smooth="yes"/>
<point x="390" y="1179"/>
<point x="559" y="1219"/>
<point x="644" y="1215" type="qcurve" smooth="yes"/>
<point x="814" y="1201"/>
<point x="943" y="1072" type="qcurve" smooth="yes"/>
<point x="1010" y="1001"/>
<point x="1086" y="813"/>
<point x="1086" y="710" type="qcurve" smooth="yes"/>
<point x="1095" y="545"/>
<point x="984" y="393" type="qcurve" smooth="yes"/>
<point x="872" y="255"/>
<point x="702" y="215" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/stop-circle.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/stop-circle.glif",
"repo_id": "cascadia-code",
"token_count": 1047
}
| 846 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="triangle-right" format="2">
<advance width="1200"/>
<unicode hex="EB70"/>
<note>
triangle-right
</note>
<outline>
<contour>
<point x="889" y="661" type="line"/>
<point x="889" y="744" type="line"/>
<point x="363" y="1290" type="line"/>
<point x="311" y="1249" type="line"/>
<point x="311" y="166" type="line"/>
<point x="363" y="130" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/triangle-right.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/triangle-right.glif",
"repo_id": "cascadia-code",
"token_count": 220
}
| 847 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="Kdenlive" format="2">
<advance width="1200"/>
<unicode hex="F33C"/>
<note>
Kdenlive
</note>
<outline>
<contour>
<point x="598" y="1005" type="line"/>
<point x="598" y="275" type="line"/>
<point x="625" y="275" type="line"/>
<point x="625" y="1005" type="line"/>
<point x="718" y="1145" type="line"/>
<point x="507" y="1145" type="line"/>
</contour>
<contour>
<point x="706" y="1018"/>
<point x="706" y="1002" type="qcurve" smooth="yes"/>
<point x="706" y="366" type="line" smooth="yes"/>
<point x="706" y="354"/>
<point x="727" y="345"/>
<point x="734" y="352" type="qcurve"/>
<point x="1173" y="669" type="line" smooth="yes"/>
<point x="1180" y="674"/>
<point x="1180" y="692"/>
<point x="1173" y="696" type="qcurve" smooth="yes"/>
<point x="734" y="1014" type="line" smooth="yes"/>
<point x="731" y="1016"/>
<point x="725" y="1016" type="qcurve"/>
<point x="725" y="1018" type="line"/>
</contour>
<contour>
<point x="450" y="971" type="line"/>
<point x="516" y="971" type="line"/>
<point x="516" y="1018" type="line"/>
<point x="450" y="1018" type="line"/>
</contour>
<contour>
<point x="398" y="891" type="line"/>
<point x="516" y="891" type="line"/>
<point x="516" y="941" type="line"/>
<point x="398" y="941" type="line"/>
</contour>
<contour>
<point x="258" y="814" type="line"/>
<point x="516" y="814" type="line"/>
<point x="516" y="862" type="line"/>
<point x="258" y="862" type="line"/>
</contour>
<contour>
<point x="115" y="735" type="line"/>
<point x="516" y="735" type="line"/>
<point x="516" y="785" type="line"/>
<point x="115" y="785" type="line"/>
</contour>
<contour>
<point x="20" y="658" type="line"/>
<point x="516" y="658" type="line"/>
<point x="516" y="705" type="line"/>
<point x="20" y="705" type="line"/>
</contour>
<contour>
<point x="176" y="581" type="line"/>
<point x="516" y="581" type="line"/>
<point x="516" y="628" type="line"/>
<point x="176" y="628" type="line"/>
</contour>
<contour>
<point x="315" y="502" type="line"/>
<point x="516" y="502" type="line"/>
<point x="516" y="549" type="line"/>
<point x="315" y="549" type="line"/>
</contour>
<contour>
<point x="260" y="425" type="line"/>
<point x="516" y="425" type="line"/>
<point x="516" y="472" type="line"/>
<point x="260" y="472" type="line"/>
</contour>
<contour>
<point x="392" y="350" type="line"/>
<point x="516" y="350" type="line"/>
<point x="516" y="397" type="line"/>
<point x="392" y="397" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/glyphs/K_denlive.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/glyphs/K_denlive.glif",
"repo_id": "cascadia-code",
"token_count": 1409
}
| 848 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="Neovim" format="2">
<advance width="1200"/>
<unicode hex="F36F"/>
<note>
Neovim
</note>
<outline>
<contour>
<point x="97" y="1201" type="line"/>
<point x="882" y="3" type="line"/>
<point x="1101" y="225" type="line"/>
<point x="316" y="1420" type="line"/>
</contour>
<contour>
<point x="882" y="630" type="line"/>
<point x="1134" y="255" type="line"/>
<point x="1184" y="308" type="line"/>
<point x="1184" y="1118" type="line"/>
<point x="882" y="1417" type="line"/>
</contour>
<contour>
<point x="16" y="1115" type="line"/>
<point x="19" y="308" type="line"/>
<point x="324" y="0" type="line"/>
<point x="324" y="788" type="line"/>
<point x="69" y="1173" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/glyphs/N_eovim.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/glyphs/N_eovim.glif",
"repo_id": "cascadia-code",
"token_count": 419
}
| 849 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="dot" format="2">
<advance width="1200"/>
<unicode hex="F4C3"/>
<note>
dot
</note>
<outline>
<contour>
<point x="388" y="498"/>
<point x="521" y="420"/>
<point x="679" y="420"/>
<point x="812" y="498"/>
<point x="890" y="631"/>
<point x="890" y="789"/>
<point x="812" y="922"/>
<point x="679" y="1000"/>
<point x="521" y="1000"/>
<point x="388" y="922"/>
<point x="310" y="789"/>
<point x="310" y="631"/>
</contour>
<contour>
<point x="781" y="785"/>
<point x="781" y="635"/>
<point x="675" y="529"/>
<point x="525" y="529"/>
<point x="419" y="635"/>
<point x="419" y="785"/>
<point x="525" y="891"/>
<point x="675" y="891"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:49:53 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/dot.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/dot.glif",
"repo_id": "cascadia-code",
"token_count": 517
}
| 850 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="shield-x" format="2">
<advance width="1200"/>
<unicode hex="F513"/>
<note>
shield-x
</note>
<outline>
<contour>
<point x="623" y="1357"/>
<point x="577" y="1357"/>
<point x="556" y="1350" type="qcurve" smooth="yes"/>
<point x="121" y="1211" type="line" smooth="yes"/>
<point x="76" y="1197"/>
<point x="20" y="1121"/>
<point x="20" y="1073" type="qcurve" smooth="yes"/>
<point x="20" y="781" type="line" smooth="yes"/>
<point x="20" y="558"/>
<point x="128" y="393" type="qcurve" smooth="yes"/>
<point x="257" y="197"/>
<point x="545" y="74" type="qcurve" smooth="yes"/>
<point x="571" y="63"/>
<point x="600" y="63" type="qcurve" smooth="yes"/>
<point x="629" y="63"/>
<point x="655" y="74" type="qcurve" smooth="yes"/>
<point x="943" y="197"/>
<point x="1072" y="393" type="qcurve" smooth="yes"/>
<point x="1180" y="558"/>
<point x="1180" y="781" type="qcurve" smooth="yes"/>
<point x="1180" y="1073" type="line" smooth="yes"/>
<point x="1180" y="1121"/>
<point x="1124" y="1197"/>
<point x="1079" y="1211" type="qcurve" smooth="yes"/>
<point x="644" y="1350" type="line" smooth="yes"/>
</contour>
<contour>
<point x="597" y="1233"/>
<point x="594" y="1232" type="qcurve" smooth="yes"/>
<point x="159" y="1093" type="line" smooth="yes"/>
<point x="144" y="1088"/>
<point x="144" y="1073" type="qcurve" smooth="yes"/>
<point x="144" y="781" type="line" smooth="yes"/>
<point x="144" y="595"/>
<point x="232" y="461" type="qcurve" smooth="yes"/>
<point x="340" y="296"/>
<point x="594" y="188" type="qcurve" smooth="yes"/>
<point x="597" y="187"/>
<point x="603" y="187"/>
<point x="606" y="188" type="qcurve" smooth="yes"/>
<point x="860" y="296"/>
<point x="968" y="461" type="qcurve" smooth="yes"/>
<point x="1056" y="595"/>
<point x="1056" y="781" type="qcurve" smooth="yes"/>
<point x="1056" y="1073" type="line" smooth="yes"/>
<point x="1056" y="1088"/>
<point x="1041" y="1093" type="qcurve" smooth="yes"/>
<point x="606" y="1232" type="line" smooth="yes"/>
<point x="603" y="1233"/>
</contour>
<contour>
<point x="480" y="948"/>
<point x="454" y="948" type="qcurve" smooth="yes"/>
<point x="446" y="948"/>
<point x="438" y="946" type="qcurve" smooth="yes"/>
<point x="417" y="941"/>
<point x="391" y="908"/>
<point x="391" y="886" type="qcurve" smooth="yes"/>
<point x="391" y="860"/>
<point x="411" y="841" type="qcurve" smooth="yes"/>
<point x="512" y="740" type="line"/>
<point x="487" y="715" type="line" smooth="yes"/>
<point x="461" y="689"/>
<point x="411" y="639"/>
<point x="411" y="639" type="qcurve"/>
<point x="394" y="621"/>
<point x="394" y="596" type="qcurve" smooth="yes"/>
<point x="394" y="571"/>
<point x="431" y="534"/>
<point x="456" y="534" type="qcurve" smooth="yes"/>
<point x="481" y="534"/>
<point x="499" y="551" type="qcurve" smooth="yes"/>
<point x="600" y="652" type="line"/>
<point x="701" y="551" type="line" smooth="yes"/>
<point x="720" y="531"/>
<point x="746" y="531" type="qcurve" smooth="yes"/>
<point x="754" y="531"/>
<point x="762" y="533" type="qcurve" smooth="yes"/>
<point x="783" y="538"/>
<point x="809" y="571"/>
<point x="809" y="593" type="qcurve" smooth="yes"/>
<point x="809" y="620"/>
<point x="789" y="639" type="qcurve" smooth="yes"/>
<point x="688" y="740" type="line"/>
<point x="789" y="841" type="line" smooth="yes"/>
<point x="808" y="860"/>
<point x="808" y="886" type="qcurve" smooth="yes"/>
<point x="808" y="894"/>
<point x="806" y="902" type="qcurve" smooth="yes"/>
<point x="801" y="922"/>
<point x="768" y="949"/>
<point x="746" y="949" type="qcurve" smooth="yes"/>
<point x="720" y="949"/>
<point x="701" y="929" type="qcurve" smooth="yes"/>
<point x="600" y="828" type="line"/>
<point x="499" y="929" type="line" smooth="yes"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:49:54 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/shield-x.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/shield-x.glif",
"repo_id": "cascadia-code",
"token_count": 2175
}
| 851 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="telescope" format="2">
<advance width="1200"/>
<unicode hex="F46B"/>
<note>
telescope
</note>
<outline>
<contour>
<point x="1041" y="1256"/>
<point x="981" y="1292"/>
<point x="945" y="1292" type="qcurve" smooth="yes"/>
<point x="906" y="1292"/>
<point x="873" y="1269" type="qcurve" smooth="yes"/>
<point x="77" y="726" type="line" smooth="yes"/>
<point x="51" y="708"/>
<point x="20" y="652"/>
<point x="20" y="619" type="qcurve" smooth="yes"/>
<point x="20" y="584"/>
<point x="37" y="555" type="qcurve" smooth="yes"/>
<point x="70" y="497" type="line" smooth="yes"/>
<point x="88" y="467"/>
<point x="147" y="433"/>
<point x="182" y="433" type="qcurve" smooth="yes"/>
<point x="212" y="433"/>
<point x="238" y="445" type="qcurve" smooth="yes"/>
<point x="1107" y="864" type="line" smooth="yes"/>
<point x="1140" y="879"/>
<point x="1180" y="942"/>
<point x="1180" y="980" type="qcurve" smooth="yes"/>
<point x="1180" y="1015"/>
<point x="1163" y="1044" type="qcurve" smooth="yes"/>
<point x="1058" y="1227" type="line" smooth="yes"/>
</contour>
<contour>
<point x="693" y="562"/>
<point x="664" y="562" type="qcurve" smooth="yes"/>
<point x="651" y="562"/>
<point x="640" y="556" type="qcurve" smooth="yes"/>
<point x="506" y="492" type="line" smooth="yes"/>
<point x="493" y="485"/>
<point x="484" y="473" type="qcurve" smooth="yes"/>
<point x="446" y="418" type="line" smooth="yes"/>
<point x="409" y="363"/>
<point x="333" y="252"/>
<point x="333" y="252" type="qcurve"/>
<point x="323" y="238"/>
<point x="323" y="221" type="qcurve" smooth="yes"/>
<point x="323" y="198"/>
<point x="356" y="165"/>
<point x="379" y="165" type="qcurve" smooth="yes"/>
<point x="408" y="165"/>
<point x="424" y="190" type="qcurve" smooth="yes"/>
<point x="564" y="393" type="line"/>
<point x="564" y="184" type="line" smooth="yes"/>
<point x="564" y="161"/>
<point x="596" y="128"/>
<point x="642" y="128"/>
<point x="675" y="161"/>
<point x="675" y="184" type="qcurve" smooth="yes"/>
<point x="675" y="393" type="line"/>
<point x="814" y="190" type="line" smooth="yes"/>
<point x="830" y="165"/>
<point x="859" y="165" type="qcurve" smooth="yes"/>
<point x="882" y="165"/>
<point x="915" y="198"/>
<point x="915" y="221" type="qcurve" smooth="yes"/>
<point x="915" y="238"/>
<point x="905" y="252" type="qcurve" smooth="yes"/>
<point x="710" y="538" type="line" smooth="yes"/>
</contour>
<contour>
<point x="810" y="843" type="line"/>
<point x="409" y="650" type="line"/>
<point x="339" y="771" type="line"/>
<point x="707" y="1021" type="line"/>
</contour>
<contour>
<point x="940" y="1181"/>
<point x="946" y="1181" type="qcurve" smooth="yes"/>
<point x="948" y="1181"/>
<point x="950" y="1180" type="qcurve" smooth="yes"/>
<point x="958" y="1179"/>
<point x="962" y="1172" type="qcurve" smooth="yes"/>
<point x="988" y="1126" type="line" smooth="yes"/>
<point x="1014" y="1080"/>
<point x="1067" y="989"/>
<point x="1067" y="989" type="qcurve"/>
<point x="1070" y="985"/>
<point x="1070" y="980" type="qcurve" smooth="yes"/>
<point x="1070" y="977"/>
<point x="1069" y="975" type="qcurve"/>
<point x="1066" y="967"/>
<point x="1059" y="964" type="qcurve" smooth="yes"/>
<point x="910" y="891" type="line"/>
<point x="799" y="1084" type="line"/>
<point x="936" y="1177" type="line"/>
<point x="935" y="1177" type="line"/>
</contour>
<contour>
<point x="247" y="708" type="line"/>
<point x="309" y="602" type="line"/>
<point x="279" y="588" type="line" smooth="yes"/>
<point x="249" y="574"/>
<point x="190" y="545"/>
<point x="190" y="545" type="qcurve"/>
<point x="186" y="543"/>
<point x="182" y="543" type="qcurve" smooth="yes"/>
<point x="172" y="543"/>
<point x="166" y="552" type="qcurve" smooth="yes"/>
<point x="158" y="567" type="line" smooth="yes"/>
<point x="150" y="581"/>
<point x="133" y="610"/>
<point x="133" y="610" type="qcurve"/>
<point x="131" y="614"/>
<point x="131" y="619" type="qcurve" smooth="yes"/>
<point x="131" y="629"/>
<point x="139" y="634" type="qcurve" smooth="yes"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:49:52 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/telescope.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/telescope.glif",
"repo_id": "cascadia-code",
"token_count": 2364
}
| 852 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>com.schriftgestaltung.DisplayStrings</key>
<array>
<string>/smiley</string>
</array>
<key>com.schriftgestaltung.appVersion</key>
<string>3241</string>
<key>com.schriftgestaltung.disablesAutomaticAlignment</key>
<true/>
<key>com.schriftgestaltung.fontMaster.customParameters</key>
<array>
<dict>
<key>name</key>
<string>CVT Table</string>
<key>value</key>
<string>(
68,
1297
)</string>
</dict>
</array>
<key>com.schriftgestaltung.fontMasterID</key>
<string>m01</string>
<key>com.schriftgestaltung.formatVersion</key>
<integer>3</integer>
<key>com.schriftgestaltung.useGlyphOrder</key>
<true/>
<key>com.schriftgestaltung.useNiceNames</key>
<false/>
<key>public.glyphOrder</key>
<array>
<string>heart</string>
<string>zap</string>
<string>light-bulb</string>
<string>repo</string>
<string>repo-forked</string>
<string>repo-push</string>
<string>repo-pull</string>
<string>book</string>
<string>accessibility</string>
<string>git-pull-request</string>
<string>mark-github</string>
<string>download</string>
<string>upload</string>
<string>accessibility-inset</string>
<string>alert-fill</string>
<string>file-code</string>
<string>apps</string>
<string>file-media</string>
<string>file-zip</string>
<string>archive</string>
<string>tag</string>
<string>file-directory</string>
<string>file-submodule</string>
<string>person</string>
<string>arrow-both</string>
<string>git-commit</string>
<string>git-branch</string>
<string>git-merge</string>
<string>mirror</string>
<string>issue-opened</string>
<string>issue-reopened</string>
<string>issue-closed</string>
<string>star</string>
<string>comment</string>
<string>question</string>
<string>alert</string>
<string>search</string>
<string>gear</string>
<string>arrow-down-left</string>
<string>tools</string>
<string>sign-out</string>
<string>rocket</string>
<string>rss</string>
<string>paste</string>
<string>sign-in</string>
<string>organization</string>
<string>device-mobile</string>
<string>unfold</string>
<string>check</string>
<string>mail</string>
<string>read</string>
<string>arrow-up</string>
<string>arrow-right</string>
<string>arrow-down</string>
<string>arrow-left</string>
<string>pin</string>
<string>gift</string>
<string>graph</string>
<string>triangle-left</string>
<string>credit-card</string>
<string>clock</string>
<string>ruby</string>
<string>broadcast</string>
<string>key</string>
<string>arrow-down-right</string>
<string>repo-clone</string>
<string>diff</string>
<string>eye</string>
<string>comment-discussion</string>
<string>arrow-switch</string>
<string>dot-fill</string>
<string>square-fill</string>
<string>device-camera</string>
<string>device-camera-video</string>
<string>pencil</string>
<string>info</string>
<string>triangle-right</string>
<string>triangle-down</string>
<string>link</string>
<string>plus</string>
<string>three-bars</string>
<string>code</string>
<string>location</string>
<string>list-unordered</string>
<string>list-ordered</string>
<string>quote</string>
<string>versions</string>
<string>calendar</string>
<string>lock</string>
<string>diff-added</string>
<string>diff-removed</string>
<string>diff-modified</string>
<string>diff-renamed</string>
<string>horizontal-rule</string>
<string>arrow-up-left</string>
<string>milestone</string>
<string>checklist</string>
<string>megaphone</string>
<string>chevron-right</string>
<string>bookmark</string>
<string>sliders</string>
<string>meter</string>
<string>history</string>
<string>link-external</string>
<string>mute</string>
<string>x</string>
<string>circle-slash</string>
<string>pulse</string>
<string>sync</string>
<string>telescope</string>
<string>arrow-up-right</string>
<string>home</string>
<string>stop</string>
<string>bug</string>
<string>logo-github</string>
<string>file-binary</string>
<string>database</string>
<string>server</string>
<string>diff-ignored</string>
<string>ellipsis</string>
<string>bell-fill</string>
<string>hubot</string>
<string>bell-slash</string>
<string>blocked</string>
<string>bookmark-fill</string>
<string>chevron-up</string>
<string>chevron-down</string>
<string>chevron-left</string>
<string>triangle-up</string>
<string>git-compare</string>
<string>logo-gist</string>
<string>file-symlink-file</string>
<string>file-symlink-directory</string>
<string>squirrel</string>
<string>globe</string>
<string>unmute</string>
<string>mention</string>
<string>package</string>
<string>browser</string>
<string>terminal</string>
<string>markdown</string>
<string>dash</string>
<string>fold</string>
<string>inbox</string>
<string>trash</string>
<string>paintbrush</string>
<string>flame</string>
<string>briefcase</string>
<string>plug</string>
<string>bookmark-slash-fill</string>
<string>mortar-board</string>
<string>law</string>
<string>thumbsup</string>
<string>thumbsdown</string>
<string>desktop-download</string>
<string>beaker</string>
<string>bell</string>
<string>cache</string>
<string>shield</string>
<string>bold</string>
<string>check-circle</string>
<string>italic</string>
<string>tasklist</string>
<string>verified</string>
<string>smiley</string>
<string>unverified</string>
<string>check-circle-fill</string>
<string>file</string>
<string>grabber</string>
<string>checkbox</string>
<string>reply</string>
<string>device-desktop</string>
<string>circle</string>
<string>clock-fill</string>
<string>cloud</string>
<string>cloud-offline</string>
<string>code-of-conduct</string>
<string>code-review</string>
<string>code-square</string>
<string>codescan</string>
<string>codescan-checkmark</string>
<string>codespaces</string>
<string>columns</string>
<string>command-palette</string>
<string>commit</string>
<string>container</string>
<string>copilot</string>
<string>copilot-error</string>
<string>copilot-warning</string>
<string>copy</string>
<string>cpu</string>
<string>cross-reference</string>
<string>dependabot</string>
<string>diamond</string>
<string>discussion-closed</string>
<string>discussion-duplicate</string>
<string>discussion-outdated</string>
<string>dot</string>
<string>duplicate</string>
<string>eye-closed</string>
<string>feed-discussion</string>
<string>feed-forked</string>
<string>feed-heart</string>
<string>feed-merged</string>
<string>feed-person</string>
<string>feed-repo</string>
<string>feed-rocket</string>
<string>feed-star</string>
<string>feed-tag</string>
<string>feed-trophy</string>
<string>file-added</string>
<string>file-badge</string>
<string>file-diff</string>
<string>file-directory-fill</string>
<string>file-directory-open-fill</string>
<string>file-moved</string>
<string>file-removed</string>
<string>filter</string>
<string>fiscal-host</string>
<string>fold-down</string>
<string>fold-up</string>
<string>git-merge-queue</string>
<string>git-pull-request-closed</string>
<string>git-pull-request-draft</string>
<string>goal</string>
<string>hash</string>
<string>heading</string>
<string>heart-fill</string>
<string>home-fill</string>
<string>hourglass</string>
<string>id-badge</string>
<string>image</string>
<string>infinity</string>
<string>issue-draft</string>
<string>issue-tracked-by</string>
<string>issue-tracks</string>
<string>iterations</string>
<string>kebab-horizontal</string>
<string>key-asterisk</string>
<string>log</string>
<string>moon</string>
<string>move-to-bottom</string>
<string>move-to-end</string>
<string>move-to-start</string>
<string>move-to-top</string>
<string>multi-select</string>
<string>no-entry</string>
<string>north-star</string>
<string>note</string>
<string>number</string>
<string>package-dependencies</string>
<string>package-dependents</string>
<string>paper-airplane</string>
<string>paperclip</string>
<string>passkey-fill</string>
<string>people</string>
<string>person-add</string>
<string>person-fill</string>
<string>play</string>
<string>plus-circle</string>
<string>project</string>
<string>project-roadmap</string>
<string>project-symlink</string>
<string>project-template</string>
<string>rel-file-path</string>
<string>repo-deleted</string>
<string>repo-locked</string>
<string>repo-template</string>
<string>report</string>
<string>rows</string>
<string>screen-full</string>
<string>screen-normal</string>
<string>share</string>
<string>share-android</string>
<string>shield-check</string>
<string>shield-lock</string>
<string>shield-slash</string>
<string>shield-x</string>
<string>sidebar-collapse</string>
<string>sidebar-expand</string>
<string>single-select</string>
<string>skip</string>
<string>skip-fill</string>
<string>sort-asc</string>
<string>sort-desc</string>
<string>sparkle-fill</string>
<string>sponsor-tiers</string>
<string>square</string>
<string>stack</string>
<string>star-fill</string>
<string>stopwatch</string>
<string>strikethrough</string>
<string>sun</string>
<string>tab</string>
<string>tab-external</string>
<string>table</string>
<string>telescope-fill</string>
<string>trophy</string>
<string>typography</string>
<string>unlink</string>
<string>unlock</string>
<string>unread</string>
<string>video</string>
<string>webhook</string>
<string>workflow</string>
<string>x-circle</string>
<string>x-circle-fill</string>
<string>zoom-in</string>
<string>zoom-out</string>
<string>bookmark-slash</string>
</array>
<key>public.postscriptNames</key>
<dict>
<key>accessibility</key>
<string>uniF406</string>
<key>accessibility-inset</key>
<string>uniF40B</string>
<key>alert</key>
<string>uniF421</string>
<key>alert-fill</key>
<string>uniF40C</string>
<key>apps</key>
<string>uniF40E</string>
<key>archive</key>
<string>uniF411</string>
<key>arrow-both</key>
<string>uniF416</string>
<key>arrow-down</key>
<string>uniF433</string>
<key>arrow-down-left</key>
<string>uniF424</string>
<key>arrow-down-right</key>
<string>uniF43E</string>
<key>arrow-left</key>
<string>uniF434</string>
<key>arrow-right</key>
<string>uniF432</string>
<key>arrow-switch</key>
<string>uniF443</string>
<key>arrow-up</key>
<string>uniF431</string>
<key>arrow-up-left</key>
<string>uniF45C</string>
<key>arrow-up-right</key>
<string>uniF46C</string>
<key>beaker</key>
<string>uniF499</string>
<key>bell-fill</key>
<string>uniF476</string>
<key>bell-slash</key>
<string>uniF478</string>
<key>blocked</key>
<string>uniF479</string>
<key>bold</key>
<string>uniF49D</string>
<key>bookmark-fill</key>
<string>uniF47A</string>
<key>bookmark-slash</key>
<string>uniF533</string>
<key>bookmark-slash-fill</key>
<string>uniF493</string>
<key>broadcast</key>
<string>uniF43C</string>
<key>browser</key>
<string>uniF488</string>
<key>cache</key>
<string>uniF49B</string>
<key>check</key>
<string>uniF42E</string>
<key>check-circle</key>
<string>uniF49E</string>
<key>check-circle-fill</key>
<string>uniF4A4</string>
<key>checkbox</key>
<string>uniF4A7</string>
<key>checklist</key>
<string>uniF45E</string>
<key>chevron-down</key>
<string>uniF47C</string>
<key>chevron-left</key>
<string>uniF47D</string>
<key>chevron-right</key>
<string>uniF460</string>
<key>chevron-up</key>
<string>uniF47B</string>
<key>circle-slash</key>
<string>uniF468</string>
<key>clock</key>
<string>uniF43A</string>
<key>clock-fill</key>
<string>uniF4AB</string>
<key>cloud-offline</key>
<string>uniF4AD</string>
<key>code</key>
<string>uniF44F</string>
<key>code-of-conduct</key>
<string>uniF4AE</string>
<key>code-review</key>
<string>uniF4AF</string>
<key>code-square</key>
<string>uniF4B0</string>
<key>codescan</key>
<string>uniF4B1</string>
<key>codescan-checkmark</key>
<string>uniF4B2</string>
<key>codespaces</key>
<string>uniF4B3</string>
<key>columns</key>
<string>uniF4B4</string>
<key>command-palette</key>
<string>uniF4B5</string>
<key>comment</key>
<string>uniF41F</string>
<key>comment-discussion</key>
<string>uniF442</string>
<key>commit</key>
<string>uniF4B6</string>
<key>container</key>
<string>uniF4B7</string>
<key>copilot</key>
<string>uniF4B8</string>
<key>copilot-error</key>
<string>uniF4B9</string>
<key>copilot-warning</key>
<string>uniF4BA</string>
<key>copy</key>
<string>uniF4BB</string>
<key>cpu</key>
<string>uniF4BC</string>
<key>credit-card</key>
<string>uniF439</string>
<key>cross-reference</key>
<string>uniF4BD</string>
<key>dash</key>
<string>uniF48B</string>
<key>database</key>
<string>uniF472</string>
<key>dependabot</key>
<string>uniF4BE</string>
<key>desktop-download</key>
<string>uniF498</string>
<key>device-camera</key>
<string>uniF446</string>
<key>device-camera-video</key>
<string>uniF447</string>
<key>device-desktop</key>
<string>uniF4A9</string>
<key>device-mobile</key>
<string>uniF42C</string>
<key>diff</key>
<string>uniF440</string>
<key>diff-added</key>
<string>uniF457</string>
<key>diff-ignored</key>
<string>uniF474</string>
<key>diff-modified</key>
<string>uniF459</string>
<key>diff-removed</key>
<string>uniF458</string>
<key>diff-renamed</key>
<string>uniF45A</string>
<key>discussion-closed</key>
<string>uniF4C0</string>
<key>discussion-duplicate</key>
<string>uniF4C1</string>
<key>discussion-outdated</key>
<string>uniF4C2</string>
<key>dot</key>
<string>uniF4C3</string>
<key>dot-fill</key>
<string>uniF444</string>
<key>download</key>
<string>uniF409</string>
<key>duplicate</key>
<string>uniF4C4</string>
<key>eye-closed</key>
<string>uniF4C5</string>
<key>feed-discussion</key>
<string>uniF4C6</string>
<key>feed-forked</key>
<string>uniF4C7</string>
<key>feed-heart</key>
<string>uniF4C8</string>
<key>feed-merged</key>
<string>uniF4C9</string>
<key>feed-person</key>
<string>uniF4CA</string>
<key>feed-repo</key>
<string>uniF4CB</string>
<key>feed-rocket</key>
<string>uniF4CC</string>
<key>feed-star</key>
<string>uniF4CD</string>
<key>feed-tag</key>
<string>uniF4CE</string>
<key>feed-trophy</key>
<string>uniF4CF</string>
<key>file</key>
<string>uniF4A5</string>
<key>file-added</key>
<string>uniF4D0</string>
<key>file-badge</key>
<string>uniF4D1</string>
<key>file-binary</key>
<string>uniF471</string>
<key>file-code</key>
<string>uniF40D</string>
<key>file-diff</key>
<string>uniF4D2</string>
<key>file-directory</key>
<string>uniF413</string>
<key>file-directory-fill</key>
<string>uniF4D3</string>
<key>file-directory-open-fill</key>
<string>uniF4D4</string>
<key>file-media</key>
<string>uniF40F</string>
<key>file-moved</key>
<string>uniF4D5</string>
<key>file-removed</key>
<string>uniF4D6</string>
<key>file-submodule</key>
<string>uniF414</string>
<key>file-symlink-directory</key>
<string>uniF482</string>
<key>file-symlink-file</key>
<string>uniF481</string>
<key>file-zip</key>
<string>uniF410</string>
<key>filter</key>
<string>uniF4D7</string>
<key>fiscal-host</key>
<string>uniF4D8</string>
<key>flame</key>
<string>uniF490</string>
<key>fold</key>
<string>uniF48C</string>
<key>fold-down</key>
<string>uniF4D9</string>
<key>fold-up</key>
<string>uniF4DA</string>
<key>gift</key>
<string>uniF436</string>
<key>git-branch</key>
<string>uniF418</string>
<key>git-commit</key>
<string>uniF417</string>
<key>git-compare</key>
<string>uniF47F</string>
<key>git-merge</key>
<string>uniF419</string>
<key>git-merge-queue</key>
<string>uniF4DB</string>
<key>git-pull-request</key>
<string>uniF407</string>
<key>git-pull-request-closed</key>
<string>uniF4DC</string>
<key>git-pull-request-draft</key>
<string>uniF4DD</string>
<key>globe</key>
<string>uniF484</string>
<key>goal</key>
<string>uniF4DE</string>
<key>grabber</key>
<string>uniF4A6</string>
<key>graph</key>
<string>uniF437</string>
<key>hash</key>
<string>uniF4DF</string>
<key>heading</key>
<string>uniF4E0</string>
<key>heart-fill</key>
<string>uniF4E1</string>
<key>history</key>
<string>uniF464</string>
<key>home</key>
<string>uniF46D</string>
<key>home-fill</key>
<string>uniF4E2</string>
<key>horizontal-rule</key>
<string>uniF45B</string>
<key>hubot</key>
<string>uniF477</string>
<key>id-badge</key>
<string>uniF4E4</string>
<key>image</key>
<string>uniF4E5</string>
<key>inbox</key>
<string>uniF48D</string>
<key>info</key>
<string>uniF449</string>
<key>issue-closed</key>
<string>uniF41D</string>
<key>issue-draft</key>
<string>uniF4E7</string>
<key>issue-opened</key>
<string>uniF41B</string>
<key>issue-reopened</key>
<string>uniF41C</string>
<key>issue-tracked-by</key>
<string>uniF4E8</string>
<key>issue-tracks</key>
<string>uniF4E9</string>
<key>italic</key>
<string>uniF49F</string>
<key>iterations</key>
<string>uniF4EA</string>
<key>kebab-horizontal</key>
<string>uniF4EB</string>
<key>key-asterisk</key>
<string>uniF4EC</string>
<key>law</key>
<string>uniF495</string>
<key>light-bulb</key>
<string>uniF400</string>
<key>link</key>
<string>uniF44C</string>
<key>link-external</key>
<string>uniF465</string>
<key>list-ordered</key>
<string>uniF452</string>
<key>list-unordered</key>
<string>uniF451</string>
<key>location</key>
<string>uniF450</string>
<key>log</key>
<string>uniF4ED</string>
<key>logo-gist</key>
<string>uniF480</string>
<key>logo-github</key>
<string>uniF470</string>
<key>mail</key>
<string>uniF42F</string>
<key>mark-github</key>
<string>uniF408</string>
<key>markdown</key>
<string>uniF48A</string>
<key>megaphone</key>
<string>uniF45F</string>
<key>mention</key>
<string>uniF486</string>
<key>meter</key>
<string>uniF463</string>
<key>milestone</key>
<string>uniF45D</string>
<key>moon</key>
<string>uniF4EE</string>
<key>mortar-board</key>
<string>uniF494</string>
<key>move-to-bottom</key>
<string>uniF4EF</string>
<key>move-to-end</key>
<string>uniF4F0</string>
<key>move-to-start</key>
<string>uniF4F1</string>
<key>move-to-top</key>
<string>uniF4F2</string>
<key>multi-select</key>
<string>uniF4F3</string>
<key>mute</key>
<string>uniF466</string>
<key>no-entry</key>
<string>uniF4F4</string>
<key>north-star</key>
<string>uniF4F5</string>
<key>number</key>
<string>uniF4F7</string>
<key>organization</key>
<string>uniF42B</string>
<key>package-dependencies</key>
<string>uniF4F8</string>
<key>package-dependents</key>
<string>uniF4F9</string>
<key>paintbrush</key>
<string>uniF48F</string>
<key>paper-airplane</key>
<string>uniF4FA</string>
<key>passkey-fill</key>
<string>uniF4FC</string>
<key>paste</key>
<string>uniF429</string>
<key>people</key>
<string>uniF4FD</string>
<key>person</key>
<string>uniF415</string>
<key>person-add</key>
<string>uniF4FE</string>
<key>person-fill</key>
<string>uniF4FF</string>
<key>pin</key>
<string>uniF435</string>
<key>play</key>
<string>uniF500</string>
<key>plug</key>
<string>uniF492</string>
<key>plus-circle</key>
<string>uniF501</string>
<key>project</key>
<string>uniF502</string>
<key>project-roadmap</key>
<string>uniF503</string>
<key>project-symlink</key>
<string>uniF504</string>
<key>project-template</key>
<string>uniF505</string>
<key>pulse</key>
<string>uniF469</string>
<key>quote</key>
<string>uniF453</string>
<key>read</key>
<string>uniF430</string>
<key>rel-file-path</key>
<string>uniF506</string>
<key>reply</key>
<string>uniF4A8</string>
<key>repo</key>
<string>uniF401</string>
<key>repo-clone</key>
<string>uniF43F</string>
<key>repo-deleted</key>
<string>uniF507</string>
<key>repo-forked</key>
<string>uniF402</string>
<key>repo-locked</key>
<string>uniF508</string>
<key>repo-pull</key>
<string>uniF404</string>
<key>repo-push</key>
<string>uniF403</string>
<key>repo-template</key>
<string>uniF509</string>
<key>report</key>
<string>uniF50A</string>
<key>rows</key>
<string>uniF50B</string>
<key>rss</key>
<string>uniF428</string>
<key>ruby</key>
<string>uniF43B</string>
<key>screen-full</key>
<string>uniF50C</string>
<key>screen-normal</key>
<string>uniF50D</string>
<key>search</key>
<string>uniF422</string>
<key>server</key>
<string>uniF473</string>
<key>share</key>
<string>uniF50E</string>
<key>share-android</key>
<string>uniF50F</string>
<key>shield-check</key>
<string>uniF510</string>
<key>shield-lock</key>
<string>uniF511</string>
<key>shield-slash</key>
<string>uniF512</string>
<key>shield-x</key>
<string>uniF513</string>
<key>sidebar-collapse</key>
<string>uniF514</string>
<key>sidebar-expand</key>
<string>uniF515</string>
<key>sign-in</key>
<string>uniF42A</string>
<key>sign-out</key>
<string>uniF426</string>
<key>single-select</key>
<string>uniF516</string>
<key>skip</key>
<string>uniF517</string>
<key>skip-fill</key>
<string>uniF518</string>
<key>sliders</key>
<string>uniF462</string>
<key>smiley</key>
<string>uniF4A2</string>
<key>sort-asc</key>
<string>uniF519</string>
<key>sort-desc</key>
<string>uniF51A</string>
<key>sparkle-fill</key>
<string>uniF51B</string>
<key>sponsor-tiers</key>
<string>uniF51C</string>
<key>square</key>
<string>uniF51D</string>
<key>square-fill</key>
<string>uniF445</string>
<key>squirrel</key>
<string>uniF483</string>
<key>stack</key>
<string>uniF51E</string>
<key>star</key>
<string>uniF41E</string>
<key>star-fill</key>
<string>uniF51F</string>
<key>stop</key>
<string>uniF46E</string>
<key>strikethrough</key>
<string>uniF521</string>
<key>sync</key>
<string>uniF46A</string>
<key>tab</key>
<string>uniF523</string>
<key>tab-external</key>
<string>uniF524</string>
<key>table</key>
<string>uniF525</string>
<key>tag</key>
<string>uniF412</string>
<key>tasklist</key>
<string>uniF4A0</string>
<key>telescope-fill</key>
<string>uniF526</string>
<key>terminal</key>
<string>uniF489</string>
<key>three-bars</key>
<string>uniF44E</string>
<key>thumbsdown</key>
<string>uniF497</string>
<key>thumbsup</key>
<string>uniF496</string>
<key>tools</key>
<string>uniF425</string>
<key>trash</key>
<string>uniF48E</string>
<key>triangle-down</key>
<string>uniF44B</string>
<key>triangle-left</key>
<string>uniF438</string>
<key>triangle-right</key>
<string>uniF44A</string>
<key>triangle-up</key>
<string>uniF47E</string>
<key>typography</key>
<string>uniF528</string>
<key>unfold</key>
<string>uniF42D</string>
<key>unlink</key>
<string>uniF529</string>
<key>unlock</key>
<string>uniF52A</string>
<key>unmute</key>
<string>uniF485</string>
<key>unread</key>
<string>uniF52B</string>
<key>unverified</key>
<string>uniF4A3</string>
<key>upload</key>
<string>uniF40A</string>
<key>verified</key>
<string>uniF4A1</string>
<key>versions</key>
<string>uniF454</string>
<key>video</key>
<string>uniF52C</string>
<key>webhook</key>
<string>uniF52D</string>
<key>workflow</key>
<string>uniF52E</string>
<key>x-circle</key>
<string>uniF52F</string>
<key>x-circle-fill</key>
<string>uniF530</string>
<key>zap</key>
<string>uni26A1</string>
<key>zoom-in</key>
<string>uniF531</string>
<key>zoom-out</key>
<string>uniF532</string>
</dict>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/lib.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/lib.plist",
"repo_id": "cascadia-code",
"token_count": 13645
}
| 853 |
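The `lib.plist` row above is an ordinary Apple property list carrying the octicons glyph order and PostScript name map. As an assumed illustration (again, not the project's own tooling), Python's built-in `plistlib` can load it directly; the on-disk path in the sketch is hypothetical, since in this dataset the same plist text sits in the row's `text` cell.

```python
import plistlib

# Hypothetical path; substitute the plist text from the row's `text` cell if preferred.
with open("octicons.ufo/lib.plist", "rb") as fh:
    lib = plistlib.load(fh)

glyph_order = lib["public.glyphOrder"]        # ordered list of glyph names ("heart", "zap", ...)
ps_names = lib["public.postscriptNames"]      # glyph name -> production name ("dot" -> "uniF4C3")

print(len(glyph_order), "glyphs;", "shield-x ->", ps_names["shield-x"])
```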
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="i_seti_karma" format="2">
<advance width="1200"/>
<unicode hex="E622"/>
<note>
i_seti_karma
</note>
<outline>
<contour>
<point x="420" y="1412" type="line"/>
<point x="29" y="1412" type="line"/>
<point x="29" y="1020" type="line"/>
<point x="298" y="0" type="line"/>
<point x="420" y="0" type="line"/>
<point x="420" y="408" type="line"/>
<point x="486" y="408" type="line"/>
<point x="763" y="0" type="line"/>
<point x="1171" y="0" type="line"/>
<point x="690" y="726" type="line"/>
<point x="690" y="767" type="line"/>
<point x="1139" y="1420" type="line"/>
<point x="706" y="1420" type="line"/>
<point x="486" y="1061" type="line"/>
<point x="420" y="1061" type="line"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:42:08 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_karma.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_karma.glif",
"repo_id": "cascadia-code",
"token_count": 490
}
| 854 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>ascender</key>
<integer>1755</integer>
<key>capHeight</key>
<integer>0</integer>
<key>copyright</key>
<string>Weather Icons licensed under SIL OFL 1.1 — Code licensed under MIT License — Documentation licensed under CC BY 3.0</string>
<key>descender</key>
<integer>-293</integer>
<key>familyName</key>
<string>Weather Icons</string>
<key>guidelines</key>
<array/>
<key>italicAngle</key>
<integer>0</integer>
<key>openTypeHeadCreated</key>
<string>2015/08/18 21:25:12</string>
<key>openTypeHheaAscender</key>
<integer>2245</integer>
<key>openTypeHheaDescender</key>
<integer>-718</integer>
<key>openTypeNameDesigner</key>
<string>Erik Flowers, Lukas Bischoff (v1 Art)</string>
<key>openTypeNameDesignerURL</key>
<string>http://www.helloerik.com, http://www.artill.de</string>
<key>openTypeNamePreferredSubfamilyName</key>
<string>Regular</string>
<key>openTypeOS2Panose</key>
<array>
<integer>0</integer>
<integer>0</integer>
<integer>5</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
<integer>0</integer>
</array>
<key>openTypeOS2StrikeoutPosition</key>
<integer>1229</integer>
<key>openTypeOS2StrikeoutSize</key>
<integer>57</integer>
<key>openTypeOS2Type</key>
<array>
<integer>2</integer>
</array>
<key>openTypeOS2TypoAscender</key>
<integer>1755</integer>
<key>openTypeOS2TypoDescender</key>
<integer>-293</integer>
<key>openTypeOS2TypoLineGap</key>
<integer>0</integer>
<key>openTypeOS2WeightClass</key>
<integer>400</integer>
<key>openTypeOS2WidthClass</key>
<integer>5</integer>
<key>openTypeOS2WinAscent</key>
<integer>2245</integer>
<key>openTypeOS2WinDescent</key>
<integer>718</integer>
<key>postscriptBlueValues</key>
<array/>
<key>postscriptFamilyBlues</key>
<array/>
<key>postscriptFamilyOtherBlues</key>
<array/>
<key>postscriptFontName</key>
<string>WeatherIcons-Regular</string>
<key>postscriptOtherBlues</key>
<array/>
<key>postscriptStemSnapH</key>
<array/>
<key>postscriptStemSnapV</key>
<array/>
<key>styleMapStyleName</key>
<string>regular</string>
<key>styleName</key>
<string>Regular</string>
<key>unitsPerEm</key>
<integer>2048</integer>
<key>versionMajor</key>
<integer>1</integer>
<key>versionMinor</key>
<integer>100</integer>
<key>xHeight</key>
<integer>0</integer>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/weathericons-regular-webfont.ufo/fontinfo.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/weathericons-regular-webfont.ufo/fontinfo.plist",
"repo_id": "cascadia-code",
"token_count": 1244
}
| 855 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="uniF0C6" format="2">
<advance width="1200"/>
<unicode hex="E3BE"/>
<note>
uniF0C6
</note>
<outline>
<contour>
<point x="228" y="704"/>
<point x="215" y="691"/>
<point x="215" y="673"/>
<point x="228" y="660"/>
<point x="238" y="660" type="qcurve" smooth="yes"/>
<point x="355" y="660" type="line" smooth="yes"/>
<point x="364" y="660"/>
<point x="376" y="670"/>
<point x="378" y="678" type="qcurve" smooth="yes"/>
<point x="413" y="886" type="line"/>
<point x="477" y="366" type="line" smooth="yes"/>
<point x="479" y="357"/>
<point x="491" y="346"/>
<point x="499" y="346" type="qcurve" smooth="yes"/>
<point x="508" y="346"/>
<point x="521" y="357"/>
<point x="522" y="366" type="qcurve" smooth="yes"/>
<point x="571" y="748" type="line"/>
<point x="613" y="637" type="line" smooth="yes"/>
<point x="615" y="630"/>
<point x="626" y="622"/>
<point x="633" y="622" type="qcurve" smooth="yes"/>
<point x="635" y="622" type="line" smooth="yes"/>
<point x="642" y="623"/>
<point x="654" y="632"/>
<point x="656" y="640" type="qcurve" smooth="yes"/>
<point x="716" y="911" type="line"/>
<point x="744" y="390" type="line" smooth="yes"/>
<point x="744" y="382"/>
<point x="756" y="369"/>
<point x="773" y="368"/>
<point x="786" y="377"/>
<point x="788" y="386" type="qcurve" smooth="yes"/>
<point x="853" y="660" type="line"/>
<point x="962" y="660" type="line" smooth="yes"/>
<point x="972" y="660"/>
<point x="985" y="673"/>
<point x="985" y="691"/>
<point x="972" y="704"/>
<point x="962" y="704" type="qcurve" smooth="yes"/>
<point x="835" y="704" type="line" smooth="yes"/>
<point x="827" y="704"/>
<point x="815" y="695"/>
<point x="813" y="687" type="qcurve" smooth="yes"/>
<point x="780" y="547" type="line"/>
<point x="752" y="1075" type="line" smooth="yes"/>
<point x="751" y="1084"/>
<point x="740" y="1096"/>
<point x="731" y="1096" type="qcurve" smooth="yes"/>
<point x="723" y="1097"/>
<point x="709" y="1087"/>
<point x="707" y="1079" type="qcurve" smooth="yes"/>
<point x="629" y="724" type="line"/>
<point x="583" y="849" type="line" smooth="yes"/>
<point x="580" y="856"/>
<point x="566" y="864"/>
<point x="559" y="864" type="qcurve" smooth="yes"/>
<point x="551" y="863"/>
<point x="540" y="852"/>
<point x="539" y="844" type="qcurve"/>
<point x="500" y="547" type="line"/>
<point x="440" y="1043" type="line" smooth="yes"/>
<point x="438" y="1052"/>
<point x="426" y="1062"/>
<point x="418" y="1063" type="qcurve" smooth="yes"/>
<point x="410" y="1063"/>
<point x="397" y="1053"/>
<point x="395" y="1044" type="qcurve" smooth="yes"/>
<point x="337" y="704" type="line"/>
<point x="238" y="704" type="line" smooth="yes"/>
</contour>
</outline>
<lib>
<dict>
<key>assembly</key>
<string>SVTCA[0] /* SetFreedomAndProjectionVectorToAxis */
PUSHB[ ] /* 1 value pushed */
3
MDAP[1] /* MoveDirectAbsPt */
PUSHB[ ] /* 1 value pushed */
32
SHP[1] /* ShiftPointByLastPoint */
PUSHB[ ] /* 1 value pushed */
68
MDRP[01101] /* MoveDirectRelPt */
PUSHB[ ] /* 2 values pushed */
38 67
SHP[0] /* ShiftPointByLastPoint */
SHP[0] /* ShiftPointByLastPoint */
SVTCA[1] /* SetFreedomAndProjectionVectorToAxis */
PUSHB[ ] /* 1 value pushed */
70
MDAP[1] /* MoveDirectAbsPt */
PUSHB[ ] /* 2 values pushed */
71 1
CALL[ ] /* CallFunction */
PUSHB[ ] /* 1 value pushed */
54
SMD[ ] /* SetMinimumDistance */
PUSHW[ ] /* 3 values pushed */
16150 -2759 21
CALL[ ] /* CallFunction */
SPVFS[ ] /* SetPVectorFromStack */
PUSHB[ ] /* 1 value pushed */
67
MDAP[0] /* MoveDirectAbsPt */
SFVTPV[ ] /* SetFVectorToPVector */
PUSHB[ ] /* 1 value pushed */
66
MDRP[00000] /* MoveDirectRelPt */
PUSHB[ ] /* 2 values pushed */
7 11
MIRP[11001] /* MoveIndirectRelPt */
PUSHB[ ] /* 1 value pushed */
8
MDRP[00000] /* MoveDirectRelPt */
PUSHW[ ] /* 3 values pushed */
-16261 -2001 21
CALL[ ] /* CallFunction */
SPVFS[ ] /* SetPVectorFromStack */
PUSHB[ ] /* 2 values pushed */
7 8
SFVTL[0] /* SetFVectorToLine */
PUSHB[ ] /* 1 value pushed */
8
SRP0[ ] /* SetRefPoint0 */
SFVTPV[ ] /* SetFVectorToPVector */
PUSHB[ ] /* 1 value pushed */
9
MDRP[00000] /* MoveDirectRelPt */
PUSHB[ ] /* 2 values pushed */
60 12
MIRP[11001] /* MoveIndirectRelPt */
PUSHB[ ] /* 1 value pushed */
59
MDRP[00000] /* MoveDirectRelPt */
PUSHW[ ] /* 3 values pushed */
16250 -2094 21
CALL[ ] /* CallFunction */
SPVFS[ ] /* SetPVectorFromStack */
PUSHB[ ] /* 2 values pushed */
60 59
SFVTL[0] /* SetFVectorToLine */
PUSHB[ ] /* 1 value pushed */
59
SRP0[ ] /* SetRefPoint0 */
SFVTPV[ ] /* SetFVectorToPVector */
PUSHB[ ] /* 1 value pushed */
58
MDRP[00000] /* MoveDirectRelPt */
PUSHB[ ] /* 2 values pushed */
15 11
MIRP[11001] /* MoveIndirectRelPt */
PUSHB[ ] /* 1 value pushed */
16
MDRP[00000] /* MoveDirectRelPt */
PUSHW[ ] /* 3 values pushed */
15992 -3561 21
CALL[ ] /* CallFunction */
SPVFS[ ] /* SetPVectorFromStack */
SFVTPV[ ] /* SetFVectorToPVector */
PUSHB[ ] /* 1 value pushed */
51
SRP0[ ] /* SetRefPoint0 */
PUSHB[ ] /* 1 value pushed */
50
MDRP[00000] /* MoveDirectRelPt */
PUSHB[ ] /* 2 values pushed */
24 11
MIRP[11001] /* MoveIndirectRelPt */
PUSHB[ ] /* 1 value pushed */
25
MDRP[00000] /* MoveDirectRelPt */
PUSHW[ ] /* 3 values pushed */
-16360 -884 21
CALL[ ] /* CallFunction */
SPVFS[ ] /* SetPVectorFromStack */
PUSHB[ ] /* 2 values pushed */
51 50
SFVTL[0] /* SetFVectorToLine */
PUSHB[ ] /* 1 value pushed */
50
SRP0[ ] /* SetRefPoint0 */
SFVTPV[ ] /* SetFVectorToPVector */
PUSHB[ ] /* 1 value pushed */
26
MDRP[00000] /* MoveDirectRelPt */
PUSHB[ ] /* 2 values pushed */
44 11
MIRP[11001] /* MoveIndirectRelPt */
PUSHB[ ] /* 1 value pushed */
31
MDRP[00000] /* MoveDirectRelPt */
PUSHB[ ] /* 2 values pushed */
24 25
SFVTL[0] /* SetFVectorToLine */
PUSHB[ ] /* 1 value pushed */
50
SRP0[ ] /* SetRefPoint0 */
PUSHB[ ] /* 4 values pushed */
25 50 26 19
CALL[ ] /* CallFunction */
PUSHW[ ] /* 3 values pushed */
-16362 -852 21
CALL[ ] /* CallFunction */
SFVFS[ ] /* SetFVectorFromStack */
PUSHB[ ] /* 1 value pushed */
44
SRP0[ ] /* SetRefPoint0 */
PUSHB[ ] /* 4 values pushed */
43 44 31 19
CALL[ ] /* CallFunction */
PUSHB[ ] /* 3 values pushed */
43 44 31
DUP[ ] /* DuplicateTopStack */
ROLL[ ] /* RollTopThreeStack */
DUP[ ] /* DuplicateTopStack */
ROLL[ ] /* RollTopThreeStack */
SWAP[ ] /* SwapTopStack */
SPVTL[0] /* SetPVectorToLine */
SFVTPV[ ] /* SetFVectorToPVector */
SRP1[ ] /* SetRefPoint1 */
SRP2[ ] /* SetRefPoint2 */
IP[ ] /* InterpolatePts */
SVTCA[0] /* SetFreedomAndProjectionVectorToAxis */
NPUSHB[ ] /* 17 values pushed */
7 8 9 15 16 24 25 26 31 43 44 50 51 58 59 60 66
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
SVTCA[1] /* SetFreedomAndProjectionVectorToAxis */
NPUSHB[ ] /* 18 values pushed */
7 8 9 15 16 24 25 26 31 43 44 50 51 58 59 60 66 67
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
MDAP[0] /* MoveDirectAbsPt */
PUSHB[ ] /* 1 value pushed */
64
SMD[ ] /* SetMinimumDistance */
SVTCA[1] /* SetFreedomAndProjectionVectorToAxis */
SVTCA[0] /* SetFreedomAndProjectionVectorToAxis */
IUP[0] /* InterpolateUntPts */
IUP[1] /* InterpolateUntPts */</string>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:48:52 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/weathericons-regular-webfont.ufo/glyphs/uniF_0C_6.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/weathericons-regular-webfont.ufo/glyphs/uniF_0C_6.glif",
"repo_id": "cascadia-code",
"token_count": 3861
}
| 856 |
[](https://github.com/microsoft/causica/actions/workflows/ci-build.yml)
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/microsoft/causica)
# Causica
## Overview
Causal machine learning enables individuals and organizations to make better data-driven decisions. In particular, causal ML allows us to answer “what if” questions about the effect of potential actions on outcomes.
Causal ML is a nascent area, and we aim to enable a **scalable**, **flexible**, **real-world applicable end-to-end** causal inference framework. In particular, we bridge causal discovery, causal inference, and deep learning to achieve this goal. We aim to develop technology that can automate causal decision-making using existing observational data alone, outputting both the discovered causal relationships and estimates of the effects of actions simultaneously.
Causica is a deep learning library for end-to-end causal inference, including both causal discovery and inference. It implements the deep end-to-end inference framework [2] and several alternatives.
This project splits the interventional decision making out from the observational decision making in the [Azua](https://github.com/microsoft/project-azua) repo.
This codebase has been heavily refactored; you can find the previous version of the code [here](https://github.com/microsoft/causica/releases/tag/v0.0.0).
# DECI: End to End Causal Inference
## Installation
The Causica repo is on PyPI, so it can be pip installed:
```
pip install causica
```
## About
Real-world data-driven decision making requires causal inference to ensure the validity of drawn conclusions. However, it is very uncommon to have perfect a priori knowledge of the causal relationships underlying the relevant variables. DECI allows the end user to perform causal inference without having complete knowledge of the causal graph. This is done by combining the causal discovery and causal inference steps in a single model. DECI takes in observational data and outputs ATE and CATE estimates.
For more information, please refer to the [paper](https://arxiv.org/abs/2202.02195).
**Model Description**
DECI is a generative model that employs an additive noise structural equation model (ANM-SEM) to capture the functional relationships among variables and exogenous noise, while simultaneously learning a variational distribution over causal graphs. Specifically, the relationships among variables are captured with flexible neural networks while the exogenous noise is modelled as either a Gaussian or spline-flow noise model. The SEM is reversible, meaning that we can generate an observation vector from an exogenous noise vector through forward simulation and, given an observation vector, we can recover a unique corresponding exogenous noise vector. In this sense, the DECI SEM can be seen as a flow from exogenous noise to observations. We employ a mean-field approximate posterior distribution over graphs, which is learnt together with the functional relationships among variables by optimising an evidence lower bound (ELBO). Additionally, DECI supports learning under partially observed data.
**Simulation-based Causal Inference**
DECI estimates causal quantities (ATE) by applying the relevant interventions to its learnt causal graph (i.e. mutilating incoming edges to intervened variables) and then sampling from the generative model. This process involves first sampling a vector of exogenous noise from the learnt noise distribution and then forward simulating the SEM until an observation vector is obtained. ATE can be computed by estimating an expectation over the effect variable of interest using Monte Carlo samples of the intervened distribution of observations.
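As a toy illustration of this simulation-based procedure (not the causica API; a hand-written two-variable linear SEM with assumed coefficients), the ATE of `do(X=1)` versus `do(X=0)` on `Y` can be estimated by cutting the edges into the intervened variable and averaging forward simulations:
```
import torch

# Toy ANM-SEM: X = eps_x, Y = 2*X + eps_y (the coefficient 2 is illustrative only).
def simulate_y(n_samples, do_x=None):
    eps_x, eps_y = torch.randn(n_samples), torch.randn(n_samples)
    x = eps_x if do_x is None else torch.full((n_samples,), float(do_x))  # mutilated SEM
    return 2.0 * x + eps_y

n = 100_000
ate = (simulate_y(n, do_x=1.0) - simulate_y(n, do_x=0.0)).mean()  # Monte Carlo estimate, approx. 2.0
```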
## How to run
The best place to start is the `examples/multi_investment_sales_attribution.ipynb` notebook. This explains how to fit a model using PyTorch Lightning and test ATE and ITE results.
For a more detailed introduction to the components and how they fit together, see the notebook `examples/csuite_example.ipynb`, which shows how to train a DECI model and check the causal discovery results.
This will download the data from the CSuite Azure blob storage and train DECI on it. See [here](https://github.com/microsoft/csuite) for more info about CSuite datasets. The notebook will work on any of the available CSuite datasets.
**Specifying a noise model**
The exogenous noise model can be modified by changing the `noise_dist` field within `TrainingConfig`; either Gaussian or Spline is allowed.
The Gaussian model has a Gaussian exogenous noise distribution with its mean set to 0 while its variance is learnt.
The Spline model uses a flexible spline flow that is learnt from the data. This model provides the most gains in heavy-tailed noise settings, where the Gaussian model is at risk of overfitting to outliers, but it can take longer to train.
**Using a known Causal graph**
To use DECI to learn the functional relationships, remove the variational distribution terms from the loss and replace the sample with the known graph.
**Example using the CLI**
An example of how to run a training job with the noise distribution specified in the config `src/causica/config/lightning/default_gaussian.yaml` and the data configuration specified in `src/causica/config/lightning/default_data.yaml`:
```
python -m causica.lightning.main \
--config src/causica/config/lightning/default_gaussian.yaml --data src/causica/config/lightning/default_data.yaml
```
## Further extensions
For now, we have removed Rhino and DDECI from the codebase but they will be added back. You can still access the previously released versions [here](https://github.com/microsoft/causica/releases/tag/v0.0.0).
# References
If you have used the models in our code base, please consider citing the corresponding papers:
[1], **(VISL)** Pablo Morales-Alvarez, Wenbo Gong, Angus Lamb, Simon Woodhead, Simon Peyton Jones, Nick Pawlowski, Miltiadis Allamanis, Cheng Zhang, "Simultaneous Missing Value Imputation and Structure Learning with Groups", [ArXiv preprint](https://arxiv.org/abs/2110.08223)
[2], **(DECI)** Tomas Geffner, Javier Antoran, Adam Foster, Wenbo Gong, Chao Ma, Emre Kiciman, Amit Sharma, Angus Lamb, Martin Kukla, Nick Pawlowski, Miltiadis Allamanis, Cheng Zhang. Deep End-to-end Causal Inference. [Arxiv preprint](https://arxiv.org/abs/2202.02195) (2022)
[3], **(DDECI)** Matthew Ashman, Chao Ma, Agrin Hilmkil, Joel Jennings, Cheng Zhang. Causal Reasoning in the Presence of Latent Confounders via Neural ADMG Learning. [ICLR](https://openreview.net/forum?id=dcN0CaXQhT) (2023)
[4], **(Rhino)** Wenbo Gong, Joel Jennings, Cheng Zhang, Nick Pawlowski. Rhino: Deep Causal Temporal Relationship Learning with History-dependent Noise. [ICLR](https://openreview.net/forum?id=i_1rbq8yFWC) (2023)
# Development
## Poetry
We use Poetry to manage the project dependencies; they're specified in [pyproject.toml](pyproject.toml). To install Poetry, run:
```
curl -sSL https://install.python-poetry.org | python3 -
```
To install the environment, run `poetry install`; this will create a virtualenv that you can use by running either `poetry shell` or `poetry run {command}`. It's also a virtualenv that you can interact with in the normal way.
More information about Poetry can be found [here](https://python-poetry.org/).
## mlflow
We use [mlflow](https://mlflow.org/) for logging metrics and artifacts. By default it will run locally and store results in `./mlruns`.
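Assuming the default local setup above, the tracking UI can be pointed at that directory by running:
```
poetry run mlflow ui
```
and opening the printed URL in a browser.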
|
causica/README.md/0
|
{
"file_path": "causica/README.md",
"repo_id": "causica",
"token_count": 1997
}
| 857 |
import torch
from scotch.sdes.sdes_core import SDE
class YeastGlycolysisSDE(SDE):
"""Implementation of yeast glycolysis SDE (Daniels & Nemenman 2015, Bellot et al. 2022); see paper for more details.
Attributes:
noise_type: type of SDE noise, required for BaseSDE; always "diagonal" for glycolysis
sde_type: type of SDE, required for BaseSDE; always "ito" for glycolysis
noise_scale: diffusion coefficient constant
others: parameters for yeast glycolysis SDE
Methods:
f: drift coefficient for glycolysis system
g: diffusion coefficient for glycolysis system
"""
noise_type = "diagonal"
sde_type = "ito"
def __init__(
self,
noise_scale,
k1=0.52,
K1=100,
K2=6,
K3=16,
K4=100,
K5=1.28,
K6=12,
K=1.8,
kappa=13,
phi=0.1,
q=4,
A=4,
N=1,
J0=2.5,
):
super().__init__()
self.noise_scale = noise_scale
self.k1 = k1
self.K1 = K1
self.K2 = K2
self.K3 = K3
self.K4 = K4
self.K5 = K5
self.K6 = K6
self.K = K
self.kappa = kappa
self.phi = phi
self.q = q
self.A = A
self.N = N
self.J0 = J0
@staticmethod
def graph():
return torch.tensor(
[
[1, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 1],
[0, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 1],
],
dtype=torch.long,
)
def f(self, t, y):
_ = t
dydt = torch.zeros_like(y)
dydt[:, 0] = self.J0 - (self.K1 * y[:, 0] * y[:, 5]) / (1 + (y[:, 5] / self.k1) ** self.q)
dydt[:, 1] = (
(2 * self.K1 * y[:, 0] * y[:, 5]) / (1 + (y[:, 5] / self.k1) ** self.q)
- self.K2 * y[:, 1] * (self.N - y[:, 4])
- self.K6 * y[:, 1] * y[:, 4]
)
dydt[:, 2] = self.K2 * y[:, 1] * (self.N - y[:, 4]) - self.K3 * y[:, 2] * (self.A - y[:, 5])
dydt[:, 3] = (
self.K3 * y[:, 2] * (self.A - y[:, 5]) - self.K4 * y[:, 3] * y[:, 4] - self.kappa * (y[:, 3] - y[:, 6])
)
dydt[:, 4] = self.K2 * y[:, 1] * (self.N - y[:, 4]) - self.K4 * y[:, 3] * y[:, 4] - self.K6 * y[:, 1] * y[:, 4]
dydt[:, 5] = (
(-2 * self.K1 * y[:, 0] * y[:, 5]) / (1 + (y[:, 5] / self.k1) ** self.q)
+ 2 * self.K3 * y[:, 2] * (self.A - y[:, 5])
- self.K5 * y[:, 5]
)
dydt[:, 6] = self.phi * self.kappa * (y[:, 3] - y[:, 6]) - self.K * y[:, 6]
return dydt
def g(self, t, y):
_ = t
return self.noise_scale * torch.ones_like(y)
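if __name__ == "__main__":
    # Example usage (sketch): integrate the glycolysis system forward in time.
    # Assumes the `torchsde` package is installed; the SCOTCH `SDE` base class follows the
    # torchsde interface (noise_type/sde_type attributes plus f(t, y) and g(t, y) methods).
    import torchsde

    sde = YeastGlycolysisSDE(noise_scale=0.01)  # noise scale is illustrative only
    y0 = torch.rand(16, 7)  # (batch_size, number of glycolysis state variables)
    ts = torch.linspace(0.0, 10.0, 100)  # integration time points
    ys = torchsde.sdeint(sde, y0, ts, method="euler")  # shape (len(ts), 16, 7)
    print(ys.shape)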
|
causica/research_experiments/scotch/src/scotch/dataset_generation/example_sdes/yeast_glycolysis.py/0
|
{
"file_path": "causica/research_experiments/scotch/src/scotch/dataset_generation/example_sdes/yeast_glycolysis.py",
"repo_id": "causica",
"token_count": 1721
}
| 858 |
"""This implements the abstract classes for the drift and diffusion coefficients used in SCOTCH"""
from abc import ABC, abstractmethod
from torch import Tensor, nn
class TrajectoryGraphEncoder(ABC, nn.Module):
@abstractmethod
def forward(self, trajectories: Tensor, graphs: Tensor) -> Tensor:
"""Encode trajectories and graphs into context vectors for posterior SDE.
Args:
trajectories (Tensor): Observed trajectories, shape (batch_size, num_time_points, observed_size)
graphs (Tensor): Weighted adjacency matrix, shape (batch_size, observed_size, observed_size)
Returns:
Tensor: Context vectors, shape (batch_size, num_time_points, context_size)
"""
class ContextualDriftCoefficient(ABC, nn.Module):
@abstractmethod
def forward(self, latent_states: Tensor, context_vectors: Tensor) -> Tensor:
"""Compute drift coefficient, given (batched) input latent states and context vectors. This will be used as
the drift coefficient of the posterior SDE.
Args:
latent_states: Latent states, shape (batch_size, latent_size)
context_vectors: Context vectors, shape (batch_size, context_size)
Returns:
Tensor: Drift coefficient, shape (batch_size, latent_size)
"""
class GraphCoefficient(ABC, nn.Module):
@abstractmethod
def forward(self, latent_states: Tensor, graphs: Tensor):
"""Compute drift (or diffusion) coefficient. The difference is that it supports the graph as input.
Args:
latent_states: Latent states, shape (batch_size, latent_size)
graphs: Context vectors, shape (batch_size, latent_size, latent_size)
Returns:
Tensor: Drift (or diffusion) coefficient, shape (batch_size, latent_size)
"""
class DiffusionCoefficient(ABC, nn.Module):
@abstractmethod
def forward(self, latent_states: Tensor):
"""Compute diffusion coefficient, given (batched) input latent states.
Args:
latent_states: Latent states, shape (batch_size, latent_size)
Returns:
Tensor: Diffusion coefficient, shape (batch_size, latent_size)
"""
|
causica/research_experiments/scotch/src/scotch/sdes/sde_modules.py/0
|
{
"file_path": "causica/research_experiments/scotch/src/scotch/sdes/sde_modules.py",
"repo_id": "causica",
"token_count": 819
}
| 859 |
"""
A module to load data from the standard directory structure (i.e. the one followed by csuite)
"""
import json
import logging
import os
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
from functools import partial
from typing import Any, Counter, Optional
import fsspec
import numpy as np
import torch
from dataclasses_json import dataclass_json
from tensordict import TensorDict
from causica.datasets.interventional_data import CounterfactualData, InterventionData
from causica.datasets.tensordict_utils import convert_one_hot
from causica.datasets.variable_types import DTYPE_MAP, VariableTypeEnum
CAUSICA_DATASETS_PATH = "https://azuastoragepublic.z6.web.core.windows.net/"
InterventionWithEffects = tuple[InterventionData, InterventionData, set[str]]
CounterfactualWithEffects = tuple[CounterfactualData, Optional[CounterfactualData], set[str]]
logger = logging.getLogger(__name__)
class DataEnum(Enum):
TRAIN = "train.csv"
TEST = "test.csv"
VALIDATION = "val.csv"
INTERVENTIONS = "interventions.json"
COUNTERFACTUALS = "counterfactuals.json"
TRUE_ADJACENCY = "adj_matrix.csv"
VARIABLES_JSON = "variables.json"
@dataclass_json
@dataclass(frozen=True)
class Variable:
"""Class to represent a variable in the variables metadata json object.
Args:
group_name: The name of the group the variable belongs to.
name: The name of the variable.
type: The type of the variable.
lower: The lower bound of the variable (if it is continuous).
upper: The upper bound of the variable (if it is continuous).
always_observed: Whether the variable is always observed.
"""
group_name: str
name: str
type: VariableTypeEnum = VariableTypeEnum.CONTINUOUS
lower: Optional[float] = None
upper: Optional[float] = None
always_observed: bool = True
@dataclass_json
@dataclass(frozen=True)
class VariablesMetadata:
"""Class to represent the variables metadata json object."""
variables: list[Variable]
def load_data(
root_path: str,
data_enum: DataEnum,
variables_metadata: Optional[VariablesMetadata] = None,
**storage_options: dict[str, Any],
):
"""
Load the Data from the location, dataset name and type of data.
Args:
root_path: The root path to the Data e.g. `CAUSICA_DATASETS_PATH/csuite_linexp_2`
data_enum: The type of dataset for which to return the path
variables_metadata: Optional variables object (to save downloading it multiple times)
**storage_options: Keyword args passed to `fsspec.open`
Return:
The downloaded data (the type depends on the data requested)
"""
path_name = os.path.join(root_path, data_enum.value)
fsspec_open = partial(fsspec.open, mode="r", encoding="utf-8", **storage_options)
logger.debug("Loading %s from %s with storage options %s", data_enum, path_name, storage_options)
if data_enum == DataEnum.TRUE_ADJACENCY:
with fsspec_open(path_name) as f:
return torch.tensor(np.loadtxt(f, dtype=int, delimiter=","))
if data_enum == DataEnum.VARIABLES_JSON:
if variables_metadata is not None:
raise ValueError("Variables metadata was supplied and requested")
with fsspec_open(path_name) as f:
return VariablesMetadata.from_json(f.read()) # type: ignore
if variables_metadata is None:
variables_metadata = load_data(root_path, data_enum=DataEnum.VARIABLES_JSON)
with fsspec_open(path_name) as f:
match data_enum:
case (DataEnum.TRAIN | DataEnum.TEST | DataEnum.VALIDATION):
arr = np.loadtxt(f, delimiter=",")
categorical_sizes = _get_categorical_sizes(variables_list=variables_metadata.variables)
return convert_one_hot(
tensordict_from_variables_metadata(arr, variables_metadata.variables),
one_hot_sizes=categorical_sizes,
)
case DataEnum.INTERVENTIONS:
return _load_interventions(json_object=json.load(f), metadata=variables_metadata)
case DataEnum.COUNTERFACTUALS:
return _load_counterfactuals(json_object=json.load(f), metadata=variables_metadata)
raise RuntimeError("Unrecognized data type")
def _load_interventions(json_object: dict[str, Any], metadata: VariablesMetadata) -> list[InterventionWithEffects]:
"""
Load the Interventional Datasets as a list of interventions/counterfactuals.
Args:
json_object: The .json file loaded as a json object.
metadata: Metadata of the dataset containing names and types.
Returns:
A list of interventions and the nodes we want to observe for each
"""
intervened_column_to_group_name: dict[int, str] = dict(
zip(
json_object["metadata"]["columns_to_nodes"],
list(item.group_name for item in metadata.variables),
)
)
interventions_list = []
for environment in json_object["environments"]:
conditioning_idxs = environment["conditioning_idxs"]
if conditioning_idxs is None:
condition_nodes = []
else:
condition_nodes = [intervened_column_to_group_name[idx] for idx in environment["conditioning_idxs"]]
intervention_nodes = [intervened_column_to_group_name[idx] for idx in environment["intervention_idxs"]]
intervention = _to_intervention(
np.array(environment["test_data"], dtype=float),
intervention_nodes=intervention_nodes,
condition_nodes=condition_nodes,
variables_list=metadata.variables,
)
# if the json has reference data create another intervention dataclass
if (reference_data := environment["reference_data"]) is None:
raise RuntimeError()
reference = _to_intervention(
np.array(reference_data, dtype=float),
intervention_nodes=intervention_nodes,
condition_nodes=condition_nodes,
variables_list=metadata.variables,
)
# store the nodes we're interested in observing
effect_nodes = set(intervened_column_to_group_name[idx] for idx in environment["effect_idxs"])
# default to all nodes
if not effect_nodes:
effect_nodes = set(intervention.sampled_nodes)
interventions_list.append((intervention, reference, effect_nodes))
return interventions_list
def _load_counterfactuals(json_object: dict[str, Any], metadata: VariablesMetadata) -> list[CounterfactualWithEffects]:
"""
Load the Interventional Datasets as a list of counterfactuals.
Args:
        json_object: The .json file loaded as a json object.
metadata: Metadata of the dataset containing names and types.
Returns:
A list of counterfactuals and the nodes we want to observe for each
"""
intervened_column_to_group_name: dict[int, str] = dict(
zip(
json_object["metadata"]["columns_to_nodes"],
list(item.group_name for item in metadata.variables),
)
)
cf_list = []
for environment in json_object["environments"]:
factual_data = np.array(environment["conditioning_values"])
intervention_nodes = [intervened_column_to_group_name[idx] for idx in environment["intervention_idxs"]]
intervention = _to_counterfactual(
np.array(environment["test_data"], dtype=float),
factual_data,
intervention_nodes=intervention_nodes,
variables_list=metadata.variables,
)
# if the json has reference data create another intervention dataclass
if (reference_data := environment["reference_data"]) is None:
reference = None
else:
reference = _to_counterfactual(
np.array(reference_data, dtype=float),
factual_data,
intervention_nodes=intervention_nodes,
variables_list=metadata.variables,
)
# store the nodes we're interested in observing
effect_nodes = set(intervened_column_to_group_name[idx] for idx in environment["effect_idxs"])
# default to all nodes
if not effect_nodes:
effect_nodes = set(intervention.sampled_nodes)
cf_list.append((intervention, reference, effect_nodes))
return cf_list
def _to_intervention(
data: np.ndarray, intervention_nodes: list[str], condition_nodes: list[str], variables_list: list[Variable]
) -> InterventionData:
"""Create an `InterventionData` object from the data within the json file."""
interv_data = tensordict_from_variables_metadata(data, variables_list=variables_list)
# all the intervention values in the dataset should be the same, so we use the first row
first_row = interv_data[0]
assert all(torch.allclose(interv_data[node_name], first_row[node_name]) for node_name in intervention_nodes)
intervention_values = TensorDict(
{node_name: first_row[node_name] for node_name in intervention_nodes}, batch_size=tuple()
)
condition_values = TensorDict(
{node_name: first_row[node_name] for node_name in condition_nodes}, batch_size=tuple()
)
categorical_sizes = _get_categorical_sizes(variables_list=variables_list)
return InterventionData(
intervention_values=convert_one_hot(
intervention_values, _intersect_dicts_left(categorical_sizes, intervention_values)
),
intervention_data=convert_one_hot(interv_data, _intersect_dicts_left(categorical_sizes, interv_data)),
condition_values=convert_one_hot(condition_values, _intersect_dicts_left(categorical_sizes, condition_values)),
)
def _to_counterfactual(
data: np.ndarray, base_data: np.ndarray, intervention_nodes: list[str], variables_list: list[Variable]
) -> CounterfactualData:
"""Create an `CounterfactualData` object from the data within the json file."""
interv_data = tensordict_from_variables_metadata(data, variables_list=variables_list)
# all the intervention values in the dataset should be the same, so we use the first row
first_row = interv_data[0]
assert all(torch.allclose(interv_data[node_name], first_row[node_name]) for node_name in intervention_nodes)
intervention_values = TensorDict(
{node_name: first_row[node_name] for node_name in intervention_nodes}, batch_size=tuple()
)
factual_data = tensordict_from_variables_metadata(base_data, variables_list=variables_list)
categorical_sizes = _get_categorical_sizes(variables_list=variables_list)
return CounterfactualData(
intervention_values=convert_one_hot(
intervention_values, _intersect_dicts_left(categorical_sizes, intervention_values)
),
counterfactual_data=convert_one_hot(interv_data, _intersect_dicts_left(categorical_sizes, interv_data)),
factual_data=convert_one_hot(factual_data, _intersect_dicts_left(categorical_sizes, factual_data)),
)
def _get_categorical_sizes(variables_list: list[Variable]) -> dict[str, int]:
categorical_sizes = {}
for item in variables_list:
if item.type == VariableTypeEnum.CATEGORICAL:
upper = item.upper
lower = item.lower
if upper is not None and lower is not None:
categorical_sizes[item.group_name] = int(upper - lower + 1)
else:
assert upper is None and lower is None, "Please specify either both limits or neither"
categorical_sizes[item.group_name] = -1
return categorical_sizes
def tensordict_from_variables_metadata(data: np.ndarray, variables_list: list[Variable]) -> TensorDict:
"""Returns a tensor created by concatenating all values along the last dim."""
assert data.ndim == 2, "Numpy loading only supported for 2d data"
batch_size = data.shape[0]
# guaranteed to be ordered correctly in python 3.7+ https://docs.python.org/3/library/collections.html#collections.Counter
sizes = Counter(d.group_name for d in variables_list) # get the dimensions of each key from the variables
sum_sizes = sum(sizes.values())
assert sum_sizes == data.shape[1], f"Variable sizes do not match data shape, got {sum_sizes} and {data.shape}"
# NOTE: This assumes that variables in the same group will have the same type.
dtypes = {item.group_name: DTYPE_MAP[item.type] for item in variables_list}
# slice the numpy array and assign the slices to the values of keys in the dictionary
d = TensorDict({}, batch_size=batch_size)
curr_idx = 0
for key, length in sizes.items():
d[key] = torch.Tensor(data[:, curr_idx : curr_idx + length]).to(dtype=dtypes[key])
curr_idx += length
return d
def tensordict_to_tensor(tensor_dict: TensorDict) -> torch.Tensor:
"""
Convert a `TensorDict` into a 2D `torch.Tensor`.
"""
return torch.cat(tuple(tensor_dict.values()), dim=-1)
def _intersect_dicts_left(dict_1: dict, dict_2: dict) -> dict:
"""Select the keys that are in both dictionaries, with values from the first."""
return {key: dict_1[key] for key in dict_1.keys() & dict_2.keys()}
def get_group_names(variables_metadata: VariablesMetadata) -> list[str]:
"""Get the names of the groups in the variables dict."""
return list(dict.fromkeys([var.group_name for var in variables_metadata.variables]))
def get_group_idxs(variables_metadata: VariablesMetadata) -> list[list[int]]:
"""Get the indices of the nodes/groups in each group."""
group_names = get_group_names(variables_metadata)
return [
[idx for idx, var in enumerate(variables_metadata.variables) if var.group_name == group_name]
for group_name in group_names
]
def get_group_variable_names(variable_metadata: VariablesMetadata) -> dict[str, list[str]]:
"""Get a dictionary mapping node/group names to the variables in that group."""
variable_groups = defaultdict(list)
for variable in variable_metadata.variables:
variable_groups[variable.group_name].append(variable.name)
return variable_groups
def get_name_to_idx(variables_metadata: VariablesMetadata) -> dict[str, int]:
"""Get a dictionary mapping node/group names to their index in the variables dict."""
return {var.name: idx for idx, var in enumerate(variables_metadata.variables)}
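if __name__ == "__main__":
    # Example usage (sketch): download the public "csuite_linexp_2" dataset mentioned in the
    # docstrings above (requires network access to the blob store); any dataset that follows
    # the standard directory structure works the same way.
    root = os.path.join(CAUSICA_DATASETS_PATH, "csuite_linexp_2")
    variables = load_data(root, DataEnum.VARIABLES_JSON)
    train_data = load_data(root, DataEnum.TRAIN, variables)  # TensorDict keyed by group name
    adjacency = load_data(root, DataEnum.TRUE_ADJACENCY)  # torch.Tensor adjacency matrix
    print(get_group_names(variables), train_data.batch_size, adjacency.shape)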
|
causica/src/causica/datasets/causica_dataset_format/load.py/0
|
{
"file_path": "causica/src/causica/datasets/causica_dataset_format/load.py",
"repo_id": "causica",
"token_count": 5495
}
| 860 |
from typing import Optional
import torch
import torch.distributions as td
class ExpertGraphContainer(torch.nn.Module):
def __init__(self, dag: torch.Tensor, mask: torch.Tensor, confidence: float, scale: float) -> None:
"""Container holding an "experts" prior belief about the underlying causal DAG.
Arguments:
dag: The binary adjacency matrix representing domain knowledge in the form of a DAG. Corresponds to `mask`.
mask: A binary mask indicating whether or not the corresponding edge of the `dag` has information.
confidence: A value in the interval (0, 1] indicating the confidence of the existence of the edge
scale: Scaling factor for expert graph loss
"""
super().__init__()
self.dag = torch.nn.Parameter(dag, requires_grad=False)
self.mask = torch.nn.Parameter(mask, requires_grad=False)
self.confidence: torch.Tensor
self.scale: torch.Tensor
self.register_buffer("confidence", torch.tensor(confidence, dtype=torch.float))
self.register_buffer("scale", torch.tensor(scale, dtype=torch.float))
class GibbsDAGPrior(td.Distribution):
"""
Represents a prior distribution over adjacency matrices.
The prior distribution consists of two terms:
A sparsity term
A Expert Graph term that represents some known prior belief about the graph.
Each term has an associated parameter (lambda)
"""
arg_constraints: dict = {}
def __init__(
self,
num_nodes: int,
sparsity_lambda: float,
expert_graph_container: Optional[ExpertGraphContainer] = None,
**kwargs
) -> None:
"""
Args:
num_nodes (int): Number of nodes in the graph
sparsity_lambda (float): Coefficient of sparsity term
expert_graph_container (ExpertGraphContainer): Dataclass containing prior belief about the real graph
"""
super().__init__(torch.Size(), event_shape=torch.Size((num_nodes, num_nodes)), **kwargs)
self._num_nodes = num_nodes
self._expert_graph_container = expert_graph_container
self._sparsity_lambda = sparsity_lambda
def get_sparsity_term(self, A: torch.Tensor) -> torch.Tensor:
"""
A term that encourages sparsity (see https://arxiv.org/pdf/2106.07635.pdf).
The term is small when A is sparse.
Args:
A (torch.Tensor): Adjacency matrix of shape (input_dim, input_dim).
Returns:
Sparsity term.
"""
return A.abs().sum()
def get_expert_graph_term(self, A: torch.Tensor) -> torch.Tensor:
"""
A term that encourages A to be close to given expert graph.
Args:
A (torch.Tensor): Adjacency matrix of shape (input_dim, input_dim).
Returns:
(torch.Tensor): Expert graph term.
"""
assert isinstance(self._expert_graph_container, ExpertGraphContainer)
return (
(
self._expert_graph_container.mask
* (A - self._expert_graph_container.confidence * self._expert_graph_container.dag)
)
.abs()
.sum()
)
def log_prob(self, value: torch.Tensor) -> torch.Tensor:
"""
Calculates the (un-normalized) log probability of adjacency matrix `value`
under the distribution given by this instance.
Args:
value (torch.Tensor): Adjacency matrix of shape (input_dim, input_dim).
Returns:
(torch.Tensor): The un-normalized log probability of `value`.
"""
assert value.shape[-2:] == (self._num_nodes, self._num_nodes)
log_prob = -self._sparsity_lambda * self.get_sparsity_term(value)
if self._expert_graph_container is not None:
log_prob -= self._expert_graph_container.scale * self.get_expert_graph_term(value)
return log_prob
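if __name__ == "__main__":
    # Example usage (sketch): a 3-node prior where an expert confidently believes in the edge
    # 0 -> 1 and has no information about the other edges. All numbers are illustrative only.
    dag = torch.zeros(3, 3)
    mask = torch.zeros(3, 3)
    dag[0, 1] = 1.0
    mask[0, 1] = 1.0
    expert = ExpertGraphContainer(dag=dag, mask=mask, confidence=0.9, scale=1.0)
    prior = GibbsDAGPrior(num_nodes=3, sparsity_lambda=0.1, expert_graph_container=expert)
    adjacency = torch.bernoulli(torch.full((3, 3), 0.5))
    print(prior.log_prob(adjacency))  # un-normalised log prior of a random adjacency matrix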
|
causica/src/causica/distributions/adjacency/gibbs_dag_prior.py/0
|
{
"file_path": "causica/src/causica/distributions/adjacency/gibbs_dag_prior.py",
"repo_id": "causica",
"token_count": 1686
}
| 861 |
from causica.distributions.transforms.base import SequentialTransformModule, TransformModule, TypedTransform
from causica.distributions.transforms.joint import JointTransform, JointTransformModule
from causica.distributions.transforms.tensor_to_tensordict import TensorToTensorDictTransform, shapes_to_slices
|
causica/src/causica/distributions/transforms/__init__.py/0
|
{
"file_path": "causica/src/causica/distributions/transforms/__init__.py",
"repo_id": "causica",
"token_count": 84
}
| 862 |
import torch
import torch.distributions as td
from torch.distributions.constraints import Constraint
from causica.distributions.adjacency import AdjacencyDistribution
from causica.distributions.distribution_module import DistributionModule
from causica.distributions.noise.joint import JointNoiseModule
from causica.functional_relationships.functional_relationships import FunctionalRelationships
from causica.sem.distribution_parameters_sem import DistributionParametersSEM
class SEMDistribution(td.Distribution):
"""A distribution over structural equation models.
Samples are instances of DistributionParametersSEM. Note however that this was created before
pytorch set the expected type of samples to torch.Tensor, so this is breaking the types a bit.
The distribution is essentially the same as the given adjacency distribution but with samples converted to SEMs.
Therefore, all distribution properties such as entropy, mean and mode are given by the equivalent properties for the
adjacency distribution.
"""
arg_constraints: dict[str, Constraint] = {}
def __init__(
self,
adjacency_dist: AdjacencyDistribution,
noise_module: JointNoiseModule,
functional_relationships: FunctionalRelationships,
):
"""
Args:
adjacency_dist: Distribution from which adjacency matrices are sampled to construct SEMs.
noise_module: The noise module for any SEM of this distribution.
functional_relationships: The functional relationship for any SEM of this distribution.
"""
super().__init__()
self._adjacency_dist = adjacency_dist
self._noise_module = noise_module
self._functional_relationships = functional_relationships
def _create_sems(self, graphs: torch.Tensor) -> list[DistributionParametersSEM]:
graphs = graphs.reshape(-1, *graphs.shape[-2:])
return [
DistributionParametersSEM(
graph=graph,
noise_dist=self._noise_module,
func=self._functional_relationships,
)
for graph in graphs.unbind(dim=0)
]
def sample(self, sample_shape: torch.Size = torch.Size()):
graphs = self._adjacency_dist.sample(sample_shape)
if not sample_shape:
graphs = graphs[None, ...]
return self._create_sems(graphs)
def relaxed_sample(
self, sample_shape: torch.Size = torch.Size(), temperature: float = 0.0
) -> list[DistributionParametersSEM]:
graphs = self._adjacency_dist.relaxed_sample(sample_shape=sample_shape, temperature=temperature)
return self._create_sems(graphs)
def entropy(self) -> torch.Tensor:
return self._adjacency_dist.entropy()
@property
def mean(self) -> DistributionParametersSEM: # type: ignore
return DistributionParametersSEM(
graph=self._adjacency_dist.mean,
noise_dist=self._noise_module,
func=self._functional_relationships,
)
@property
def mode(self) -> DistributionParametersSEM: # type: ignore
return DistributionParametersSEM(
graph=self._adjacency_dist.mode,
noise_dist=self._noise_module,
func=self._functional_relationships,
)
def log_prob(self, value: DistributionParametersSEM) -> torch.Tensor: # type: ignore
return self._adjacency_dist.log_prob(value.graph)
class SEMDistributionModule(DistributionModule[SEMDistribution]):
"""Represents a SEMDistribution with learnable parameters."""
def __init__(
self,
adjacency_module: DistributionModule[AdjacencyDistribution],
functional_relationships: FunctionalRelationships,
noise_module: JointNoiseModule,
):
super().__init__()
self.adjacency_module = adjacency_module
self.functional_relationships = functional_relationships
self.noise_module = noise_module
def forward(self) -> SEMDistribution:
return SEMDistribution(
adjacency_dist=self.adjacency_module(),
noise_module=self.noise_module,
functional_relationships=self.functional_relationships,
)
|
causica/src/causica/sem/sem_distribution.py/0
|
{
"file_path": "causica/src/causica/sem/sem_distribution.py",
"repo_id": "causica",
"token_count": 1569
}
| 863 |
"""Module with generic Adjacency Distribution tests."""
from typing import Type
import numpy as np
import pytest
import torch
from causica.distributions.adjacency import (
AdjacencyDistribution,
ConstrainedAdjacencyDistribution,
ENCOAdjacencyDistribution,
ThreeWayAdjacencyDistribution,
)
def _distribution_factory(
dist_class: Type[AdjacencyDistribution], num_nodes: int, batch_shape: torch.Size
) -> AdjacencyDistribution:
"""Create a combined interface for producing Adjacency Distributions (allows us to use `parametrize` over them)"""
if dist_class is ConstrainedAdjacencyDistribution:
logits = torch.randn(batch_shape + ((num_nodes * (num_nodes - 1)) // 2, 3))
inner_dist = ThreeWayAdjacencyDistribution(logits=logits)
square_ones = torch.ones(num_nodes, num_nodes, dtype=torch.bool)
positive_constraints = torch.triu(square_ones, diagonal=1)
negative_constraints = torch.tril(square_ones, diagonal=-1)
return ConstrainedAdjacencyDistribution(
inner_dist, positive_constraints=positive_constraints, negative_constraints=negative_constraints
)
if dist_class is ENCOAdjacencyDistribution:
length = (num_nodes * (num_nodes - 1)) // 2
return ENCOAdjacencyDistribution(
logits_exist=torch.randn(batch_shape + (num_nodes, num_nodes)),
logits_orient=torch.randn(batch_shape + (length,)),
)
if dist_class is ThreeWayAdjacencyDistribution:
logits = torch.randn(batch_shape + ((num_nodes * (num_nodes - 1)) // 2, 3))
return ThreeWayAdjacencyDistribution(logits=logits)
raise ValueError("Unrecognised Class")
DIST_CLASSES = [ConstrainedAdjacencyDistribution, ENCOAdjacencyDistribution, ThreeWayAdjacencyDistribution]
BATCH_SHAPES = [torch.Size(), (2,)]
# pylint: disable=protected-access
@pytest.mark.parametrize("dist_class", DIST_CLASSES)
@pytest.mark.parametrize("batch_shape", BATCH_SHAPES)
def test_support(dist_class: Type[AdjacencyDistribution], batch_shape: torch.Size):
"""Test that the defined support works as expected. This method will be used to test other features."""
num_nodes = 3
dist = _distribution_factory(dist_class=dist_class, num_nodes=num_nodes, batch_shape=batch_shape)
mat = torch.ones((num_nodes, num_nodes))
# validate sample returns None when there is no error
dist._validate_sample(mat)
# validate sample throws when the sample is invalid
with pytest.raises(ValueError):
dist._validate_sample(4 * mat)
@pytest.mark.parametrize("batch_shape", BATCH_SHAPES)
@pytest.mark.parametrize("dist_class", DIST_CLASSES)
@pytest.mark.parametrize("relaxed_sample", [True, False])
@pytest.mark.parametrize(("num_nodes", "sample_shape"), [(3, tuple()), (4, (20,)), (2, (4, 5))])
def test_sample_shape(
dist_class: Type[AdjacencyDistribution],
num_nodes: int,
sample_shape: torch.Size,
relaxed_sample: bool,
batch_shape: torch.Size,
):
"""Test the sample/rsample method returns binary tensors in the support of the correct shape"""
dist = _distribution_factory(dist_class=dist_class, num_nodes=num_nodes, batch_shape=batch_shape)
samples = dist.relaxed_sample(sample_shape, temperature=0.1) if relaxed_sample else dist.sample(sample_shape)
assert samples.shape == sample_shape + batch_shape + (num_nodes, num_nodes)
dist._validate_sample(samples)
assert not np.isnan(samples).any()
@pytest.mark.parametrize("dist_class", DIST_CLASSES)
@pytest.mark.parametrize("relaxed_sample", [True, False])
@pytest.mark.parametrize("sample_shape", [(2000,), (40, 50)])
def test_sample_distinct(
dist_class: Type[AdjacencyDistribution],
sample_shape: torch.Size,
relaxed_sample: bool,
):
"""Test the sample/rsample method returns distinct binary tensors"""
num_nodes = 4
dist = _distribution_factory(dist_class=dist_class, num_nodes=num_nodes, batch_shape=torch.Size())
samples = dist.relaxed_sample(sample_shape, temperature=0.1) if relaxed_sample else dist.sample(sample_shape)
# the samples should be different from each other
assert not np.all(np.isclose(samples, samples.reshape(-1, num_nodes, num_nodes)[0, ...]))
@pytest.mark.parametrize("batch_shape", BATCH_SHAPES)
@pytest.mark.parametrize("dist_class", DIST_CLASSES)
def test_mean(dist_class: Type[AdjacencyDistribution], batch_shape: torch.Size):
"""Test basic properties of the means of the distributions"""
num_nodes = 3
dist = _distribution_factory(dist_class=dist_class, num_nodes=num_nodes, batch_shape=batch_shape)
mean = dist.mean
assert mean.shape == batch_shape + (num_nodes, num_nodes)
assert (mean <= 1.0).all()
assert (mean >= 0.0).all()
@pytest.mark.parametrize("batch_shape", BATCH_SHAPES)
@pytest.mark.parametrize("dist_class", DIST_CLASSES)
def test_mode(dist_class: Type[AdjacencyDistribution], batch_shape: torch.Size):
"""Test basic properties of the modes of the distributions"""
num_nodes = 3
dist = _distribution_factory(dist_class=dist_class, num_nodes=num_nodes, batch_shape=batch_shape)
mode = dist.mode
dist._validate_sample(mode) # mode should be in the support
assert mode.shape == batch_shape + (num_nodes, num_nodes)
@pytest.mark.parametrize("batch_shape", BATCH_SHAPES)
@pytest.mark.parametrize("sample_shape", [tuple(), (2,), (3,)])
@pytest.mark.parametrize("dist_class", DIST_CLASSES)
def test_log_prob(dist_class: Type[AdjacencyDistribution], batch_shape: torch.Size, sample_shape: torch.Size):
"""Test basic properties of the log_prob of the distributions"""
num_nodes = 4
dist = _distribution_factory(dist_class=dist_class, num_nodes=num_nodes, batch_shape=batch_shape)
values = torch.randint(0, 2, sample_shape + batch_shape + (num_nodes, num_nodes), dtype=torch.float64)
log_probs = dist.log_prob(values)
assert log_probs.shape == sample_shape + batch_shape
|
causica/test/distributions/adjacency/test_adjacency_distributions.py/0
|
{
"file_path": "causica/test/distributions/adjacency/test_adjacency_distributions.py",
"repo_id": "causica",
"token_count": 2215
}
| 864 |
import gc
from causica.distributions.transforms.base import TypedTransform
class _StrIntTransform(TypedTransform[str, int]):
"""Dummy transform for testing types."""
def _call(self, x: str) -> int:
return int(x)
def _inverse(self, y: int) -> str:
return str(y)
def test_typed_transform() -> None:
"""Tests the basic functionality of TypedTransform.
Should be paired with a mypy step to ensure that the types are consistent."""
transform = _StrIntTransform()
x: str = "1"
y: int = transform(x)
inverse: TypedTransform[int, str] = transform.inv # Check that the inverse is indeed recognized as a TypedTransform
assert isinstance(inverse, TypedTransform)
assert transform.inv(y) == x
def test_weak_ref_inv_release() -> None:
"""Test that the weak reference to the inverse is released."""
transform = _StrIntTransform()
inverse = transform.inv
id_ = id(inverse)
# Check that the inverse is kept while there is an active reference
assert id(transform.inv) == id_
# Release the direct reference to the inverse and force garbage collection
del inverse
gc.collect()
# Check that accessing the inverse now yields a new object
assert id(transform.inv) != id_
|
causica/test/distributions/transforms/test_base.py/0
|
{
"file_path": "causica/test/distributions/transforms/test_base.py",
"repo_id": "causica",
"token_count": 412
}
| 865 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Tuple, Union
import torch
import torch.nn as nn
from cliffordlayers.nn.functional.utils import _w_assert
def get_1d_clifford_kernel(
w: Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList], g: torch.Tensor
) -> Tuple[int, torch.Tensor]:
"""Clifford kernel for 1d Clifford algebras, g = [-1] corresponds to a complex number kernel.
Args:
w (Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]): Weight input of shape `(2, d~input~, d~output~, ...)`.
g (torch.Tensor): Signature of Clifford algebra.
Raises:
ValueError: Wrong encoding/decoding options provided.
Returns:
(Tuple[int, torch.Tensor]): Number of output blades, weight output of shape `(d~output~ * 2, d~input~ * 2, ...)`.
"""
assert isinstance(g, torch.Tensor)
assert g.numel() == 1
w = _w_assert(w)
assert len(w) == 2
k0 = torch.cat([w[0], g[0] * w[1]], dim=1)
k1 = torch.cat([w[1], w[0]], dim=1)
k = torch.cat([k0, k1], dim=0)
return 2, k
def get_2d_clifford_kernel(
w: Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList], g: torch.Tensor
) -> Tuple[int, torch.Tensor]:
"""Clifford kernel for 2d Clifford algebras, g = [-1, -1] corresponds to a quaternion kernel.
Args:
w (Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]): Weight input of shape `(4, d~input~, d~output~, ...)`.
g (torch.Tensor): Signature of Clifford algebra.
Raises:
ValueError: Wrong encoding/decoding options provided.
Returns:
(Tuple[int, torch.Tensor]): Number of output blades, weight output of shape `(d~output~ * 4, d~input~ * 4, ...)`.
"""
assert isinstance(g, torch.Tensor)
assert g.numel() == 2
w = _w_assert(w)
assert len(w) == 4
k0 = torch.cat([w[0], g[0] * w[1], g[1] * w[2], -g[0] * g[1] * w[3]], dim=1)
k1 = torch.cat([w[1], w[0], -g[1] * w[3], g[1] * w[2]], dim=1)
k2 = torch.cat([w[2], g[0] * w[3], w[0], -g[0] * w[1]], dim=1)
k3 = torch.cat([w[3], w[2], -w[1], w[0]], dim=1)
k = torch.cat([k0, k1, k2, k3], dim=0)
return 4, k
def get_2d_clifford_rotation_kernel(
w: Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList], g: torch.Tensor
) -> Tuple[int, torch.Tensor]:
"""Rotational Clifford kernel for 2d Clifford algebras, the vector part corresponds to quaternion rotation.
Args:
w (Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]): Weight input of shape `(6, d~input~, d~output~, ...)`.
`w[0]`, `w[1]`, `w[2]`, `w[3]` are the 2D Clifford weight tensors;
`w[4]` is the scaling tensor; `w[5]` is the zero kernel tensor.
g (torch.Tensor): Signature of Clifford algebra.
Raises:
ValueError: Wrong encoding/decoding options provided.
Returns:
(Tuple[int, torch.Tensor]): Number of output blades, weight output of shape `(d~output~ * 4, d~input~ * 4, ...)`.
"""
assert isinstance(g, torch.Tensor)
assert g.numel() == 2
assert g[0] == -1 and g[1] == -1, "Wrong signature of Clifford algebra. Signature not suitable for rotation kernel."
w = _w_assert(w)
assert len(w) == 6
# Adding scalar output kernel.
k0 = torch.cat([w[0], -w[1], -w[2], -w[3]], dim=1)
# Rotational kernel from here onwards.
s0 = w[0] * w[0]
s1 = w[1] * w[1]
s2 = w[2] * w[2]
s3 = w[3] * w[3]
norm = torch.sqrt(s0 + s1 + s2 + s3 + 0.0001)
w0_n = w[0] / norm
w1_n = w[1] / norm
w2_n = w[2] / norm
w3_n = w[3] / norm
norm_factor = 2.0
s1 = norm_factor * (w1_n * w1_n)
s2 = norm_factor * (w2_n * w2_n)
s3 = norm_factor * (w3_n * w3_n)
rot01 = norm_factor * w0_n * w1_n
rot02 = norm_factor * w0_n * w2_n
rot03 = norm_factor * w0_n * w3_n
rot12 = norm_factor * w1_n * w2_n
rot13 = norm_factor * w1_n * w3_n
rot23 = norm_factor * w2_n * w3_n
scale = w[4]
zero_kernel = w[5]
k1 = torch.cat(
[
zero_kernel,
scale * (1.0 - (s2 + s3)),
scale * (rot12 - rot03),
scale * (rot13 + rot02),
],
dim=1,
)
k2 = torch.cat(
[
zero_kernel,
scale * (rot12 + rot03),
scale * (1.0 - (s1 + s3)),
scale * (rot23 - rot01),
],
dim=1,
)
k3 = torch.cat(
[
zero_kernel,
scale * (rot13 - rot02),
scale * (rot23 + rot01),
scale * (1.0 - (s1 + s2)),
],
dim=1,
)
k = torch.cat([k0, k1, k2, k3], dim=0)
return 4, k
def get_3d_clifford_kernel(
w: Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList], g: torch.Tensor
) -> Tuple[int, torch.Tensor]:
"""Clifford kernel for 3d Clifford algebras, g = [-1, -1, -1] corresponds to an octonion kernel.
Args:
w (Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]): Weight input of shape `(8, d~input~, d~output~, ...)`.
g (torch.Tensor): Signature of Clifford algebra.
Raises:
ValueError: Wrong encoding/decoding options provided.
Returns:
(Tuple[int, torch.Tensor]): Number of output blades, weight output of dimension `(d~output~ * 8, d~input~ * 8, ...)`.
"""
assert isinstance(g, torch.Tensor)
assert g.numel() == 3
w = _w_assert(w)
assert len(w) == 8
k0 = torch.cat(
[
w[0],
w[1] * g[0],
w[2] * g[1],
w[3] * g[2],
-w[4] * g[0] * g[1],
-w[5] * g[0] * g[2],
-w[6] * g[1] * g[2],
-w[7] * g[0] * g[1] * g[2],
],
dim=1,
)
k1 = torch.cat(
[w[1], w[0], -w[4] * g[1], -w[5] * g[2], w[2] * g[1], w[3] * g[2], -w[7] * g[1] * g[2], -w[6] * g[2] * g[1]],
dim=1,
)
k2 = torch.cat(
[w[2], w[4] * g[0], w[0], -w[6] * g[2], -w[1] * g[0], w[7] * g[0] * g[2], w[3] * g[2], w[5] * g[2] * g[0]],
dim=1,
)
k3 = torch.cat(
[w[3], w[5] * g[0], w[6] * g[1], w[0], -w[7] * g[0] * g[1], -w[1] * g[0], -w[2] * g[1], -w[4] * g[0] * g[1]],
dim=1,
)
k4 = torch.cat([w[4], w[2], -w[1], g[2] * w[7], w[0], -w[6] * g[2], w[5] * g[2], w[3] * g[2]], dim=1)
k5 = torch.cat([w[5], w[3], -w[7] * g[1], -w[1], w[6] * g[1], w[0], -w[4] * g[1], -w[2] * g[1]], dim=1)
k6 = torch.cat([w[6], w[7] * g[0], w[3], -w[2], -w[5] * g[0], w[4] * g[0], w[0], w[1] * g[0]], dim=1)
k7 = torch.cat([w[7], w[6], -w[5], w[4], w[3], -w[2], w[1], w[0]], dim=1)
k = torch.cat([k0, k1, k2, k3, k4, k5, k6, k7], dim=0)
return 8, k
def get_complex_kernel(w: Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]) -> torch.Tensor:
"""Complex kernel.
Args:
w (Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]): Weight input of shape `(2, d~input~, d~output~, ...)`.
Returns:
(torch.Tensor): Weight output of shape `(d~output~ * 2, d~input~ * 2, ...)`.
"""
w = _w_assert(w)
assert len(w) == 2
k0 = torch.cat([w[0], -w[1]], dim=1)
k1 = torch.cat([w[1], w[0]], dim=1)
k = torch.cat([k0, k1], dim=0)
return k
def get_quaternion_kernel(w: Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]) -> torch.Tensor:
"""Quaternion kernel.
Args:
w (Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]): Weight input of shape `(4, d~input~, d~output~, ...)`.
Returns:
(torch.Tensor): Weight output of shape `(d~output~ * 4, d~input~ * 4, ...)`.
"""
w = _w_assert(w)
assert len(w) == 4
k0 = torch.cat([w[0], -w[1], -w[2], -w[3]], dim=1)
k1 = torch.cat([w[1], w[0], w[3], -w[2]], dim=1)
k2 = torch.cat([w[2], -w[3], w[0], w[1]], dim=1)
k3 = torch.cat([w[3], w[2], -w[1], w[0]], dim=1)
k = torch.cat([k0, k1, k2, k3], dim=0)
return k
def get_quaternion_rotation_kernel(w: Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]) -> torch.Tensor:
"""Quaternion rotation, taken mostly from <https://github.com/Orkis-Research/Pytorch-Quaternion-Neural-Networks>
Args:
w (Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]): Weight input of shape `(6, d~input~, d~output~, ...)`.
`w[0]`, `w[1]`, `w[2]`, `w[3]` are the quaternion w;
tensors; `w[4]` is the scaling tensor; `w[5]` is the zero kernel tensor.
Returns:
(torch.Tensor): Quaternion weight output of dimension `(d~output * 3, d~input * 4, ...)`.
"""
w = _w_assert(w)
assert len(w) == 6
square_1 = w[0] * w[0]
square_2 = w[1] * w[1]
square_3 = w[2] * w[2]
square_4 = w[3] * w[3]
norm = torch.sqrt(square_1 + square_2 + square_3 + square_4 + 0.0001)
w1_n = w[0] / norm
w2_n = w[1] / norm
w3_n = w[2] / norm
w4_n = w[3] / norm
norm_factor = 2.0
square_2 = norm_factor * (w2_n * w2_n)
square_3 = norm_factor * (w3_n * w3_n)
square_4 = norm_factor * (w4_n * w4_n)
rot12 = norm_factor * w1_n * w2_n
rot13 = norm_factor * w1_n * w3_n
rot14 = norm_factor * w1_n * w4_n
rot23 = norm_factor * w2_n * w3_n
rot24 = norm_factor * w2_n * w4_n
rot34 = norm_factor * w3_n * w4_n
scale = w[4]
zero_kernel = w[5]
rot_kernel2 = torch.cat(
[
zero_kernel,
scale * (1.0 - (square_3 + square_4)),
scale * (rot23 - rot14),
scale * (rot24 + rot13),
],
dim=1,
)
rot_kernel3 = torch.cat(
[
zero_kernel,
scale * (rot23 + rot14),
scale * (1.0 - (square_2 + square_4)),
scale * (rot34 - rot12),
],
dim=1,
)
rot_kernel4 = torch.cat(
[
zero_kernel,
scale * (rot24 - rot13),
scale * (rot34 + rot12),
scale * (1.0 - (square_2 + square_3)),
],
dim=1,
)
k = torch.cat([rot_kernel2, rot_kernel3, rot_kernel4], dim=0)
return k
def get_octonion_kernel(w: Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]) -> torch.Tensor:
"""Octonion kernels.
Args:
w (Union[tuple, list, torch.Tensor, nn.Parameter, nn.ParameterList]): Weight input of shape `(8, d~input~, d~output~, ...)`.
Returns:
(torch.Tensor): Weight output of shape `(d~output~ * 8, d~input~ * 8, ...)`.
"""
w = _w_assert(w)
assert len(w) == 8
k0 = torch.cat([w[0], -w[1], -w[2], -w[3], -w[4], -w[5], -w[6], w[7]], dim=1)
k1 = torch.cat([w[1], w[0], w[4], w[5], -w[2], -w[3], -w[7], -w[6]], dim=1)
k2 = torch.cat([w[2], -w[4], w[0], w[6], w[1], w[7], -w[3], w[5]], dim=1)
k3 = torch.cat([w[3], -w[5], -w[6], w[0], -w[7], w[1], w[2], -w[4]], dim=1)
k4 = torch.cat([w[4], w[2], -w[1], -w[7], w[0], w[6], -w[5], -w[3]], dim=1)
k5 = torch.cat([w[5], w[3], w[7], -w[1], -w[6], w[0], w[4], w[2]], dim=1)
k6 = torch.cat([w[6], -w[7], w[3], -w[2], w[5], -w[4], w[0], -w[1]], dim=1)
k7 = torch.cat([w[7], w[6], -w[5], w[4], w[3], -w[2], w[1], w[0]], dim=1)
k = torch.cat([k0, k1, k2, k3, k4, k5, k6, k7], dim=0)
return k
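if __name__ == "__main__":
    # Example usage (sketch): assemble a 2D Clifford kernel with signature g = [-1, -1]
    # (quaternion-like). Shapes are illustrative only: 4 blades, 8 input channels,
    # 16 output channels, 3x3 spatial kernel.
    weights = nn.Parameter(torch.randn(4, 8, 16, 3, 3))
    signature = torch.tensor([-1.0, -1.0])
    n_blades, kernel = get_2d_clifford_kernel(weights, signature)
    # The per-blade weights are stacked into a single real-valued kernel that a standard
    # convolution can consume.
    print(n_blades, kernel.shape)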
|
cliffordlayers/cliffordlayers/cliffordkernels.py/0
|
{
"file_path": "cliffordlayers/cliffordlayers/cliffordkernels.py",
"repo_id": "cliffordlayers",
"token_count": 5870
}
| 866 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# This script generates the research page from the research.yml file.
import yaml
header = """
<!-- This is automatically generated from research.yml. Do not edit this file directly. -->
# Research
Following is a list of research papers that have been published using CliffordLayers.
If you have used CliffordLayers in your research, and would like it listed here, please add your paper to [this file](https://github.com/microsoft/cliffordlayers/blob/main/docs/research.yml) by sending a pull request to the [CliffordLayers repository](https://github.com/microsoft/cliffordlayers).
"""
def snippet(paper):
title = paper["title"]
authors = ", ".join(paper["authors"]).replace("(", "<sup>").replace(")", "</sup>")
affiliations = ", ".join(f"<sup>{num}</sup>{affil}" for num, affil in paper["affiliations"].items())
link = paper["link"]
abstract = paper["abstract"]
image_file = paper["image"]
paper_snippet = f"""
<figure markdown>
 {{ width="500"}}
<figcaption>
<!-- Large font: -->
<h2>
<a href="{link}">{title}</a>
</h2>
</figcaption>
</figure>
<center>
{authors}
<small>{affiliations}</small>
</center>
**Abstract:** {abstract}\n\n
"""
return paper_snippet
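# Example research.yml entry (hypothetical values) matching the keys read by snippet() above:
#
# papers:
#   - title: "Example Paper Title"
#     authors: ["Alice Example(1)", "Bob Example(2)"]   # "(n)" markers become <sup>n</sup>
#     affiliations:
#       1: "University A"
#       2: "Institute B"
#     link: "https://example.com/paper"
#     abstract: "One-sentence abstract."
#     image: "images/example.png"
#     date: 2023-01-01                                   # used for reverse-chronological sorting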
def main(outfile):
with open("research.yml") as f:
research = yaml.load(f, Loader=yaml.SafeLoader)["papers"]
with open(outfile, "w") as f:
f.write(header)
research = sorted(research, key=lambda x: x["date"], reverse=True)
snippets = [snippet(paper) for paper in research]
f.write("\n\n---\n\n".join(snippets))
if __name__ == "__main__":
main("research.md")
|
cliffordlayers/docs/generate_research.py/0
|
{
"file_path": "cliffordlayers/docs/generate_research.py",
"repo_id": "cliffordlayers",
"token_count": 617
}
| 867 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from cliffordlayers.models.gca.twod import CliffordG3ResNet2d, CliffordG3UNet2d
def test_gca_resnet():
x = torch.randn(8, 4, 128, 128, 3)
in_channels = 4
out_channels = 1
model = CliffordG3ResNet2d(
num_blocks=[2, 2, 2, 2],
in_channels=in_channels,
out_channels=out_channels,
hidden_channels=32,
norm=True,
)
if torch.cuda.is_available():
x = x.to("cuda:0")
model = model.to("cuda:0")
out = model(x)
assert out.shape == (8, out_channels, 128, 128, 3)
model = CliffordG3ResNet2d(
num_blocks=[2, 2, 2, 2],
in_channels=in_channels,
out_channels=out_channels,
hidden_channels=32,
norm=False,
)
if torch.cuda.is_available():
x = x.to("cuda:0")
model = model.to("cuda:0")
out = model(x)
assert out.shape == (8, out_channels, 128, 128, 3)
def test_gca_unet():
x = torch.randn(8, 4, 128, 128, 3)
in_channels = 4
out_channels = 1
model = CliffordG3UNet2d(
in_channels=in_channels,
out_channels=out_channels,
hidden_channels=32,
norm=True,
)
if torch.cuda.is_available():
x = x.to("cuda:0")
model = model.to("cuda:0")
out = model(x)
assert out.shape == (8, out_channels, 128, 128, 3)
model = CliffordG3UNet2d(
in_channels=in_channels,
out_channels=out_channels,
hidden_channels=32,
norm=False,
)
if torch.cuda.is_available():
x = x.to("cuda:0")
model = model.to("cuda:0")
out = model(x)
assert out.shape == (8, out_channels, 128, 128, 3)
if __name__ == "__main__":
test_gca_resnet()
test_gca_unet()
|
cliffordlayers/tests/test_GCAFluidNet2d.py/0
|
{
"file_path": "cliffordlayers/tests/test_GCAFluidNet2d.py",
"repo_id": "cliffordlayers",
"token_count": 899
}
| 868 |
#!/usr/bin/env/python
"""
Usage:
evaluate.py --dataset zinc|qm9|cep
Options:
-h --help Show this screen.
--dataset NAME Dataset name: zinc, qm9, cep
"""
import utils
from utils import dataset_info
import numpy as np
from docopt import docopt
if __name__ == '__main__':
args = docopt(__doc__)
dataset=args.get('--dataset')
logpscorer, logp_score_per_molecule=utils.check_logp(dataset)
qedscorer, qed_score_per_molecule=utils.check_qed(dataset)
novelty=utils.novelty_metric(dataset)
total, nonplanar=utils.check_planar(dataset)
total, atom_counter, atom_per_molecule =utils.count_atoms(dataset)
total, edge_type_counter, edge_type_per_molecule=utils.count_edge_type(dataset)
total, shape_count, shape_count_per_molecule=utils.shape_count(dataset)
total, tree_count=utils.check_cyclic(dataset)
sascorer, sa_score_per_molecule=utils.check_sascorer(dataset)
total, validity=utils.check_validity(dataset)
print("------------------------------------------")
print("Metrics")
print("------------------------------------------")
print("total molecule")
print(total)
print("------------------------------------------")
print("percentage of nonplanar:")
print(nonplanar/total)
print("------------------------------------------")
print("avg atom:")
for atom_type, c in atom_counter.items():
print(dataset_info(dataset)['atom_types'][atom_type])
print(c/total)
print("standard deviation")
print(np.std(atom_per_molecule, axis=0))
print("------------------------------------------")
print("avg edge_type:")
for edge_type, c in edge_type_counter.items():
print(edge_type+1)
print(c/total)
print("standard deviation")
print(np.std(edge_type_per_molecule, axis=0))
print("------------------------------------------")
print("avg shape:")
for shape, c in zip(utils.geometry_numbers, shape_count):
print(shape)
print(c/total)
print("standard deviation")
print(np.std(shape_count_per_molecule, axis=0))
print("------------------------------------------")
print("percentage of tree:")
print(tree_count/total)
print("------------------------------------------")
print("percentage of validity:")
print(validity/total)
print("------------------------------------------")
print("avg sa_score:")
print(sascorer)
print("standard deviation")
print(np.std(sa_score_per_molecule))
print("------------------------------------------")
print("avg logp_score:")
print(logpscorer)
print("standard deviation")
print(np.std(logp_score_per_molecule))
print("------------------------------------------")
print("percentage of novelty:")
print(novelty)
print("------------------------------------------")
print("avg qed_score:")
print(qedscorer)
print("standard deviation")
print(np.std(qed_score_per_molecule))
print("------------------------------------------")
print("uniqueness")
print(utils.check_uniqueness(dataset))
print("------------------------------------------")
print("percentage of SSSR")
print(utils.sssr_metric(dataset))
|
constrained-graph-variational-autoencoder/evaluate.py/0
|
{
"file_path": "constrained-graph-variational-autoencoder/evaluate.py",
"repo_id": "constrained-graph-variational-autoencoder",
"token_count": 1192
}
| 869 |
# cookiecutter-spacy-fastapi
Python cookiecutter API for quick deployments of spaCy models with FastAPI
## Azure Search
The API interface is compatible with Azure Search Cognitive Skills.
For instructions on adding your API as a Custom Cognitive Skill in Azure Search see:
https://docs.microsoft.com/en-us/azure/search/cognitive-search-custom-skill-interface
## Requirements
- Python >= 3.6 with pip installed
## Quickstart
### Install the latest [Cookiecutter](https://github.com/audreyr/cookiecutter) if you haven't installed it yet (this requires Cookiecutter 1.4.0 or higher):
```
pip install --user cookiecutter
```
### Point cookiecutter to this GitHub repository to automatically download and generate your project
```
cookiecutter https://github.com/Microsoft/cookiecutter-azure-search-cognitive-skill
```
View the README.md of your new project for instructions on next steps
## Resources
This project has two key dependencies:
| Dependency Name | Documentation | Description |
|-----------------|------------------------------|----------------------------------------------------------------------------------------|
| spaCy | https://spacy.io | Industrial-strength Natural Language Processing (NLP) with Python and Cython |
| FastAPI | https://fastapi.tiangolo.com | FastAPI framework, high performance, easy to learn, fast to code, ready for production |
# Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
|
cookiecutter-spacy-fastapi/README.md/0
|
{
"file_path": "cookiecutter-spacy-fastapi/README.md",
"repo_id": "cookiecutter-spacy-fastapi",
"token_count": 680
}
| 870 |
import uvicorn
from app.api import app
if __name__ == '__main__':
uvicorn.run(app, host='0.0.0.0', port=8080, log_level='info')
|
cookiecutter-spacy-fastapi/{{cookiecutter.project_slug}}/main.py/0
|
{
"file_path": "cookiecutter-spacy-fastapi/{{cookiecutter.project_slug}}/main.py",
"repo_id": "cookiecutter-spacy-fastapi",
"token_count": 56
}
| 871 |
var Migrations = artifacts.require("./Migrations.sol")
module.exports = function (deployer) {
deployer.deploy(Migrations)
}
|
0xDeCA10B/demo/client/migrations/1_initial_migration.js/0
|
{
"file_path": "0xDeCA10B/demo/client/migrations/1_initial_migration.js",
"repo_id": "0xDeCA10B",
"token_count": 47
}
| 0 |
import InputLabel from '@material-ui/core/InputLabel'
import MenuItem from '@material-ui/core/MenuItem'
import Select from '@material-ui/core/Select'
import Tooltip from '@material-ui/core/Tooltip'
import React from 'react'
export function checkStorages(storages) {
return Promise.all(Object.entries(storages).map(([key, storage]) => {
return storage.health().then(status => {
if (status.healthy) {
return key
} else if (status.details.err) {
console.warn(`${key} data is not available.`)
console.warn(status.details.err)
}
}).catch(err => {
console.warn(`${key} data is not available.`)
console.warn(err)
})
}))
}
export function renderStorageSelector(detailedDescription, currentValue, handleInputChange, permittedStorageTypes) {
if (currentValue !== 'none' && permittedStorageTypes.indexOf(currentValue) < 0) {
// `currentValue` is invalid. Avoid a warning.
currentValue = ''
}
return <div>
<Tooltip placement="top-start"
title={detailedDescription}>
<InputLabel htmlFor="storage-selector">
Storage
</InputLabel>
</Tooltip>
<Select value={currentValue} onChange={handleInputChange} inputProps={{
name: 'storageType',
id: 'storage-selector',
}}>
{permittedStorageTypes.indexOf('none') >= 0 &&
<MenuItem key="storage-select-none" value="none">None (do not store data)</MenuItem>
}
{permittedStorageTypes.indexOf('local') >= 0 &&
<MenuItem key="storage-select-local" value="local">Local (only on this device)</MenuItem>
}
{permittedStorageTypes.indexOf('service') >= 0 &&
<MenuItem key="storage-select-service" value="service">External (a database elsewhere)</MenuItem>
}
</Select>
</div>
}
|
0xDeCA10B/demo/client/src/components/storageSelector.js/0
|
{
"file_path": "0xDeCA10B/demo/client/src/components/storageSelector.js",
"repo_id": "0xDeCA10B",
"token_count": 648
}
| 1 |
/**
* Information about a shared model.
*/
export class ModelInformation {
id?: number
name: string
address: string
description: string
modelType: string
encoder: string
accuracy?: number
constructor(obj: any) {
this.id = obj.id
this.name = obj.name
this.address = obj.address
this.modelType = obj.modelType
this.description = obj.description
this.encoder = obj.encoder
this.accuracy = obj.accuracy
}
}
/**
* A response when getting models.
*/
export class ModelsResponse {
/**
* The models matching the request.
*/
models: ModelInformation[]
/**
* The number of remaining models for the query. This does not count the models in `models`.
*/
remaining: number
constructor(models: ModelInformation[], remaining: number) {
this.models = models
this.remaining = remaining
}
}
export class RemoveResponse {
success: boolean
constructor(success: boolean) {
this.success = success
}
}
export class OriginalData {
constructor(public text: string) {
}
}
export class DataStoreHealthStatus {
healthy: boolean
details: any
constructor(healthy: boolean, details?: any) {
this.healthy = healthy
this.details = details
if (this.details === undefined) {
this.details = {}
}
}
}
/**
* Interact with the storage of model and data information.
*/
export interface DataStore {
health(): Promise<DataStoreHealthStatus>
saveOriginalData(transactionHash: string, originalData: OriginalData): Promise<any>
getOriginalData(transactionHash: string): Promise<OriginalData>
saveModelInformation(modelInformation: ModelInformation): Promise<any>
getModels(afterAddress?: string, limit?: number): Promise<ModelsResponse>
getModel(modelId?: number, address?: string): Promise<ModelInformation>
removeModel(modelInformation: ModelInformation): Promise<RemoveResponse>
}
|
0xDeCA10B/demo/client/src/storage/data-store.ts/0
|
{
"file_path": "0xDeCA10B/demo/client/src/storage/data-store.ts",
"repo_id": "0xDeCA10B",
"token_count": 547
}
| 2 |
const fs = require('fs')
const CollaborativeTrainer64 = artifacts.require("./CollaborativeTrainer64")
const DataHandler64 = artifacts.require("./data/DataHandler64")
const NearestCentroidClassifier = artifacts.require("./classification/NearestCentroidClassifier")
const Stakeable64 = artifacts.require("./incentive/Stakeable64")
const { convertData } = require('../../src/float-utils-node')
/**
* This test was mainly created to test gas usage.
*/
contract('VpaClassifier', function (accounts) {
let numDimensions = null
const toFloat = 1E9
let dataHandler, incentiveMechanism, classifier, instance
async function normalize(data) {
data = convertData(data, web3, toFloat)
return classifier.norm(data).then(norm => {
return data.map(x => x.mul(web3.utils.toBN(toFloat)).div(norm))
})
}
function parseBN(num) {
if (web3.utils.isBN(num)) {
return num.toNumber()
} else {
assert.typeOf(num, 'number')
return num
}
}
function parseFloatBN(bn) {
assert(web3.utils.isBN(bn), `${bn} is not a BN`)
// Can't divide first since a BN can only be an integer.
return bn.toNumber() / toFloat
}
before("deploy contracts", async () => {
console.log(`Deploying VPA classifier for tests.`)
// Low default times for testing.
const refundTimeS = 15
const ownerClaimWaitTimeS = 20
const anyAddressClaimWaitTimeS = 20
// Weight for deposit cost in wei.
const costWeight = 1E15
// Model
const classifications = []
const centroids = []
const dataCounts = []
let model = fs.readFileSync('./src/ml-models/vpa/vpa-classifier-centroids.json', 'utf8')
model = JSON.parse(model)
for (let [classification, centroidInfo] of Object.entries(model.intents)) {
classifications.push(classification)
// To test gas usage faster, use less dimensions:
// centroidInfo.centroid = centroidInfo.centroid.slice(0, 64);
centroids.push(convertData(centroidInfo.centroid, web3, toFloat))
dataCounts.push(centroidInfo.dataCount)
if (numDimensions === null) {
numDimensions = centroidInfo.centroid.length
} else {
assert.equal(centroidInfo.centroid.length, numDimensions)
}
}
console.log(` Deploying DataHandler.`)
dataHandler = await DataHandler64.new()
console.log(` Deployed data handler to ${dataHandler.address}.`)
incentiveMechanism = await Stakeable64.new(
refundTimeS,
ownerClaimWaitTimeS,
anyAddressClaimWaitTimeS,
costWeight
)
console.log(` Deployed incentive mechanism to ${incentiveMechanism.address}.`)
classifier = await NearestCentroidClassifier.new(
[classifications[0]], [centroids[0]], [dataCounts[0]],
// Block gasLimit by most miners as of May 2019.
{ gas: 8.9E6 }
)
console.log(` Deployed classifier to ${classifier.address}.`)
// Add classes separately to avoid hitting gasLimit.
const addClassPromises = []
for (let i = 1; i < classifications.length; ++i) {
addClassPromises.push(classifier.addClass(
centroids[i], classifications[i], dataCounts[i]
))
}
console.log(` Deploying main entry point.`)
instance = await CollaborativeTrainer64.new(
"name", "description", "encoder",
dataHandler.address,
incentiveMechanism.address,
classifier.address
)
console.log(` Deployed VPA collaborative classifier to ${instance.address}.`)
return Promise.all([
dataHandler.transferOwnership(instance.address),
incentiveMechanism.transferOwnership(instance.address),
classifier.transferOwnership(instance.address),
].concat(addClassPromises)).then(() => {
console.log(" All classes added.")
})
})
it("...should get the classifications", async function () {
const expectedClassifications = ["WEATHER_GET", "MUSIC_PLAY"]
return classifier.getNumClassifications().then(parseBN).then(numClassifications => {
assert.equal(numClassifications, expectedClassifications.length, "Number of classifications is wrong.")
let promises = expectedClassifications.map((_, i) => {
return classifier.classifications(i)
})
return Promise.all(promises).then(results => {
assert.deepEqual(results, expectedClassifications, "Wrong classifications.")
})
})
})
it("...should add data", async () => {
const cost = await instance.incentiveMechanism()
.then(Stakeable64.at)
.then(inc => inc.getNextAddDataCost())
assert(cost.gtn(0), "Cost should be positive.")
// To test random data use:
// const data = Array.from({length: numDimensions}).map(Math.random);
// Test consistent data:
const data = []
for (let i = 0; i < numDimensions; ++i) {
data.push((i + 222) / 1100)
}
const normalizedData = await normalize(data)
const classification = 0
const centroidPromises = data.map((_, dimension) => {
return classifier.centroids(classification, dimension).then(parseFloatBN)
})
const originalCentroidValues = await Promise.all(centroidPromises)
const originalDataCount = await classifier.dataCounts(classification).then(parseBN)
const r = await instance.addData(normalizedData, classification, { from: accounts[0], value: cost })
assert.isBelow(r.receipt.gasUsed, 7.9E6, "Too much gas used.")
assert.isBelow(r.receipt.cumulativeGasUsed, 7.9E6, "Too much gas used.")
const dataCount = await classifier.dataCounts(classification).then(parseBN)
assert.equal(dataCount, originalDataCount + 1, "Wrong data count.")
// Don't need to check every dimension so save time by just checking a few.
const numDimensionsToCheck = 20
const promises = normalizedData.slice(0, numDimensionsToCheck).map((dataVal, dimension) => {
return classifier.centroids(classification, dimension).then(parseFloatBN).then(v => {
assert.closeTo(v,
(originalCentroidValues[dimension] * originalDataCount + parseFloatBN(dataVal)) / dataCount,
1E-8)
})
})
return Promise.all(promises)
})
})
|
0xDeCA10B/demo/client/test/contracts/vpa-classifier.js/0
|
{
"file_path": "0xDeCA10B/demo/client/test/contracts/vpa-classifier.js",
"repo_id": "0xDeCA10B",
"token_count": 2024
}
| 3 |
# Sharing Updatable Models (SUM) on Blockchain Simulation
(formerly Decentralized & Collaborative AI on Blockchain Simulation)
[](https://github.com/microsoft/0xDeCA10B/actions/workflows/simulation-test.yml)
Tools to run simulations for AI models in smart contracts.
## Examples
Even when a bad actor submits incorrect data, an honest contributor profits while the model's accuracy remains stable.
<img src="./assets/drt.gif?raw=true" width=500 alt="Graph showing a good agent's balance increasing and a bad agent's balance decreasing while the model's accuracy on a hidden test remains stable around 70%.">
In the above example, a Perceptron was trained on the [IMDB reviews dataset for sentiment classification][keras-imdb].
Here's a more detailed example:
<img src="./assets/1558466743_plot.png?raw=true" width=500 alt="Graph showing a good agent's balance increasing and a bad agent's balance decreasing while the model's accuracy on a hidden test remains stable around 79%.">
For this simulation, again a Perceptron was trained on the [IMDB reviews dataset for sentiment classification][keras-imdb].
The model was initially trained on 2000 of the 25000 training data samples.
The model has 1000 binary features which are the presence of the 1000 most frequent words in the dataset.
The graph below shows the results of a simulation where for simplicity, we show just one honest contributor and one malicious contributor but these contributors effectively represent many contributors submitting the remaining 92% of the training data over time.
In this simulation, we use the Deposit, Refund, and Take (DRT) incentive mechanism where contributors have 1 day to claim a refund.
Any contributor can take the remaining deposit from a contribution after 9 days.
"Bad Agent" is willing to spend about twice as much on deposits than an honest contributor, "Good Agent".
The adversary is only submitting data about one sixth as often.
Despite the malicious efforts, the accuracy can still be maintained and the honest contributors profit.
# Setup
This section explains how to set up locally; alternatively, you can skip ahead and use a Docker image.
Run:
```bash
conda create --channel conda-forge --name decai-simulation 'python=3.8' bokeh mkl mkl-service numpy pandas phantomjs scikit-learn scipy tensorflow
conda activate decai-simulation
pip install -e .
```
## Docker Setup
1. Clone this repo.
2. Navigate into the folder containing this README.md file:
```bash
cd 0xDeCA10B/simulation
```
3. You can use a Docker image by running:
```bash
docker run --rm -it -p 5006:5006 -v ${PWD}:/root/workspace/0xDeCA10B/simulation --name decai-simulation mcr.microsoft.com/samples/blockchain-ai/0xdeca10b-simulation bash
```
4. You have completed the setup steps.
Next, refer to the Running Simulations steps lower down on this page.
### Building the Docker Image
If you want to build your own fresh image:
```bash
docker build -t decai-simulation .
```
#### (Microsoft Devs) Updating the Public Image
First get permission to push to 0xdeca10bcontainerreg.azurecr.io.
Then run:
```bash
newVersion=<Set the new version. E.g. 1.2.0>
docker tag decai-simulation 0xdeca10bcontainerreg.azurecr.io/public/samples/blockchain-ai/0xdeca10b-simulation:${newVersion}
docker tag decai-simulation 0xdeca10bcontainerreg.azurecr.io/public/samples/blockchain-ai/0xdeca10b-simulation:latest
docker push 0xdeca10bcontainerreg.azurecr.io/public/samples/blockchain-ai/0xdeca10b-simulation:${newVersion}
docker push 0xdeca10bcontainerreg.azurecr.io/public/samples/blockchain-ai/0xdeca10b-simulation:latest
```
# Running Simulations
Run:
```bash
bokeh serve decai/simulation/simulate_imdb_perceptron.py
```
Then open the browser to the address the above command tells you.
It should be something like: [http://localhost:5006/simulate_imdb_perceptron](http://localhost:5006/simulate_imdb_perceptron).
# Customizing Simulations
To try out your own models or incentive mechanisms, you'll need to implement the interfaces.
You can proceed by just copying the examples. Here are the details if you need them:
Suppose you want to use a neural network for the classifier:
1. Implement the [`Classifier`](decai/simulation/contract/classification/classifier.py) interface in a class `NeuralNetworkClassifier`.
The easiest way is to copy an existing classifier like the [`Perceptron`](decai/simulation/contract/classification/perceptron.py).
2. Create a `Module` called `NeuralNetworkModule` which binds `Classifier` to your new class just like in [`PerceptronModule`](decai/simulation/contract/classification/perceptron.py).
Setting up a custom incentive mechanism is similar:
1. Implement [`IncentiveMechanism`](decai/simulation/contract/incentive/incentive_mechanism.py).
You can use [`Stakeable`](decai/simulation/contract/incentive/stakeable.py) as an example.
2. Bind your implementation in a module.
Now set up the main entry point to run the simulation: copy ([`decai/simulation/simulate_imdb_perceptron.py`](decai/simulation/simulate_imdb_perceptron.py)) to a new file, e.g. `decai/simulation/simulate_imdb_neural_network.py`.
In `simulate_imdb_neural_network.py`, you can set up the agents that will act in the simulation.
Then set the modules you created.
So instead of `PerceptronModule` put `NeuralNetworkModule`.
Run `bokeh serve decai/simulation/simulate_imdb_neural_network.py` and open your browser to the displayed URL to try it out.
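Putting the steps above together, here is a minimal, illustrative sketch of what a custom classifier and its module binding could look like. The scikit-learn body is only a placeholder, the method names follow what the simulation's existing classifiers expose, and the real `Classifier` interface may require more than this; it is not the repository's actual code.
```python
from injector import Module, singleton
from sklearn.neural_network import MLPClassifier

from decai.simulation.contract.classification.classifier import Classifier


class NeuralNetworkClassifier:
    """Placeholder classifier exposing an init_model/predict/update/evaluate style API."""

    def __init__(self):
        self._model = MLPClassifier(hidden_layer_sizes=(32,), max_iter=200)

    def init_model(self, x, y):
        self._model.fit(x, y)

    def predict(self, sample):
        return self._model.predict(sample.reshape(1, -1))[0]

    def update(self, sample, label):
        self._model.partial_fit(sample.reshape(1, -1), [label])

    def evaluate(self, x, y):
        return self._model.score(x, y)


class NeuralNetworkModule(Module):
    def configure(self, binder):
        # Bind the simulation's Classifier interface to the sketch above,
        # mirroring how PerceptronModule binds its own implementation.
        binder.bind(Classifier, to=NeuralNetworkClassifier, scope=singleton)
```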
# Testing
Setup the testing environment:
```bash
pip install -e .[test]
```
Run tests:
(it might take a few minutes the first time because some spaCy models might need to be downloaded)
```bash
pytest
```
[keras-imdb]: https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification
|
0xDeCA10B/simulation/README.md/0
|
{
"file_path": "0xDeCA10B/simulation/README.md",
"repo_id": "0xDeCA10B",
"token_count": 1692
}
| 4 |
import unittest
import numpy as np
from injector import Injector
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.ncc_module import NearestCentroidClassifierModule
from decai.simulation.logging_module import LoggingModule
class TestNearestCentroidClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.inj = Injector([
LoggingModule,
NearestCentroidClassifierModule,
])
def test_partial_fit(self):
model = self.inj.get(Classifier)
data = [
[-1.0, -1.0, ],
[-0.5, -0.5, ],
[+1.0, +1.0],
[+0.5, +0.5],
]
labels = [0, 0, 1, 1, ]
data = np.array(data)
labels = np.array(labels)
model.init_model(data, labels)
self.assertEqual(1, model.evaluate(data, labels))
sample = np.array([0.1, 0.1, ])
self.assertEqual(1, model.predict(sample))
# Update a point beyond `sample` so that `sample` gets a new label.
model.update(np.array([0.3, 0.3, ]), 0)
self.assertEqual(0, model.predict(sample))
self.assertEqual(1, model.evaluate(data, labels))
def test_partial_fit_2(self):
model = self.inj.get(Classifier)
data = [
[0, -1.0, ],
[0, -0.5, ],
[0, +1.0],
[0, +0.5],
]
labels = [0, 0, 1, 1, ]
data = np.array(data)
labels = np.array(labels)
model.init_model(data, labels)
self.assertEqual(1, model.evaluate(data, labels))
sample = np.array([0, +0.1, ])
self.assertEqual(1, model.predict(sample))
# Update a point beyond `sample` so that `sample` gets a new label.
model.update(np.array([0, 0, ]), 0)
self.assertEqual(0, model.predict(sample))
self.assertEqual(1, model.evaluate(data, labels))
|
0xDeCA10B/simulation/decai/simulation/contract/classification/tests/test_ncc.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/contract/classification/tests/test_ncc.py",
"repo_id": "0xDeCA10B",
"token_count": 939
}
| 5 |
from typing import List, Optional, Tuple
import numpy as np
from injector import singleton
FeatureIndexMapping = List[int]
@singleton
class FeatureIndexMapper:
"""
Helps with mapping sparse data matrices to compact dense ones
since some classifiers don't work well with sparse data:
* SGDClassifier training needs 32-bit integer indices.
* MultinomialNB training makes the data dense.
This is mostly made to work with 2D data.
"""
def map(self, training_data, testing_data) -> Tuple[np.ndarray, np.ndarray, Optional[FeatureIndexMapping]]:
if isinstance(training_data, np.ndarray):
assert isinstance(testing_data, np.ndarray), \
f"Testing data must also be an ndarray if the training data is an ndarray. Got: {type(testing_data)}."
return training_data, testing_data, None
mapping = sorted(map(int, set(training_data.nonzero()[-1])))
feature_index_to_index_mapping = {v: index for (index, v) in enumerate(mapping)}
# We want: `result_train = training_data[:, mapping].todense()` but this was allocating a large matrix even before calling `todense()`.
# Also tried making a mapping matrix and multiplying by it but that also allocated memory.
result_train = np.zeros(training_data.shape[:-1] + (len(mapping),), dtype=training_data.dtype)
*row_indices, col_indices = training_data.nonzero()
col_indices = tuple(feature_index_to_index_mapping[i] for i in col_indices)
result_train[row_indices, col_indices] = training_data[training_data.nonzero()]
result_test = np.zeros(testing_data.shape[:-1] + (len(mapping),), dtype=testing_data.dtype)
*row_indices, col_indices = testing_data.nonzero()
original_col_indices_used = []
row_indices_used = []
col_indices_mapped = []
for row_index, col_index in zip(*row_indices, col_indices):
index = feature_index_to_index_mapping.get(col_index)
if index is not None:
original_col_indices_used.append(col_index)
row_indices_used.append(row_index)
col_indices_mapped.append(index)
result_test[row_indices_used, col_indices_mapped] = testing_data[row_indices_used, original_col_indices_used]
return result_train, result_test, mapping
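# Hedged usage sketch (illustrative, not part of the original module): map a sparse
# train/test pair onto the dense columns that are non-zero in the training data.
# It assumes scipy is installed, since the class is written for sparse inputs.
if __name__ == "__main__":
    from scipy.sparse import csr_matrix

    mapper = FeatureIndexMapper()
    x_train = csr_matrix(np.array([[0, 3, 0, 1], [0, 0, 0, 2]]))
    x_test = csr_matrix(np.array([[5, 4, 0, 0]]))
    dense_train, dense_test, mapping = mapper.map(x_train, x_test)
    # Only columns 1 and 3 are non-zero in training, so both dense matrices have two columns
    # and the test value in the unseen column 0 is dropped.
    assert mapping == [1, 3]
    assert dense_train.shape == (2, 2) and dense_test.shape == (1, 2)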
|
0xDeCA10B/simulation/decai/simulation/data/featuremapping/feature_index_mapper.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/data/featuremapping/feature_index_mapper.py",
"repo_id": "0xDeCA10B",
"token_count": 922
}
| 6 |
import unittest
from typing import cast
from injector import Injector
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.featuremapping.hashing.murmurhash3 import MurmurHash3Module
from decai.simulation.data.featuremapping.hashing.token_hash import TokenHash
from decai.simulation.data.offensive_data_loader import OffensiveDataLoader, OffensiveDataModule
from decai.simulation.logging_module import LoggingModule
class TestOffensiveDataLoader(unittest.TestCase):
@classmethod
def setUpClass(cls):
inj = Injector([
LoggingModule,
MurmurHash3Module,
OffensiveDataModule,
])
cls.data_loader = inj.get(DataLoader)
assert isinstance(cls.data_loader, OffensiveDataLoader)
cls.data_loader = cast(OffensiveDataLoader, cls.data_loader)
cls.hash = inj.get(TokenHash)
def test_load(self):
train_size = 20
test_size = 10
(x_train, y_train), (x_test, y_test) = self.data_loader.load_data(train_size=train_size, test_size=test_size)
assert x_train.shape[0] == train_size
assert x_train.shape[0] == y_train.shape[0]
assert x_test.shape[0] == test_size
assert x_test.shape[0] == y_test.shape[0]
assert y_train.shape == (train_size,)
assert y_test.shape == (test_size,)
# Test some values to help avoid regressions.
x_train_values_x, x_train_values_y = x_train[0].nonzero()
self.assertEqual(0, x_train_values_x[0])
self.assertEqual(495653056, x_train_values_y[0])
self.assertEqual(1, x_train[x_train_values_x[0], x_train_values_y[0]])
self.assertEqual(0, x_train_values_x[1])
self.assertEqual(443377497, x_train_values_y[1])
self.assertEqual(1, x_train[x_train_values_x[0], x_train_values_y[0]])
col = self.hash.hash("you")
self.assertEqual(814527388, col)
self.assertEqual(2, x_train[1, col])
|
0xDeCA10B/simulation/decai/simulation/data/tests/test_offensive_data_loader.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/data/tests/test_offensive_data_loader.py",
"repo_id": "0xDeCA10B",
"token_count": 857
}
| 7 |
protocolVersion: 2
name: job-config-1
type: job
prerequisites:
- protocolVersion: 2
name: pytorch_example
type: dockerimage
uri: openpai/pai.example.pytorch
taskRoles:
train:
instances: 1
completion:
minFailedInstances: 1
minSucceededInstances: 1
dockerImage: pytorch_example
resourcePerInstance:
cpu: 2
memoryMB: 4096
gpu: 1
commands:
- >
python3 examples/mnist/main.py
--epochs 1
--lr 0.01
--batch-size 64
- sleep 5m
defaults:
virtualCluster: vc1
extras:
hivedScheduler:
jobPriorityClass: test
|
AI-System/Labs/AdvancedLabs/Lab6/config/job-config-1.yaml/0
|
{
"file_path": "AI-System/Labs/AdvancedLabs/Lab6/config/job-config-1.yaml",
"repo_id": "AI-System",
"token_count": 270
}
| 8 |
# MIT License
# Copyright (c) Microsoft Corporation.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
import torch
import torch.nn as nn
import ops
from nni.nas.pytorch import mutables
class AuxiliaryHead(nn.Module):
""" Auxiliary head in 2/3 place of network to let the gradient flow well """
def __init__(self, input_size, C, n_classes):
""" assuming input size 7x7 or 8x8 """
assert input_size in [7, 8]
super().__init__()
self.net = nn.Sequential(
nn.ReLU(inplace=True),
nn.AvgPool2d(5, stride=input_size - 5, padding=0, count_include_pad=False), # 2x2 out
nn.Conv2d(C, 128, kernel_size=1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 768, kernel_size=2, bias=False), # 1x1 out
nn.BatchNorm2d(768),
nn.ReLU(inplace=True)
)
self.linear = nn.Linear(768, n_classes)
def forward(self, x):
out = self.net(x)
out = out.view(out.size(0), -1) # flatten
logits = self.linear(out)
return logits
class Node(nn.Module):
def __init__(self, node_id, num_prev_nodes, channels, num_downsample_connect):
super().__init__()
self.ops = nn.ModuleList()
choice_keys = []
for i in range(num_prev_nodes):
stride = 2 if i < num_downsample_connect else 1
choice_keys.append("{}_p{}".format(node_id, i))
self.ops.append(
mutables.LayerChoice(
[
ops.PoolBN('max', channels, 3, stride, 1, affine=False),
ops.PoolBN('avg', channels, 3, stride, 1, affine=False),
nn.Identity() if stride == 1 else ops.FactorizedReduce(channels, channels, affine=False),
ops.SepConv(channels, channels, 3, stride, 1, affine=False),
ops.SepConv(channels, channels, 5, stride, 2, affine=False),
ops.DilConv(channels, channels, 3, stride, 2, 2, affine=False),
ops.DilConv(channels, channels, 5, stride, 4, 2, affine=False)
],
key=choice_keys[-1]))
self.drop_path = ops.DropPath()
self.input_switch = mutables.InputChoice(choose_from=choice_keys, n_chosen=2, key="{}_switch".format(node_id))
def forward(self, prev_nodes):
assert len(self.ops) == len(prev_nodes)
out = [op(node) for op, node in zip(self.ops, prev_nodes)]
out = [self.drop_path(o) if o is not None else None for o in out]
return self.input_switch(out)
class Cell(nn.Module):
def __init__(self, n_nodes, channels_pp, channels_p, channels, reduction_p, reduction):
super().__init__()
self.reduction = reduction
self.n_nodes = n_nodes
# If previous cell is reduction cell, current input size does not match with
# output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing.
if reduction_p:
self.preproc0 = ops.FactorizedReduce(channels_pp, channels, affine=False)
else:
self.preproc0 = ops.StdConv(channels_pp, channels, 1, 1, 0, affine=False)
self.preproc1 = ops.StdConv(channels_p, channels, 1, 1, 0, affine=False)
# generate dag
self.mutable_ops = nn.ModuleList()
for depth in range(2, self.n_nodes + 2):
self.mutable_ops.append(Node("{}_n{}".format("reduce" if reduction else "normal", depth),
depth, channels, 2 if reduction else 0))
def forward(self, s0, s1):
# s0, s1 are the outputs of previous previous cell and previous cell, respectively.
tensors = [self.preproc0(s0), self.preproc1(s1)]
for node in self.mutable_ops:
cur_tensor = node(tensors)
tensors.append(cur_tensor)
output = torch.cat(tensors[2:], dim=1)
return output
class CNN(nn.Module):
def __init__(self, input_size, in_channels, channels, n_classes, n_layers, n_nodes=4,
stem_multiplier=3, auxiliary=False):
super().__init__()
self.in_channels = in_channels
self.channels = channels
self.n_classes = n_classes
self.n_layers = n_layers
self.aux_pos = 2 * n_layers // 3 if auxiliary else -1
c_cur = stem_multiplier * self.channels
self.stem = nn.Sequential(
nn.Conv2d(in_channels, c_cur, 3, 1, 1, bias=False),
nn.BatchNorm2d(c_cur)
)
# for the first cell, stem is used for both s0 and s1
# [!] channels_pp and channels_p is output channel size, but c_cur is input channel size.
channels_pp, channels_p, c_cur = c_cur, c_cur, channels
self.cells = nn.ModuleList()
reduction_p, reduction = False, False
for i in range(n_layers):
reduction_p, reduction = reduction, False
# Reduce featuremap size and double channels in 1/3 and 2/3 layer.
if i in [n_layers // 3, 2 * n_layers // 3]:
c_cur *= 2
reduction = True
cell = Cell(n_nodes, channels_pp, channels_p, c_cur, reduction_p, reduction)
self.cells.append(cell)
c_cur_out = c_cur * n_nodes
channels_pp, channels_p = channels_p, c_cur_out
if i == self.aux_pos:
self.aux_head = AuxiliaryHead(input_size // 4, channels_p, n_classes)
self.gap = nn.AdaptiveAvgPool2d(1)
self.linear = nn.Linear(channels_p, n_classes)
def forward(self, x):
s0 = s1 = self.stem(x)
aux_logits = None
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1)
if i == self.aux_pos and self.training:
aux_logits = self.aux_head(s1)
out = self.gap(s1)
out = out.view(out.size(0), -1) # flatten
logits = self.linear(out)
if aux_logits is not None:
return logits, aux_logits
return logits
def drop_path_prob(self, p):
for module in self.modules():
if isinstance(module, ops.DropPath):
module.p = p
|
AI-System/Labs/AdvancedLabs/Lab8/nas/model.py/0
|
{
"file_path": "AI-System/Labs/AdvancedLabs/Lab8/nas/model.py",
"repo_id": "AI-System",
"token_count": 3302
}
| 9 |
# Labs
The labs consist of basic labs and advanced labs. In this section, we design the experimental courses from a systems research perspective.
Students are encouraged to implement and optimize system modules by operating and applying mainstream and state-of-the-art frameworks, platforms, and tools, improving their ability to solve practical problems rather than just learning how to use the tools.
## Target users
* Junior and Senior students in colleges
* Graduate students
## Experimental design goals
This experimental course is designed from the perspective of system research. Through the operation and application of mainstream and latest frameworks, platforms and tools, students are encouraged to implement and optimize system modules to improve their ability to solve practical problems, rather than just understanding the use of tools.
## Experimental design features
1. Provide a unified framework, platform and tools.
2. Design operable experiment content.
3. The general-purpose experiment content can easily be deepened and extended to fit each university's characteristics.
4. Get started with practical engineering projects and deepen the understanding of AI systems.
## Contents
### Basic Labs
|<div style="width:50px">Lab No.</div>|Lab Name|Remarks|
|---|---|---|
|Prerequisites|[Setup Environment](./Prerequisites.md)| Set up the environment for the experiments |
|Lab 1|[A simple end-to-end AI example, <br>from a system perspective](./BasicLabs/Lab1/README.md)|Understand the systems from debug info and system logs|
|Lab 2|[Customize operators](./BasicLabs/Lab2/README.md)|Design and implement a customized operator (both forward and backward) in python|
|Lab 3|[CUDA implementation](./BasicLabs/Lab3/README.md)|Add a CUDA implementation for the customized operator|
|Lab 4|[AllReduce implementation](./BasicLabs/Lab4/README.md)|Improve AllReduce on Horovod: implement a lossy compression (3LC) on GPU for low-bandwidth network|
|Lab 5|[Configure containers for customized training and inference](./BasicLabs/Lab5/README.md)|Configure containers|
||||
### Advanced Labs
|<div style="width:50px">Lab No.</div>|Lab Name|Remarks|
|---|---|---|
|Lab 6|[Scheduling and resource management system](./AdvancedLabs/Lab6/README.md)|Get familiar with OpenPAI or KubeFlow|
|Lab 7|[Distributed training](./AdvancedLabs/Lab7/README.md)|Try different kinds of all reduce implementations|
|Lab 8|[AutoML](./AdvancedLabs/Lab8/README.md)|Search for a new neural network structure for Image/NLP tasks|
|Lab 9|[RL Systems](./AdvancedLabs/Lab9/README.md)|Configure and get familiar with one of the following RL Systems: RLlib, …|
||||
|
AI-System/Labs/README.md/0
|
{
"file_path": "AI-System/Labs/README.md",
"repo_id": "AI-System",
"token_count": 691
}
| 10 |
# AI-System
[简体中文](./README.md)
This is an online AI System Course to help students learn the whole stack of systems that support AI, and practice them in the real projects. In this course, we will use terms **AI-System** and **System for AI** alternately.
This course is one of the AI-related courses in [微软人工智能教育与共建社区](https://github.com/microsoft/ai-edu), under the [A-基础教程](https://github.com/microsoft/ai-edu/tree/master/A-%E5%9F%BA%E7%A1%80%E6%95%99%E7%A8%8B) module. The course number and name are *A6-人工智能系统*.
Visit the [A-基础教程](https://github.com/microsoft/ai-edu/tree/master/A-%E5%9F%BA%E7%A1%80%E6%95%99%E7%A8%8B) module to access more related content.
It is strongly recommended that learners who want to learn or consolidate the core knowledge of artificial intelligence first study [A2-神经网络基本原理简明教程](https://aka.ms/beginnerAI), also known as the **9-step Neural Network tutorial**. It will greatly help with this course.
## Background
In recent years, the rapid development of artificial intelligence, especially deep learning technology, is inseparable from the continuous progress of hardware and software systems. In the foreseeable future, the development of artificial intelligence technology will still rely on a joint innovation model that combines computer systems and artificial intelligence. Computer systems are now empowering artificial intelligence with a larger scale and higher complexity. This requires not only more system innovation, but also systematic thinking and methodology. At the same time, artificial intelligence in turn provides support for the design of complex systems.
We have noticed that most of the current artificial intelligence-related courses, especially deep learning and machine learning related courses, mainly focus on related theories, algorithms or applications, but system-related courses are rare. We hope that the course of artificial intelligence systems can make artificial intelligence related education more comprehensive and in-depth, so as to jointly promote the cultivation of talents that intersect artificial intelligence and systems.
## Purpose
This course aims to help students:
1. Completely understand the computer system architecture that supports deep learning, and learn the system design under the full life cycle of deep learning through practical problems.
2. Introduce cutting-edge systems and artificial intelligence research work, including AI for Systems and Systems for AI, to help senior undergraduates and graduate students better find and define meaningful research questions.
3. Design experimental courses from the perspective of system research. Encourage students to implement and optimize system modules by operating and applying mainstream and latest frameworks, platforms and tools to improve their ability to solve practical problems, not just understanding the use of tools.
**Prerequisites:** C/C++/Python, Computer Architecture, Introduction to algorithms
## Characteristic
The course mainly includes the following three modules:
The first part is the basic knowledge of artificial intelligence and a full-stack overview of artificial intelligence systems; and the systematic design and methodology of deep learning systems.
The second part of the advanced courses includes the most cutting-edge systems and artificial intelligence research fields.
The third part is the supporting experimental courses, including the most mainstream frameworks, platforms and tools, and a series of experimental projects.
The content of the first part will focus on basic knowledge, while the content of the other two parts will be dynamically adjusted with the technological progress of academia and industry. The content of the latter two parts will be organized in a modular form to facilitate adjustment or combination with other CS courses (such as compilation principles, etc.) as advanced lectures or internship projects.
The design of this course will also draw on the research results and experience of Microsoft Research Asia in the intersection of artificial intelligence and systems, including some platforms and tools developed by Microsoft and the research institute. The course also encourages other schools and teachers to add and adjust more advanced topics or other experiments according to their needs.
## Syllabus
### [Lectures](./Lectures)
Lectures have two parts: basic courses and advanced courses. The first part focuses on basic theories (lessons 1 to 8), while the second part covers more cutting-edge research (lessons 9 to 14).
*Basic Courses*
| Course No.|Lecture Name|Remarks|
|---|---|---|
|1|Introduction|Overview and system/AI basics|
|2|System perspective of Systems for AI|Systems for AI: a historic view; Fundamentals of neural networks; Fundamentals of Systems for AI|
|3|Computation frameworks for DNN|Backprop and AD, Tensor, DAG, Execution graph. <br>Papers and systems: PyTorch, TensorFlow|
|4|Computer architecture for Matrix computation|Matrix computation, CPU/SIMD, GPGPU, ASIC/TPU <br>Papers and systems: Blas, TPU|
|5|Distributed training algorithms|Data parallelism, model parallelism, distributed SGD <br>Papers and systems: PipeDream|
|6|Distributed training systems|MPI, parameter servers, all-reduce, RDMA <br>Papers and systems: Horovod|
|7|Scheduling and resource management system|Running dnn job on cluster: container, resource allocation, scheduling <br>Papers and systems: Kubeflow, OpenPAI,Gandiva|
|8|Inference systems|Efficiency, latency, throughput, and deployment <br>Papers and systems: TensorRT, TensorflowLite, ONNX|
||||
*Advanced Courses*
| Course No.|Course Name|Remarks|
|---|---|---|
|9|Computation graph compilation and optimization|IR, sub-graph pattern match, Matrix multiplication and memory optimization <br>Papers and systems: XLA, MLIR, TVM, NNFusion|
|10|Efficiency via compression and sparsity|Model compression, Sparsity, Pruning|
|11|AutoML systems|Hyper parameter tuning, NAS <br>Papers and systems: Hyperband, SMAC, ENAS, AutoKeras, NNI|
|12|Reinforcement learning systems|Theory of RL, systems for RL <br>Papers and systems: AC3, RLlib, AlphaZero|
|13|Security and Privacy|Federated learning, security, privacy <br>Papers and systems: DeepFake|
|14|AI for systems|AI for traditional systems problems, for system algorithms <br>Papers and systems: Learned Indexes, Learned query path|
||||
### [Labs](./Labs)
Labs also have two parts: the first part is designed so that students can run most of the labs on a local machine, while the advanced part may need a small cluster (local or on the cloud) with GPU support.
*Basic Labs*
|<div style="width:50px">Lab No.</div>|Lab Name|Remarks|
|---|---|---|
|Lab 1|A simple end-to-end AI example, <br>from a system perspective|Understand the systems from debug info and system logs|
|Lab 2|Customize operators|Design and implement a customized operator (both forward and backward) in python|
|Lab 3|CUDA implementation|Add a CUDA implementation for the customized operator|
|Lab 4|AllReduce implementation|Improve AllReduce on Horovod: implement a lossy compression (3LC) on GPU for low-bandwidth network|
|Lab 5|Configure containers for customized training and inference|Configure containers|
||||
*Advanced Labs*
|<div style="width:50px">Lab No.</div>|Lab Name|Remarks|
|---|---|---|
|Lab 6|Scheduling and resource management system|Get familiar with OpenPAI or KubeFlow|
|Lab 7|Distributed training|Try different kinds of all reduce implementations|
|Lab 8|AutoML|Search for a new neural network structure for Image/NLP tasks|
|Lab 9|RL Systems|Configure and get familiar with one of the following RL Systems: RLlib, …|
||||
## Appendix
The following lists relevant artificial intelligence systems courses at other schools and institutions.
\<TBD>
---
# Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
# Legal Notices
Microsoft and any contributors grant you a license to the Microsoft documentation and other content
in this repository under the [Creative Commons Attribution 4.0 International Public License](https://creativecommons.org/licenses/by/4.0/legalcode),
see the [LICENSE](LICENSE) file, and grant you a license to any code in the repository under the [MIT License](https://opensource.org/licenses/MIT), see the
[LICENSE-CODE](LICENSE-CODE) file.
Microsoft, Windows, Microsoft Azure and/or other Microsoft products and services referenced in the documentation
may be either trademarks or registered trademarks of Microsoft in the United States and/or other countries.
The licenses for this project do not grant you rights to use any Microsoft names, logos, or trademarks.
Microsoft's general trademark guidelines can be found at http://go.microsoft.com/fwlink/?LinkID=254653.
Privacy information can be found at https://privacy.microsoft.com/en-us/
Microsoft and any contributors reserve all other rights, whether under their respective copyrights, patents,
or trademarks, whether by implication, estoppel or otherwise.
|
AI-System/README_en.md/0
|
{
"file_path": "AI-System/README_en.md",
"repo_id": "AI-System",
"token_count": 2485
}
| 11 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 12.2 人工智能训练安全与隐私
- [12.2 人工智能训练安全与隐私](#122-人工智能训练安全与隐私)
- [12.2.1 训练时安全](#1221-训练时安全)
- [12.2.2 训练时隐私](#1222-训练时隐私)
- [12.2.3 联邦学习](#1223-联邦学习)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
本节介绍人工智能训练时的安全与隐私问题及缓解方法。这些问题涉及到人工智能训练的完整性与机密性,反映了可信人工智能训练系统的重要性。
## 12.2.1 深度学习训练时安全
数据投毒(Data Poisoning)一直是各类机器学习方法都面临着的问题。攻击者通过篡改训练数据,来改变最终模型的行为,从而达到一些恶意的目的,例如降低垃圾邮件分类器的精度。在深度学习中,由于训练数据量和模型参数量的增长,出现了更高级、更复杂的攻击方法。
传统的投毒攻击(Poisoning Attack)通过数据投毒来影响模型的正常效果,这种对于测试输入来说无区别的影响很容易被检测出来。近几年针对深度学习的投毒攻击可以在不影响模型对于正常测试样本行为的情况下达到恶意攻击的效果。例如 Ali Shafahi 等人[<sup>[1]</sup>](#poison)在 2018 年提出了一种隐蔽性很强的投毒攻击,可以让被攻击的模型对某个特定的输入样本产生特定的错误输出(例如将特定的恶意软件识别为正常软件),而模型的其他表现一切正常。此攻击除了不影响模型的正常效果之外,还不要求篡改训练数据的标注——此攻击只需要对训练数据的图片做一些微小的扰动。被篡改的图片人看上去很正常,标注也是正确的,因此难以被发现。在现实世界中,攻击者还可以将“毒样本”发布到网上,如果有人爬取这些毒样本来训练自己的模型,那模型就很容易“中毒”。
生成这种“毒样本”的方法与生成对抗样本的方法非常相似:将毒样本 $\hat x$ 的生成看作损失函数 $J_t(x)$ 关于输入样本 $x$ 的优化问题,然后用梯度下降法求解 $\hat x$ 使 $J_t(x)$ 较小。这里 $J_t(x)$ 中的 $t$ 表示想让 $x$ 被模型错误分类到的类别,即目标类别。
另一种利用数据投毒来达到恶意目的的攻击方法使后门攻击(Backdoor Attack),有时也被称为木马攻击(Torjaning Attack)。后门攻击顾名思义,是要在模型中留下一个后门,从而使具有某种特定特征的输入能够触发该后门,从而让模型对这个输入产生一个特定的输出(例如将加上了特定特征的恶意软件识别为正常软件)。值得注意的是,首先,该攻击除了可以发生在训练时,也可以是在模型正常训练好之后用毒样本对模型进行微调;其次,该攻击不是只能影响某个特定输入——将同一个特征加到不同的输入上都能让模型输出这个特定结果,如下图所示:
<center> <img src="./img/12-2-1-trojan.png"/></center>
<center>图 12.2.1 一种后门攻击的示意图:后三个输入样本的右下角被加入了一种特定的特征贴图,被攻击的模型就会将其识别为某个特定的人(图片来源 Liu 等人<a href="#trojan"><sup>[2]</sup></a>)</center>
无论是投毒攻击还是后门攻击,都体现了深度学习的一种脆弱性:人们难以验证深度神经网络的行为是否可信。因为深度学习训练数据集的庞大,所以人们难以一一检查训练数据;因为深度神经网络缺乏可解释性,所以人们难以测试深度神经网络的全部行为。2019 年,Bolun Wang 等人[<sup>[3]</sup>](#cleanse)提出了一种针对后门攻击的检测和缓解方法:给定模型,通过逆向工程反推引起误分类的扰动,检测该扰动是否是一个后门的触发特征;如果一个模型存在后门,通过剪枝、反学习等手段来修补模型,同时保持模型在正常输入下的效果。与对抗样本的研究类似,关于投毒攻击、后门攻击的研究也呈现出一种攻防竞赛的形势。然而,不幸的是,目前大多数缓解方法在面对更先进的攻击时效果并不理想,所以迫切需要健壮、严谨的防御方法。
有趣的是,在深度学习的这一脆弱性带来负面的安全问题的同时,这一性质也能用于正面的目的。2018 年,Jialong Zhang 等人[<sup>[4]</sup>](#watermark)提出用数字水印(Digital Watermarking)来保护深度神经网络的知识产权:通过添加一些特殊的训练数据,使模型在输入包含某种特定的水印特征时,模型输出一个特定的非正常结果,同时模型的正常行为不受影响。这个“特定水印特征-特定结果”的信息是模型所有者私有的,在模型被别人窃取时模型所有者通过提供该信息就能证明自己的所有权。可见,“特定水印特征”就如同后门攻击中的触发特征一样,能够让模型输出特定的结果。同样在 2018 年,Yossi Adi 等人[<sup>[5]</sup>](#watermarkbackdoor)形式化地论证了水印与后门攻击的联系。理论上,可以基于任意一种“强”后门构造出相应的水印,其中强后门主要指难以去除的后门。由此看来,缺乏有效的后门攻击防御方法对于版权保护来说可能并不是一件坏事。
## 12.2.2 深度学习训练时隐私
由于需要大量的计算量,深度学习的训练往往是在服务器而非本地进行的。如果训练过程得不到控制和保护,也可能造成隐私问题。一个最直接的问题是,如果模型是交给外部(如云服务商)训练的,那么训练数据集、模型都可能泄露给外部。如何保护训练数据集和模型的隐私呢?
2017 年,Payman Mohassel 和 Yupeng Zhang[<sup>[6]</sup>](#secureml) 提出了用安全多方计算(Secure Multi-Party Computation)技术来保护整个训练的计算机密性。安全多方计算是密码学中的一种高级技术,可以让参与方在不知道其它参与方数据的情况下联合完成某个计算。在刚刚说的模型训练场景中,可以把模型交给**两个**不同的外部服务器进行训练:训练发起者将自己的训练数据集 $D$ 和模型初始参数 $M$ 分成两份随机的秘密值 $D=D_1+D_2$,$M=M_1+M_2$,也就是安全多方计算中的秘密共享(Secret Sharing),这里的加法都表示某个有限环上的加法;然后,将 $D_1$ 和 $M_1$ 发给第一个服务器,将 $D_2$ 与 $M_2$ 发给第二个服务器;然后让两个服务器运行模型训练的安全多方计算协议,分别得到最终模型参数的秘密共享 $\hat M_1$ 与 $\hat M_2$;最后两个服务器分别将 $\hat M_1$ 与 $\hat M_2$ 发送给训练发起者,训练发起者得到最终模型参数 $\hat M = \hat M_1 + \hat M_2$。在整个过程中,两个服务器都没有得到关于 $D$、$M$、$\hat M$ 的任何信息,因而保护了隐私。不过这个方法的最大缺点是,由于安全多方计算涉及到大量复杂的密码学计算与通信,所以开销特别大——用这种方法训练一个神经网络要比正常、不保护隐私的方法慢几十万倍。随着近几年关于安全多方计算技术研究的进展,这一开销正在不断减少。
像安全多方计算这种能保护隐私的情况下完成计算的技术在最近几年在国内被称为“隐私计算”。流行的隐私计算技术还有同态加密(Homomorphic Encryption)、可信执行环境(Trusted Execution Environment),这两种技术在训练过程中的应用较少,将在下一节中介绍。
除了上述的直接的隐私问题,深度学习训练时还有一些间接的隐私问题,出现在训练过程的完整性得不到保证的时候。Congzheng Song 等人[<sup>[7]</sup>](#memorize)在 2017 年提出了多种攻击方法,通过篡改模型的训练过程来使模型携带额外的信息,从而可以将隐私信息(例如训练数据集的一个子集)嵌入之中。
假如攻击者拥有对训练后的模型的白盒访问权限,即在训练后可以直接访问模型参数,那么通过在训练时将隐私信息编码进模型参数中,就可以在训练后获取这些信息。Song 等人提出了三种的编码方式:一种方法是修改损失函数,使训练出的模型参数与敏感信息的相关系数最大化,这是通过向损失函数增加一个基于皮尔逊相关系数的正则项实现的,对模型正常行为的影响与添加正则项的正常训练相似;另一种方法是用模型参数的正负来编码信息,然后在有符号约束的情况下;还有一种方法更加直接,用模型参数的浮点数表示的最后一位用来编码信息,那么对于参数量为 $n$ 的模型,就可以编码 $n$ 比特的信息,同时这并不会影响模型的正常行为。
假如攻击者拥有对训练后的模型的黑盒访问权限,即在训练后不能直接访问模型参数但可以使用模型,那么通过修改训练数据集使模型学到一些特殊的行为,就可以在训练后利用这些特殊行为获取信息。Song 等人修改训练数据集的方法类似数据投毒,即生成一些恶意训练样本,其标注设置为特定的类别以编码隐私信息。在训练好模型后,通过询问这些恶意样本的分类得到训练时所编码的隐私信息。
## 12.2.3 联邦学习
很多时候的训练是中心化的,即训练发起者收集好训练数据,然后在此之上训练模型。但是由于隐私保护的要求,许多外部的数据不能直接收集。例如大公司想收集用户数据训练模型,但如果用户不愿意将个人数据共享给大公司就不能收集。又例如医疗机构想利用其他医疗机构的数据进行训练,但由于医疗数据是非常敏感的,所以其他医疗机构不能分项自己的数据。因为深度学习的效果往往随着数据量的增长而提高,所以如果不能利用外部的更多数据,就只能用属于训练方自己的内部数据进行训练,训练出的模型效果就不如联合训练的效果。有没有什么办法既能保护隐私,又能利用外部的数据呢?
去中心化的学习,又称协作式学习(Collaborative Learning)或联邦学习(Federated Learning),就是针对这种场景提出的解决方案:训练数据分布在不同参与方中,通过让每个参与方根据自己的数据在自己本地训练模型,并定期与其他参与方交换、更新模型参数,来联合训练一个模型。在整个过程中,各个参与方的训练数据一直保持在本地而不会共享,因此被认为能够保护隐私。
在 2015 年,Reza Shokri 和 Vitaly Shmatikov[<sup>[8]</sup>](#ppdl) 就提出了通过协作式学习能够保护隐私。他们提出了“选择性参数更新”的方法,让每个参与方定期选择**部分**模型参数的梯度上传给一个参数服务器(参数服务器专门用来聚合梯度),同时定期从参数服务器选择**部分**模型参数下载,用来替代自己本地的模型参数。除此之外,他们还提出可以在上传部分参数梯度前对其添加一些噪声来满足**差分隐私**。
在 2017 年,为了提升训练的效果、收敛速度,并由此减少训练轮数、通信开销,Brendan McMahan 等人[<sup>[9]</sup>](#fl)提出了简单而流行的“联邦平均”(Federated Averaging)算法,其主要思想是增加参与方每轮的计算从而减少训练轮数。McMahan 等人称呼这种学习方法为“联邦学习”。联邦学习一般分为四个步骤:(1)参数服务器随机选择参与方的一个子集,并分发当前全局模型到这些参与方;(2)被选到的参与方基于全局模型和本地数据训练模型多轮;(3)参与方上传训练产生的梯度;(4)参数服务器对收到的所有梯度进行加权求和,得到新的全局模型。这四个步骤一直持续下去直到收敛。
<center> <img src="./img/12-2-2-fed.png"/></center>
<center>图 12.2.2 联邦学习的流程</center>
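下面给出一段极简的数值示意(仅为说明上述流程,并非任何联邦学习框架的实际实现;其中用线性回归的梯度下降代替真实的本地多轮训练,并按常见写法直接对本地参数做加权平均,变量名均为示意):
```python
import numpy as np

def local_train(global_w, data, lr=0.1, epochs=5):
    # 步骤 (2):参与方基于全局模型和本地数据训练多轮(这里用线性回归代替)
    x, y = data
    w = global_w.copy()
    for _ in range(epochs):
        grad = 2 * x.T @ (x @ w - y) / len(y)
        w -= lr * grad
    return w

def fedavg_round(global_w, clients):
    # 步骤 (1)(3)(4):分发全局模型、收集本地训练结果、按本地数据量加权平均
    sizes = np.array([len(y) for _, y in clients], dtype=float)
    local_ws = [local_train(global_w, c) for c in clients]
    return sum(w * (n / sizes.sum()) for w, n in zip(local_ws, sizes))

rng = np.random.default_rng(0)
true_w = np.array([2.0, -1.0])
clients = []
for n in (30, 50):
    x = rng.normal(size=(n, 2))
    clients.append((x, x @ true_w + 0.01 * rng.normal(size=n)))

global_w = np.zeros(2)
for _ in range(20):
    global_w = fedavg_round(global_w, clients)
print(global_w)  # 多轮之后应接近 true_w
```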
联邦学习作为一种新兴的学习方法,在这几年受到了工业界与学术界的很大关注。联邦学习在算法、系统、安全等方面的诸多问题也成为了目前的研究热点,例如:如何改进算法提来升联邦学习的学习效果和学习效率;如何减少模型上传和梯度下载产生的大量通讯数据;梯度数据与参数的更新值有没有可能泄露隐私。本小节接下来主要介绍联邦学习训练时的隐私问题。
Briland Hitaj 等人[<sup>[10]</sup>](#gan)在 2017 年声称联邦学习的任意参与方都能推断出其他一个参与方的私有训练数据——相比之下,中心化的训练只是把训练数据泄露给一个中心服务器,反而不会让其他参与方获取私有数据的信息。虽然有些骇人听闻,但他们提出的攻击方法确实很强大:恶意的参与者可以在训练过程中利用生成对抗网络(Generative Adversarial Network,简称 GAN)来构造出别人的私有训练数据,如下图所示。
<center> <img src="./img/12-2-3-gan.png"/></center>
<center>图 12.2.3 联邦学习的恶意参与方(右)可以利用训练时的信息构造出其他参与方(左)的私有训练数据(图片来源 Hitaj 等人<a href="https://doi.org/10.1145/3133956.3134012"><sup>[10]</sup></a>)</center>
这种攻击方法的思想是,联邦学习的参与方拥有模型的所有参数,即有白盒访问权限,同时参与方自己的行为(本地参数更新)还可以影响到其他的参与方,因此,恶意参与方可以针对其他参与方的某个类别数据,影响训练过程,从而让被影响的参与方无意泄露更多相关信息。具体来说,恶意参与方可以用 GAN 生成看起来很像目标类别(如猫)的合成图片,并故意将其标注为其他类别(如狗),并将这个假样本加入到自己的训练过程中。这样,有真正的猫的数据的参与方就会更加“努力”地用自己的数据来训练模型以分辨合成的猫与真正的猫,从而使模型包含了更多的信息。而攻击者用这个分类模型来作为 GAN 的判别模型,从而提升 GAN 的生成模型的效果。
对于这种攻击,Luca Melis 等人[<sup>[11]</sup>](#inf)在 2019 年认为其并没有泄露某个真正的训练样本,反而体现了模型的可用性好。他们认为“任何有用的机器学习模型都揭露了关于训练数据的总体信息”,所以给定一个类别,可以重构出一张图片来代表该类别的总体特点。这种攻击并无法从同类别的训练数据中区分某个真正的训练样本。
然而,对于“梯度数据与参数的更新值有没有可能泄露隐私”这个问题,Melis 等人仍然给出了肯定的回答。他们提出的攻击方法能够推断出特定样本是否出现在某个参与方的训练数据中,以及能够推断出某个参与方的训练数据集的非主要属性(而非关于某个类别的总体特点,例如,对于年龄分类模型,推断某个参与方的训练数据中的人是否戴眼镜)。这种攻击的主要思想是对联邦学习进行白盒成员推断攻击(见 12.1.2)以及属性推断攻击,后者可以视为前者的扩展,是通过多任务学习的技巧来使攻击者学习模型参数中的非主要信息。可见,联邦学习也有着棘手的隐私问题。只要让参与方知道训练过程中(或训练结束后)的模型参数,那么恶意的参与方总是可以对模型发动类似成员推断攻击、模型反转攻击的攻击。
刚刚的两种攻击主要站在参与方的角度,探讨了训练数据隐私的问题。如果站在参数服务器的角度,就会发现参数服务器知道的信息更多:除了参与方都能知道的模型参数更新值之外,还能知道每个参与方上传的梯度信息。因此,如果参数服务器是恶意的,会对隐私产生更大的影响。不过好消息是,Keith Bonawitz 等人[<sup>[12]</sup>](#secagg)在 2017 年指出,联邦学习中参数服务器可以只知道聚合后的模型参数更新值,而不必知道每个参与方上传的梯度。这样,参数服务器知道的信息就不比参与方知道的信息多。这种方法被称为“安全聚合”,是通过安全多方计算技术实现的——在不知道各方输入的情况下求和是安全多方计算的一个十分经典且比较高效的应用,相比与 12.2.2 节中用安全多方计算做复杂的计算的开销有十几万倍,做求和计算的开销只有几倍。
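下面是加法秘密共享求和的一个极简示意(仅为说明原理,并非安全聚合协议的实际实现):
```python
import secrets

MOD = 2 ** 32

def share(x):
    # 把 x 拆成两份模 MOD 的随机份额,单独一份不泄露 x 的任何信息
    r = secrets.randbelow(MOD)
    return r, (x - r) % MOD

def reconstruct(s1, s2):
    return (s1 + s2) % MOD

# 两台服务器各自持有每个输入的一份份额,先在本地把份额相加,
# 最后只合并两个“份额之和”,即可得到总和而不暴露任何单个输入。
xs = [3, 10, 25]
shares = [share(x) for x in xs]
sum_1 = sum(s for s, _ in shares) % MOD
sum_2 = sum(s for _, s in shares) % MOD
assert reconstruct(sum_1, sum_2) == sum(xs) % MOD
```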
## 小结与讨论
本小节主要围绕深度学习的训练安全与隐私问题,讨论了投毒攻击、后门攻击等攻击技术,训练时的直接与间接隐私泄露、联邦学习及其隐私泄露等隐私问题,以及水印、安全多方计算训练、安全聚合等防御技术。
看完本章内容后,可以思考以下几点问题:
- 有没有办法确保训练过程的完整性?
- 联邦学习有棘手的隐私问题,但为什么联邦学习仍被广泛视为一种保护隐私的训练方式?
- 深度学习训练时的安全问题在联邦学习中是否更严重?
## 参考文献
<div id=poison></div>
1. Ali Shafahi, W. Ronny Huang, Mahyar Najibi, Octavian Suciu, Christoph Studer, Tudor Dumitras, and Tom Goldstein. 2018. [Poison Frogs! Targeted Clean-Label Poisoning Attacks on Neural Networks](https://proceedings.neurips.cc/paper/2018/hash/22722a343513ed45f14905eb07621686-Abstract.html). In Conference on Neural Information Processing Systems (NeurIPS), 6106–6116.
<div id=trojan></div>
2. Yingqi Liu, Shiqing Ma, Yousra Aafer, Wen-Chuan Lee, Juan Zhai, Weihang Wang, and Xiangyu Zhang. 2018. [Trojaning Attack on Neural Networks](http://wp.internetsociety.org/ndss/wp-content/uploads/sites/25/2018/02/ndss2018\_03A-5\_Liu\_paper.pdf). In Network and Distributed System Security Symposium (NDSS).
<div id=cleanse></div>
3. Bolun Wang, Yuanshun Yao, Shawn Shan, Huiying Li, Bimal Viswanath, Haitao Zheng, and Ben Y. Zhao. 2019. [Neural Cleanse: Identifying and Mitigating Backdoor Attacks in Neural Networks](https://doi.org/10.1109/SP.2019.00031). In IEEE Symposium on Security and Privacy (S&P), 707–723.
<div id=watermark></div>
4. Jialong Zhang, Zhongshu Gu, Jiyong Jang, Hui Wu, Marc Ph Stoecklin, Heqing Huang, and Ian M. Molloy. 2018. [Protecting Intellectual Property of Deep Neural Networks with Watermarking](https://doi.org/10.1145/3196494.3196550). In ACM Asia Conference on Computer and Communications Security (AsiaCCS), 159–172.
<div id=watermarkbackdoor></div>
5. Yossi Adi, Carsten Baum, Moustapha Cissé, Benny Pinkas, and Joseph Keshet. 2018. [Turning Your Weakness Into a Strength: Watermarking Deep Neural Networks by Backdooring](https://www.usenix.org/conference/usenixsecurity18/presentation/adi). In USENIX Security Symposium, 1615–1631.
<div id=secureml></div>
6. Payman Mohassel and Yupeng Zhang. 2017. [SecureML: A System for Scalable Privacy-Preserving Machine Learning](https://doi.org/10.1109/SP.2017.12). In IEEE Symposium on Security and Privacy (S&P), 19–38.
<div id=memorize></div>
7. Congzheng Song, Thomas Ristenpart, and Vitaly Shmatikov. 2017. [Machine Learning Models that Remember Too Much](https://doi.org/10.1145/3133956.3134077). In ACM Conference on Computer and Communications Security (CCS), 587–601.
<div id=ppdl></div>
8. Reza Shokri and Vitaly Shmatikov. 2015. [Privacy-Preserving Deep Learning](https://doi.org/10.1145/2810103.2813687). In ACM Conference on Computer and Communications Security (CCS), 1310–1321.
<div id=fl></div>
9. Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Agüera y Arcas. 2017. [Communication-Efficient Learning of Deep Networks from Decentralized Data](http://proceedings.mlr.press/v54/mcmahan17a.html). In International Conference on Artificial Intelligence and Statistics (AISTATS), 1273–1282.
<div id=gan></div>
10. Briland Hitaj, Giuseppe Ateniese, and Fernando Pérez-Cruz. 2017. [Deep Models Under the GAN: Information Leakage from Collaborative Deep Learning](https://doi.org/10.1145/3133956.3134012). In ACM Conference on Computer and Communications Security (CCS), 603–618.
<div id=inf></div>
11. Luca Melis, Congzheng Song, Emiliano De Cristofaro, and Vitaly Shmatikov. 2019. [Exploiting Unintended Feature Leakage in Collaborative Learning](https://doi.org/10.1109/SP.2019.00029). In IEEE Symposium on Security and Privacy (S&P), 691–706.
<div id=secagg></div>
12. Kallista A. Bonawitz, Vladimir Ivanov, Ben Kreuter, Antonio Marcedone, H. Brendan McMahan, Sarvar Patel, Daniel Ramage, Aaron Segal, and Karn Seth. 2017. [Practical Secure Aggregation for Privacy-Preserving Machine Learning](https://doi.org/10.1145/3133956.3133982). In ACM Conference on Computer and Communications Security (CCS), 1175–1191.
|
AI-System/Textbook/第12章-人工智能安全与隐私/12.2-人工智能训练安全与隐私.md/0
|
{
"file_path": "AI-System/Textbook/第12章-人工智能安全与隐私/12.2-人工智能训练安全与隐私.md",
"repo_id": "AI-System",
"token_count": 13715
}
| 12 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
## 2.2 神经网络的训练
本小节主要围绕神经网络的训练的训练流程,损失函数,梯度下降和反向传播展开。
- [2.2 神经网络的训练](#22-神经网络的训练)
- [2.2.1 基本训练流程](#221-基本训练流程)
- [前提条件](#前提条件)
- [步骤](#步骤)
- [2.2.2 损失函数](#222-损失函数)
- [概念](#概念)
- [损失函数的作用](#损失函数的作用)
- [均方差函数](#均方差函数)
- [交叉熵损失函数](#交叉熵损失函数)
- [2.2.3 梯度下降](#223-梯度下降)
- [从自然现象中理解梯度下降](#从自然现象中理解梯度下降)
- [梯度下降的数学理解](#梯度下降的数学理解)
- [梯度下降的三要素](#梯度下降的三要素)
- [为什么说是“梯度下降”?](#为什么说是梯度下降)
- [学习率η的选择](#学习率η的选择)
- [2.2.4 反向传播](#224-反向传播)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
### 2.2.1 基本训练流程
从真正的“零”开始学习神经网络时,我没有看到过任何一个流程图来讲述训练过程,大神们写书或者博客时都忽略了这一点,图 2.2.1 是一个简单的流程图。
<img src="./img/TrainFlow.png" />
图 2.2.1 神经网络训练流程图
#### 前提条件
1. 首先是我们已经有了训练数据;
2. 我们已经根据数据的规模、领域,建立了神经网络的基本结构,比如有几层,每一层有几个神经元;
3. 定义好损失函数来合理地计算误差。
#### 步骤
假设我们有表 2.2.1 所示的训练数据样本。
表 2.2.1 训练样本示例
|Id|$x_1$|$x_2$|$x_3$|$Y$|
|---|---|---|---|---|
|1|0.5|1.4|2.7|3|
|2|0.4|1.3|2.5|5|
|3|0.1|1.5|2.3|9|
|4|0.5|1.7|2.9|1|
其中,$x_1,x_2,x_3$ 是每一个样本数据的三个特征值,$Y$ 是样本的真实结果值:
1. 随机初始化权重矩阵,可以根据正态分布等来初始化。这一步可以叫做“猜”,但不是瞎猜;
2. 拿一个或一批数据 $X$ 作为输入,带入权重矩阵 $W$ 中计算 $Z=W*X$,再通过激活函数传入下一层 $A = activation(Z)$,最终得到预测值。在本例中,我们先用 $Id_1$ 的数据输入到矩阵中,得到一个 $A$ 值,假设 $A=5$;
3. 拿到 $Id_1$ 样本的真实值 $Y=3$;
4. 计算损失,假设用均方差函数 $Loss = (A-Y)^2=(5-3)^2=4$;
5. 根据一些神奇的数学公式(反向微分),把 $Loss=4$ 这个值用大喇叭喊话,告诉在前面计算的步骤中,影响 $A=5$ 这个值的每一个权重矩阵 $W$,然后对这些权重矩阵中的值做一个微小的修改(当然是向着好的方向修改);
6. 用 $Id_2$ 样本作为输入再次训练(Go to 2);
7. 这样不断地迭代下去,直到以下一个或几个条件满足就停止训练:损失函数值非常小;准确度满足了要求;迭代到了指定的次数。
训练完成后,我们会把这个神经网络中的结构和权重矩阵的值导出来,形成一个计算图(就是矩阵运算加上激活函数)模型,然后嵌入到任何可以识别/调用这个模型的应用程序中,根据输入的值进行运算,输出预测值。
所以,神经网络的训练需要三个概念的支持,依次是:
- 损失函数
- 梯度下降
- 反向传播
### 2.2.2 损失函数
#### 概念
在各种材料中经常看到的中英文词汇有:误差,偏差,Error,Cost,Loss,损失,代价......意思都差不多,在本书中,使用“损失函数”和“Loss Function”这两个词汇,具体的损失函数符号用 $J$ 来表示,误差值用 $loss$ 表示。
“损失”就是所有样本的“误差”的总和,亦即($m$ 为样本数):
$$损失 = \sum^m_{i=1}误差_i$$
$$J = \sum_{i=1}^m loss_i$$
#### 损失函数的作用
损失函数的作用,就是计算神经网络每次迭代的前向计算结果与真实值的差距,从而指导下一步的训练向正确的方向进行。
如果我们把神经网络的参数调整到完全满足独立样本的输出误差为 $0$,通常会令其它样本的误差变得更大,这样作为误差之和的损失函数值,就会变得更大。所以,我们通常会在根据某个样本的误差调整权重后,计算一下整体样本的损失函数值,来判定网络是不是已经训练到了可接受的状态。
损失函数有两个作用:
1. 用损失函数计算预测值和标签值(真实值)的误差;
2. 损失函数值达到一个满意的值就停止训练。
神经网络常用的损失函数有:
- 均方差函数,主要用于回归
- 交叉熵函数,主要用于分类
二者都是非负函数,极值在底部,用梯度下降法可以求解。
#### 均方差函数
MSE - Mean Square Error。
该函数就是最直观的一个损失函数了,计算预测值和真实值之间的欧式距离。预测值和真实值越接近,两者的均方差就越小。
均方差函数常用于线性回归(linear regression),即函数拟合(function fitting)。公式如下:
$$
loss = {1 \over 2}(z-y)^2 \tag{单样本}
$$
$$
J=\frac{1}{2m} \sum_{i=1}^m (z_i-y_i)^2 \tag{多样本,m为样本个数}
$$
只有两个参数 $(w,b)$ 的损失函数值的 3D 示意图如图 2.2.2。
X 坐标为 $w$,Y 坐标为 $b$,针对每一个 $(w,b)$ 的组合计算出一个损失函数值,用三维图的高度 Z 来表示这个损失函数值。下图中的底部并非一个平面,而是一个有些下凹的曲面,只不过曲率较小。
<img src="./img/lossfunction3d.png" width="500" />
图 2.2.2 $w$ 和 $b$ 同时变化时的损失函数形态
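下面用一小段示意代码计算均方差损失(变量名为假设,与上面的公式一一对应):
```python
import numpy as np
def mse_loss(z, y):
    """多样本均方差:J = 1/(2m) * sum((z_i - y_i)^2)"""
    z, y = np.asarray(z, dtype=float), np.asarray(y, dtype=float)
    return np.mean((z - y) ** 2) / 2
print(mse_loss([5.0], [3.0]))  # 单样本:(5-3)^2 / 2 = 2.0
```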
#### 交叉熵损失函数
单个样本的情况的交叉熵函数:
$$loss =- \sum_{j=1}^n y_j \ln a_j$$
其中,$n$ 并不是样本个数,而是分类个数。
对于批量样本的交叉熵计算公式是:
$$J =- \sum_{i=1}^m \sum_{j=1}^n y_{ij} \ln a_{ij}$$
$m$ 是样本数,$n$ 是分类数。
有一类特殊问题,就是事件只有两种情况发生的可能,比如“学会了”和“没学会”,称为 $0/1$ 分类或二分类。对于这类问题,由于$n=2,y_1=1-y_2,a_1=1-a_2$,所以交叉熵可以简化为:
$$loss =-[y \ln a + (1-y) \ln (1-a)] $$
二分类对于批量样本的交叉熵计算公式是:
$$J= - \sum_{i=1}^m [y_i \ln a_i + (1-y_i) \ln (1-a_i)] $$
**交叉熵函数常用于逻辑回归(logistic regression),也就是分类(classification)。**
<img src="./img/crossentropy2.png" width="500" />
图 2.2.3 二分类交叉熵损失函数图
从图 2.2.3 可以看到:
- 当分类为正类时,即 $y=1$ 的红色曲线,当预测值 $a$ 也为 1 时,损失函数值最小为 0;随着预测值 $a$ 变小,损失函数值会变大;
- 当分类为负类时,即 $y=0$ 的蓝色曲线,当预测值 $a$ 也为 0 时,损失函数值最小为 0;随着预测值 $a$ 变大,损失函数值会变大;
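二分类交叉熵的计算可以写成如下示意代码(变量名为假设,公式与上面批量样本的形式一致):
```python
import numpy as np
def binary_cross_entropy(a, y, eps=1e-12):
    """二分类交叉熵:J = -sum(y*ln(a) + (1-y)*ln(1-a))"""
    a = np.clip(np.asarray(a, dtype=float), eps, 1 - eps)  # 裁剪以避免 log(0)
    y = np.asarray(y, dtype=float)
    return -np.sum(y * np.log(a) + (1 - y) * np.log(1 - a))
print(binary_cross_entropy([0.9, 0.1], [1, 0]))  # 预测接近标签,损失约 0.211
print(binary_cross_entropy([0.6, 0.4], [1, 0]))  # 预测偏离标签,损失约 1.022
```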
### 2.2.3 梯度下降
#### 从自然现象中理解梯度下降
在自然界中,梯度下降的最好例子,就是泉水下山的过程:
1. 水受重力影响,会从当前位置沿着最陡峭的方向流动,有时会形成瀑布(梯度下降);
2. 水流下山的路径不是唯一的,在同一个地点,有可能有多个位置具有同样的陡峭程度,而造成了分流(可以得到多个解);
3. 遇到坑洼地区,有可能形成湖泊,而终止下山过程(不能得到全局最优解,而是局部最优解)。
#### 梯度下降的数学理解
梯度下降的数学公式:
$$\theta_{n+1} = \theta_{n} - \eta \cdot \nabla J(\theta)$$
其中:
- $\theta_{n+1}$:下一个参数值;
- $\theta_n$:当前参数值;
- $-$:减号,梯度的反向;
- $\eta$:学习率或步长,控制每一步走的距离,不要太快以免错过了最佳景点,不要太慢以免时间太长;
- $\nabla$:梯度,函数当前位置的最快上升点;
- $J(\theta)$:函数。
对应到上面的例子中,$\theta$ 就是 $(w,b)$ 的组合。
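把上面的数学公式直接翻译成代码,就是如下的迭代过程(示意代码,目标函数与数值均为假设):
```python
def gradient_descent(grad, theta0, eta, steps):
    """theta_{n+1} = theta_n - eta * grad(theta_n)"""
    theta = theta0
    for _ in range(steps):
        theta = theta - eta * grad(theta)
    return theta
# 以 J(theta) = theta^2 为例,梯度为 2*theta,极小值在 theta = 0 处
print(gradient_descent(lambda t: 2 * t, theta0=3.0, eta=0.4, steps=10))  # 逐步逼近 0
```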
#### 梯度下降的三要素
1. 当前点;
2. 方向;
3. 步长。
#### 为什么说是“梯度下降”?
“梯度下降”包含了两层含义:
1. 梯度:函数当前位置的最快上升点;
2. 下降:与导数相反的方向,用数学语言描述就是那个减号。
亦即与上升相反的方向运动,就是下降。
<img src="./img/gd_concept.png" width="500" />
图 2.2.4 梯度下降的步骤
图 2.2.4 解释了在函数极值点的两侧做梯度下降的计算过程,梯度下降的目的就是使得x值向极值点逼近。
#### 学习率η的选择
在公式表达时,学习率被表示为$\eta$。在代码里,我们把学习率定义为`learning_rate`,或者`eta`。针对上面的例子,试验不同的学习率对迭代情况的影响,如表 2.2.2 所示。
表 2.2.2 不同学习率对迭代情况的影响
|学习率|迭代路线图|说明|
|---|---|---|
|1.0|<img src="./img/gd100.png" width="500"/>|学习率太大,迭代的情况很糟糕,在一条水平线上跳来跳去,永远也不能下降。|
|0.8|<img src="./img/gd080.png" width="500"/>|学习率大,会有这种左右跳跃的情况发生,这不利于神经网络的训练。|
|0.4|<img src="./img/gd040.png" width="500"/>|学习率合适,损失值会从单侧下降,4步以后基本接近了理想值。|
|0.1|<img src="./img/gd010.png" width="500"/>|学习率较小,损失值会从单侧下降,但下降速度非常慢,10步了还没有到达理想状态。|
### 2.2.4 反向传播
假设有一个黑盒子,输入和输出有一定的对应关系,我们要破解这个黑盒子!于是,我们会有如下破解流程:
1. 记录下所有输入值和输出值,如表2.2.3。
表 2.2.3 样本数据表
|样本ID|输入(特征值)|输出(标签)|
|:---:|--|--|
|1|1|2.21|
|2|1.1|2.431|
|3|1.2|2.652|
|4|2|4.42|
2. 搭建一个神经网络,我们先假设这个黑盒子的逻辑是:$z=w_1 x + w_2 x^2$;
3. 给出初始权重值,$w_1=1, w_2=1$;
4. 输入1,根据 $z=x + x^2$ 得到输出为2,而实际的输出值是2.21;
5. 计算误差值为 $loss=2-2.21=-0.21$;
6. 调整权重值,假设只变动 $w_1$,比如 $w_1 = w_1 - \eta * loss$,令学习率 $\eta=0.1$,则 $w_1 = 1 - 0.1*(-0.21)=1.021$
7. 再输入下一个样本1.1,得到的输出为$z=1.021*1.1+1.1^2=2.3331$
8. 实际输出为2.431,则误差值为 $2.3331-2.431=-0.0979$;
9. 再次调整权重值,$w_1 = w_1 - \eta * loss = 1.021 - 0.1*(-0.0979)=1.03$
......
依此类推,重复 4-9 过程,直到损失函数值小于一个指标,比如 $0.001$,我们就可以认为网络训练完毕,黑盒子“破解”了,实际是被复制了,因为神经网络并不能得到黑盒子里的真实函数体,而只是近似模拟。
从上面的过程可以看出,如果误差值是正数,我们就把权重降低一些;如果误差值为负数,则升高权重。
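文中第 4~9 步的手工计算可以用如下示意代码复现(这是文中简化的更新规则,并非严格的反向传播实现):
```python
samples = [(1.0, 2.21), (1.1, 2.431)]  # (输入, 真实输出)
w1, w2, eta = 1.0, 1.0, 0.1
for x, y in samples:
    z = w1 * x + w2 * x * x       # 前向计算:z = w1*x + w2*x^2
    loss = z - y                  # 误差值
    w1 = w1 - eta * loss          # 只调整 w1
    print(f"x={x}, z={z:.4f}, loss={loss:.4f}, w1={w1:.4f}")
# 输出与文中的手算一致:
# x=1.0, z=2.0000, loss=-0.2100, w1=1.0210
# x=1.1, z=2.3331, loss=-0.0979, w1=1.0308
```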
## 小结与讨论
本小节主要介绍了神经网络的训练的训练流程,损失函数,梯度下降和反向传播。
请读者思考,反向传播过程是否有好办法通过工具自动化求解?
## 参考文献
1. 《智能之门》,胡晓武等著,高等教育出版社
2. Duchi, J., Hazan, E., & Singer, Y. (2011). Adaptive subgradient methods for online learning and stochastic optimization. Journal of Machine Learning Research, 12(Jul), 2121-2159.
3. Zeiler, M. D. (2012). ADADELTA: an adaptive learning rate method. arXiv preprint arXiv:1212.5701.
4. Tieleman, T., & Hinton, G. (2012). Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural networks for machine learning, 4(2), 26-31.
5. Kingma, D. P., & Ba, J. (2014). Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.
6. 周志华老师的西瓜书《机器学习》
7. Chawla N V, Bowyer K W, Hall L O, et al. SMOTE: synthetic minority over-sampling technique[J]. Journal of Artificial Intelligence Research, 2002, 16(1):321-357.
8. Inoue H. Data Augmentation by Pairing Samples for Images Classification[J]. 2018.
9. Zhang H, Cisse M, Dauphin Y N, et al. mixup: Beyond Empirical Risk Minimization[J]. 2017.
10. 《深度学习》- 伊恩·古德费洛
11. Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun, Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks. Link: https://arxiv.org/pdf/1506.01497v3.pdf
|
AI-System/Textbook/第2章-神经网络基础/2.2-神经网络的训练.md/0
|
{
"file_path": "AI-System/Textbook/第2章-神经网络基础/2.2-神经网络的训练.md",
"repo_id": "AI-System",
"token_count": 8766
}
| 13 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 5.2 计算图优化
- [5.2 计算图优化](#52-计算图优化)
- [5.2.1 算术表达式化简](#521-算术表达式化简)
- [5.2.2 公共子表达式消除](#522-公共子表达式消除)
- [5.2.3 常数传播](#523-常数传播)
- [5.2.4 矩阵乘自动融合](#524-矩阵乘自动融合)
- [5.2.5 算子融合](#525-算子融合)
- [5.2.6 子图替换和随机子图替换](#526-子图替换和随机子图替换)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
正如上一节介绍到,计算图作为连接深度学习框架和前端语言的主要中间表达,被目前主流框架如TensorFlow和PyTorch所使用或者作为标准文件格式来导出模型。
计算图是一个有向无环图(DAG),节点表示算子,边表示张量或者控制边(control flow),节点之间的依赖关系表示每个算子的执行顺序。
计算图的优化被定义为作为在计算图上的函数,通过一系列等价或者近似的优化操作将输入的计算图变换为一个新的计算图。其目标是通过这样的图变换来化简计算图,从而降低计算复杂度或内存开销。在深度神经网络编译器中,有大量优化方法可以被表示为计算图的优化,包括一些在传统程序语言编译器中常用的优化方法。图5-2-1中列举了一些常见的图优化方法,本节会围绕这几种不同类型的优化作一个简要介绍。
<center> <img src="./img/5-2-1-graph_opt.png" /></center>
<center>图5-2-1. 深度神经网络编译器中常见的计算图优化方法</center>
## 5.2.1 算术表达式化简
一类最常见的计算图优化就是算术表达式化简:计算图中的一些子图所对应的算术表达式,在数学上有等价的化简方法来简化表达式,这反映在计算图上就是将子图转化成一个更简单的子图(如更少的节点),从而降低计算量。图5-2-2展示了一个利用算术表达式化简计算图的例子,左边的子图包含了两个算子:Const算子(返回元素值为0的常量张量)和Mul算子(计算两个相同形状的张量的元素乘积),通过表达式化简,这个子图可以直接被化简成右边的只包括Const算子的子图。表5-2-1列举了一些常见的算术表达式化简规则,其中X和Y表示张量,0和1表示常量张量,其它操作符均对应张量上的算子。
<center> <img src="./img/5-2-2-simp.png" /></center>
<center>图5-2-2. 一个利用算术表达式化简计算图的例子</center>
<center>
| 变化前 | 变换后 |
| :-----| ----: |
| X * 0 | 0 |
| X * Broadcast(0) | Broadcast(0)|
| X * 1 | X |
| X * Broadcast(1) | X |
| X + 0 | X |
| X + Broadcast(0) | X |
| Log(Exp(X)/Y) | X - Log(Y) |
</center>
<center>表5-2-1. 一些常见的算术表达式化简规则</center>
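这类规则通常实现为对计算图子图的模式匹配与重写,下面是一个高度简化的示意(图与算子的字典表示均为本文的假设,并非某个具体框架的接口):
```python
def simplify(node):
    """对单个节点应用 "X * 1 -> X" 与 "X + 0 -> X" 两条化简规则的示意。"""
    def is_const(n, v):
        return n.get("op") == "Const" and n.get("value") == v
    op, inputs = node["op"], node["inputs"]
    if op == "Mul" and any(is_const(i, 1) for i in inputs):
        return next(i for i in inputs if not is_const(i, 1))
    if op == "Add" and any(is_const(i, 0) for i in inputs):
        return next(i for i in inputs if not is_const(i, 0))
    return node
x = {"op": "Placeholder", "inputs": [], "name": "X"}
one = {"op": "Const", "value": 1, "inputs": []}
print(simplify({"op": "Mul", "inputs": [x, one]})["name"])  # 输出 X,即 X * 1 被化简为 X
```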
## 5.2.2 公共子表达式消除
公共子表达式消除(Common Subexpression Elimination, CSE)也是经典编译优化中常用的优化。其目的是找到程序中等价的计算表达式,然后通过复用结果的方式消除其它冗余表达式的计算。同理,在计算图中,公共子表达式消除就等同于寻找并消除冗余的计算子图。一个简单的实现算法是按照图的拓扑序(保证访问一个节点时,其前继节点均已经被访问)遍历图中节点,每个节点以输入张量和节点类型的组合作为键值缓存起来;后续如果有节点具有相同的键值,则该节点可以被消除,并把依赖其输出的边重新连接到缓存中等价节点的输出上。图5-2-3为一个公共子表达式消除的示例,图左边蓝色椭圆中的节点为不在缓存中的节点,也就是必须要执行的节点,而红色椭圆中的节点的计算和前面蓝色节点的计算重复,也就是其键值已经存在于缓存中,因此可以安全地消除这些节点,于是最终左边的计算图可以被优化成右边的计算图。
<center> <img src="./img/5-2-3-cse.png" /></center>
<center>图5-2-3. 公共子表达式消除的示例</center>
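上述“拓扑序遍历 + 键值缓存”的思路可以用如下示意代码表达(图的表示方式为本文假设):
```python
def eliminate_common_subexpressions(nodes_in_topo_order):
    """nodes 按拓扑序给出,每个节点形如 {"id": .., "op": .., "inputs": [前继节点id, ...]}。"""
    cache, replaced, result = {}, {}, []
    for node in nodes_in_topo_order:
        # 输入若指向已被消除的节点,先重定向到其等价节点
        inputs = tuple(replaced.get(i, i) for i in node["inputs"])
        key = (node["op"], inputs)
        if key in cache:
            replaced[node["id"]] = cache[key]   # 冗余节点:记录替换关系并丢弃
        else:
            cache[key] = node["id"]
            result.append({**node, "inputs": list(inputs)})
    return result
```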
## 5.2.3 常数传播
常数传播(constant propagation),又叫常数折叠(constant folding),也是经典编译优化中的常用优化,其主要方法是在编译期提前计算出常数表达式的值,并用计算出的值来替换原来的表达式,从而节省运行时的开销。在计算图中,如果一个节点的所有输入张量都是常数张量的话,那么这个节点就可以在编译期提前计算出输出张量,并被替换为一个新的常数张量。图5-2-4为一个常数传播的示例,其中红色方框内的两个节点都可以被提前计算出来,因此可以在编译期优化掉。值得注意的是,常数传播需要编译器具有计算的能力,甚至对于一些较大的算子还需要能够在加速硬件(如GPU)上计算,否则优化的过程就会非常慢。常数传播的优化在深度学习尤其是模型推理的时候非常有用,因为在推理时,模型中的参数张量全部固定为常数张量,大量计算可以在编译期提前算好,极大地减少了推理时的计算开销。但是,在深度学习的场景中,常数传播有时候也会带来负优化,如增加内存占用甚至计算时间:一个典型的例子就是一个标量常数张量后面跟一个Broadcast算子时,如果做了常数传播就会增加内存占用;如果后面是访存密集型的算子的话,也会增加内存压力,从而增加计算时间。
<center> <img src="./img/5-2-4-cf.png" /></center>
<center>图5-2-4. 常数传播的示例</center>
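常数传播同样可以看作一种基于遍历的图重写,下面给出一个只处理两个常数输入的 Add/Mul 的极简示意(节点表示为本文假设):
```python
import numpy as np
def constant_fold(node):
    """若节点的所有输入都是常数,则在编译期直接求值并替换为 Const 节点。"""
    if node["op"] in ("Add", "Mul") and all(i["op"] == "Const" for i in node["inputs"]):
        a, b = (np.asarray(i["value"]) for i in node["inputs"])
        value = a + b if node["op"] == "Add" else a * b
        return {"op": "Const", "value": value, "inputs": []}
    return node
n = {"op": "Add", "inputs": [{"op": "Const", "value": 1, "inputs": []},
                             {"op": "Const", "value": 2, "inputs": []}]}
print(constant_fold(n)["value"])  # 3
```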
## 5.2.4 矩阵乘自动融合
矩阵乘在深度学习计算图中被广泛应用,如常见的神经网络的线性层、循环神经网络的单元层、注意力机制层等都有大量的矩阵乘法。在同一个网络里,经常会出现形状相同的矩阵乘法,根据一些矩阵的等价规则,如果把这些矩阵乘算子融合成一个大的矩阵乘算子,可以更好地利用GPU的算力,从而加速模型计算。图5-2-5为其中一种常见的矩阵乘自动融合的示例:如果有两个矩阵乘法共享同一个输入张量(图中方框内左侧),我们就可以自动把另外两个输入张量拼接起来,形成一个大的矩阵乘算子(图中方框内右侧),其计算的结果刚好是原算子计算结果的拼接。利用这种规则,图中最右侧的GRU网络中的两组矩阵乘算子可以分别融合成两个大的矩阵乘算子。类似的融合规则还有BatchMatMul,可以把两个形状相同的矩阵乘算子合并成一个新的BatchMatMul算子。
<center> <img src="./img/5-2-5-gemm.png" /></center>
<center>图5-2-5. 矩阵乘自动融合的示例</center>
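这种融合的等价性可以用 numpy 简单验证(示意代码,矩阵形状与数值均为假设):
```python
import numpy as np
x = np.random.rand(4, 8)           # 两个矩阵乘共享的输入
w1, w2 = np.random.rand(8, 16), np.random.rand(8, 16)
# 融合前:两次独立的矩阵乘
y1, y2 = x @ w1, x @ w2
# 融合后:把 w1、w2 沿列拼接,只做一次更大的矩阵乘,再把结果切开
y_fused = x @ np.concatenate([w1, w2], axis=1)
assert np.allclose(y_fused[:, :16], y1) and np.allclose(y_fused[:, 16:], y2)
```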
## 5.2.5 算子融合
上小节中介绍的算子融合方法是矩阵乘算子特有的。在深度学习模型中,对大量小算子进行融合可以提高GPU的利用率,带来减少内核启动开销、减少访存开销等好处。例如,Element-wise的算子(如Add、Mul、Sigmoid、Relu等)计算量非常小,主要瓶颈在内存的读取和写出上,如果前后的算子能够融合起来,前面算子的计算结果就可以直接被后面的算子在寄存器中使用,避免数据在内存中的读写,从而提高整体计算效率。图5-2-6展示了一个Mul算子和一个Add算子融合的示例,图5-2-7为其对应的融合前后的CUDA代码示例。在没有融合前,执行两个算子需要启动两个GPU内核,前一个内核的计算结果需要写出到主存中,下一个内核计算时需要再次从主存读取到计算核上;而融合后的代码只需要启动一个内核,并且可以有效复用中间计算结果。
<center> <img src="./img/5-2-6-fusion.png" /></center>
<center>图5-2-6. 算子融合的示例</center>
```
//融合前为两个单独内核函数
__global__ void mul(float *x0, float *x1, float *y)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
y[idx] = x0[idx] * x1[idx];
}
__global__ void add(float *x0, float *x1, float *y)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
y[idx] = x0[idx] + x1[idx];
}
```
```
//融合后为一个单独内核函数
__global__ void fused_muladd(float *x0, float *x1, float *x2, float *y)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
y[idx] = x0[idx] * x1[idx] + x2[idx];
}
```
<center>图5-2-7. 算子融合的CUDA代码示例</center>
## 5.2.6 子图替换和随机子图替换
算子融合在深度学习计算中能够带来较好的性能优化,然而在实际的计算图中有太多算子无法做到自动的算子融合,主要原因包括算子的内核实现逻辑不透明、算子之间无法在特定加速器上融合等等。为了在这些情况下还能进行优化,用户经常会实现一些手工融合的算子来提升性能。编译器在计算图中识别出一个子图并替换成一个等价的新算子或新子图的过程就是子图替换优化。图5-2-8展示的是基于规则的子图替换示例,需要在系统中注册一系列替换规则,如Conv和Relu的子图可以替换为Conv+Relu融合后的算子。
<center> <img src="./img/5-2-8-replace.png" /></center>
<center>图5-2-8. 子图替换的示例</center>
## 小结与讨论
本章我们主要围绕计算图优化,算术表达式化简,公共子表达式消除,常数传播,矩阵乘自动融合,算子融合,子图替换和随机子图替换展开介绍了计算图中常见的图优化方法,这些优化方法在传统编译器也大部分有类似的实现,感兴趣的同学可以参考传统编译器的材料进行进一步学习。
请读者思考在计算图上做的这些优化和传统编译器上的优化有何不同?你还能想到哪些计算图上的优化方法?
## 参考文献
1. https://en.wikipedia.org/wiki/Optimizing_compiler
2. https://en.wikipedia.org/wiki/Common_subexpression_elimination
3. https://en.wikipedia.org/wiki/Constant_folding
4. TensorFlow Graph Optimizations:https://www.tensorflow.org/guide/graph_optimization
5. Graph Optimizations in ONNX Runtimes: https://onnxruntime.ai/docs/performance/graph-optimizations.html
|
AI-System/Textbook/第5章-深度学习框架的编译与优化/5.2-计算图优化.md/0
|
{
"file_path": "AI-System/Textbook/第5章-深度学习框架的编译与优化/5.2-计算图优化.md",
"repo_id": "AI-System",
"token_count": 7069
}
| 14 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 6.3 深度学习并行训练同步方式
- [6.3 深度学习并行训练同步方式](#63-深度学习并行训练同步方式)
- [6.3.1 同步并行](#631-同步并行)
- [6.3.2 异步并行](#632-异步并行)
- [6.3.2 半同步并行](#632-半同步并行)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
在多设备进行并行训练时,可以采用不同的一致性模型,对应其间不同的通信协调方式,大致可分为:同步并行、异步并行、半同步并行。
## 6.3.1 同步并行
<center><img src="./img/image22.png" width="600" height="" /></center>
<center>图6-3-1: 同步并行示意图 (<a href=https://ucbrise.github.io/cs294-ai-sys-fa19/assets/lectures/lec06/06_distributed_training.pdf>图片来源</a>)</center>
同步并行是采用具有同步障的通信协调并行。例如在图6-3-1中,每个工作节点(Worker)在进行了一些本地计算之后需要与其它工作节点通信协调。在通信协调的过程中,所有的工作节点都必须等全部工作节点完成了本次通信之后才能继续下一轮本地计算。阻止工作节点在全部通信完成之前继续下一轮计算的机制就是同步障。这样的同步方式也称为整体同步并行(Bulk Synchronous Parallel,BSP),其优点是本地计算和通信同步严格顺序化,能够容易地保证并行的执行逻辑与串行相同。但完成本地计算更早的工作节点需要等待其它工作节点处理,造成了计算硬件的浪费。
## 6.3.2 异步并行
采用不含同步障的通信协调并行。相比于同步并行执行,异步并行执行下各个工作节点完全采用灵活的方式协调。如下图所示,时间轴上并没有统一的时刻用于通信或者本地计算,而是工作节点各自分别随时处理自己收到的消息,并且随时发出所需的消息,以此完成节点间的协调。这样做的好处是没有全局同步障带来的相互等待开销。
<center><img src="./img/image23.png" width="600" height="" /></center>
<p style="text-align: center;">图6-3-2: 异步并行示意图 (<a href=https://ucbrise.github.io/cs294-ai-sys-fa19/assets/lectures/lec06/06_distributed_training.pdf>图片来源</a>)</p>
## 6.3.3 半同步并行
采用具有限定的宽松同步障的通信协调并行。半同步的基本思路是在严格同步和完全不受限制的异步并行之间取一个折中方案——受到限制的宽松同步。例如,
在 Stale Synchronous Parallel (SSP)中,系统跟踪各个工作节点的进度并维护最慢进度,通过动态限制进度推进的范围,保证最快进度和最慢进度的差距在一个预定的范围内。这个范围就称为“新旧差阈值”(staleness threshold)。如下图所示,在新旧差阈值为3时,最快进度的工作节点会停下来等待最慢的工作节点。
<center><img src="./img/image24.png" width="600" height="" /></center>
<p style="text-align: center;"> 图6-3-3: 半同步SSP示意图 (<a href=https://ieeexplore.ieee.org/abstract/document/8885215>图片来源</a>) </p>
半同步并行通过限制更新的不一致程度,使收敛性居于同步并行和异步并行之间。除了同步时机的区别,目前并行同步方式的研究也涉及同步对象的选择,例如相比于全局所有工作节点参与的同步,亦有研究提出只与部分工作节点同步的方式(Ce Zhang et al.)。
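SSP 对进度差距的限制可以用如下示意代码表达(仅为示意,变量名均为假设):
```python
def can_proceed(next_step, all_worker_steps, staleness_threshold):
    """SSP:若推进到 next_step 后与最慢节点的进度差超过阈值,则必须等待。"""
    slowest = min(all_worker_steps)
    return next_step - slowest <= staleness_threshold
steps = [12, 10, 9, 9]             # 各工作节点当前的迭代步数
print(can_proceed(12, steps, 3))   # True:12 - 9 = 3,仍在阈值内
print(can_proceed(13, steps, 3))   # False:必须等待最慢的工作节点追上来
```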
## 小结与讨论
本节通过介绍机器学习中不同的同步、异步、半同步等并行通信方式,以比较的方式解释其性能和收敛性的区别。
## 参考文献
<div id="SSP"></div>
1. [Zhao, Xing, et al. Dynamic Stale Synchronous Parallel Distributed Training for Deep Learning (ICDCS’19)](https://ieeexplore.ieee.org/abstract/document/8885215)
<div id="AISysTraining"></div>
2. [Joseph E. Gonzalez AI-Systems Distributed Training](https://ucbrise.github.io/cs294-ai-sys-fa19/assets/lectures/lec06/06_distributed_training.pdf)
|
AI-System/Textbook/第6章-分布式训练算法与系统/6.3-深度学习并行训练同步方式.md/0
|
{
"file_path": "AI-System/Textbook/第6章-分布式训练算法与系统/6.3-深度学习并行训练同步方式.md",
"repo_id": "AI-System",
"token_count": 2534
}
| 15 |
#!/usr/bin/python
import azureml.core
from azureml.core import Workspace
from dotenv import set_key, get_key, find_dotenv
from pathlib import Path
from AIHelpers.utilities import get_auth
import sys, getopt
def main(argv):
    try:
        # getopt only supports single-character short options, so the multi-character
        # flags (-rg, -wn, -wr) are exposed here through their long-option forms.
        opts, args = getopt.getopt(
            argv,
            "hs:",
            ["subscription_id=", "resource_group=", "workspace_name=", "workspace_region="],
        )
    except getopt.GetoptError:
        print("aml_creation.py -s <subscription_id> --resource_group <resource_group> "
              "--workspace_name <workspace_name> --workspace_region <workspace_region>")
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print("aml_creation.py -s <subscription_id> --resource_group <resource_group> "
                  "--workspace_name <workspace_name> --workspace_region <workspace_region>")
            sys.exit()
        elif opt in ("-s", "--subscription_id"):
            subscription_id = arg
        elif opt == "--resource_group":
            resource_group = arg
        elif opt == "--workspace_name":
            workspace_name = arg
        elif opt == "--workspace_region":
            workspace_region = arg
env_path = find_dotenv()
if env_path == "":
Path(".env").touch()
env_path = find_dotenv()
ws = Workspace.create(
name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group,
location=workspace_region,
create_resource_group=True,
auth=get_auth(env_path),
exist_ok=True,
)
if __name__ == "__main__":
print("AML SDK Version:", azureml.core.VERSION)
main(sys.argv[1:])
# Azure resources
subscription_id = "{{cookiecutter.subscription_id}}"
resource_group = "{{cookiecutter.resource_group}}"
workspace_name = "{{cookiecutter.workspace_name}}"
workspace_region = "{{cookiecutter.workspace_region}}"
|
AI/.ci/scripts/aml_creation.py/0
|
{
"file_path": "AI/.ci/scripts/aml_creation.py",
"repo_id": "AI",
"token_count": 720
}
| 16 |
parameters:
Agent: Hosted Ubuntu 1604
Demands: "python3"
jobTimeoutInMinutes: 180
jobDisplayName: 'defaultDisplayName'
TridentWorkloadTypeShort: #
DeployLocation: #
DefaultWorkingDirectory: $(System.DefaultWorkingDirectory)
Template: ai-architecture-template.yml
ProjectLocation: "notebooks/"
PythonPath: "."
doCleanup: True
flighting_release: true
flighting_preview: true
flighting_master: true
sql_server_name: "x"
sql_database_name: "x"
sql_username: "x"
sql_password: "x"
TestPostfix: "-stable"
data_prep: true
train: true
post_cleanup: true
container_name: "x"
account_name: "x"
account_key: "x"
datastore_rg: "x"
stages:
- template: ../stage/deploy_notebooks_stage_v4.yml
parameters:
Agent: ${{parameters.Agent}}
Demands: ${{parameters.Demands}}
stageName: 'stable'
jobDisplayName: ${{parameters.jobDisplayName}}
jobTimeoutInMinutes: ${{parameters.jobTimeoutInMinutes}}
TridentWorkloadTypeShort: ${{parameters.TridentWorkloadTypeShort}}
DeployLocation: ${{parameters.DeployLocation}}
TestPostfix: ${{parameters.TestPostfix}}
DefaultWorkingDirectory: ${{parameters.DeployLocation}}
Template: ${{parameters.Template}}
ProjectLocation: ${{parameters.ProjectLocation}}
PythonPath: ${{parameters.PythonPath}}
doCleanup: ${{parameters.doCleanup}}
sql_server_name: ${{parameters.sql_server_name}}
sql_database_name: ${{parameters.sql_database_name}}
sql_username: ${{parameters.sql_username}}
sql_password: ${{parameters.sql_password}}
data_prep: ${{parameters.data_prep}}
train: ${{parameters.train}}
post_cleanup: ${{parameters.post_cleanup}}
container_name: ${{parameters.container_name}}
account_name: ${{parameters.account_name}}
account_key: ${{parameters.account_key}}
datastore_rg: ${{parameters.datastore_rg}}
- ${{ if eq(parameters.flighting_release, 'true') }}:
- template: ../stage/deploy_notebooks_stage_v4.yml
parameters:
Agent: ${{parameters.Agent}}
Demands: ${{parameters.Demands}}
stageName: 'release'
jobDisplayName: ${{parameters.jobDisplayName}}
TridentWorkloadTypeShort: ${{parameters.TridentWorkloadTypeShort}}
DeployLocation: ${{parameters.DeployLocation}}
TestPostfix: "-release"
DefaultWorkingDirectory: ${{parameters.DeployLocation}}
Template: ${{parameters.Template}}
ProjectLocation: ${{parameters.ProjectLocation}}
PythonPath: ${{parameters.PythonPath}}
flighting_release: true
doCleanup: ${{parameters.doCleanup}}
sql_server_name: ${{parameters.sql_server_name}}
sql_database_name: ${{parameters.sql_database_name}}
sql_username: ${{parameters.sql_username}}
sql_password: ${{parameters.sql_password}}
post_cleanup: ${{parameters.post_cleanup}}
container_name: ${{parameters.container_name}}
account_name: ${{parameters.account_name}}
account_key: ${{parameters.account_key}}
datastore_rg: ${{parameters.datastore_rg}}
- ${{ if eq(parameters.flighting_preview, 'true') }}:
- template: ../stage/deploy_notebooks_stage_v4.yml
parameters:
Agent: ${{parameters.Agent}}
Demands: ${{parameters.Demands}}
stageName: 'preview'
jobDisplayName: ${{parameters.jobDisplayName}}
TridentWorkloadTypeShort: ${{parameters.TridentWorkloadTypeShort}}
DeployLocation: ${{parameters.DeployLocation}}
TestPostfix: "-preview"
DefaultWorkingDirectory: ${{parameters.DeployLocation}}
Template: ${{parameters.Template}}
ProjectLocation: ${{parameters.ProjectLocation}}
PythonPath: ${{parameters.PythonPath}}
flighting_preview: true
doCleanup: ${{parameters.doCleanup}}
sql_server_name: ${{parameters.sql_server_name}}
sql_database_name: ${{parameters.sql_database_name}}
sql_username: ${{parameters.sql_username}}
sql_password: ${{parameters.sql_password}}
post_cleanup: ${{parameters.post_cleanup}}
container_name: ${{parameters.container_name}}
account_name: ${{parameters.account_name}}
account_key: ${{parameters.account_key}}
datastore_rg: ${{parameters.datastore_rg}}
- ${{ if eq(parameters.flighting_master, 'true') }}:
- template: ../stage/deploy_notebooks_stage_v4.yml
parameters:
Agent: ${{parameters.Agent}}
Demands: ${{parameters.Demands}}
stageName: 'master'
jobDisplayName: ${{parameters.jobDisplayName}}
TridentWorkloadTypeShort: ${{parameters.TridentWorkloadTypeShort}}
DeployLocation: ${{parameters.DeployLocation}}
TestPostfix: "-master"
DefaultWorkingDirectory: ${{parameters.DeployLocation}}
Template: ${{parameters.Template}}
ProjectLocation: ${{parameters.ProjectLocation}}
PythonPath: ${{parameters.PythonPath}}
flighting_master: true
doCleanup: ${{parameters.doCleanup}}
sql_server_name: ${{parameters.sql_server_name}}
sql_database_name: ${{parameters.sql_database_name}}
sql_username: ${{parameters.sql_username}}
sql_password: ${{parameters.sql_password}}
post_cleanup: ${{parameters.post_cleanup}}
container_name: ${{parameters.container_name}}
account_name: ${{parameters.account_name}}
account_key: ${{parameters.account_key}}
datastore_rg: ${{parameters.datastore_rg}}
|
AI/.ci/stages/deploy_notebooks_stages_v4.yml/0
|
{
"file_path": "AI/.ci/stages/deploy_notebooks_stages_v4.yml",
"repo_id": "AI",
"token_count": 2038
}
| 17 |
parameters:
template: ''
azureSubscription: 'x'
azure_subscription: 'x'
azureresourcegroup: 'x'
workspacename: 'aiarchtemplate'
azureregion: 'westus'
aksimagename: 'myimage'
aks_name: "akscluster"
aks_service_name: "aksservice"
location: "" #Root Dir of Project
python_path: "" #Root Dir of Python Env
cluster_name: "-"
flighting_release: false
flighting_preview: false
flighting_master: false
doCleanup: True
sql_server_name: "x"
sql_database_name: "x"
sql_username: "x"
sql_password: "x"
data_prep: true
train: true
post_cleanup: true
container_name: "x"
account_name: "x"
account_key: "x"
datastore_rg: "x"
conda: #
steps:
- template: docker_clean.yml
- template: conda_pytest_steps.yml
parameters:
azureSubscription: ${{parameters.azureSubscription}}
azure_subscription: ${{parameters.azure_subscription}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
workspacename: ${{parameters.workspacename}}
azureregion: ${{parameters.azureregion}}
aksimagename: ${{parameters.aksimagename}}
aks_name: ${{parameters.aks_name}}
aks_service_name: ${{parameters.aks_service_name}}
location: ${{parameters.location}}
python_path: ${{parameters.python_path}}
cluster_name: ${{parameters.cluster_name}}
flighting_release: ${{parameters.flighting_release}}
flighting_preview: ${{parameters.flighting_preview}}
flighting_master: ${{parameters.flighting_master}}
doCleanup: ${{parameters.post_cleanup}}
sql_server_name: ${{parameters.sql_server_name}}
sql_database_name: ${{parameters.sql_database_name}}
sql_username: ${{parameters.sql_username}}
sql_password: ${{parameters.sql_password}}
data_prep: ${{parameters.data_prep}}
train: ${{parameters.train}}
post_cleanup: ${{parameters.post_cleanup}}
container_name: ${{parameters.container_name}}
account_name: ${{parameters.account_name}}
account_key: ${{parameters.account_key}}
datastore_rg: ${{parameters.datastore_rg}}
conda: ${{parameters.conda}}
|
AI/.ci/steps/deploy_notebook_steps_v5.yml/0
|
{
"file_path": "AI/.ci/steps/deploy_notebook_steps_v5.yml",
"repo_id": "AI",
"token_count": 797
}
| 18 |
variables:
TridentWorkloadTypeShort: aimlrts
DeployLocation: westus
ProjectLocation: "notebooks/"
PythonPath: "."
Template: az-ml-realtime-score.yml
|
AI/.ci/vars/az-ml-realtime-score.yml/0
|
{
"file_path": "AI/.ci/vars/az-ml-realtime-score.yml",
"repo_id": "AI",
"token_count": 53
}
| 19 |
# Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repositories using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
|
AI/CONTRIBUTING.md/0
|
{
"file_path": "AI/CONTRIBUTING.md",
"repo_id": "AI",
"token_count": 213
}
| 20 |
from os.path import join
import sys
sys.path += ['../']
import argparse
import glob
import json
import logging
import os
import random
import numpy as np
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
from torch.utils.data import DataLoader
from tqdm import tqdm
import torch.distributed as dist
from torch import nn
import torch.nn.functional as F
from model.models import MSMarcoConfigDict, ALL_MODELS
from utils.lamb import Lamb
import random
import transformers
from transformers import (
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_processors as processors
import copy
from torch import nn
import pickle
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
import pandas as pd
logger = logging.getLogger(__name__)
from utils.util import (
StreamingDataset,
EmbeddingCache,
get_checkpoint_no,
get_latest_ann_data,
set_seed,
is_first_worker,
)
from data.DPR_data import GetTrainingDataProcessingFn, GetTripletTrainingDataProcessingFn
from utils.dpr_utils import (
load_states_from_checkpoint,
get_model_obj,
CheckpointState,
get_optimizer,
all_gather_list
)
def train(args, model, tokenizer, query_cache, passage_cache):
""" Train the model """
logger.info("Training/evaluation parameters %s", args)
tb_writer = None
if is_first_worker():
tb_writer = SummaryWriter(log_dir=args.log_dir)
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) #nll loss for query
real_batch_size = args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)
optimizer = get_optimizer(args, model, weight_decay=args.weight_decay,)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Max steps = %d", args.max_steps)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
tr_loss = 0.0
model.zero_grad()
model.train()
    set_seed(args)  # Added here for reproducibility
last_ann_no = -1
train_dataloader = None
train_dataloader_iter = None
dev_ndcg = 0
step = 0
iter_count = 0
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps= args.max_steps
)
global_step = 0
if args.model_name_or_path != "bert-base-uncased":
saved_state = load_states_from_checkpoint(args.model_name_or_path)
global_step = _load_saved_state(model, optimizer, scheduler, saved_state)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from global step %d", global_step)
nq_dev_nll_loss, nq_correct_ratio = evaluate_dev(args, model, passage_cache)
dev_nll_loss_trivia, correct_ratio_trivia = evaluate_dev(args, model, passage_cache, "-trivia")
if is_first_worker():
tb_writer.add_scalar("dev_nll_loss/dev_nll_loss", nq_dev_nll_loss, global_step)
tb_writer.add_scalar("dev_nll_loss/correct_ratio", nq_correct_ratio, global_step)
tb_writer.add_scalar("dev_nll_loss/dev_nll_loss_trivia", dev_nll_loss_trivia, global_step)
tb_writer.add_scalar("dev_nll_loss/correct_ratio_trivia", correct_ratio_trivia, global_step)
while global_step < args.max_steps:
if step % args.gradient_accumulation_steps == 0 and global_step % args.logging_steps == 0:
if args.num_epoch == 0:
                # check if new ann training data is available
ann_no, ann_path, ndcg_json = get_latest_ann_data(args.ann_dir)
if ann_path is not None and ann_no != last_ann_no:
logger.info("Training on new add data at %s", ann_path)
with open(ann_path, 'r') as f:
ann_training_data = f.readlines()
logger.info("Training data line count: %d", len(ann_training_data))
ann_training_data = [l for l in ann_training_data if len(l.split('\t')[2].split(',')) > 1]
logger.info("Filtered training data line count: %d", len(ann_training_data))
ann_checkpoint_path = ndcg_json['checkpoint']
ann_checkpoint_no = get_checkpoint_no(ann_checkpoint_path)
aligned_size = (len(ann_training_data) // args.world_size) * args.world_size
ann_training_data = ann_training_data[:aligned_size]
logger.info("Total ann queries: %d", len(ann_training_data))
if args.triplet:
train_dataset = StreamingDataset(ann_training_data, GetTripletTrainingDataProcessingFn(args, query_cache, passage_cache))
train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size)
else:
train_dataset = StreamingDataset(ann_training_data, GetTrainingDataProcessingFn(args, query_cache, passage_cache))
train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size*2)
train_dataloader_iter = iter(train_dataloader)
# re-warmup
if not args.single_warmup:
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps= len(ann_training_data)
)
if args.local_rank != -1:
dist.barrier()
if is_first_worker():
# add ndcg at checkpoint step used instead of current step
tb_writer.add_scalar("retrieval_accuracy/top20_nq", ndcg_json['top20'], ann_checkpoint_no)
tb_writer.add_scalar("retrieval_accuracy/top100_nq", ndcg_json['top100'], ann_checkpoint_no)
if 'top20_trivia' in ndcg_json:
tb_writer.add_scalar("retrieval_accuracy/top20_trivia", ndcg_json['top20_trivia'], ann_checkpoint_no)
tb_writer.add_scalar("retrieval_accuracy/top100_trivia", ndcg_json['top100_trivia'], ann_checkpoint_no)
if last_ann_no != -1:
tb_writer.add_scalar("epoch", last_ann_no, global_step-1)
tb_writer.add_scalar("epoch", ann_no, global_step)
last_ann_no = ann_no
elif step == 0:
train_data_path = os.path.join(args.data_dir, "train-data")
with open(train_data_path, 'r') as f:
training_data = f.readlines()
if args.triplet:
train_dataset = StreamingDataset(training_data, GetTripletTrainingDataProcessingFn(args, query_cache, passage_cache))
train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size)
else:
train_dataset = StreamingDataset(training_data, GetTrainingDataProcessingFn(args, query_cache, passage_cache))
train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size*2)
all_batch = [b for b in train_dataloader]
logger.info("Total batch count: %d", len(all_batch))
train_dataloader_iter = iter(train_dataloader)
try:
batch = next(train_dataloader_iter)
except StopIteration:
logger.info("Finished iterating current dataset, begin reiterate")
if args.num_epoch != 0:
iter_count += 1
if is_first_worker():
tb_writer.add_scalar("epoch", iter_count-1, global_step-1)
tb_writer.add_scalar("epoch", iter_count, global_step)
nq_dev_nll_loss, nq_correct_ratio = evaluate_dev(args, model, passage_cache)
dev_nll_loss_trivia, correct_ratio_trivia = evaluate_dev(args, model, passage_cache, "-trivia")
if is_first_worker():
tb_writer.add_scalar("dev_nll_loss/dev_nll_loss", nq_dev_nll_loss, global_step)
tb_writer.add_scalar("dev_nll_loss/correct_ratio", nq_correct_ratio, global_step)
tb_writer.add_scalar("dev_nll_loss/dev_nll_loss_trivia", dev_nll_loss_trivia, global_step)
tb_writer.add_scalar("dev_nll_loss/correct_ratio_trivia", correct_ratio_trivia, global_step)
train_dataloader_iter = iter(train_dataloader)
batch = next(train_dataloader_iter)
dist.barrier()
if args.num_epoch != 0 and iter_count > args.num_epoch:
break
step += 1
if args.triplet:
loss = triplet_fwd_pass(args, model, batch)
else:
loss, correct_cnt = do_biencoder_fwd_pass(args, model, batch)
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
if step % args.gradient_accumulation_steps == 0:
loss.backward()
else:
with model.no_sync():
loss.backward()
tr_loss += loss.item()
if step % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = tr_loss / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["loss"] = loss_scalar
tr_loss = 0
if is_first_worker():
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{"step": global_step}}))
if is_first_worker() and args.save_steps > 0 and global_step % args.save_steps == 0:
_save_checkpoint(args, model, optimizer, scheduler, global_step)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
tb_writer.close()
return global_step
def evaluate_dev(args, model, passage_cache, source=""):
dev_query_collection_path = os.path.join(args.data_dir, "dev-query{}".format(source))
dev_query_cache = EmbeddingCache(dev_query_collection_path)
logger.info('NLL validation ...')
model.eval()
log_result_step = 100
batches = 0
total_loss = 0.0
total_correct_predictions = 0
with dev_query_cache:
dev_data_path = os.path.join(args.data_dir, "dev-data{}".format(source))
with open(dev_data_path, 'r') as f:
dev_data = f.readlines()
dev_dataset = StreamingDataset(dev_data, GetTrainingDataProcessingFn(args, dev_query_cache, passage_cache, shuffle=False))
dev_dataloader = DataLoader(dev_dataset, batch_size=args.train_batch_size*2)
for i, batch in enumerate(dev_dataloader):
loss, correct_cnt = do_biencoder_fwd_pass(args, model, batch)
loss.backward() # get CUDA oom without this
model.zero_grad()
total_loss += loss.item()
total_correct_predictions += correct_cnt
batches += 1
if (i + 1) % log_result_step == 0:
logger.info('Eval step: %d , loss=%f ', i, loss.item())
total_loss = total_loss / batches
total_samples = batches * args.train_batch_size * torch.distributed.get_world_size()
correct_ratio = float(total_correct_predictions / total_samples)
logger.info('NLL Validation: loss = %f. correct prediction ratio %d/%d ~ %f', total_loss,
total_correct_predictions,
total_samples,
correct_ratio
)
model.train()
return total_loss, correct_ratio
def triplet_fwd_pass(args, model, batch):
batch = tuple(t.to(args.device) for t in batch)
inputs = {"query_ids": batch[0].long(), "attention_mask_q": batch[1].long(),
"input_ids_a": batch[3].long(), "attention_mask_a": batch[4].long(),
"input_ids_b": batch[6].long(), "attention_mask_b": batch[7].long()}
loss = model(**inputs)[0]
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
return loss
def do_biencoder_fwd_pass(args, model, batch) -> (
torch.Tensor, int):
batch = tuple(t.to(args.device) for t in batch)
inputs = {"query_ids": batch[0][::2].long(), "attention_mask_q": batch[1][::2].long(),
"input_ids_a": batch[3].long(), "attention_mask_a": batch[4].long()}
local_q_vector, local_ctx_vectors = model(**inputs)
q_vector_to_send = torch.empty_like(local_q_vector).cpu().copy_(local_q_vector).detach_()
ctx_vector_to_send = torch.empty_like(local_ctx_vectors).cpu().copy_(local_ctx_vectors).detach_()
global_question_ctx_vectors = all_gather_list(
[q_vector_to_send, ctx_vector_to_send],
max_size=150000)
global_q_vector = []
global_ctxs_vector = []
for i, item in enumerate(global_question_ctx_vectors):
q_vector, ctx_vectors = item
if i != args.local_rank:
global_q_vector.append(q_vector.to(local_q_vector.device))
global_ctxs_vector.append(ctx_vectors.to(local_q_vector.device))
else:
global_q_vector.append(local_q_vector)
global_ctxs_vector.append(local_ctx_vectors)
global_q_vector = torch.cat(global_q_vector, dim=0)
global_ctxs_vector = torch.cat(global_ctxs_vector, dim=0)
scores = torch.matmul(global_q_vector, torch.transpose(global_ctxs_vector, 0, 1))
if len(global_q_vector.size()) > 1:
q_num = global_q_vector.size(0)
scores = scores.view(q_num, -1)
softmax_scores = F.log_softmax(scores, dim=1)
positive_idx_per_question = [i*2 for i in range(q_num)]
loss = F.nll_loss(softmax_scores, torch.tensor(positive_idx_per_question).to(softmax_scores.device),
reduction='mean')
max_score, max_idxs = torch.max(softmax_scores, 1)
correct_predictions_count = (max_idxs == torch.tensor(positive_idx_per_question).to(max_idxs.device)).sum()
is_correct = correct_predictions_count.sum().item()
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
return loss, is_correct
def _save_checkpoint(args, model, optimizer, scheduler, step: int) -> str:
offset = step
epoch = 0
model_to_save = get_model_obj(model)
cp = os.path.join(args.output_dir, 'checkpoint-' + str(offset))
meta_params = {}
state = CheckpointState(model_to_save.state_dict(),
optimizer.state_dict(),
scheduler.state_dict(),
offset,
epoch, meta_params
)
torch.save(state._asdict(), cp)
logger.info('Saved checkpoint at %s', cp)
return cp
def _load_saved_state(model, optimizer, scheduler, saved_state: CheckpointState):
epoch = saved_state.epoch
step = saved_state.offset
logger.info('Loading checkpoint @ step=%s', step)
model_to_load = get_model_obj(model)
logger.info('Loading saved model state ...')
model_to_load.load_state_dict(saved_state.model_dict) # set strict=False if you use extra projection
return step
def get_arguments():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the cached passage and query files",
)
parser.add_argument(
"--ann_dir",
default=None,
type=str,
required=True,
help="The ann training data dir. Should contain the output of ann data generation job",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MSMarcoConfigDict.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--num_epoch",
default=0,
type=int,
help="Number of epoch to train, if specified will use training data instead of ann",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_query_length",
default=64,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--triplet", default = False, action="store_true", help="Whether to run training.")
parser.add_argument(
"--log_dir",
default=None,
type=str,
help="Tensorboard log dir",
)
parser.add_argument(
"--optimizer",
default="adamW",
type=str,
help="Optimizer - lamb or adamW",
)
parser.add_argument(
"--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=2.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--max_steps",
default=300000,
type=int,
help="If > 0: set total number of training steps to perform",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
# ----------------- ANN HyperParam ------------------
parser.add_argument(
"--load_optimizer_scheduler",
default = False,
action="store_true",
help="load scheduler from checkpoint or not",
)
parser.add_argument(
"--single_warmup",
default = True,
action="store_true",
help="use single or re-warmup",
)
# ----------------- End of Doc Ranking HyperParam ------------------
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
return args
def set_env(args):
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
def load_model(args):
# Prepare GLUE task
args.task_name = args.task_name.lower()
args.output_mode = "classification"
label_list = ["0", "1"]
num_labels = len(label_list)
# store args
if args.local_rank != -1:
args.world_size = torch.distributed.get_world_size()
args.rank = dist.get_rank()
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
configObj = MSMarcoConfigDict[args.model_type]
tokenizer = configObj.tokenizer_class.from_pretrained(
"bert-base-uncased",
do_lower_case=True,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if is_first_worker():
# Create output directory if needed
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
model = configObj.model_class(args)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
return tokenizer, model
def main():
args = get_arguments()
set_env(args)
tokenizer, model = load_model(args)
query_collection_path = os.path.join(args.data_dir, "train-query")
query_cache = EmbeddingCache(query_collection_path)
passage_collection_path = os.path.join(args.data_dir, "passages")
passage_cache = EmbeddingCache(passage_collection_path)
with query_cache, passage_cache:
global_step = train(args, model, tokenizer, query_cache, passage_cache)
logger.info(" global_step = %s", global_step)
if args.local_rank != -1:
dist.barrier()
if __name__ == "__main__":
main()
|
ANCE/drivers/run_ann_dpr.py/0
|
{
"file_path": "ANCE/drivers/run_ann_dpr.py",
"repo_id": "ANCE",
"token_count": 11637
}
| 21 |
import collections
import sys
sys.path += ['../']
import glob
import logging
import os
from typing import List, Tuple, Dict
import faiss
import pickle
import numpy as np
import unicodedata
import torch
import torch.distributed as dist
from torch import nn
from torch.serialization import default_restore_location
import regex
from transformers import AdamW
from utils.lamb import Lamb
logger = logging.getLogger()
CheckpointState = collections.namedtuple("CheckpointState",
['model_dict', 'optimizer_dict', 'scheduler_dict', 'offset', 'epoch',
'encoder_params'])
def get_encoder_checkpoint_params_names():
return ['do_lower_case', 'pretrained_model_cfg', 'encoder_model_type',
'pretrained_file',
'projection_dim', 'sequence_length']
def get_encoder_params_state(args):
"""
    Selects the param values to be saved in a checkpoint, so that a trained model file can be used for downstream
    tasks without the need to specify these parameters again
:return: Dict of params to memorize in a checkpoint
"""
params_to_save = get_encoder_checkpoint_params_names()
r = {}
for param in params_to_save:
r[param] = getattr(args, param)
return r
def set_encoder_params_from_state(state, args):
if not state:
return
params_to_save = get_encoder_checkpoint_params_names()
override_params = [(param, state[param]) for param in params_to_save if param in state and state[param]]
for param, value in override_params:
if hasattr(args, param):
logger.warning('Overriding args parameter value from checkpoint state. Param = %s, value = %s', param,
value)
setattr(args, param, value)
return args
def get_model_obj(model: nn.Module):
return model.module if hasattr(model, 'module') else model
def get_model_file(args, file_prefix) -> str:
out_cp_files = glob.glob(os.path.join(args.output_dir, file_prefix + '*')) if args.output_dir else []
logger.info('Checkpoint files %s', out_cp_files)
model_file = None
if args.model_file and os.path.exists(args.model_file):
model_file = args.model_file
elif len(out_cp_files) > 0:
model_file = max(out_cp_files, key=os.path.getctime)
return model_file
def load_states_from_checkpoint(model_file: str) -> CheckpointState:
logger.info('Reading saved model from %s', model_file)
state_dict = torch.load(model_file, map_location=lambda s, l: default_restore_location(s, 'cpu'))
logger.info('model_state_dict keys %s', state_dict.keys())
return CheckpointState(**state_dict)
def get_optimizer(args, model: nn.Module, weight_decay: float = 0.0, ) -> torch.optim.Optimizer:
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.optimizer == "adamW":
return AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
elif args.optimizer == "lamb":
return Lamb(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
else:
raise Exception("optimizer {0} not recognized! Can only be lamb or adamW".format(args.optimizer))
def all_gather_list(data, group=None, max_size=16384):
"""Gathers arbitrary data from all nodes into a list.
Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
data. Note that *data* must be picklable.
Args:
data (Any): data from the local worker to be gathered on other workers
group (optional): group of the collective
"""
SIZE_STORAGE_BYTES = 4 # int32 to encode the payload size
enc = pickle.dumps(data)
enc_size = len(enc)
if enc_size + SIZE_STORAGE_BYTES > max_size:
raise ValueError(
'encoded data exceeds max_size, this can be fixed by increasing buffer size: {}'.format(enc_size))
rank = dist.get_rank()
world_size = dist.get_world_size()
buffer_size = max_size * world_size
if not hasattr(all_gather_list, '_buffer') or \
all_gather_list._buffer.numel() < buffer_size:
all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
buffer = all_gather_list._buffer
buffer.zero_()
cpu_buffer = all_gather_list._cpu_buffer
assert enc_size < 256 ** SIZE_STORAGE_BYTES, 'Encoded object size should be less than {} bytes'.format(
256 ** SIZE_STORAGE_BYTES)
size_bytes = enc_size.to_bytes(SIZE_STORAGE_BYTES, byteorder='big')
cpu_buffer[0:SIZE_STORAGE_BYTES] = torch.ByteTensor(list(size_bytes))
cpu_buffer[SIZE_STORAGE_BYTES: enc_size + SIZE_STORAGE_BYTES] = torch.ByteTensor(list(enc))
start = rank * max_size
size = enc_size + SIZE_STORAGE_BYTES
buffer[start: start + size].copy_(cpu_buffer[:size])
if group is None:
group = dist.group.WORLD
dist.all_reduce(buffer, group=group)
try:
result = []
for i in range(world_size):
out_buffer = buffer[i * max_size: (i + 1) * max_size]
size = int.from_bytes(out_buffer[0:SIZE_STORAGE_BYTES], byteorder='big')
if size > 0:
result.append(pickle.loads(bytes(out_buffer[SIZE_STORAGE_BYTES: size + SIZE_STORAGE_BYTES].tolist())))
return result
except pickle.UnpicklingError:
raise Exception(
'Unable to unpickle data from other workers. all_gather_list requires all '
'workers to enter the function together, so this error usually indicates '
'that the workers have fallen out of sync somehow. Workers can fall out of '
'sync if one of them runs out of memory, or if there are other conditions '
'in your training script that can cause one worker to finish an epoch '
'while other workers are still iterating over their portions of the data.'
)
class DenseHNSWFlatIndexer(object):
"""
    Efficient index for retrieval. Note: default settings are for high accuracy but also high RAM usage
"""
def __init__(self, vector_sz: int, buffer_size: int = 50000, store_n: int = 512
, ef_search: int = 128, ef_construction: int = 200):
self.buffer_size = buffer_size
self.index_id_to_db_id = []
self.index = None
# IndexHNSWFlat supports L2 similarity only
        # so we have to apply DOT -> L2 similarity space conversion with the help of an extra dimension
index = faiss.IndexHNSWFlat(vector_sz + 1, store_n)
index.hnsw.efSearch = ef_search
index.hnsw.efConstruction = ef_construction
self.index = index
self.phi = 0
def index_data(self, data: List[Tuple[object, np.array]]):
n = len(data)
# max norm is required before putting all vectors in the index to convert inner product similarity to L2
if self.phi > 0:
raise RuntimeError('DPR HNSWF index needs to index all data at once,'
'results will be unpredictable otherwise.')
phi = 0
for i, item in enumerate(data):
id, doc_vector = item
norms = (doc_vector ** 2).sum()
phi = max(phi, norms)
logger.info('HNSWF DotProduct -> L2 space phi={}'.format(phi))
        self.phi = phi  # remember phi so that a second call to index_data is rejected by the check above
# indexing in batches is beneficial for many faiss index types
for i in range(0, n, self.buffer_size):
db_ids = [t[0] for t in data[i:i + self.buffer_size]]
vectors = [np.reshape(t[1], (1, -1)) for t in data[i:i + self.buffer_size]]
norms = [(doc_vector ** 2).sum() for doc_vector in vectors]
aux_dims = [np.sqrt(phi - norm) for norm in norms]
hnsw_vectors = [np.hstack((doc_vector, aux_dims[i].reshape(-1, 1))) for i, doc_vector in
enumerate(vectors)]
hnsw_vectors = np.concatenate(hnsw_vectors, axis=0)
self._update_id_mapping(db_ids)
self.index.add(hnsw_vectors)
logger.info('data indexed %d', len(self.index_id_to_db_id))
indexed_cnt = len(self.index_id_to_db_id)
logger.info('Total data indexed %d', indexed_cnt)
def search_knn(self, query_vectors: np.array, top_docs: int) -> List[Tuple[List[object], List[float]]]:
aux_dim = np.zeros(len(query_vectors), dtype='float32')
query_nhsw_vectors = np.hstack((query_vectors, aux_dim.reshape(-1, 1)))
logger.info('query_hnsw_vectors %s', query_nhsw_vectors.shape)
scores, indexes = self.index.search(query_nhsw_vectors, top_docs)
# convert to external ids
db_ids = [[self.index_id_to_db_id[i] for i in query_top_idxs] for query_top_idxs in indexes]
result = [(db_ids[i], scores[i]) for i in range(len(db_ids))]
return result
def _update_id_mapping(self, db_ids: List):
self.index_id_to_db_id.extend(db_ids)
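# Typical usage (sketch; sizes and names are illustrative):
#   indexer = DenseHNSWFlatIndexer(vector_sz=768)
#   indexer.index_data(all_id_vector_pairs)                  # must be called once with all data
#   top = indexer.search_knn(query_vectors, top_docs=100)    # [(db_ids, scores), ...] per query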
def check_answer(passages, answers, doc_ids, tokenizer):
"""Search through all the top docs to see if they have any of the answers."""
hits = []
for i, doc_id in enumerate(doc_ids):
text = passages[doc_id][0]
hits.append(has_answer(answers, text, tokenizer))
return hits
def has_answer(answers, text, tokenizer):
"""Check if a document contains an answer string.
If `match_type` is string, token matching is done between the text and answer.
If `match_type` is regex, we search the whole text with the regex.
"""
if text is None:
logger.warning("no doc in db")
return False
text = _normalize(text)
# Answer is a list of possible strings
text = tokenizer.tokenize(text).words(uncased=True)
for single_answer in answers:
single_answer = _normalize(single_answer)
single_answer = tokenizer.tokenize(single_answer)
single_answer = single_answer.words(uncased=True)
for i in range(0, len(text) - len(single_answer) + 1):
if single_answer == text[i: i + len(single_answer)]:
return True
return False
class SimpleTokenizer:
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set()
def tokenize(self, text):
data = []
matches = [m for m in self._regexp.finditer(text)]
for i in range(len(matches)):
# Get text
token = matches[i].group()
# Get whitespace
span = matches[i].span()
start_ws = span[0]
if i + 1 < len(matches):
end_ws = matches[i + 1].span()[0]
else:
end_ws = span[1]
# Format data
data.append((
token,
text[start_ws: end_ws],
span,
))
return Tokens(data, self.annotators)
def _normalize(text):
return unicodedata.normalize('NFD', text)
class Tokens(object):
"""A class to represent a list of tokenized text."""
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __init__(self, data, annotators, opts=None):
self.data = data
self.annotators = annotators
self.opts = opts or {}
def __len__(self):
"""The number of tokens."""
return len(self.data)
def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
|
ANCE/utils/dpr_utils.py/0
|
{
"file_path": "ANCE/utils/dpr_utils.py",
"repo_id": "ANCE",
"token_count": 5362
}
| 22 |
"""
Code for self-training with weak supervision.
Author: Giannis Karamanolakis ([email protected])
"""
import argparse
import json
import logging
import os
import random
import joblib
import numpy as np
import multiprocessing as mp
from functools import partial
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import LogisticRegression
from Tokenizer import Tokenizer
class LogRegTrainer:
# Trainer Class
# has to implement: __init__, train, evaluate, save, load
def __init__(self, args, logger=None):
self.args = args
self.logger = logger
self.tokenizer_method = args.tokenizer_method
self.remove_stopwords = True
self.vocab_path = os.path.join(args.experiment_folder, "preprocessed/")
self.tokenizer = None
self.seed = args.seed
self.tokenizer_obj = Tokenizer(language='english',
tokenizer_method=self.tokenizer_method,
remove_stopwords=self.remove_stopwords,
ngram_range=(1, 1),
min_freq=1,
max_freq_perc=1.0)
self.tokenizer = self.tokenizer_obj.tokenizer
self.vectorizer = TfidfVectorizer(sublinear_tf=True, min_df=5, max_df=0.9, norm='l2',
ngram_range=(1, 2), analyzer='word', tokenizer=identity_fn,
preprocessor=identity_fn,
token_pattern=None)
self.model = LogisticRegression(random_state=self.seed, max_iter=int(1e6))
self.finetune = self.train_pseudo
def preprocess(self, texts, preprocessed_texts=None):
"""
Pre-processes a list of texts into lists of tokenized texts
:param texts: input list of texts
:param preprocessed_texts: already pre-processed list of texts
:return: tokenized texts
"""
if preprocessed_texts is not None:
return preprocessed_texts
self.logger.info("tokenizing {} documents".format(len(texts)))
with mp.Pool(processes=mp.cpu_count()) as pool:
tokenized_texts = pool.map(partial(self.tokenizer), texts)
return tokenized_texts
def train(self, train_texts, train_labels, dev_texts=None, dev_labels=None, eval_fn=None,
preprocessed_train_texts=None, preprocessed_dev_texts=None):
logger = self.logger
tokenized_texts = self.preprocess(train_texts, preprocessed_train_texts)
logger.info("Fitting vectorizer on {} texts".format(len(train_texts)))
features = self.vectorizer.fit_transform(tokenized_texts).toarray()
logger.info("Training logistic regression: {}".format(features.shape))
self.model.fit(features, train_labels)
logger.info("logreg weights: {} ({})".format(self.model.coef_.shape[1] * self.model.coef_.shape[0],
self.model.coef_.shape))
res = {}
return res
def train_pseudo(self, train_texts, train_labels, train_weights=None, dev_texts=None, dev_labels=None, eval_fn=None,
preprocessed_train_texts=None, preprocessed_dev_texts=None):
logger = self.logger
tokenized_texts = self.preprocess(train_texts, preprocessed_train_texts)
logger.info("Fitting vectorizer on {} texts".format(len(train_texts)))
features = self.vectorizer.fit_transform(tokenized_texts).toarray()
train_labels = np.array(train_labels)
if train_labels.ndim == 2:
train_labels = np.argmax(train_labels, axis=-1)
if train_weights is not None:
train_weights = np.array(train_weights)
logger.info("Training logistic regression: {}\nFirst Weights:{}".format(features.shape, train_weights[:10]))
else:
logger.info("Training logistic regression: {}\nFirst Weights: None".format(features.shape))
self.model.fit(features, train_labels, sample_weight=train_weights)
logger.info("logreg weights: {} ({})".format(self.model.coef_.shape[1] * self.model.coef_.shape[0],
self.model.coef_.shape))
res = {}
return res
def predict(self, texts, preprocessed_texts=None, prefix=""):
logger = self.logger
tokenized_texts = self.preprocess(texts, preprocessed_texts)
features = self.vectorizer.transform(tokenized_texts).toarray()
logger.info("predicting labels: {}".format(features.shape))
preds = self.model.predict(features)
soft_proba = self.model.predict_proba(features)
res = {
'preds': preds,
'proba': soft_proba,
'features': features
}
return res
def save(self, savefolder):
self.logger.info("saving student at {}".format(savefolder))
joblib.dump(self.tokenizer_obj, os.path.join(savefolder, 'tokenizer_obj.pkl'))
joblib.dump(self.vectorizer, os.path.join(savefolder, 'vectorizer.pkl'))
joblib.dump(self.model, os.path.join(savefolder, 'logreg.pkl'))
def load(self, savefolder):
self.logger.info("loading student from {}".format(savefolder))
self.tokenizer_obj = joblib.load(os.path.join(savefolder, 'tokenizer_obj.pkl'))
self.tokenizer = self.tokenizer_obj.tokenizer
self.vectorizer = joblib.load(os.path.join(savefolder, 'vectorizer.pkl'))
self.model = joblib.load(os.path.join(savefolder, 'logreg.pkl'))
def identity_fn(doc):
return doc
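# Example usage (sketch; `args` is assumed to provide tokenizer_method, experiment_folder and seed):
#   trainer = LogRegTrainer(args, logger=logger)
#   trainer.train(train_texts, train_labels)
#   preds = trainer.predict(test_texts)['preds']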
|
ASTRA/astra/model/LogReg.py/0
|
{
"file_path": "ASTRA/astra/model/LogReg.py",
"repo_id": "ASTRA",
"token_count": 2591
}
| 23 |
# ------------------------------------------------------------------------------------------
# Copyright (c). All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from torch import nn
import torch
from torch import Tensor
import torch.distributed as dist
from torch.nn import Module, ModuleList
import torch.nn.functional as F
import copy
import typing
import math
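# Tanh approximation of the GELU activation (Hendrycks & Gimpel, 2016).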
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
class MixtureSoup(torch.nn.Module):
def __init__(self, expert, num_local_experts=1):
super(MixtureSoup, self).__init__()
self.deepspeed_experts = torch.nn.ModuleList(
[copy.deepcopy(expert) for i in range(num_local_experts)])
self.num_local_experts = num_local_experts
self.expert_score_weight = torch.nn.Parameter(torch.zeros(self.num_local_experts), requires_grad=False)
def get_expert_by_idx(self, idx):
return self.deepspeed_experts[idx]
def expert_soup_forward(self, input):
output = F.linear(input,
self.parameter_dict["weight"],
self.parameter_dict["bias"])
return output
def expert_soup(self):
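# Merge the experts into a single linear layer: the merged weight and bias are the
# softmax(expert_score_weight)-weighted averages of the per-expert parameters. This implements
# the weight-averaging ("soup") inference mode.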
weight = F.softmax(self.expert_score_weight, dim=-1)
self.parameter_dict = {"weight": 0, "bias": 0}
for idx in range(self.num_local_experts):
single_expert = self.deepspeed_experts[idx]
for s_name, s_param in single_expert.named_parameters():
if "weight" in s_name:
p_name = "weight"
self.parameter_dict[p_name] = self.parameter_dict[p_name] + (weight[idx] * s_param)
else:
p_name = "bias"
self.parameter_dict[p_name] = self.parameter_dict[p_name] + (weight[idx] * s_param)
def forward(self, *input: Tensor):
expert_output = None
if self.deepspeed_experts[0].training:
expert_idx = torch.randint(low=0, high=self.num_local_experts, size=(1,)).item() # selected expert
if self.expert_score_weight.requires_grad:
self.expert_soup()
expert_output = self.expert_soup_forward(input[0])
else:
expert_output = self.get_expert_by_idx(expert_idx)(input[0])
else:
self.expert_soup()
expert_output = self.expert_soup_forward(input[0])
return expert_output
class ExpertSoup(nn.Module):
def __init__(self, dim, r, act=None, num_expert=4, sharing_down=0, sharing_up=0):
super().__init__()
self.act = act
if sharing_down == 1:
self.MoA_A = MixtureSoup(nn.Linear(dim, r), 1)
else:
self.MoA_A = MixtureSoup(nn.Linear(dim, r), num_expert)
if act is not None:
self.act = gelu
if sharing_up == 1:
self.MoA_B = MixtureSoup(nn.Linear(r, dim), 1)
else:
self.MoA_B = MixtureSoup(nn.Linear(r, dim), num_expert)
def forward(self, x, residual):
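# Standard adapter bottleneck: down-project (MoA_A), optional non-linearity, up-project (MoA_B),
# then add the residual connection.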
result = self.MoA_A(x)
if self.act is not None:
result = self.act(result)
result = self.MoA_B(result)
return result + residual
|
AdaMix/NLG/src/expert_soup.py/0
|
{
"file_path": "AdaMix/NLG/src/expert_soup.py",
"repo_id": "AdaMix",
"token_count": 1561
}
| 24 |
# AdaMix (Mixture-of-Adaptations)
This is the implementation of the paper [AdaMix: Mixture-of-Adaptations for Parameter-efficient Model Tuning](https://arxiv.org/abs/2205.12410).
## Quick links
* [Overview](#overview)
* [GLUE Benchmark](#adapting-to-the-glue-benchmark)
* [BERT-base](#main-results-bert-base)
* [RoBERTa-large](#main-results-roberta-large)
* [Averaging Weights vs. Random Routing](#comparison-between-averaging-weights-and-random-single-adapter)
* [Checkpoints](#download-adamix-checkpoints)
* [Run the model](#steps-to-reproduce-our-results)
* [Quick start](#quick-start)
* [Evaluate checkpoints](#evaluate-the-checkpoints)
* [Notes and Acknowledgments](#notes-and-acknowledgments)
* [Contact Information](#contact-information)
* [Citation](#how-do-i-cite-adamix)
## Overview
### Training
<img src="./figures/MixAdapter.png" width="650"/>
### Inference
<img src="./figures/training_inference.png" width="450"/>
## Adapting to the GLUE Benchmark
Our experiments on the GLUE benchmark are run on 16 NVIDIA Tesla V100 GPUs. The results may vary due to different GPU models, drivers, CUDA SDK versions, floating-point precisions, and random seeds.
### Main Results (BERT-base)
<img src="./figures/bert_base.png" width="750"/>
### Main Results (RoBERTa-large)
<img src="./figures/roberta_large.png" width="750"/>
### Comparison between Averaging Weights and Random Single Adapter
<img src="./figures/violin_plot.png" width="950"/>
## Download AdaMix checkpoints
We release all copies of the Adapter weights so that users can study Adapter aggregation.
| | Dataset | BERT base 110M <br> | RoBERTa large 355M <br> |
|---|----------|--------------------|----------------------|
| | MNLI |[8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_mnli_expert_soup.bin) |[11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_mnli_expert_soup.bin) |
| | SST2 |[8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_sst2_expert_soup.bin) |[11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_sst2_expert_soup.bin) |
| | MRPC |[8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_mrpc_expert_soup.bin) |[11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_mrpc_expert_soup.bin) |
| | CoLA |[8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_cola_expert_soup.bin) |[11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_cola_expert_soup.bin) |
| | QNLI |[8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_qnli_expert_soup.bin) |[11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_qnli_expert_soup.bin) |
| | QQP |[8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_qqp_expert_soup.bin) |[11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_qqp_expert_soup.bin) |
| | RTE |[8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_rte_expert_soup.bin) |[11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_rte_expert_soup.bin) |
| | STSB |[8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_stsb_expert_soup.bin) |[11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_stsb_expert_soup.bin) |
## Steps to reproduce our results
### Create and activate conda env
```console
conda env create -f environment.yml
```
### Install the pre-requisites
```console
pip install -e .
```
We also provide the shell scripts for bert-base and roberta-large.
### Quick start
```console
export num_gpus=1
export PYTHONHASHSEED=0
task_name=mnli
model=roberta-large
export output_dir="./models/${model}/${task_name}"
python -m torch.distributed.launch --nproc_per_node=$num_gpus \
examples/text-classification/run_glue.py \
--model_name_or_path $model \
--task_name $task_name \
--do_train \
--do_eval \
--max_seq_length 128 \
--per_device_train_batch_size 64 \
--per_device_eval_batch_size 32 \
--learning_rate 3e-4 \
--num_train_epochs 20 \
--output_dir $output_dir/model \
--overwrite_output_dir \
--logging_steps 1000 \
--logging_dir $output_dir/log \
--evaluation_strategy epoch \
--save_strategy epoch \
--warmup_ratio 0.06 \
--apply_expert_soup \
--adapter_size 16 \
--num_experts 4 \
--seed 0 \
--inference_level 3 \
--weight_decay 0.1 \
--sharing_up 1 \
--sharing_down 0 \
--use_consistency_loss 1
```
Most arguments are inherited from transformers and are easy to understand. We further explain some of AdaMix's arguments below (a short sketch of the weight-averaging inference mode follows this list):
* `inference_level`: There are two suggested modes
* `1`: Random Routing
* `3`: Averaging the weights of Adapters for routing (used in AdaMix)
* `num_experts`: Number of Adapters in AdaMix
* `use_consistency_loss`: Two modes.
* `0`: No consistency loss
* `1`: Use consistency loss
* `sharing_up`: There are two modes (`sharing_down` works the same way)
* `0`: No weight sharing
* `1`: Sharing Project-up layer weights in Adapter
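For intuition, here is a minimal sketch (not the code used in this repository) of what the weight-averaging inference mode (`inference_level 3`) does conceptually: the parameters of the `num_experts` adapter copies are merged into a single adapter before inference. In the actual implementation the average is weighted by a softmax over learned expert scores, which reduces to a uniform average when those scores are untrained.
```python
import torch
# Hypothetical setup: 4 adapter copies, each with a (16 x 768) projection matrix.
num_experts, r, dim = 4, 16, 768
expert_weights = [torch.randn(r, dim) for _ in range(num_experts)]
expert_scores = torch.zeros(num_experts)  # learned expert scores; zeros -> uniform average
# Merge the copies into one adapter and use only the merged weights at inference time.
mix = torch.softmax(expert_scores, dim=-1)
merged_weight = sum(w * s for w, s in zip(expert_weights, mix))
print(merged_weight.shape)  # torch.Size([16, 768])
```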
### Evaluate the checkpoints
Create a checkpoints directory and download the checkpoints of the corresponding tasks into it. Using MNLI as an example, pass your checkpoint path via the **expert_soup_path** argument.
```console
export num_gpus=1
export PYTHONHASHSEED=0
task_name=mnli
model=roberta-large
export output_dir="./models/${model}/${task_name}"
python -m torch.distributed.launch --nproc_per_node=$num_gpus \
examples/text-classification/run_glue.py \
--model_name_or_path $model \
--task_name $task_name \
--do_eval \
--expert_soup_path ./checkpoints/pytorch_model_${task_name}_expert_soup.bin \
--max_seq_length 128 \
--per_device_train_batch_size 64 \
--per_device_eval_batch_size 32 \
--learning_rate 3e-4 \
--num_train_epochs 20 \
--output_dir $output_dir/model \
--overwrite_output_dir \
--logging_steps 1000 \
--logging_dir $output_dir/log \
--evaluation_strategy epoch \
--save_strategy epoch \
--warmup_ratio 0.06 \
--apply_expert_soup \
--adapter_size 16 \
--num_experts 4 \
--seed 0 \
--inference_level 3 \
--weight_decay 0.1 \
--sharing_up 1 \
--sharing_down 0 \
--use_consistency_loss 1
```
## Notes and Acknowledgments
The implementation is based on https://github.com/huggingface/transformers <br>
We also used some code from: https://github.com/microsoft/LoRA
## Contact Information
For personal communication related to this package, please contact [Yaqing Wang](https://yaqingwang.github.io/) ([email protected]), [Sahaj Agarwal](https://www.linkedin.com/in/sahaj-agarwal-89aa49174/) ([email protected]), [Subhabrata (Subho) Mukherjee](https://www.microsoft.com/en-us/research/people/submukhe/) ([email protected]) or [Xiaodong Liu](https://sites.google.com/view/buptxiaodong/home) ([email protected]).
## How do I cite AdaMix?
```
@article{wang2022adamix,
title={AdaMix: Mixture-of-Adaptations for Parameter-efficient Model Tuning},
author={Wang, Yaqing and Agarwal, Sahaj and Mukherjee, Subhabrata and Liu, Xiaodong and Gao, Jing and Awadallah, Ahmed Hassan and Gao, Jianfeng},
journal={arXiv preprint arXiv:2205.12410},
year={2022}
}
```
|
AdaMix/README.md/0
|
{
"file_path": "AdaMix/README.md",
"repo_id": "AdaMix",
"token_count": 2792
}
| 25 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Converting Tensorflow Checkpoints
=======================================================================================================================
A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints to models
that can then be loaded using the ``from_pretrained`` methods of the library.
.. note::
Since 2.3.0 the conversion script is now part of the transformers CLI (**transformers-cli**) available in any
transformers >= 2.3.0 installation.
The documentation below reflects the **transformers-cli convert** command format.
BERT
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can convert any TensorFlow checkpoint for BERT (in particular `the pre-trained models released by Google
<https://github.com/google-research/bert#pre-trained-models>`_\ ) in a PyTorch save file by using the
:prefix_link:`convert_bert_original_tf_checkpoint_to_pytorch.py
<src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py>` script.
This CLI takes as input a TensorFlow checkpoint (three files starting with ``bert_model.ckpt``\ ) and the associated
configuration file (\ ``bert_config.json``\ ), and creates a PyTorch model for this configuration, loads the weights
from the TensorFlow checkpoint in the PyTorch model and saves the resulting model in a standard PyTorch save file that
can be imported using ``from_pretrained()`` (see example in :doc:`quicktour` , `run_glue.py
<https://github.com/huggingface/transformers/blob/master/examples/text-classification/run_glue.py>`_\ ).
You only need to run this conversion script **once** to get a PyTorch model. You can then disregard the TensorFlow
checkpoint (the three files starting with ``bert_model.ckpt``\ ) but be sure to keep the configuration file (\
``bert_config.json``\ ) and the vocabulary file (\ ``vocab.txt``\ ) as these are needed for the PyTorch model too.
To run this specific conversion script you will need to have TensorFlow and PyTorch installed (\ ``pip install
tensorflow``\ ). The rest of the repository only requires PyTorch.
Here is an example of the conversion process for a pre-trained ``BERT-Base Uncased`` model:
.. code-block:: shell
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
transformers-cli convert --model_type bert \
--tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
--config $BERT_BASE_DIR/bert_config.json \
--pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin
You can download Google's pre-trained models for the conversion `here
<https://github.com/google-research/bert#pre-trained-models>`__.
ALBERT
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Convert TensorFlow model checkpoints of ALBERT to PyTorch using the
:prefix_link:`convert_albert_original_tf_checkpoint_to_pytorch.py
<src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py>` script.
The CLI takes as input a TensorFlow checkpoint (three files starting with ``model.ckpt-best``\ ) and the accompanying
configuration file (\ ``albert_config.json``\ ), then creates and saves a PyTorch model. To run this conversion you
will need to have TensorFlow and PyTorch installed.
Here is an example of the conversion process for the pre-trained ``ALBERT Base`` model:
.. code-block:: shell
export ALBERT_BASE_DIR=/path/to/albert/albert_base
transformers-cli convert --model_type albert \
--tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
--config $ALBERT_BASE_DIR/albert_config.json \
--pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin
You can download Google's pre-trained models for the conversion `here
<https://github.com/google-research/albert#pre-trained-models>`__.
OpenAI GPT
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint is
saved in the same format as the OpenAI pretrained model (see `here <https://github.com/openai/finetune-transformer-lm>`__\
)
.. code-block:: shell
export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights
transformers-cli convert --model_type gpt \
--tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
[--config OPENAI_GPT_CONFIG] \
[--finetuning_task_name OPENAI_GPT_FINETUNED_TASK] \
OpenAI GPT-2
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (see `here
<https://github.com/openai/gpt-2>`__\ )
.. code-block:: shell
export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights
transformers-cli convert --model_type gpt2 \
--tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
[--config OPENAI_GPT2_CONFIG] \
[--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]
Transformer-XL
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here is an example of the conversion process for a pre-trained Transformer-XL model (see `here
<https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models>`__\ )
.. code-block:: shell
export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint
transformers-cli convert --model_type transfo_xl \
--tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
[--config TRANSFO_XL_CONFIG] \
[--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]
XLNet
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here is an example of the conversion process for a pre-trained XLNet model:
.. code-block:: shell
export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config
transformers-cli convert --model_type xlnet \
--tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \
--config $TRANSFO_XL_CONFIG_PATH \
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
[--finetuning_task_name XLNET_FINETUNED_TASK] \
XLM
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here is an example of the conversion process for a pre-trained XLM model:
.. code-block:: shell
export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint
transformers-cli convert --model_type xlm \
--tf_checkpoint $XLM_CHECKPOINT_PATH \
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT
[--config XML_CONFIG] \
[--finetuning_task_name XML_FINETUNED_TASK]
T5
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here is an example of the conversion process for a pre-trained T5 model:
.. code-block:: shell
export T5=/path/to/t5/uncased_L-12_H-768_A-12
transformers-cli convert --model_type t5 \
--tf_checkpoint $T5/t5_model.ckpt \
--config $T5/t5_config.json \
--pytorch_dump_output $T5/pytorch_model.bin
|
AdaMix/docs/source/converting_tensorflow_models.rst/0
|
{
"file_path": "AdaMix/docs/source/converting_tensorflow_models.rst",
"repo_id": "AdaMix",
"token_count": 2671
}
| 26 |
Transformers
=======================================================================================================================
State-of-the-art Natural Language Processing for Pytorch and TensorFlow 2.0.
🤗 Transformers (formerly known as `pytorch-transformers` and `pytorch-pretrained-bert`) provides general-purpose
architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural Language Understanding (NLU) and Natural
Language Generation (NLG) with over 32 pretrained models in 100+ languages and deep interoperability between
TensorFlow 2.0 and PyTorch.
This is the documentation of our repository `transformers <https://github.com/huggingface/transformers>`_.
Features
-----------------------------------------------------------------------------------------------------------------------
- High performance on NLU and NLG tasks
- Low barrier to entry for educators and practitioners
State-of-the-art NLP for everyone:
- Deep learning researchers
- Hands-on practitioners
- AI/ML/NLP teachers and educators
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Lower compute costs, smaller carbon footprint:
- Researchers can share trained models instead of always retraining
- Practitioners can reduce compute time and production costs
- 8 architectures with over 30 pretrained models, some in more than 100 languages
Choose the right framework for every part of a model's lifetime:
- Train state-of-the-art models in 3 lines of code
- Deep interoperability between TensorFlow 2.0 and PyTorch models
- Move a single model between TF2.0/PyTorch frameworks at will
- Seamlessly pick the right framework for training, evaluation, production
Experimental support for Flax with a few models right now, expected to grow in the coming months.
`All the model checkpoints <https://huggingface.co/models>`__ are seamlessly integrated from the huggingface.co `model
hub <https://huggingface.co>`__ where they are uploaded directly by `users <https://huggingface.co/users>`__ and
`organizations <https://huggingface.co/organizations>`__.
Current number of checkpoints: |checkpoints|
.. |checkpoints| image:: https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/models&color=brightgreen
Contents
-----------------------------------------------------------------------------------------------------------------------
The documentation is organized in five parts:
- **GET STARTED** contains a quick tour, the installation instructions and some useful information about our philosophy
and a glossary.
- **USING 🤗 TRANSFORMERS** contains general tutorials on how to use the library.
- **ADVANCED GUIDES** contains more advanced guides that are more specific to a given script or part of the library.
- **RESEARCH** focuses on tutorials that have less to do with how to use the library and more with general research on
transformer models.
- The last three sections contain the documentation of each public class and function, grouped in:
- **MAIN CLASSES** for the main classes exposing the important APIs of the library.
- **MODELS** for the classes and functions related to each model implemented in the library.
- **INTERNAL HELPERS** for the classes and functions we use internally.
The library currently contains PyTorch, Tensorflow and Flax implementations, pretrained model weights, usage scripts
and conversion utilities for the following models:
..
This list is updated automatically from the README with `make fix-copies`. Do not update manually!
1. :doc:`ALBERT <model_doc/albert>` (from Google Research and the Toyota Technological Institute at Chicago) released
with the paper `ALBERT: A Lite BERT for Self-supervised Learning of Language Representations
<https://arxiv.org/abs/1909.11942>`__, by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush
Sharma, Radu Soricut.
2. :doc:`BART <model_doc/bart>` (from Facebook) released with the paper `BART: Denoising Sequence-to-Sequence
Pre-training for Natural Language Generation, Translation, and Comprehension
<https://arxiv.org/pdf/1910.13461.pdf>`__ by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman
Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
3. :doc:`BARThez <model_doc/barthez>` (from École polytechnique) released with the paper `BARThez: a Skilled Pretrained
French Sequence-to-Sequence Model <https://arxiv.org/abs/2010.12321>`__ by Moussa Kamal Eddine, Antoine J.-P.
Tixier, Michalis Vazirgiannis.
4. :doc:`BERT <model_doc/bert>` (from Google) released with the paper `BERT: Pre-training of Deep Bidirectional
Transformers for Language Understanding <https://arxiv.org/abs/1810.04805>`__ by Jacob Devlin, Ming-Wei Chang,
Kenton Lee and Kristina Toutanova.
5. :doc:`BERT For Sequence Generation <model_doc/bertgeneration>` (from Google) released with the paper `Leveraging
Pre-trained Checkpoints for Sequence Generation Tasks <https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi
Narayan, Aliaksei Severyn.
6. :doc:`Blenderbot <model_doc/blenderbot>` (from Facebook) released with the paper `Recipes for building an
open-domain chatbot <https://arxiv.org/abs/2004.13637>`__ by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary
Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
7. :doc:`BlenderbotSmall <model_doc/blenderbot_small>` (from Facebook) released with the paper `Recipes for building an
open-domain chatbot <https://arxiv.org/abs/2004.13637>`__ by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary
Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
8. :doc:`BORT <model_doc/bort>` (from Alexa) released with the paper `Optimal Subarchitecture Extraction For BERT
<https://arxiv.org/abs/2010.10499>`__ by Adrian de Wynter and Daniel J. Perry.
9. :doc:`CamemBERT <model_doc/camembert>` (from Inria/Facebook/Sorbonne) released with the paper `CamemBERT: a Tasty
French Language Model <https://arxiv.org/abs/1911.03894>`__ by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz
Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
10. :doc:`ConvBERT <model_doc/convbert>` (from YituTech) released with the paper `ConvBERT: Improving BERT with
Span-based Dynamic Convolution <https://arxiv.org/abs/2008.02496>`__ by Zihang Jiang, Weihao Yu, Daquan Zhou,
Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
11. :doc:`CTRL <model_doc/ctrl>` (from Salesforce) released with the paper `CTRL: A Conditional Transformer Language
Model for Controllable Generation <https://arxiv.org/abs/1909.05858>`__ by Nitish Shirish Keskar*, Bryan McCann*,
Lav R. Varshney, Caiming Xiong and Richard Socher.
12. :doc:`DeBERTa <model_doc/deberta>` (from Microsoft) released with the paper `DeBERTa: Decoding-enhanced BERT with
Disentangled Attention <https://arxiv.org/abs/2006.03654>`__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu
Chen.
13. :doc:`DeBERTa-v2 <model_doc/deberta_v2>` (from Microsoft) released with the paper `DeBERTa: Decoding-enhanced BERT
with Disentangled Attention <https://arxiv.org/abs/2006.03654>`__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao,
Weizhu Chen.
14. :doc:`DialoGPT <model_doc/dialogpt>` (from Microsoft Research) released with the paper `DialoGPT: Large-Scale
Generative Pre-training for Conversational Response Generation <https://arxiv.org/abs/1911.00536>`__ by Yizhe
Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
15. :doc:`DistilBERT <model_doc/distilbert>` (from HuggingFace), released together with the paper `DistilBERT, a
distilled version of BERT: smaller, faster, cheaper and lighter <https://arxiv.org/abs/1910.01108>`__ by Victor
Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into `DistilGPT2
<https://github.com/huggingface/transformers/tree/master/examples/distillation>`__, RoBERTa into `DistilRoBERTa
<https://github.com/huggingface/transformers/tree/master/examples/distillation>`__, Multilingual BERT into
`DistilmBERT <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__ and a German
version of DistilBERT.
16. :doc:`DPR <model_doc/dpr>` (from Facebook) released with the paper `Dense Passage Retrieval for Open-Domain
Question Answering <https://arxiv.org/abs/2004.04906>`__ by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick
Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
17. :doc:`ELECTRA <model_doc/electra>` (from Google Research/Stanford University) released with the paper `ELECTRA:
Pre-training text encoders as discriminators rather than generators <https://arxiv.org/abs/2003.10555>`__ by Kevin
Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
18. :doc:`FlauBERT <model_doc/flaubert>` (from CNRS) released with the paper `FlauBERT: Unsupervised Language Model
Pre-training for French <https://arxiv.org/abs/1912.05372>`__ by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne,
Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
19. :doc:`Funnel Transformer <model_doc/funnel>` (from CMU/Google Brain) released with the paper `Funnel-Transformer:
Filtering out Sequential Redundancy for Efficient Language Processing <https://arxiv.org/abs/2006.03236>`__ by
Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
20. :doc:`GPT <model_doc/gpt>` (from OpenAI) released with the paper `Improving Language Understanding by Generative
Pre-Training <https://blog.openai.com/language-unsupervised/>`__ by Alec Radford, Karthik Narasimhan, Tim Salimans
and Ilya Sutskever.
21. :doc:`GPT-2 <model_doc/gpt2>` (from OpenAI) released with the paper `Language Models are Unsupervised Multitask
Learners <https://blog.openai.com/better-language-models/>`__ by Alec Radford*, Jeffrey Wu*, Rewon Child, David
Luan, Dario Amodei** and Ilya Sutskever**.
22. :doc:`I-BERT <model_doc/ibert>` (from Berkeley) released with the paper `I-BERT: Integer-only BERT Quantization
<https://arxiv.org/abs/2101.01321>`__ by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer
23. :doc:`LayoutLM <model_doc/layoutlm>` (from Microsoft Research Asia) released with the paper `LayoutLM: Pre-training
of Text and Layout for Document Image Understanding <https://arxiv.org/abs/1912.13318>`__ by Yiheng Xu, Minghao Li,
Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
24. :doc:`LED <model_doc/led>` (from AllenAI) released with the paper `Longformer: The Long-Document Transformer
<https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy, Matthew E. Peters, Arman Cohan.
25. :doc:`Longformer <model_doc/longformer>` (from AllenAI) released with the paper `Longformer: The Long-Document
Transformer <https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy, Matthew E. Peters, Arman Cohan.
26. :doc:`LXMERT <model_doc/lxmert>` (from UNC Chapel Hill) released with the paper `LXMERT: Learning Cross-Modality
Encoder Representations from Transformers for Open-Domain Question Answering <https://arxiv.org/abs/1908.07490>`__
by Hao Tan and Mohit Bansal.
27. :doc:`M2M100 <model_doc/m2m_100>` (from Facebook) released with the paper `Beyond English-Centric Multilingual
Machine Translation <https://arxiv.org/abs/2010.11125>`__ by by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi
Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman
Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
28. :doc:`MarianMT <model_doc/marian>` Machine translation models trained using `OPUS <http://opus.nlpl.eu/>`__ data by
Jörg Tiedemann. The `Marian Framework <https://marian-nmt.github.io/>`__ is being developed by the Microsoft
Translator Team.
29. :doc:`MBart <model_doc/mbart>` (from Facebook) released with the paper `Multilingual Denoising Pre-training for
Neural Machine Translation <https://arxiv.org/abs/2001.08210>`__ by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li,
Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
30. :doc:`MBart-50 <model_doc/mbart>` (from Facebook) released with the paper `Multilingual Translation with Extensible
Multilingual Pretraining and Finetuning <https://arxiv.org/abs/2008.00401>`__ by Yuqing Tang, Chau Tran, Xian Li,
Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
31. :doc:`MPNet <model_doc/mpnet>` (from Microsoft Research) released with the paper `MPNet: Masked and Permuted
Pre-training for Language Understanding <https://arxiv.org/abs/2004.09297>`__ by Kaitao Song, Xu Tan, Tao Qin,
Jianfeng Lu, Tie-Yan Liu.
32. :doc:`MT5 <model_doc/mt5>` (from Google AI) released with the paper `mT5: A massively multilingual pre-trained
text-to-text transformer <https://arxiv.org/abs/2010.11934>`__ by Linting Xue, Noah Constant, Adam Roberts, Mihir
Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
33. :doc:`Pegasus <model_doc/pegasus>` (from Google) released with the paper `PEGASUS: Pre-training with Extracted
Gap-sentences for Abstractive Summarization <https://arxiv.org/abs/1912.08777>`__> by Jingqing Zhang, Yao Zhao,
Mohammad Saleh and Peter J. Liu.
34. :doc:`ProphetNet <model_doc/prophetnet>` (from Microsoft Research) released with the paper `ProphetNet: Predicting
Future N-gram for Sequence-to-Sequence Pre-training <https://arxiv.org/abs/2001.04063>`__ by Yu Yan, Weizhen Qi,
Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
35. :doc:`Reformer <model_doc/reformer>` (from Google Research) released with the paper `Reformer: The Efficient
Transformer <https://arxiv.org/abs/2001.04451>`__ by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
36. :doc:`RoBERTa <model_doc/roberta>` (from Facebook), released together with the paper a `Robustly Optimized BERT
Pretraining Approach <https://arxiv.org/abs/1907.11692>`__ by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar
Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
37. :doc:`SpeechToTextTransformer <model_doc/speech_to_text>` (from Facebook), released together with the paper
`fairseq S2T: Fast Speech-to-Text Modeling with fairseq <https://arxiv.org/abs/2010.05171>`__ by Changhan Wang, Yun
Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
38. :doc:`SqueezeBert <model_doc/squeezebert>` released with the paper `SqueezeBERT: What can computer vision teach NLP
about efficient neural networks? <https://arxiv.org/abs/2006.11316>`__ by Forrest N. Iandola, Albert E. Shaw, Ravi
Krishna, and Kurt W. Keutzer.
39. :doc:`T5 <model_doc/t5>` (from Google AI) released with the paper `Exploring the Limits of Transfer Learning with a
Unified Text-to-Text Transformer <https://arxiv.org/abs/1910.10683>`__ by Colin Raffel and Noam Shazeer and Adam
Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
40. :doc:`TAPAS <model_doc/tapas>` (from Google AI) released with the paper `TAPAS: Weakly Supervised Table Parsing via
Pre-training <https://arxiv.org/abs/2004.02349>`__ by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller,
Francesco Piccinno and Julian Martin Eisenschlos.
41. :doc:`Transformer-XL <model_doc/transformerxl>` (from Google/CMU) released with the paper `Transformer-XL:
Attentive Language Models Beyond a Fixed-Length Context <https://arxiv.org/abs/1901.02860>`__ by Zihang Dai*,
Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
42. :doc:`Wav2Vec2 <model_doc/wav2vec2>` (from Facebook AI) released with the paper `wav2vec 2.0: A Framework for
Self-Supervised Learning of Speech Representations <https://arxiv.org/abs/2006.11477>`__ by Alexei Baevski, Henry
Zhou, Abdelrahman Mohamed, Michael Auli.
43. :doc:`XLM <model_doc/xlm>` (from Facebook) released together with the paper `Cross-lingual Language Model
Pretraining <https://arxiv.org/abs/1901.07291>`__ by Guillaume Lample and Alexis Conneau.
44. :doc:`XLM-ProphetNet <model_doc/xlmprophetnet>` (from Microsoft Research) released with the paper `ProphetNet:
Predicting Future N-gram for Sequence-to-Sequence Pre-training <https://arxiv.org/abs/2001.04063>`__ by Yu Yan,
Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
45. :doc:`XLM-RoBERTa <model_doc/xlmroberta>` (from Facebook AI), released together with the paper `Unsupervised
Cross-lingual Representation Learning at Scale <https://arxiv.org/abs/1911.02116>`__ by Alexis Conneau*, Kartikay
Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke
Zettlemoyer and Veselin Stoyanov.
46. :doc:`XLNet <model_doc/xlnet>` (from Google/CMU) released with the paper `XLNet: Generalized Autoregressive
Pretraining for Language Understanding <https://arxiv.org/abs/1906.08237>`__ by Zhilin Yang*, Zihang Dai*, Yiming
Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
47. :doc:`XLSR-Wav2Vec2 <model_doc/xlsr_wav2vec2>` (from Facebook AI) released with the paper `Unsupervised
Cross-Lingual Representation Learning For Speech Recognition <https://arxiv.org/abs/2006.13979>`__ by Alexis
Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
.. _bigtable:
The table below represents the current support in the library for each of those models: whether they have a Python
tokenizer (called "slow"), a "fast" tokenizer backed by the 🤗 Tokenizers library, and whether they have support in
PyTorch, TensorFlow and/or Flax.
..
This table is updated automatically from the auto modules with `make fix-copies`. Do not update manually!
.. rst-class:: center-aligned-table
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Model | Tokenizer slow | Tokenizer fast | PyTorch support | TensorFlow support | Flax Support |
+=============================+================+================+=================+====================+==============+
| ALBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| BART | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| BERT | ✅ | ✅ | ✅ | ✅ | ✅ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Bert Generation | ✅ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Blenderbot | ✅ | ❌ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| BlenderbotSmall | ✅ | ❌ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| CTRL | ✅ | ❌ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| CamemBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| ConvBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| DPR | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| DeBERTa | ✅ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| DeBERTa-v2 | ✅ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| DistilBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| ELECTRA | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Encoder decoder | ❌ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| FairSeq Machine-Translation | ✅ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| FlauBERT | ✅ | ❌ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Funnel Transformer | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| I-BERT | ❌ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| LED | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| LXMERT | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| LayoutLM | ✅ | ✅ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Longformer | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| M2M100 | ✅ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| MPNet | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Marian | ✅ | ❌ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| MobileBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| OpenAI GPT | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| OpenAI GPT-2 | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Pegasus | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| ProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| RAG | ✅ | ❌ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Reformer | ✅ | ✅ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| RetriBERT | ✅ | ✅ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Speech2Text | ✅ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| SqueezeBERT | ✅ | ✅ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| T5 | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| TAPAS | ✅ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Transformer-XL | ✅ | ❌ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Wav2Vec2 | ✅ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| XLM | ✅ | ❌ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| XLM-RoBERTa | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| XLMProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| XLNet | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| mBART | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| mT5 | ✅ | ✅ | ✅ | ✅ | ❌ |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
.. toctree::
:maxdepth: 2
:caption: Get started
quicktour
installation
philosophy
glossary
.. toctree::
:maxdepth: 2
:caption: Using 🤗 Transformers
task_summary
model_summary
preprocessing
training
model_sharing
tokenizer_summary
multilingual
.. toctree::
:maxdepth: 2
:caption: Advanced guides
pretrained_models
examples
custom_datasets
notebooks
community
converting_tensorflow_models
migration
contributing
add_new_model
testing
serialization
.. toctree::
:maxdepth: 2
:caption: Research
bertology
perplexity
benchmarks
.. toctree::
:maxdepth: 2
:caption: Main Classes
main_classes/callback
main_classes/configuration
main_classes/logging
main_classes/model
main_classes/optimizer_schedules
main_classes/output
main_classes/pipelines
main_classes/processors
main_classes/tokenizer
main_classes/trainer
main_classes/feature_extractor
.. toctree::
:maxdepth: 2
:caption: Models
model_doc/albert
model_doc/auto
model_doc/bart
model_doc/barthez
model_doc/bert
model_doc/bertweet
model_doc/bertgeneration
model_doc/blenderbot
model_doc/blenderbot_small
model_doc/bort
model_doc/camembert
model_doc/convbert
model_doc/ctrl
model_doc/deberta
model_doc/deberta_v2
model_doc/dialogpt
model_doc/distilbert
model_doc/dpr
model_doc/electra
model_doc/encoderdecoder
model_doc/flaubert
model_doc/fsmt
model_doc/funnel
model_doc/herbert
model_doc/ibert
model_doc/layoutlm
model_doc/led
model_doc/longformer
model_doc/lxmert
model_doc/marian
model_doc/m2m_100
model_doc/mbart
model_doc/mobilebert
model_doc/mpnet
model_doc/mt5
model_doc/gpt
model_doc/gpt2
model_doc/pegasus
model_doc/phobert
model_doc/prophetnet
model_doc/rag
model_doc/reformer
model_doc/retribert
model_doc/roberta
model_doc/speech_to_text
model_doc/squeezebert
model_doc/t5
model_doc/tapas
model_doc/transformerxl
model_doc/wav2vec2
model_doc/xlm
model_doc/xlmprophetnet
model_doc/xlmroberta
model_doc/xlnet
model_doc/xlsr_wav2vec2
.. toctree::
:maxdepth: 2
:caption: Internal Helpers
internal/modeling_utils
internal/pipelines_utils
internal/tokenization_utils
internal/trainer_utils
internal/generation_utils
internal/file_utils
|
AdaMix/docs/source/index.rst/0
|
{
"file_path": "AdaMix/docs/source/index.rst",
"repo_id": "AdaMix",
"token_count": 12808
}
| 27 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Processors
-----------------------------------------------------------------------------------------------------------------------
This library includes processors for several traditional tasks. These processors can be used to process a dataset into
examples that can be fed to a model.
Processors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
All processors follow the same architecture which is that of the
:class:`~transformers.data.processors.utils.DataProcessor`. The processor returns a list of
:class:`~transformers.data.processors.utils.InputExample`. These
:class:`~transformers.data.processors.utils.InputExample` can be converted to
:class:`~transformers.data.processors.utils.InputFeatures` in order to be fed to the model.
.. autoclass:: transformers.data.processors.utils.DataProcessor
:members:
.. autoclass:: transformers.data.processors.utils.InputExample
:members:
.. autoclass:: transformers.data.processors.utils.InputFeatures
:members:
GLUE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`General Language Understanding Evaluation (GLUE) <https://gluebenchmark.com/>`__ is a benchmark that evaluates the
performance of models across a diverse set of existing NLU tasks. It was released together with the paper `GLUE: A
multi-task benchmark and analysis platform for natural language understanding
<https://openreview.net/pdf?id=rJ4km2R5t7>`__
This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched), CoLA, SST2, STSB,
QQP, QNLI, RTE and WNLI.
Those processors are:
- :class:`~transformers.data.processors.utils.MrpcProcessor`
- :class:`~transformers.data.processors.utils.MnliProcessor`
- :class:`~transformers.data.processors.utils.MnliMismatchedProcessor`
- :class:`~transformers.data.processors.utils.Sst2Processor`
- :class:`~transformers.data.processors.utils.StsbProcessor`
- :class:`~transformers.data.processors.utils.QqpProcessor`
- :class:`~transformers.data.processors.utils.QnliProcessor`
- :class:`~transformers.data.processors.utils.RteProcessor`
- :class:`~transformers.data.processors.utils.WnliProcessor`
Additionally, the following method can be used to load values from a data file and convert them to a list of
:class:`~transformers.data.processors.utils.InputExample`.
.. automethod:: transformers.data.processors.glue.glue_convert_examples_to_features
Example usage
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
An example using these processors is given in the `run_glue.py
<https://github.com/huggingface/pytorch-transformers/blob/master/examples/text-classification/run_glue.py>`__ script.
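For quick experimentation, a minimal sketch of the processor + conversion workflow is given below. Here
``mrpc_data_dir`` is a placeholder pointing to a local copy of the MRPC data, and the tokenizer choice is illustrative:
.. code-block::
    from transformers import AutoTokenizer
    from transformers.data.processors.glue import MrpcProcessor, glue_convert_examples_to_features
    # Load the dev examples from a local MRPC directory (placeholder path)
    processor = MrpcProcessor()
    examples = processor.get_dev_examples(mrpc_data_dir)
    # Convert the InputExamples into InputFeatures ready to be fed to a model
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    features = glue_convert_examples_to_features(
        examples,
        tokenizer,
        max_length=128,
        task="mrpc",
    )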
XNLI
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`The Cross-Lingual NLI Corpus (XNLI) <https://www.nyu.edu/projects/bowman/xnli/>`__ is a benchmark that evaluates the
quality of cross-lingual text representations. XNLI is a crowd-sourced dataset based on `MultiNLI
<http://www.nyu.edu/projects/bowman/multinli/>`__: pairs of text are labeled with textual entailment annotations for 15
different languages (including both high-resource languages such as English and low-resource languages such as Swahili).
It was released together with the paper `XNLI: Evaluating Cross-lingual Sentence Representations
<https://arxiv.org/abs/1809.05053>`__
This library hosts the processor to load the XNLI data:
- :class:`~transformers.data.processors.utils.XnliProcessor`
Please note that since the gold labels are available on the test set, evaluation is performed on the test set.
An example using these processors is given in the `run_xnli.py
<https://github.com/huggingface/pytorch-transformers/blob/master/examples/text-classification/run_xnli.py>`__ script.
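A minimal sketch of loading XNLI examples with the processor is given below. Here ``xnli_data_dir`` is a placeholder
pointing to a local copy of the XNLI data, and the language choice is illustrative:
.. code-block::
    from transformers.data.processors.xnli import XnliProcessor
    # Evaluation is performed on the test set, so we load the test examples (placeholder path)
    processor = XnliProcessor(language="de")
    examples = processor.get_test_examples(xnli_data_dir)
    print(examples[0].text_a, examples[0].text_b, examples[0].label)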
SQuAD
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`The Stanford Question Answering Dataset (SQuAD) <https://rajpurkar.github.io/SQuAD-explorer//>`__ is a benchmark that
evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version
(v1.1) was released together with the paper `SQuAD: 100,000+ Questions for Machine Comprehension of Text
<https://arxiv.org/abs/1606.05250>`__. The second version (v2.0) was released alongside the paper `Know What You Don't
Know: Unanswerable Questions for SQuAD <https://arxiv.org/abs/1806.03822>`__.
This library hosts a processor for each of the two versions:
Processors
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Those processors are:
- :class:`~transformers.data.processors.utils.SquadV1Processor`
- :class:`~transformers.data.processors.utils.SquadV2Processor`
They both inherit from the abstract class :class:`~transformers.data.processors.utils.SquadProcessor`
.. autoclass:: transformers.data.processors.squad.SquadProcessor
:members:
Additionally, the following method can be used to convert SQuAD examples into
:class:`~transformers.data.processors.utils.SquadFeatures` that can be used as model inputs.
.. automethod:: transformers.data.processors.squad.squad_convert_examples_to_features
These processors as well as the aforementioned method can be used with files containing the data as well as with the
`tensorflow_datasets` package. Examples are given below.
Example usage
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here is an example using the processors as well as the conversion method using data files:
.. code-block::
# Loading a V2 processor
processor = SquadV2Processor()
examples = processor.get_dev_examples(squad_v2_data_dir)
# Loading a V1 processor
processor = SquadV1Processor()
examples = processor.get_dev_examples(squad_v1_data_dir)
features = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=args.doc_stride,
max_query_length=max_query_length,
is_training=not evaluate,
)
Using `tensorflow_datasets` is as easy as using a data file:
.. code-block::
    # tensorflow_datasets only handles SQuAD V1.
tfds_examples = tfds.load("squad")
examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
features = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=args.doc_stride,
max_query_length=max_query_length,
is_training=not evaluate,
)
Another example using these processors is given in the :prefix_link:`run_squad.py
<examples/question-answering/run_squad.py>` script.
|
AdaMix/docs/source/main_classes/processors.rst/0
|
{
"file_path": "AdaMix/docs/source/main_classes/processors.rst",
"repo_id": "AdaMix",
"token_count": 2321
}
| 28 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
CTRL
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CTRL model was proposed in `CTRL: A Conditional Transformer Language Model for Controllable Generation
<https://arxiv.org/abs/1909.05858>`_ by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and
Richard Socher. It's a causal (unidirectional) transformer pre-trained using language modeling on a very large corpus
of ~140 GB of text data with the first token reserved as a control code (such as Links, Books, Wikipedia etc.).
The abstract from the paper is the following:
*Large-scale language models show promising text generation capabilities, but users cannot easily control particular
aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model,
trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were
derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning while
providing more explicit control over text generation. These codes also allow CTRL to predict which parts of the
training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data
via model-based source attribution.*
Tips:
- CTRL makes use of control codes to generate text: it requires generations to be started by certain words, sentences
or links to generate coherent text. Refer to the `original implementation <https://github.com/salesforce/ctrl>`__ for
more information.
- CTRL is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than
the left.
- CTRL was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next
token in a sequence. Leveraging this feature allows CTRL to generate syntactically coherent text as it can be
observed in the `run_generation.py` example script.
- The PyTorch models can take the `past` as input, which is the previously computed key/value attention pairs. Using
this `past` value prevents the model from re-computing pre-computed values in the context of text generation. See
`reusing the past in generative models <../quickstart.html#using-the-past>`__ for more information on the usage of
this argument.
The original code can be found `here <https://github.com/salesforce/ctrl>`__.
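A minimal generation sketch is given below; the ``ctrl`` checkpoint name, the ``Links`` control code and the generation
settings are illustrative:
.. code-block::
    from transformers import CTRLTokenizer, CTRLLMHeadModel
    tokenizer = CTRLTokenizer.from_pretrained("ctrl")
    model = CTRLLMHeadModel.from_pretrained("ctrl")
    # The prompt must start with a control code, e.g. "Links"
    input_ids = tokenizer("Links Hello, my dog is cute", return_tensors="pt").input_ids
    # A repetition penalty is recommended for CTRL to avoid degenerate loops
    generated_ids = model.generate(input_ids, max_length=50, repetition_penalty=1.2)
    print(tokenizer.decode(generated_ids[0]))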
CTRLConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.CTRLConfig
:members:
CTRLTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.CTRLTokenizer
:members: save_vocabulary
CTRLModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.CTRLModel
:members: forward
CTRLLMHeadModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.CTRLLMHeadModel
:members: forward
CTRLForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.CTRLForSequenceClassification
:members: forward
TFCTRLModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFCTRLModel
:members: call
TFCTRLLMHeadModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFCTRLLMHeadModel
:members: call
TFCTRLForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFCTRLForSequenceClassification
:members: call
|
AdaMix/docs/source/model_doc/ctrl.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/ctrl.rst",
"repo_id": "AdaMix",
"token_count": 1070
}
| 29 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
LED
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The LED model was proposed in `Longformer: The Long-Document Transformer <https://arxiv.org/abs/2004.05150>`__ by Iz
Beltagy, Matthew E. Peters, Arman Cohan.
The abstract from the paper is the following:
*Transformer-based models are unable to process long sequences due to their self-attention operation, which scales
quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention
mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or
longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local
windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we
evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In
contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our
pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on
WikiHop and TriviaQA. We finally introduce the Longformer-Encoder-Decoder (LED), a Longformer variant for supporting
long document generative sequence-to-sequence tasks, and demonstrate its effectiveness on the arXiv summarization
dataset.*
Tips:
- :class:`~transformers.LEDForConditionalGeneration` is an extension of
:class:`~transformers.BartForConditionalGeneration` exchanging the traditional *self-attention* layer with
*Longformer*'s *chunked self-attention* layer. :class:`~transformers.LEDTokenizer` is an alias of
:class:`~transformers.BartTokenizer`.
- LED works very well on long-range *sequence-to-sequence* tasks where the ``input_ids`` largely exceed a length of
1024 tokens.
- LED pads the ``input_ids`` to be a multiple of ``config.attention_window`` if required. Therefore a small speed-up is
  gained when :class:`~transformers.LEDTokenizer` is used with the ``pad_to_multiple_of`` argument.
- LED makes use of *global attention* by means of the ``global_attention_mask`` (see
:class:`~transformers.LongformerModel`). For summarization, it is advised to put *global attention* only on the first
``<s>`` token. For question answering, it is advised to put *global attention* on all tokens of the question.
- To fine-tune LED on all 16384 input tokens, it is necessary to enable *gradient checkpointing* by setting
  ``config.gradient_checkpointing = True``.
- A notebook showing how to evaluate LED can be accessed `here
  <https://colab.research.google.com/drive/12INTTR6n64TzS4RrXZxMSXfrOd9Xzamo?usp=sharing>`__.
- A notebook showing how to fine-tune LED can be accessed `here
  <https://colab.research.google.com/drive/12LjJazBl7Gam0XBPy_y0CTOJZeZ34c2v?usp=sharing>`__.
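Putting the tips above together, a minimal summarization sketch is given below; the ``allenai/led-base-16384``
checkpoint and the generation settings are illustrative, and ``long_document`` is a placeholder string:
.. code-block::
    import torch
    from transformers import LEDTokenizer, LEDForConditionalGeneration
    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
    inputs = tokenizer(long_document, return_tensors="pt")
    # Global attention only on the first <s> token, as advised for summarization
    global_attention_mask = torch.zeros_like(inputs["input_ids"])
    global_attention_mask[:, 0] = 1
    summary_ids = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        global_attention_mask=global_attention_mask,
        max_length=256,
    )
    print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))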
LEDConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LEDConfig
:members:
LEDTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LEDTokenizer
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
create_token_type_ids_from_sequences, save_vocabulary
LEDTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LEDTokenizerFast
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
create_token_type_ids_from_sequences, save_vocabulary
LED specific outputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.models.led.modeling_led.LEDEncoderBaseModelOutput
:members:
.. autoclass:: transformers.models.led.modeling_led.LEDSeq2SeqModelOutput
:members:
.. autoclass:: transformers.models.led.modeling_led.LEDSeq2SeqLMOutput
:members:
.. autoclass:: transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput
:members:
.. autoclass:: transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput
:members:
.. autoclass:: transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput
:members:
.. autoclass:: transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput
:members:
.. autoclass:: transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput
:members:
LEDModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LEDModel
:members: forward
LEDForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LEDForConditionalGeneration
:members: forward
LEDForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LEDForSequenceClassification
:members: forward
LEDForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.LEDForQuestionAnswering
:members: forward
TFLEDModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFLEDModel
:members: call
TFLEDForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFLEDForConditionalGeneration
:members: call
|
AdaMix/docs/source/model_doc/led.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/led.rst",
"repo_id": "AdaMix",
"token_count": 1730
}
| 30 |
..
Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Speech2Text
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Speech2Text model was proposed in `fairseq S2T: Fast Speech-to-Text Modeling with fairseq
<https://arxiv.org/abs/2010.05171>`__ by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino. It's a
transformer-based seq2seq (encoder-decoder) model designed for end-to-end Automatic Speech Recognition (ASR) and Speech
Translation (ST). It uses a convolutional downsampler to reduce the length of speech inputs by 3/4th before they are
fed into the encoder. The model is trained with standard autoregressive cross-entropy loss and generates the
transcripts/translations autoregressively. Speech2Text has been fine-tuned on several datasets for ASR and ST:
`LibriSpeech <http://www.openslr.org/12>`__, `CoVoST 2 <https://github.com/facebookresearch/covost>`__, `MuST-C
<https://ict.fbk.eu/must-c/>`__.
The original code can be found `here <https://github.com/pytorch/fairseq/tree/master/examples/speech_to_text>`__.
Inference
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Speech2Text is a speech model that accepts a float tensor of log-mel filter-bank features extracted from the speech
signal. It's a transformer-based seq2seq model, so the transcripts/translations are generated autoregressively. The
:obj:`generate()` method can be used for inference.
The :class:`~transformers.Speech2TextFeatureExtractor` class is responsible for extracting the log-mel filter-bank
features. The :class:`~transformers.Speech2TextProcessor` wraps :class:`~transformers.Speech2TextFeatureExtractor` and
:class:`~transformers.Speech2TextTokenizer` into a single instance to both extract the input features and decode the
predicted token ids.
The feature extractor depends on :obj:`torchaudio` and the tokenizer depends on :obj:`sentencepiece` so be sure to
install those packages before running the examples. You could either install those as extra speech dependencies with
``pip install "transformers[speech, sentencepiece]"`` or install the packages separately with ``pip install torchaudio
sentencepiece``. Also ``torchaudio`` requires the development version of the `libsndfile
<http://www.mega-nerd.com/libsndfile/>`__ package which can be installed via a system package manager. On Ubuntu it can
be installed as follows: ``apt install libsndfile1-dev``
- ASR and Speech Translation
.. code-block::
>>> import torch
>>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
>>> from datasets import load_dataset
>>> import soundfile as sf
>>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
>>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
>>> def map_to_array(batch):
... speech, _ = sf.read(batch["file"])
... batch["speech"] = speech
... return batch
>>> ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)
>>> inputs = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="pt")
>>> generated_ids = model.generate(input_ids=inputs["input_features"], attention_mask=inputs["attention_mask"])
>>> transcription = processor.batch_decode(generated_ids)
- Multilingual speech translation
For multilingual speech translation models, :obj:`eos_token_id` is used as the :obj:`decoder_start_token_id` and
the target language id is forced as the first generated token. To force the target language id as the first
generated token, pass the :obj:`forced_bos_token_id` parameter to the :obj:`generate()` method. The following
example shows how to translate English speech to French text using the `facebook/s2t-medium-mustc-multilingual-st`
checkpoint.
.. code-block::
>>> import torch
>>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
>>> from datasets import load_dataset
>>> import soundfile as sf
>>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-medium-mustc-multilingual-st")
>>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-medium-mustc-multilingual-st")
>>> def map_to_array(batch):
... speech, _ = sf.read(batch["file"])
... batch["speech"] = speech
... return batch
>>> ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)
>>> inputs = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="pt")
    >>> generated_ids = model.generate(input_ids=inputs["input_features"], attention_mask=inputs["attention_mask"], forced_bos_token_id=processor.tokenizer.lang_code_to_id["fr"])
>>> translation = processor.batch_decode(generated_ids)
See the `model hub <https://huggingface.co/models?filter=speech_to_text>`__ to look for Speech2Text checkpoints.
Speech2TextConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.Speech2TextConfig
:members:
Speech2TextTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.Speech2TextTokenizer
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
create_token_type_ids_from_sequences, save_vocabulary
Speech2TextFeatureExtractor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.Speech2TextFeatureExtractor
:members: __call__
Speech2TextProcessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.Speech2TextProcessor
:members: __call__, from_pretrained, save_pretrained, batch_decode, decode, as_target_processor
Speech2TextModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.Speech2TextModel
:members: forward
Speech2TextForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.Speech2TextForConditionalGeneration
:members: forward
|
AdaMix/docs/source/model_doc/speech_to_text.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/speech_to_text.rst",
"repo_id": "AdaMix",
"token_count": 2186
}
| 31 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Philosophy
=======================================================================================================================
🤗 Transformers is an opinionated library built for:
- NLP researchers and educators seeking to use/study/extend large-scale transformers models
- hands-on practitioners who want to fine-tune those models and/or serve them in production
- engineers who just want to download a pretrained model and use it to solve a given NLP task.
The library was designed with two strong goals in mind:
- Be as easy and fast to use as possible:
- We strongly limited the number of user-facing abstractions to learn, in fact, there are almost no abstractions,
just three standard classes required to use each model: :doc:`configuration <main_classes/configuration>`,
:doc:`models <main_classes/model>` and :doc:`tokenizer <main_classes/tokenizer>`.
- All of these classes can be initialized in a simple and unified way from pretrained instances by using a common
:obj:`from_pretrained()` instantiation method which will take care of downloading (if needed), caching and
loading the related class instance and associated data (configurations' hyper-parameters, tokenizers' vocabulary,
and models' weights) from a pretrained checkpoint provided on `Hugging Face Hub
<https://huggingface.co/models>`__ or your own saved checkpoint.
- On top of those three base classes, the library provides two APIs: :func:`~transformers.pipeline` for quickly
using a model (plus its associated tokenizer and configuration) on a given task and
:func:`~transformers.Trainer`/:func:`~transformers.TFTrainer` to quickly train or fine-tune a given model.
- As a consequence, this library is NOT a modular toolbox of building blocks for neural nets. If you want to
extend/build-upon the library, just use regular Python/PyTorch/TensorFlow/Keras modules and inherit from the base
classes of the library to reuse functionalities like model loading/saving.
- Provide state-of-the-art models with performances as close as possible to the original models:
- We provide at least one example for each architecture which reproduces a result provided by the official authors
of said architecture.
- The code is usually as close to the original code base as possible which means some PyTorch code may be not as
    *pytorchic* as it could be as a result of being converted from TensorFlow code, and vice versa.
A few other goals:
- Expose the models' internals as consistently as possible:
- We give access, using a single API, to the full hidden-states and attention weights.
- Tokenizer and base model's API are standardized to easily switch between models.
- Incorporate a subjective selection of promising tools for fine-tuning/investigating these models:
- A simple/consistent way to add new tokens to the vocabulary and embeddings for fine-tuning.
- Simple ways to mask and prune transformer heads.
- Switch easily between PyTorch and TensorFlow 2.0, allowing training using one framework and inference using another.
Main concepts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The library is built around three types of classes for each model:
- **Model classes** such as :class:`~transformers.BertModel`, which are 30+ PyTorch models (`torch.nn.Module
<https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__) or Keras models (`tf.keras.Model
<https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__) that work with the pretrained weights provided in the
library.
- **Configuration classes** such as :class:`~transformers.BertConfig`, which store all the parameters required to build
a model. You don't always need to instantiate these yourself. In particular, if you are using a pretrained model
without any modification, creating the model will automatically take care of instantiating the configuration (which
is part of the model).
- **Tokenizer classes** such as :class:`~transformers.BertTokenizer`, which store the vocabulary for each model and
provide methods for encoding/decoding strings in a list of token embeddings indices to be fed to a model.
All these classes can be instantiated from pretrained instances and saved locally using two methods:
- :obj:`from_pretrained()` lets you instantiate a model/configuration/tokenizer from a pretrained version either
provided by the library itself (the supported models are provided in the list :doc:`here <pretrained_models>`) or
stored locally (or on a server) by the user,
- :obj:`save_pretrained()` lets you save a model/configuration/tokenizer locally so that it can be reloaded using
:obj:`from_pretrained()`.
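A minimal sketch of this round trip is given below; the ``bert-base-uncased`` checkpoint and the local path are
illustrative:
.. code-block::
    from transformers import AutoModel, AutoTokenizer
    # Instantiate from a pretrained checkpoint (downloads and caches the files if needed)
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModel.from_pretrained("bert-base-uncased")
    # Save locally ...
    tokenizer.save_pretrained("./my-checkpoint")
    model.save_pretrained("./my-checkpoint")
    # ... and reload from the local directory
    tokenizer = AutoTokenizer.from_pretrained("./my-checkpoint")
    model = AutoModel.from_pretrained("./my-checkpoint")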
|
AdaMix/docs/source/philosophy.rst/0
|
{
"file_path": "AdaMix/docs/source/philosophy.rst",
"repo_id": "AdaMix",
"token_count": 1394
}
| 32 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Benchmarking the library on inference and training """
from transformers import HfArgumentParser, PyTorchBenchmark, PyTorchBenchmarkArguments
def main():
parser = HfArgumentParser(PyTorchBenchmarkArguments)
try:
benchmark_args = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
begin_error_msg = " ".join(str(e).split(" ")[:-1])
full_error_msg = ""
depreciated_args = eval(str(e).split(" ")[-1])
wrong_args = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in PyTorchBenchmarkArguments.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:])
else:
wrong_args.append(arg)
if len(wrong_args) > 0:
full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
raise ValueError(full_error_msg)
benchmark = PyTorchBenchmark(args=benchmark_args)
benchmark.run()
if __name__ == "__main__":
main()
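# Example invocation (the model name, batch sizes and sequence lengths below are illustrative):
#   python run_benchmark.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128 512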
|
AdaMix/examples/benchmarking/run_benchmark.py/0
|
{
"file_path": "AdaMix/examples/benchmarking/run_benchmark.py",
"repo_id": "AdaMix",
"token_count": 699
}
| 33 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from lightning_base import BaseTransformer, add_generic_args, generic_train
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
"""
A training module for NER. See BaseTransformer for the core options.
"""
mode = "token-classification"
def __init__(self, hparams):
if type(hparams) == dict:
hparams = Namespace(**hparams)
module = import_module("tasks")
try:
token_classification_task_clazz = getattr(module, hparams.task_type)
self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
)
self.labels = self.token_classification_task.get_labels(hparams.labels)
self.pad_token_label_id = CrossEntropyLoss().ignore_index
super().__init__(hparams, len(self.labels), self.mode)
def forward(self, **inputs):
return self.model(**inputs)
def training_step(self, batch, batch_num):
"Compute loss and log."
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don't use token_type_ids
outputs = self(**inputs)
loss = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def prepare_data(self):
"Called to initialize data. Use the call to construct features"
args = self.hparams
for mode in ["train", "dev", "test"]:
cached_features_file = self._feature_file(mode)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
features = self.token_classification_task.convert_examples_to_features(
examples,
self.labels,
args.max_seq_length,
self.tokenizer,
cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
cls_token=self.tokenizer.cls_token,
cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
sep_token=self.tokenizer.sep_token,
sep_token_extra=False,
pad_on_left=bool(self.config.model_type in ["xlnet"]),
pad_token=self.tokenizer.pad_token_id,
pad_token_segment_id=self.tokenizer.pad_token_type_id,
pad_token_label_id=self.pad_token_label_id,
)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
"Load datasets. Called after prepare data."
cached_features_file = self._feature_file(mode)
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
if features[0].token_type_ids is not None:
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
else:
all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
# HACK(we will not use this anymore soon)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
return DataLoader(
TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
)
def validation_step(self, batch, batch_nb):
"""Compute validation""" ""
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don't use token_type_ids
outputs = self(**inputs)
tmp_eval_loss, logits = outputs[:2]
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _eval_end(self, outputs):
"Evaluation called for both Val and Test"
val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
preds = np.concatenate([x["pred"] for x in outputs], axis=0)
preds = np.argmax(preds, axis=2)
out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
label_map = {i: label for i, label in enumerate(self.labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(out_label_list, preds_list),
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
ret = {k: v for k, v in results.items()}
ret["log"] = results
return ret, preds_list, out_label_list
def validation_epoch_end(self, outputs):
# when stable
ret, preds, targets = self._eval_end(outputs)
logs = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def test_epoch_end(self, outputs):
# updating to test_epoch_end instead of deprecated test_end
ret, predictions, targets = self._eval_end(outputs)
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
logs = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def add_model_specific_args(parser, root_dir):
# Add NER specific options
BaseTransformer.add_model_specific_args(parser, root_dir)
parser.add_argument(
"--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--labels",
default="",
type=str,
help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
)
parser.add_argument(
"--gpus",
default=0,
type=int,
help="The number of GPUs allocated for this, it is by default 0 meaning none",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
return parser
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args()
model = NERTransformer(args)
trainer = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True)))
model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
|
AdaMix/examples/legacy/pytorch-lightning/run_ner.py/0
|
{
"file_path": "AdaMix/examples/legacy/pytorch-lightning/run_ner.py",
"repo_id": "AdaMix",
"token_count": 4279
}
| 34 |
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
"""Save max(src_len, tgt_len) for each example to allow dynamic batching."""
tok = AutoTokenizer.from_pretrained(tokenizer_name)
train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
pad = tok.pad_token_id
def get_lens(ds):
dl = tqdm(
DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
desc=str(ds.len_file),
)
max_lens = []
for batch in dl:
src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
if consider_target:
for src, tgt in zip(src_lens, tgt_lens):
max_lens.append(max(src, tgt))
else:
max_lens.extend(src_lens)
return max_lens
train_lens = get_lens(train_ds)
val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
val_lens = get_lens(val_ds)
pickle_save(train_lens, train_ds.len_file)
pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
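# Example invocation via python-fire (tokenizer name and data_dir are illustrative; data_dir is expected to
# contain train.source/train.target and val.source/val.target files):
#   python save_len_file.py --tokenizer_name facebook/bart-large --data_dir ./cnn_dm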
|
AdaMix/examples/legacy/seq2seq/save_len_file.py/0
|
{
"file_path": "AdaMix/examples/legacy/seq2seq/save_len_file.py",
"repo_id": "AdaMix",
"token_count": 869
}
| 35 |
#!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for multiple choice.
"""
# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional, Union
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.4.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: int = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_val_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
},
)
def __post_init__(self):
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
"""
Data collator that will dynamically pad the inputs for multiple choice received.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__(self, features):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature.pop(label_name) for feature in features]
batch_size = len(features)
num_choices = len(features[0]["input_ids"])
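        # Each feature holds `num_choices` encoded candidate sequences; flatten them into
        # (batch_size * num_choices) single examples so the tokenizer can pad them all in one call.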
flattened_features = [
[{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
]
flattened_features = sum(flattened_features, [])
batch = self.tokenizer.pad(
flattened_features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
# Un-flatten
batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
# Add back labels
batch["labels"] = torch.tensor(labels, dtype=torch.int64)
return batch
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
datasets = load_dataset(extension, data_files=data_files)
else:
# Downloading and loading the swag dataset from the hub.
datasets = load_dataset("swag", "regular")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warn(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(examples):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
]
# Flatten out
first_sentences = sum(first_sentences, [])
second_sentences = sum(second_sentences, [])
# Tokenize
tokenized_examples = tokenizer(
first_sentences,
second_sentences,
truncation=True,
max_length=max_seq_length,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Metric
def compute_metrics(eval_predictions):
predictions, label_ids = eval_predictions
preds = np.argmax(predictions, axis=1)
return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
|
AdaMix/examples/multiple-choice/run_swag.py/0
|
{
"file_path": "AdaMix/examples/multiple-choice/run_swag.py",
"repo_id": "AdaMix",
"token_count": 7144
}
| 36 |
#!/usr/bin/env python3
# Copyright 2018 CMU and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Bertology: this script shows how you can explore the internals of the models in the library to:
- compute the entropy of the head attentions
- compute the importance of each head
- prune (remove) the low importance head.
Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650)
which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1
"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch.utils.data import DataLoader, SequentialSampler, Subset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
GlueDataset,
default_data_collator,
glue_compute_metrics,
glue_output_modes,
glue_processors,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def entropy(p):
""" Compute the entropy of a probability distribution """
plogp = p * torch.log(p)
plogp[p == 0] = 0
return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
""" Print a 2D tensor """
logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
for row in range(len(tensor)):
if tensor.dtype != torch.long:
logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
else:
logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
"""This method shows how to compute:
- head attention entropy
- head importance scores according to http://arxiv.org/abs/1905.10650
"""
# Prepare our tensors
n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
head_importance = torch.zeros(n_layers, n_heads).to(args.device)
attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
if head_mask is None:
head_mask = torch.ones(n_layers, n_heads).to(args.device)
head_mask.requires_grad_(requires_grad=True)
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
head_mask = None
preds = None
labels = None
tot_tokens = 0.0
for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
for k, v in inputs.items():
inputs[k] = v.to(args.device)
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
outputs = model(**inputs, head_mask=head_mask)
loss, logits, all_attentions = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
if compute_entropy:
for layer, attn in enumerate(all_attentions):
masked_entropy = entropy(attn.detach()) * inputs["attention_mask"].float().unsqueeze(1)
attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
# Also store our logits/labels if we want to compute metrics afterwards
if preds is None:
preds = logits.detach().cpu().numpy()
labels = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
labels = np.append(labels, inputs["labels"].detach().cpu().numpy(), axis=0)
tot_tokens += inputs["attention_mask"].float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
if not args.dont_normalize_global_importance:
head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print/save matrices
np.save(os.path.join(args.output_dir, "attn_entropy.npy"), attn_entropy.detach().cpu().numpy())
np.save(os.path.join(args.output_dir, "head_importance.npy"), head_importance.detach().cpu().numpy())
logger.info("Attention entropies")
print_2d_tensor(attn_entropy)
logger.info("Head importance scores")
print_2d_tensor(head_importance)
logger.info("Head ranked by importance scores")
head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
head_importance.numel(), device=args.device
)
head_ranks = head_ranks.view_as(head_importance)
print_2d_tensor(head_ranks)
return attn_entropy, head_importance, preds, labels
def mask_heads(args, model, eval_dataloader):
"""This method shows how to mask head (set some heads to zero), to test the effect on the network,
based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
"""
_, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
original_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
new_head_mask = torch.ones_like(head_importance)
num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
current_score = original_score
while current_score >= original_score * args.masking_threshold:
head_mask = new_head_mask.clone() # save current head mask
# heads from least important to most - keep only not-masked heads
head_importance[head_mask == 0.0] = float("Inf")
current_heads_to_mask = head_importance.view(-1).sort()[1]
if len(current_heads_to_mask) <= num_to_mask:
break
# mask heads
current_heads_to_mask = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
new_head_mask = new_head_mask.view(-1)
new_head_mask[current_heads_to_mask] = 0.0
new_head_mask = new_head_mask.view_as(head_mask)
new_head_mask = new_head_mask.clone().detach()
print_2d_tensor(new_head_mask)
# Compute metric and head importance again
_, head_importance, preds, labels = compute_heads_importance(
args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
current_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)",
current_score,
new_head_mask.sum(),
new_head_mask.sum() / new_head_mask.numel() * 100,
)
logger.info("Final head mask")
print_2d_tensor(head_mask)
np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
"""This method shows how to prune head (remove heads weights) based on
the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
"""
# Try pruning and test time speedup
# Pruning is like masking but we actually remove the masked weights
before_time = datetime.now()
_, _, preds, labels = compute_heads_importance(
args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
score_masking = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
original_time = datetime.now() - before_time
original_num_params = sum(p.numel() for p in model.parameters())
heads_to_prune = dict(
(layer, (1 - head_mask[layer].long()).nonzero().squeeze().tolist()) for layer in range(len(head_mask))
)
assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
model.prune_heads(heads_to_prune)
pruned_num_params = sum(p.numel() for p in model.parameters())
before_time = datetime.now()
_, _, preds, labels = compute_heads_importance(
args,
model,
eval_dataloader,
compute_entropy=False,
compute_importance=False,
head_mask=None,
actually_pruned=True,
)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
score_pruning = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
new_time = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
original_num_params,
pruned_num_params,
pruned_num_params / original_num_params * 100,
)
logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
logger.info("Pruning: speed ratio (new timing / original timing): %f percents", original_time / new_time * 100)
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(glue_processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--cache_dir",
default=None,
type=str,
help="Where do you want to store the pre-trained models downloaded from huggingface.co",
)
parser.add_argument(
"--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
)
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
)
parser.add_argument(
"--dont_normalize_global_importance",
action="store_true",
help="Don't normalize all importance scores between 0 and 1",
)
parser.add_argument(
"--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
)
parser.add_argument(
"--masking_threshold",
default=0.9,
type=float,
help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).",
)
parser.add_argument(
"--masking_amount", default=0.1, type=float, help="Fraction of heads to mask at each masking step."
)
parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded.",
)
parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
args.device = torch.device("cuda", args.local_rank)
args.n_gpu = 1
torch.distributed.init_process_group(backend="nccl") # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set seeds
set_seed(args.seed)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in glue_processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = glue_processors[args.task_name]()
args.output_mode = glue_output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
output_attentions=True,
cache_dir=args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
cache_dir=args.cache_dir,
)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
# Distributed and parallel training
model.to(args.device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
elif args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Print/save training arguments
os.makedirs(args.output_dir, exist_ok=True)
torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
logger.info("Training/evaluation parameters %s", args)
# Prepare dataset for the GLUE task
eval_dataset = GlueDataset(args, tokenizer=tokenizer, mode="dev")
if args.data_subset > 0:
eval_dataset = Subset(eval_dataset, list(range(min(args.data_subset, len(eval_dataset)))))
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.batch_size, collate_fn=default_data_collator
)
# Compute head entropy and importance score
compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
head_mask = mask_heads(args, model, eval_dataloader)
prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
|
AdaMix/examples/research_projects/bertology/run_bertology.py/0
|
{
"file_path": "AdaMix/examples/research_projects/bertology/run_bertology.py",
"repo_id": "AdaMix",
"token_count": 7319
}
| 37 |
# Movement Pruning: Adaptive Sparsity by Fine-Tuning
Author: @VictorSanh
*Magnitude pruning is a widely used strategy for reducing model size in pure supervised learning; however, it is less effective in the transfer learning regime that has become standard for state-of-the-art natural language processing applications. We propose the use of *movement pruning*, a simple, deterministic first-order weight pruning method that is more adaptive to pretrained model fine-tuning. Experiments show that when pruning large pretrained language models, movement pruning shows significant improvements in high-sparsity regimes. When combined with distillation, the approach achieves minimal accuracy loss with down to only 3% of the model parameters:*
| Fine-pruning+Distillation<br>(Teacher=BERT-base fine-tuned) | BERT base<br>fine-tuned | Remaining<br>Weights (%) | Magnitude Pruning | L0 Regularization | Movement Pruning | Soft Movement Pruning |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| SQuAD - Dev<br>EM/F1 | 80.4/88.1 | 10%<br>3% | 70.2/80.1<br>45.5/59.6 | 72.4/81.9<br>64.3/75.8 | 75.6/84.3<br>67.5/78.0 | **76.6/84.9**<br>**72.7/82.3** |
| MNLI - Dev<br>acc/MM acc | 84.5/84.9 | 10%<br>3% | 78.3/79.3<br>69.4/70.6 | 78.7/79.7<br>76.0/76.2 | 80.1/80.4<br>76.5/77.4 | **81.2/81.8**<br>**79.5/80.1** |
| QQP - Dev<br>acc/F1 | 91.4/88.4 | 10%<br>3% | 79.8/65.0<br>72.4/57.8 | 88.1/82.8<br>87.0/81.9 | 89.7/86.2<br>86.1/81.5 | **90.2/86.8**<br>**89.1/85.5** |
This page contains information on how to fine-prune pre-trained models such as `BERT` to obtain extremely sparse models with movement pruning. In contrast to magnitude pruning which selects weights that are far from 0, movement pruning retains weights that are moving away from 0.
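To make the contrast concrete, here is a minimal, self-contained PyTorch sketch of the two selection rules. The `scores` tensor stands in for the importance scores learned during fine-pruning and `topk_mask` is an illustrative helper (neither name is part of the released code); it only shows that magnitude pruning keeps the entries with the largest |W|, while movement pruning keeps the entries with the highest learned scores, i.e. the weights being pushed away from 0 during fine-tuning.

```python
import torch

def topk_mask(metric: torch.Tensor, keep_fraction: float) -> torch.Tensor:
    """Return a 0/1 mask keeping (approximately) the `keep_fraction` largest entries of `metric`."""
    k = max(1, int(keep_fraction * metric.numel()))
    threshold = metric.flatten().kthvalue(metric.numel() - k + 1).values
    return (metric >= threshold).float()

weight = torch.randn(768, 768)   # a dense fine-tuned weight matrix (illustrative shape)
scores = torch.randn(768, 768)   # stand-in for the importance scores learned during fine-pruning

magnitude_mask = topk_mask(weight.abs(), keep_fraction=0.10)  # magnitude pruning: keep large |W|
movement_mask = topk_mask(scores, keep_fraction=0.10)         # movement pruning: keep high scores

pruned_weight = weight * movement_mask  # masked entries are exactly 0
```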
For more information, we invite you to check out [our paper](https://arxiv.org/abs/2005.07683).
You can also have a look at this fun *Explain Like I'm Five* introductory [slide deck](https://www.slideshare.net/VictorSanh/movement-pruning-explain-like-im-five-234205241).
<div align="center">
<img src="https://www.seekpng.com/png/detail/166-1669328_how-to-make-emmental-cheese-at-home-icooker.png" width="400">
</div>
## Extreme sparsity and efficient storage
One promise of extreme pruning is to obtain extremely small models that can be easily sent to (and stored on) edge devices. By setting weights to 0., we reduce the amount of information we need to store, and thus decrease the memory size. We are able to obtain extremely sparse fine-pruned models with movement pruning: ~95% of the dense performance with ~5% of total remaining weights in the BERT encoder.
In [this notebook](https://github.com/huggingface/transformers/blob/master/examples/movement-pruning/Saving_PruneBERT.ipynb), we showcase how we can leverage standard tools that exist out-of-the-box to efficiently store an extremely sparse question answering model (only 6% of total remaining weights in the encoder). We are able to reduce the memory size of the encoder **from the 340MB (the original dense BERT) to 11MB**, without any additional training of the model (every operation is performed *post fine-pruning*). It is sufficiently small to store it on a [91' floppy disk](https://en.wikipedia.org/wiki/Floptical) 📎!
While movement pruning does not directly optimize for memory footprint (but rather the number of non-null weights), we hypothesize that further memory compression ratios can be achieved with specific quantization-aware training (see for instance [Q8BERT](https://arxiv.org/abs/1910.06188), [And the Bit Goes Down](https://arxiv.org/abs/1907.05686) or [Quant-Noise](https://arxiv.org/abs/2004.07320)).
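As a back-of-the-envelope illustration of the storage argument (independent of the notebook's actual pipeline, which additionally quantizes the weights), the sketch below stores a 95%-sparse float32 matrix in SciPy CSR format and compares byte counts; the shape and sparsity level are arbitrary:

```python
import numpy as np
from scipy.sparse import csr_matrix

rng = np.random.default_rng(0)
dense = rng.standard_normal((768, 3072)).astype(np.float32)
dense[rng.random(dense.shape) < 0.95] = 0.0  # fine-pruning leaves ~5% non-null weights

sparse = csr_matrix(dense)
dense_bytes = dense.nbytes
sparse_bytes = sparse.data.nbytes + sparse.indices.nbytes + sparse.indptr.nbytes
print(f"dense: {dense_bytes / 1e6:.1f} MB, CSR: {sparse_bytes / 1e6:.1f} MB")
```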
## Fine-pruned models
As examples, we release two English PruneBERT checkpoints (models fine-pruned from a pre-trained `BERT` checkpoint), one on SQuAD and the other on MNLI.
- **`prunebert-base-uncased-6-finepruned-w-distil-squad`**<br/>
Pre-trained `BERT-base-uncased` fine-pruned with soft movement pruning on SQuAD v1.1. We use an additional distillation signal from `BERT-base-uncased` finetuned on SQuAD. The encoder counts 6% of total non-null weights and reaches 83.8 F1 score. The model can be accessed with: `pruned_bert = BertForQuestionAnswering.from_pretrained("huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad")`
- **`prunebert-base-uncased-6-finepruned-w-distil-mnli`**<br/>
Pre-trained `BERT-base-uncased` fine-pruned with soft movement pruning on MNLI. We use an additional distillation signal from `BERT-base-uncased` finetuned on MNLI. The encoder counts 6% of total non-null weights and reaches 80.7 (matched) accuracy. The model can be accessed with: `pruned_bert = BertForSequenceClassification.from_pretrained("huggingface/prunebert-base-uncased-6-finepruned-w-distil-mnli")`
## How to fine-prune?
### Setup
The code relies on the 🤗 Transformers library. In addition to the dependencies listed in the [`examples`](https://github.com/huggingface/transformers/tree/master/examples) folder, you should install a few additional dependencies listed in the `requirements.txt` file: `pip install -r requirements.txt`.
Note that we built our experiments on top of a stabilized version of the library (commit https://github.com/huggingface/transformers/commit/352d5472b0c1dec0f420d606d16747d851b4bda8): we do not guarantee that everything is still compatible with the latest version of the master branch.
### Fine-pruning with movement pruning
Below, we detail how to reproduce the results reported in the paper. We use SQuAD as a running example. Commands (and scripts) can be easily adapted for other tasks.
The following command fine-prunes a pre-trained `BERT-base` on SQuAD using movement pruning towards 15% of remaining weights (85% sparsity). Note that we freeze all the embeddings modules (from their pre-trained value) and only prune the Fully Connected layers in the encoder (12 layers of Transformer Block).
```bash
SERIALIZATION_DIR=<OUTPUT_DIR>
SQUAD_DATA=<SQUAD_DATA>
python examples/movement-pruning/masked_run_squad.py \
--output_dir $SERIALIZATION_DIR \
--data_dir $SQUAD_DATA \
--train_file train-v1.1.json \
--predict_file dev-v1.1.json \
--do_train --do_eval --do_lower_case \
--model_type masked_bert \
--model_name_or_path bert-base-uncased \
--per_gpu_train_batch_size 16 \
--warmup_steps 5400 \
--num_train_epochs 10 \
--learning_rate 3e-5 --mask_scores_learning_rate 1e-2 \
--initial_threshold 1 --final_threshold 0.15 \
--initial_warmup 1 --final_warmup 2 \
--pruning_method topK --mask_init constant --mask_scale 0.
```
### Fine-pruning with other methods
We can also explore other fine-pruning methods by changing the `pruning_method` parameter:
Soft movement pruning
```bash
python examples/movement-pruning/masked_run_squad.py \
--output_dir $SERIALIZATION_DIR \
--data_dir $SQUAD_DATA \
--train_file train-v1.1.json \
--predict_file dev-v1.1.json \
--do_train --do_eval --do_lower_case \
--model_type masked_bert \
--model_name_or_path bert-base-uncased \
--per_gpu_train_batch_size 16 \
--warmup_steps 5400 \
--num_train_epochs 10 \
--learning_rate 3e-5 --mask_scores_learning_rate 1e-2 \
--initial_threshold 0 --final_threshold 0.1 \
--initial_warmup 1 --final_warmup 2 \
--pruning_method sigmoied_threshold --mask_init constant --mask_scale 0. \
--regularization l1 --final_lambda 400.
```
L0 regularization
```bash
python examples/movement-pruning/masked_run_squad.py \
--output_dir $SERIALIZATION_DIR \
--data_dir $SQUAD_DATA \
--train_file train-v1.1.json \
--predict_file dev-v1.1.json \
--do_train --do_eval --do_lower_case \
--model_type masked_bert \
--model_name_or_path bert-base-uncased \
--per_gpu_train_batch_size 16 \
--warmup_steps 5400 \
--num_train_epochs 10 \
--learning_rate 3e-5 --mask_scores_learning_rate 1e-1 \
--initial_threshold 1. --final_threshold 1. \
--initial_warmup 1 --final_warmup 1 \
--pruning_method l0 --mask_init constant --mask_scale 2.197 \
--regularization l0 --final_lambda 125.
```
Iterative Magnitude Pruning
```bash
python examples/movement-pruning/masked_run_squad.py \
--output_dir ./dbg \
--data_dir examples/distillation/data/squad_data \
--train_file train-v1.1.json \
--predict_file dev-v1.1.json \
--do_train --do_eval --do_lower_case \
--model_type masked_bert \
--model_name_or_path bert-base-uncased \
--per_gpu_train_batch_size 16 \
--warmup_steps 5400 \
--num_train_epochs 10 \
--learning_rate 3e-5 \
--initial_threshold 1 --final_threshold 0.15 \
--initial_warmup 1 --final_warmup 2 \
--pruning_method magnitude
```
### After fine-pruning
**Counting parameters**
Regularization-based pruning methods (soft movement pruning and L0 regularization) rely on a penalty term to induce sparsity; the multiplicative coefficient (`--final_lambda`) controls the penalty strength and therefore the resulting sparsity level.
To obtain the effective sparsity level in the encoder, we simply count the number of activated (non-null) weights:
```bash
python examples/movement-pruning/counts_parameters.py \
--pruning_method sigmoied_threshold \
--threshold 0.1 \
--serialization_dir $SERIALIZATION_DIR
```
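Once the masks have been baked into a standard checkpoint with `bertarize.py` (see the next paragraph), the effective sparsity can also be estimated directly from the saved weights. The snippet below is a rough sketch: the checkpoint file name and the `encoder`/`weight` name filter are assumptions about the checkpoint layout and may need adjusting:

```python
import torch

state_dict = torch.load("pytorch_model.bin", map_location="cpu")

total, nonzero = 0, 0
for name, tensor in state_dict.items():
    if "encoder" in name and name.endswith("weight"):  # coarse filter; refine to the pruned Linear layers as needed
        total += tensor.numel()
        nonzero += (tensor != 0).sum().item()

print(f"remaining weights: {100 * nonzero / total:.2f}%")
```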
**Pruning once for all**
Once the model has been fine-pruned, the pruned weights can be set to 0. once for all (reducing the amount of information to store). In our running experiments, we can convert a `MaskedBertForQuestionAnswering` (a BERT model augmented to enable on-the-fly pruning capabilities) to a standard `BertForQuestionAnswering`:
```bash
python examples/movement-pruning/bertarize.py \
--pruning_method sigmoied_threshold \
--threshold 0.1 \
--model_name_or_path $SERIALIZATION_DIR
```
## Hyper-parameters
For reproducibility purposes, we share the detailed results presented in the paper. These [tables](https://docs.google.com/spreadsheets/d/17JgRq_OFFTniUrz6BZWW_87DjFkKXpI1kYDSsseT_7g/edit?usp=sharing) exhaustively describe the individual hyper-parameters used for each data point.
## Inference speed
Early experiments show that even though models fine-pruned with (soft) movement pruning are extremely sparse, they do not benefit from significant improvement in terms of inference speed when using the standard PyTorch inference.
We are currently benchmarking and exploring inference setups specifically for sparse architectures.
In particular, hardware manufacturers are announcing devices that will speedup inference for sparse networks considerably.
## Citation
If you find this resource useful, please consider citing the following paper:
```
@article{sanh2020movement,
title={Movement Pruning: Adaptive Sparsity by Fine-Tuning},
author={Victor Sanh and Thomas Wolf and Alexander M. Rush},
year={2020},
eprint={2005.07683},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
|
AdaMix/examples/research_projects/movement-pruning/README.md/0
|
{
"file_path": "AdaMix/examples/research_projects/movement-pruning/README.md",
"repo_id": "AdaMix",
"token_count": 4026
}
| 38 |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
IMPORTANT:
This code was copied from
https://github.com/google-research/google-research/blob/master/performer/fast_self_attention/fast_self_attention.py on
6/11/2020. This is very new code, so it might be prone to change soon -> make sure to check the original code and
update accordingly
Core Fast Attention Module for Flax. Implementation of the approximate fast softmax and generalized attention mechanism
leveraging structured random feature maps [RFM] techniques and low rank decomposition of the attention matrix.
"""
# pylint: disable=invalid-name, missing-function-docstring, line-too-long
import abc
import functools
from collections.abc import Iterable # pylint: disable=g-importing-member
import numpy as onp
from absl import logging
import jax
import jax.numpy as jnp
from jax import lax, random
def nonnegative_softmax_kernel_feature_creator(
data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True, eps=0.0001
):
"""
Constructs nonnegative kernel features for fast softmax attention
Args:
data: input for which features are computed
projection_matrix: random matrix used to compute features
attention_dims_t: tuple of attention dimensions
batch_dims_t: tuple of batch dimensions
precision: precision parameter
is_query: predicate indicating whether input data corresponds to queries or
keys
normalize_data: predicate indicating whether data should be normalized,
eps: numerical stabilizer
Returns:
Random features for fast softmax attention.
"""
del attention_dims_t
if normalize_data:
# We have e^{qk^T/sqrt{d}} = e^{q_norm k_norm^T}, where
# w_norm = w * data_normalizer for w in {q,k}.
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)),
precision=precision,
)
diag_data = jnp.square(data)
diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)
if is_query:
last_dims_t = (len(data_dash.shape) - 1,)
data_dash = ratio * (
jnp.exp(data_dash - diag_data - jnp.max(data_dash, axis=last_dims_t, keepdims=True)) + eps
)
else:
data_dash = ratio * (jnp.exp(data_dash - diag_data - jnp.max(data_dash)) + eps)
return data_dash
def sincos_softmax_kernel_feature_creator(
data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data=True
):
"""
Constructs kernel sin-cos features for fast softmax attention
Args:
data: input for which features are computed
projection_matrix: random matrix used to compute features
attention_dims_t: tuple of attention dimensions
batch_dims_t: tuple of batch dimensions
precision: precision parameter
normalize_data: predicate indicating whether data should be normalized
Returns:
Random features for fast softmax attention.
"""
if normalize_data:
# We have: exp(qk^T/sqrt{d}) = exp(|q|^2/2sqrt{d}) * exp(|k|^2/2sqrt{d}) *
# exp(-(|q*c-k*c|^2)/2), where c = 1.0 / sqrt{sqrt{d}}.
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)),
precision=precision,
)
data_dash_cos = ratio * jnp.cos(data_dash)
data_dash_sin = ratio * jnp.sin(data_dash)
data_dash = jnp.concatenate((data_dash_cos, data_dash_sin), axis=-1)
# Constructing D_data and data^{'}
diag_data = jnp.square(data)
diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)
# Additional renormalization for numerical stability
data_renormalizer = jnp.max(diag_data, attention_dims_t, keepdims=True)
diag_data -= data_renormalizer
diag_data = jnp.exp(diag_data)
data_prime = data_dash * diag_data
return data_prime
def generalized_kernel_feature_creator(
data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data
):
"""
Constructs kernel features for fast generalized attention
Args:
data: input for which features are computed
projection_matrix: matrix used to compute features
batch_dims_t: tuple of batch dimensions
precision: precision parameter
kernel_fn: kernel function used
kernel_epsilon: additive positive term added to every feature for numerical
stability
normalize_data: predicate indicating whether data should be normalized
Returns:
Random features for fast generalized attention.
"""
if normalize_data:
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
else:
data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)),
precision=precision,
)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime
def make_fast_softmax_attention(
qkv_dim,
renormalize_attention=True,
numerical_stabilizer=0.000001,
nb_features=256,
ortho_features=True,
ortho_scaling=0.0,
redraw_features=True,
unidirectional=False,
nonnegative_features=True,
lax_scan_unroll=1,
):
"""Construct a fast softmax attention method."""
logging.info(
"Fast softmax attention: %s features and orthogonal=%s, renormalize=%s",
nb_features,
ortho_features,
renormalize_attention,
)
if ortho_features:
matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=ortho_scaling)
else:
matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim)
if nonnegative_features:
def kernel_feature_creator(
data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True
):
return nonnegative_softmax_kernel_feature_creator(
data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data,
numerical_stabilizer,
)
else:
def kernel_feature_creator(
data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True
):
del is_query
return sincos_softmax_kernel_feature_creator(
data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data
)
attention_fn = FastAttentionviaLowRankDecomposition(
matrix_creator,
kernel_feature_creator,
renormalize_attention=renormalize_attention,
numerical_stabilizer=numerical_stabilizer,
redraw_features=redraw_features,
unidirectional=unidirectional,
lax_scan_unroll=lax_scan_unroll,
).dot_product_attention
return attention_fn
def make_fast_generalized_attention(
qkv_dim,
renormalize_attention=True,
numerical_stabilizer=0.0,
nb_features=256,
features_type="deterministic",
kernel_fn=jax.nn.relu,
kernel_epsilon=0.001,
redraw_features=False,
unidirectional=False,
lax_scan_unroll=1,
):
"""Construct a fast generalized attention method."""
logging.info("Fast generalized attention.: %s features and renormalize=%s", nb_features, renormalize_attention)
if features_type == "ortho":
matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False)
elif features_type == "iid":
matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim)
elif features_type == "deterministic":
matrix_creator = None
else:
raise ValueError("Unknown feature value type")
def kernel_feature_creator(
data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=False
):
del attention_dims_t
del is_query
return generalized_kernel_feature_creator(
data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data
)
attention_fn = FastAttentionviaLowRankDecomposition(
matrix_creator,
kernel_feature_creator,
renormalize_attention=renormalize_attention,
numerical_stabilizer=numerical_stabilizer,
redraw_features=redraw_features,
unidirectional=unidirectional,
lax_scan_unroll=lax_scan_unroll,
).dot_product_attention
return attention_fn
class RandomMatrix(object):
r"""
Abstract class providing a method for constructing 2D random arrays. Class is responsible for constructing 2D
random arrays.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_2d_array(self):
raise NotImplementedError("Abstract method")
class GaussianUnstructuredRandomMatrix(RandomMatrix):
def __init__(self, nb_rows, nb_columns, key):
self.nb_rows = nb_rows
self.nb_columns = nb_columns
self.key = key
def get_2d_array(self):
return random.normal(self.key, (self.nb_rows, self.nb_columns))
class GaussianOrthogonalRandomMatrix(RandomMatrix):
r"""
Class providing a method to create Gaussian orthogonal matrix. Class is responsible for constructing 2D Gaussian
orthogonal arrays.
"""
def __init__(self, nb_rows, nb_columns, key, scaling=0):
self.nb_rows = nb_rows
self.nb_columns = nb_columns
self.key = key
self.scaling = scaling
def get_2d_array(self):
nb_full_blocks = int(self.nb_rows / self.nb_columns)
block_list = []
rng = self.key
for _ in range(nb_full_blocks):
rng, rng_input = jax.random.split(rng)
unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q)
remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns
if remaining_rows > 0:
rng, rng_input = jax.random.split(rng)
unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q[0:remaining_rows])
final_matrix = jnp.vstack(block_list)
if self.scaling == 0:
multiplier = jnp.linalg.norm(random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1)
elif self.scaling == 1:
multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows))
else:
raise ValueError("Scaling must be one of {0, 1}. Was %s" % self.scaling)
return jnp.matmul(jnp.diag(multiplier), final_matrix)
class FastAttention(object):
r"""
Abstract class providing a method for fast attention. Class is responsible for providing a method
<dot_product_attention> for fast approximate attention.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def dot_product_attention(
self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.0,
deterministic=False,
precision=None,
):
"""
Computes dot-product attention given query, key, and value. This is the core function for applying fast
approximate dot-product attention. It calculates the attention weights given query and key and combines the
values using the attention weights. This function supports multi-dimensional inputs
Args:
query: queries for calculating attention with shape of [batch_size, dim1,
dim2, ..., dimN, num_heads, mem_channels].
key: keys for calculating attention with shape of [batch_size, dim1, dim2,
..., dimN, num_heads, mem_channels].
value: values to be used in attention with shape of [batch_size, dim1,
dim2,..., dimN, num_heads, value_channels].
dtype: the dtype of the computation (default: float32)
bias: bias for the attention weights. This can be used for incorporating
autoregressive mask, padding mask, proximity bias.
axis: axises over which the attention is applied.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout.
dropout_rate: dropout rate.
deterministic: bool, deterministic or not (to apply dropout).
precision: numerical precision of the computation see `jax.lax.Precision`
for details
Returns:
Output of shape [bs, dim1, dim2, ..., dimN, num_heads, value_channels].
"""
raise NotImplementedError("Abstract method")
def _numerator(z_slice_shape, precision, unroll=1):
def fwd(qs, ks, vs):
def body(p, qkv):
(q, k, v) = qkv
p += jnp.einsum("...m,...d->...md", k, v, precision=precision)
X_slice = jnp.einsum("...m,...md->...d", q, p, precision=precision)
return p, X_slice
init_value = jnp.zeros(z_slice_shape)
p, W = lax.scan(body, init_value, (qs, ks, vs), unroll=unroll)
return W, (p, qs, ks, vs)
def bwd(pqkv, W_ct):
def body(carry, qkv_xct):
p, p_ct = carry
q, k, v, x_ct = qkv_xct
q_ct = jnp.einsum("...d,...md->...m", x_ct, p, precision=precision)
p_ct += jnp.einsum("...d,...m->...md", x_ct, q, precision=precision)
k_ct = jnp.einsum("...md,...d->...m", p_ct, v, precision=precision)
v_ct = jnp.einsum("...md,...m->...d", p_ct, k, precision=precision)
p -= jnp.einsum("...m,...d->...md", k, v, precision=precision)
return (p, p_ct), (q_ct, k_ct, v_ct)
p, qs, ks, vs = pqkv
_, (qs_ct, ks_ct, vs_ct) = lax.scan(
body, (p, jnp.zeros_like(p)), (qs, ks, vs, W_ct), reverse=True, unroll=unroll
)
return qs_ct, ks_ct, vs_ct
@jax.custom_vjp
def _numerator_impl(qs, ks, vs):
W, _ = fwd(qs, ks, vs)
return W
_numerator_impl.defvjp(fwd, bwd)
return _numerator_impl
def _denominator(t_slice_shape, precision, unroll=1):
def fwd(qs, ks):
def body(p, qk):
q, k = qk
p += k
x = jnp.einsum("...m,...m->...", q, p, precision=precision)
return p, x
p = jnp.zeros(t_slice_shape)
p, R = lax.scan(body, p, (qs, ks), unroll=unroll)
return R, (qs, ks, p)
def bwd(qkp, R_ct):
def body(carry, qkx):
p, p_ct = carry
q, k, x_ct = qkx
q_ct = jnp.einsum("...,...m->...m", x_ct, p, precision=precision)
p_ct += jnp.einsum("...,...m->...m", x_ct, q, precision=precision)
k_ct = p_ct
p -= k
return (p, p_ct), (q_ct, k_ct)
qs, ks, p = qkp
_, (qs_ct, ks_ct) = lax.scan(body, (p, jnp.zeros_like(p)), (qs, ks, R_ct), reverse=True, unroll=unroll)
return (qs_ct, ks_ct)
@jax.custom_vjp
def _denominator_impl(qs, ks):
R, _ = fwd(qs, ks)
return R
_denominator_impl.defvjp(fwd, bwd)
return _denominator_impl
class FastAttentionviaLowRankDecomposition(FastAttention):
r"""
Class providing a method for fast attention via low rank decomposition. Class is responsible for providing a method
<dot_product_attention> for fast dot-product attention with the use of low rank decomposition (e.g. with random
feature maps).
"""
def __init__(
self,
matrix_creator,
kernel_feature_creator,
renormalize_attention,
numerical_stabilizer,
redraw_features,
unidirectional,
lax_scan_unroll=1,
): # For optimal GPU performance, set to 16.
rng = random.PRNGKey(0)
self.matrix_creator = matrix_creator
self.projection_matrix = self.draw_weights(rng)
self.kernel_feature_creator = kernel_feature_creator
self.renormalize_attention = renormalize_attention
self.numerical_stabilizer = numerical_stabilizer
self.redraw_features = redraw_features
self.unidirectional = unidirectional
self.lax_scan_unroll = lax_scan_unroll
def draw_weights(self, key):
if self.matrix_creator is None:
return None
matrixrng, _ = random.split(key)
projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array()
return projection_matrix
def dot_product_attention(
self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.0,
deterministic=False,
precision=None,
):
assert key.shape[:-1] == value.shape[:-1]
assert query.shape[0:1] == key.shape[0:1] and query.shape[-1] == key.shape[-1]
if axis is None:
axis = tuple(range(1, key.ndim - 2))
if not isinstance(axis, Iterable):
axis = (axis,)
assert key.ndim == query.ndim
assert key.ndim == value.ndim
for ax in axis:
if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
raise ValueError("Attention axis must be between the batch " "axis and the last-two axes.")
n = key.ndim
# Constructing projection tensor.
if self.redraw_features:
# TODO(kchoro): Get rid of the constant below.
query_seed = lax.convert_element_type(jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32)
rng = random.PRNGKey(query_seed)
self.projection_matrix = self.draw_weights(rng)
# batch_dims is <bs, <non-attention dims>, num_heads>
batch_dims = tuple(onp.delete(range(n), axis + (n - 1,)))
# q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
qk_perm = batch_dims + axis + (n - 1,)
k_extra_perm = axis + batch_dims + (n - 1,)
key_extra = key.transpose(k_extra_perm)
key = key.transpose(qk_perm)
query = query.transpose(qk_perm)
# v -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
v_perm = batch_dims + axis + (n - 1,)
value = value.transpose(v_perm)
batch_dims_t = tuple(range(len(batch_dims)))
attention_dims_t = tuple(range(len(batch_dims), len(batch_dims) + len(axis)))
# Constructing tensors Q^{'} and K^{'}.
query_prime = self.kernel_feature_creator(
query, self.projection_matrix, attention_dims_t, batch_dims_t, precision, True
)
key_prime = self.kernel_feature_creator(
key, self.projection_matrix, attention_dims_t, batch_dims_t, precision, False
)
if self.unidirectional:
index = attention_dims_t[0]
z_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) + (value.shape[-1],)
numerator_fn = _numerator(z_slice_shape, precision, self.lax_scan_unroll)
W = numerator_fn(
jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0), jnp.moveaxis(value, index, 0)
)
# Constructing W = (Q^{'}(K^{'})^{T})_{masked}V
W = jnp.moveaxis(W, 0, index)
if not self.renormalize_attention:
# Unidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = W.transpose(perm_inv)
return result
else:
# Unidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)])
index = attention_dims_t[0]
t_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],)
denominator_fn = _denominator(t_slice_shape, precision, self.lax_scan_unroll)
R = denominator_fn(jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0))
R = jnp.moveaxis(R, 0, index)
else:
contract_query = tuple(range(len(batch_dims) + len(axis), len(batch_dims) + len(axis) + 1))
contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1))
# Constructing Z = (K^{'})^{T}V
# Z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
Z = lax.dot_general(
key_prime,
value,
((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)),
precision=precision,
)
# Constructing W = Q^{'}Z = Q^{'}(K^{'})^{T}V
# q (bs, <non-attention dims>, num_heads, <attention dims>, channels_m)
# Z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
# W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
W = lax.dot_general(
query_prime, Z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)), precision=precision
)
if not self.renormalize_attention:
# Bidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = W.transpose(perm_inv)
return result
else:
# Bidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)])
contract_key = tuple(range(len(batch_dims), len(batch_dims) + len(axis)))
contract_thick_all_ones = tuple(range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim))
# Construct T = (K^{'})^{T} 1_L
# k (bs, <non-attention dims>, num_heads, <attention dims>, channels)
T = lax.dot_general(
key_prime,
thick_all_ones,
((contract_key, contract_thick_all_ones), (batch_dims_t, batch_dims_t)),
precision=precision,
)
# Construct partition function: R = Q^{'} T = Q^{'}(K^{'})^{T} 1_L
# q_p (bs, <non-attention dims>, num_heads, <attention dims>, channs_m)
# T (bs, <non-attention dims>, num_heads, channels_m)
R = lax.dot_general(
query_prime,
T,
(((query_prime.ndim - 1,), (T.ndim - 1,)), (batch_dims_t, range(0, len(T.shape) - 1))),
precision=precision,
)
R = R + 2 * self.numerical_stabilizer * (jnp.abs(R) <= self.numerical_stabilizer)
R = jnp.reciprocal(R)
R = jnp.expand_dims(R, len(R.shape))
# W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
# R (bs, <non-attention dims>, num_heads, <attention dims>, extra_channel)
result = W * R
# back to (bs, dim1, dim2, ..., dimN, num_heads, channels)
perm_inv = _invert_perm(qk_perm)
result = result.transpose(perm_inv)
return result
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
|
AdaMix/examples/research_projects/performer/modeling_flax_performer_utils.py/0
|
{
"file_path": "AdaMix/examples/research_projects/performer/modeling_flax_performer_utils.py",
"repo_id": "AdaMix",
"token_count": 11674
}
| 39 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.file_utils import requires_datasets, requires_faiss
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
def __init__(self):
self.initialized = False
def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
if not self.initialized:
self.retriever = RagRetriever(
config,
question_encoder_tokenizer=question_encoder_tokenizer,
generator_tokenizer=generator_tokenizer,
index=index,
init_retrieval=False,
)
self.initialized = True
def init_retrieval(self):
self.retriever.index.init_index()
def retrieve(self, question_hidden_states, n_docs):
doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
"""
A distributed retriever built on top of the ``Ray`` API, a library
for building distributed applications (https://docs.ray.io/en/master/).
During training, all training workers initialize their own
instance of a `RagRayDistributedRetriever`, and each instance of
this distributed retriever shares a common set of Retrieval Ray
Actors (https://docs.ray.io/en/master/walkthrough.html#remote
-classes-actors) that load the index on separate processes. Ray
handles the communication between the `RagRayDistributedRetriever`
instances and the remote Ray actors. If training is done in a
non-distributed setup, the index will simply be loaded in the same
process as the training worker and Ray will not be used.
Args:
config (:class:`~transformers.RagConfig`):
The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build.
question_encoder_tokenizer (:class:`~transformers.PretrainedTokenizer`):
The tokenizer that was used to tokenize the question.
It is used to decode the question and then use the generator_tokenizer.
generator_tokenizer (:class:`~transformers.PretrainedTokenizer`):
The tokenizer used for the generator part of the RagModel.
retrieval_workers (:obj:`List[ray.ActorClass(RayRetriever)]`): A list of already initialized `RayRetriever` actors.
These actor classes run on remote processes and are responsible for performing the index lookup.
index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration):
If specified, use this index instead of the one built using the configuration
"""
def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
raise ValueError(
"When using Ray for distributed fine-tuning, "
"you'll need to provide the paths instead, "
"as the dataset and the index are loaded "
"separately. More info in examples/rag/use_own_knowledge_dataset.py "
)
super().__init__(
config,
question_encoder_tokenizer=question_encoder_tokenizer,
generator_tokenizer=generator_tokenizer,
index=index,
init_retrieval=False,
)
self.retrieval_workers = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
for worker in self.retrieval_workers
]
)
def init_retrieval(self):
"""
Retriever initialization function, needs to be called from the
training process. This function triggers retrieval initialization
for all retrieval actors if using distributed setting, or loads
index into current process if training is not distributed.
"""
logger.info("initializing retrieval")
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def retrieve(self, question_hidden_states, n_docs):
"""
Retrieves documents for specified ``question_hidden_states``. If
running training with multiple workers, a random retrieval actor is
selected to perform the index lookup and return the result.
Args:
question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`):
A batch of query vectors to retrieve with.
n_docs (:obj:`int`):
The number of docs retrieved per query.
Output:
retrieved_doc_embeds (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)`
The retrieval embeddings of the retrieved docs per query.
doc_ids (:obj:`np.ndarray` of shape :obj:`batch_size, n_docs`)
The ids of the documents in the index
doc_dicts (:obj:`List[dict]`):
The retrieved_doc_embeds examples per query.
"""
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
else:
doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
@classmethod
def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
@classmethod
def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
requires_datasets(cls)
requires_faiss(cls)
config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
question_encoder_tokenizer = rag_tokenizer.question_encoder
generator_tokenizer = rag_tokenizer.generator
if indexed_dataset is not None:
config.index_name = "custom"
index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
else:
index = cls._build_index(config)
return cls(
config,
question_encoder_tokenizer=question_encoder_tokenizer,
generator_tokenizer=generator_tokenizer,
retrieval_workers=actor_handles,
index=index,
)
|
AdaMix/examples/research_projects/rag/distributed_ray_retriever.py/0
|
{
"file_path": "AdaMix/examples/research_projects/rag/distributed_ray_retriever.py",
"repo_id": "AdaMix",
"token_count": 2937
}
| 40 |
# Because of their complexity, multi-GPU tests could impact other tests; to aid debugging we keep them in a separate module.
import os
import sys
from pathlib import Path
import torch
from transformers.testing_utils import TestCasePlus, execute_subprocess_async, require_torch_multi_gpu
from utils import load_json
CUDA_AVAILABLE = torch.cuda.is_available()
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
CHEAP_ARGS = {
"max_tokens_per_batch": None,
"supervise_forward": True,
"normalize_hidden": True,
"label_smoothing": 0.2,
"eval_max_gen_length": None,
"eval_beams": 1,
"val_metric": "loss",
"save_top_k": 1,
"adafactor": True,
"early_stopping_patience": 2,
"logger_name": "default",
"length_penalty": 0.5,
"cache_dir": "",
"task": "summarization",
"num_workers": 2,
"alpha_hid": 0,
"freeze_embeds": True,
"enc_only": False,
"tgt_suffix": "",
"resume_from_checkpoint": None,
"sortish_sampler": True,
"student_decoder_layers": 1,
"val_check_interval": 1.0,
"output_dir": "",
"fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp
"no_teacher": False,
"fp16_opt_level": "O1",
"gpus": 1 if CUDA_AVAILABLE else 0,
"n_tpu_cores": 0,
"max_grad_norm": 1.0,
"do_train": True,
"do_predict": True,
"accumulate_grad_batches": 1,
"server_ip": "",
"server_port": "",
"seed": 42,
"model_name_or_path": "sshleifer/bart-tiny-random",
"config_name": "",
"tokenizer_name": "facebook/bart-large",
"do_lower_case": False,
"learning_rate": 0.3,
"lr_scheduler": "linear",
"weight_decay": 0.0,
"adam_epsilon": 1e-08,
"warmup_steps": 0,
"max_epochs": 1,
"train_batch_size": 2,
"eval_batch_size": 2,
"max_source_length": 12,
"max_target_length": 12,
"val_max_target_length": 12,
"test_max_target_length": 12,
"fast_dev_run": False,
"no_cache": False,
"n_train": -1,
"n_val": -1,
"n_test": -1,
"student_encoder_layers": 1,
"freeze_encoder": False,
"auto_scale_batch_size": False,
"overwrite_output_dir": False,
"student": None,
}
def _dump_articles(path: Path, articles: list):
content = "\n".join(articles)
Path(path).open("w").writelines(content)
def make_test_data_dir(tmp_dir):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
_dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
return tmp_dir
class TestSummarizationDistillerMultiGPU(TestCasePlus):
@classmethod
def setUpClass(cls):
return cls
@require_torch_multi_gpu
def test_multi_gpu(self):
updates = dict(
no_teacher=True,
freeze_encoder=True,
gpus=2,
overwrite_output_dir=True,
sortish_sampler=True,
)
self._test_distiller_cli_fork(updates, check_contents=False)
def _test_distiller_cli_fork(self, updates, check_contents=True):
default_updates = dict(
label_smoothing=0.0,
early_stopping_patience=-1,
train_batch_size=1,
eval_batch_size=2,
max_epochs=2,
alpha_mlm=0.2,
alpha_ce=0.8,
do_predict=True,
model_name_or_path="sshleifer/tinier_bart",
teacher=CHEAP_ARGS["model_name_or_path"],
val_check_interval=0.5,
)
default_updates.update(updates)
args_d: dict = CHEAP_ARGS.copy()
tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
output_dir = self.get_auto_remove_tmp_dir()
args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates)
def convert(k, v):
if k in ["tgt_suffix", "server_ip", "server_port", "out", "n_tpu_cores"]:
return ""
if v is False or v is None:
return ""
if v is True: # or len(str(v))==0:
return f"--{k}"
return f"--{k}={v}"
cli_args = [x for x in (convert(k, v) for k, v in args_d.items()) if len(x)]
cmd = [sys.executable, f"{self.test_file_dir}/distillation.py"] + cli_args
execute_subprocess_async(cmd, env=self.get_env())
contents = os.listdir(output_dir)
contents = {os.path.basename(p) for p in contents}
ckpt_files = [p for p in contents if p.endswith("ckpt")]
assert len(ckpt_files) > 0
self.assertIn("test_generations.txt", contents)
self.assertIn("test_results.txt", contents)
# get the following from the module, (we don't have access to `model` here)
metrics_save_path = os.path.join(output_dir, "metrics.json")
val_metric = "rouge2"
metrics = load_json(metrics_save_path)
# {'test': [{'test_avg_loss': 10.63731575012207, 'test_avg_rouge1': 0.0, 'test_avg_rouge2': 0.0, 'test_avg_rougeL': 0.0, 'test_avg_gen_time': 0.1822289228439331, 'test_avg_gen_len': 142.0, 'step_count': 1}]}
print(metrics)
last_step_stats = metrics["val"][-1]
self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01)
self.assertIsInstance(last_step_stats[f"val_avg_{val_metric}"], float)
self.assertEqual(len(metrics["test"]), 1)
desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) / 2 + 1)
self.assertEqual(len(metrics["val"]), desired_n_evals)
|
AdaMix/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples_multi_gpu.py/0
|
{
"file_path": "AdaMix/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples_multi_gpu.py",
"repo_id": "AdaMix",
"token_count": 2672
}
| 41 |
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
## Language generation
Based on the script [`run_generation.py`](https://github.com/huggingface/transformers/blob/master/examples/text-generation/run_generation.py).
Conditional text generation using the auto-regressive models of the library: GPT, GPT-2, Transformer-XL, XLNet, CTRL.
A similar script is used for our official demo [Write With Transformer](https://transformer.huggingface.co), where you
can try out the different models available in the library.
Example usage:
```bash
python run_generation.py \
--model_type=gpt2 \
--model_name_or_path=gpt2
```
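For quick experiments, the same models can also be driven through the `pipeline` API instead of the script. The snippet below is a minimal sketch; the model name, prompt, and generation parameters are illustrative only:
```python
from transformers import pipeline
generator = pipeline("text-generation", model="gpt2")
print(generator("Once upon a time,", max_length=30, num_return_sequences=1))
```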
|
AdaMix/examples/text-generation/README.md/0
|
{
"file_path": "AdaMix/examples/text-generation/README.md",
"repo_id": "AdaMix",
"token_count": 332
}
| 42 |
#!/usr/bin/env bash
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script evals the following fsmt models
# it covers:
# - allenai/wmt16-en-de-dist-12-1
# - allenai/wmt16-en-de-dist-6-1
# - allenai/wmt16-en-de-12-1
# this script needs to be run from the top level of the transformers repo
if [ ! -d "src/transformers" ]; then
echo "Error: This script needs to be run from the top of the transformers repo"
exit 1
fi
# In these scripts you may have to lower BS if you get CUDA OOM (or increase it if you have a large GPU)
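# Example invocation (hypothetical; assumes sacrebleu is installed and you are at the repo root):
#   bash scripts/fsmt/eval-allenai-wmt16.sh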
### Normal eval ###
export PAIR=en-de
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=64
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
MODEL_PATH=allenai/wmt16-en-de-dist-12-1
echo $PAIR $MODEL_PATH
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py $MODEL_PATH $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
MODEL_PATH=allenai/wmt16-en-de-dist-6-1
echo $PAIR $MODEL_PATH
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py $MODEL_PATH $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
MODEL_PATH=allenai/wmt16-en-de-12-1
echo $PAIR $MODEL_PATH
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py $MODEL_PATH $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
### Searching hparams eval ###
export PAIR=en-de
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=32
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
MODEL_PATH=allenai/wmt16-en-de-dist-12-1
echo $PAIR $MODEL_PATH
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval_search.py $MODEL_PATH $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --search="num_beams=5:10:15 length_penalty=0.6:0.7:0.8:0.9:1.0:1.1"
MODEL_PATH=allenai/wmt16-en-de-dist-6-1
echo $PAIR $MODEL_PATH
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval_search.py $MODEL_PATH $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --search="num_beams=5:10:15 length_penalty=0.6:0.7:0.8:0.9:1.0:1.1"
MODEL_PATH=allenai/wmt16-en-de-12-1
echo $PAIR $MODEL_PATH
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval_search.py $MODEL_PATH $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --search="num_beams=5:10:15 length_penalty=0.6:0.7:0.8:0.9:1.0:1.1"
|
AdaMix/scripts/fsmt/eval-allenai-wmt16.sh/0
|
{
"file_path": "AdaMix/scripts/fsmt/eval-allenai-wmt16.sh",
"repo_id": "AdaMix",
"token_count": 1452
}
| 43 |
Metadata-Version: 2.1
Name: transformers
Version: 4.4.2
Summary: State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch
Home-page: https://github.com/huggingface/transformers
Author: Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors
Author-email: [email protected]
License: Apache
Keywords: NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Education
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.6.0
Description-Content-Type: text/markdown
Provides-Extra: ja
Provides-Extra: sklearn
Provides-Extra: tf
Provides-Extra: tf-cpu
Provides-Extra: torch
Provides-Extra: retrieval
Provides-Extra: flax
Provides-Extra: tokenizers
Provides-Extra: onnxruntime
Provides-Extra: onnx
Provides-Extra: modelcreation
Provides-Extra: serving
Provides-Extra: speech
Provides-Extra: sentencepiece
Provides-Extra: testing
Provides-Extra: docs
Provides-Extra: quality
Provides-Extra: all
Provides-Extra: dev
Provides-Extra: torchhub
License-File: LICENSE
# AdaMix (Mixture-of-Adapter)
This is the implementation of the paper [AdaMix: Mixture-of-Adapter for Parameter-efficient Tuning of Large Language Models](https://arxiv.org/abs/2205.12410).
## Adapting to the GLUE Benchmark
Our experiments on the GLUE benchmark are run on 16 NVIDIA Tesla V100 GPU cards out of a DGX-1. The results may vary due to different GPU models, drivers, CUDA SDK versions, floating-point precisions, and random seeds.
We report the dev set results as the median over 5 runs.
## Download AdaMix checkpoints
We release all copies of the adapter weights used in the aggregation study.
| Dataset | BERT base 110M | RoBERTa large 355M |
|---------|----------------|--------------------|
| MNLI | [8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_mnli_expert_soup.bin) | [11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_mnli_expert_soup.bin) |
| SST2 | [8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_sst2_expert_soup.bin) | [11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_sst2_expert_soup.bin) |
| MRPC | [8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_mrpc_expert_soup.bin) | [11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_mrpc_expert_soup.bin) |
| CoLA | [8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_cola_expert_soup.bin) | [11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_cola_expert_soup.bin) |
| QNLI | [8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_qnli_expert_soup.bin) | [11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_qnli_expert_soup.bin) |
| QQP | [8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_qqp_expert_soup.bin) | [11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_qqp_expert_soup.bin) |
| RTE | [8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_rte_expert_soup.bin) | [11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_rte_expert_soup.bin) |
| STSB | [8.5 MB](https://github.com/yaqingwang/MoA/releases/download/bert_base/pytorch_model_stsb_expert_soup.bin) | [11.7 MB](https://github.com/yaqingwang/MoA/releases/download/roberta_large/pytorch_model_stsb_expert_soup.bin) |
## Steps to reproduce our results
### Create and activate conda env
```console
conda env create -f environment.yml
```
### Install the pre-requisites
```console
pip install -e .
```
We also provide the shell scripts for bert-base and roberta-large.
### Quick start
```console
export num_gpus=1
export PYTHONHASHSEED=0
task_name=mnli
model=roberta-large
export output_dir="./models/${model}/${task_name}"
python -m torch.distributed.launch --nproc_per_node=$num_gpus \
examples/text-classification/run_glue.py \
--model_name_or_path $model \
--task_name $task_name \
--do_train \
--do_eval \
--max_seq_length 128 \
--per_device_train_batch_size 32 \
--per_device_eval_batch_size 32 \
--learning_rate 5e-4 \
--num_train_epochs 100 \
--output_dir $output_dir/model \
--overwrite_output_dir \
--logging_steps 100 \
--logging_dir $output_dir/log \
--evaluation_strategy epoch \
--save_strategy epoch \
--warmup_ratio 0.06 \
--apply_expert_soup \
--adapter_size 16 \
--num_experts 4 \
--seed 0 \
--inference_level 3 \
--sharing_up 1 \
```
Most arguments are inherited from transformers and are easy to understand. We further explain some of AdaMix's arguments (an illustrative variant follows the list):
* `inference_level`: There are two suggested modes
* `1`: Random Routing
* `3`: Averaging the weights of Adapters for routing (used in AdaMix)
* `num_experts`: Number of Adapters in AdaMix
* `sharing_up`: There are two modes (`sharing_down` works the same way)
* `0`: No weight sharing
* `1`: Sharing Project-up layer weights in Adapter
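For example, a hypothetical variant of the quick-start command that uses random routing, eight adapters, and no weight sharing only changes the AdaMix-specific flags; every other value simply mirrors the example above:
```console
export PYTHONHASHSEED=0
task_name=rte
model=roberta-large
export output_dir="./models/${model}/${task_name}"
python examples/text-classification/run_glue.py \
--model_name_or_path $model \
--task_name $task_name \
--do_train \
--do_eval \
--max_seq_length 128 \
--per_device_train_batch_size 32 \
--learning_rate 5e-4 \
--num_train_epochs 100 \
--output_dir $output_dir/model \
--overwrite_output_dir \
--warmup_ratio 0.06 \
--apply_expert_soup \
--adapter_size 16 \
--num_experts 8 \
--inference_level 1 \
--sharing_up 0 \
--seed 0
```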
### Evaluate the checkpoints
Create a checkpoints directory and download the checkpoints for the corresponding tasks into it. Using MNLI as an example, pass your checkpoint path via the **expert_soup_path** argument.
```console
export num_gpus=1
export PYTHONHASHSEED=0
task_name=mnli
model=roberta-large
export output_dir="./models/${model}/${task_name}"
python -m torch.distributed.launch --nproc_per_node=$num_gpus \
examples/text-classification/run_glue.py \
--model_name_or_path $model \
--task_name $task_name \
--do_eval \
--expert_soup_path ./checkpoints/pytorch_model_${task_name}_expert_soup.bin \
--max_seq_length 128 \
--per_device_train_batch_size 32 \
--per_device_eval_batch_size 32 \
--learning_rate 5e-4 \
--num_train_epochs 100 \
--output_dir $output_dir/model \
--overwrite_output_dir \
--logging_steps 100 \
--logging_dir $output_dir/log \
--evaluation_strategy epoch \
--save_strategy epoch \
--warmup_ratio 0.06 \
--apply_expert_soup \
--adapter_size 16 \
--num_experts 4 \
--seed 0 \
--inference_level 3 \
--sharing_up 1 \
```
### Notes and Acknowledgments
The implementation is based on https://github.com/huggingface/transformers <br>
We also used some code from: https://github.com/microsoft/LoRA
### How do I cite AdaMix?
```
@article{wang2022adamix,
title={AdaMix: Mixture-of-Adapter for Parameter-efficient Tuning of Large Language Models},
author={Wang, Yaqing and Mukherjee, Subhabrata and Liu, Xiaodong and Gao, Jing and Awadallah, Ahmed Hassan and Gao, Jianfeng},
journal={arXiv preprint arXiv:2205.12410},
year={2022}
}
```
|
AdaMix/src/transformers.egg-info/PKG-INFO/0
|
{
"file_path": "AdaMix/src/transformers.egg-info/PKG-INFO",
"repo_id": "AdaMix",
"token_count": 2652
}
| 44 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn.functional as F
from packaging import version
from .utils import logging
logger = logging.get_logger(__name__)
def _gelu_python(x):
"""
Original Implementation of the GELU activation function in Google BERT repo when initially created. For
information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in
torch.nn.functional Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gelu_new(x):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
if version.parse(torch.__version__) < version.parse("1.4"):
gelu = _gelu_python
else:
gelu = F.gelu
def gelu_fast(x):
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
def _silu_python(x):
"""
See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function
Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated
Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with
later.
"""
return x * torch.sigmoid(x)
if version.parse(torch.__version__) < version.parse("1.7"):
silu = _silu_python
else:
silu = F.silu
def mish(x):
return x * torch.tanh(torch.nn.functional.softplus(x))
def linear_act(x):
return x
ACT2FN = {
"relu": F.relu,
"silu": silu,
"swish": silu,
"gelu": gelu,
"tanh": torch.tanh,
"gelu_new": gelu_new,
"gelu_fast": gelu_fast,
"mish": mish,
"linear": linear_act,
"sigmoid": torch.sigmoid,
}
def get_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
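# --- Illustrative usage (not part of the original module; the values below are examples only) ---
if __name__ == "__main__":
    # Look up an activation by its configuration name and apply it to a small tensor.
    example_act = get_activation("gelu_new")
    print(example_act(torch.tensor([-1.0, 0.0, 1.0])))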
|
AdaMix/src/transformers/activations.py/0
|
{
"file_path": "AdaMix/src/transformers/activations.py",
"repo_id": "AdaMix",
"token_count": 1134
}
| 45 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import SUPPORTED_TASKS, Pipeline, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
_serve_dependencies_installed = True
except (ImportError, AttributeError):
BaseModel = object
def Body(*x, **y):
pass
_serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
"""
Factory function used to instantiate serving server from provided command line arguments.
Returns: ServeCommand
"""
nlp = pipeline(
task=args.task,
model=args.model if args.model else None,
config=args.config,
tokenizer=args.tokenizer,
device=args.device,
)
return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
"""
Expose model information
"""
infos: dict
class ServeTokenizeResult(BaseModel):
"""
Tokenize result model
"""
tokens: List[str]
tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
"""
DeTokenize result model
"""
text: str
class ServeForwardResult(BaseModel):
"""
Forward result model
"""
output: Any
class ServeCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the transformer-cli
Args:
parser: Root parser to register command-specific arguments
"""
serve_parser = parser.add_parser(
"serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
)
serve_parser.add_argument(
"--task", type=str, choices=SUPPORTED_TASKS.keys(), help="The task to run the pipeline on"
)
serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
serve_parser.add_argument(
"--device",
type=int,
default=-1,
help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
)
serve_parser.set_defaults(func=serve_command_factory)
def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
self._pipeline = pipeline
self.host = host
self.port = port
self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and unicorn. "
'Please install transformers with [serving]: pip install "transformers[serving]".'
"Or install FastAPI and unicorn separately."
)
else:
logger.info("Serving model over {}:{}".format(host, port))
self._app = FastAPI(
routes=[
APIRoute(
"/",
self.model_info,
response_model=ServeModelInfoResult,
response_class=JSONResponse,
methods=["GET"],
),
APIRoute(
"/tokenize",
self.tokenize,
response_model=ServeTokenizeResult,
response_class=JSONResponse,
methods=["POST"],
),
APIRoute(
"/detokenize",
self.detokenize,
response_model=ServeDeTokenizeResult,
response_class=JSONResponse,
methods=["POST"],
),
APIRoute(
"/forward",
self.forward,
response_model=ServeForwardResult,
response_class=JSONResponse,
methods=["POST"],
),
],
timeout=600,
)
def run(self):
run(self._app, host=self.host, port=self.port, workers=self.workers)
def model_info(self):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
"""
        Tokenize the provided input and optionally return the corresponding token ids: - **text_input**: String to
        tokenize - **return_ids**: Boolean flag indicating whether the tokens should be converted to their integer
        ids.
"""
try:
tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
if return_ids:
tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
else:
return ServeTokenizeResult(tokens=tokens_txt)
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
def detokenize(
self,
tokens_ids: List[int] = Body(None, embed=True),
skip_special_tokens: bool = Body(False, embed=True),
cleanup_tokenization_spaces: bool = Body(True, embed=True),
):
"""
Detokenize the provided tokens ids to readable text: - **tokens_ids**: List of tokens ids -
**skip_special_tokens**: Flag indicating to not try to decode special tokens - **cleanup_tokenization_spaces**:
Flag indicating to remove all leading/trailing spaces and intermediate ones.
"""
try:
decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
return ServeDeTokenizeResult(model="", text=decoded_str)
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
async def forward(self, inputs=Body(None, embed=True)):
"""
**inputs**:
**attention_mask**:
**tokens_type_ids**:
"""
# Check we don't have empty string
if len(inputs) == 0:
return ServeForwardResult(output=[], attention=[])
try:
# Forward through the model
output = self._pipeline(inputs)
return ServeForwardResult(output=output)
except Exception as e:
raise HTTPException(500, {"error": str(e)})
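# --- Illustrative usage (not part of the original module) ---
# A hypothetical invocation of this subcommand from the shell, followed by an example request
# against the /tokenize route (host, port, and task are placeholders):
#   transformers-cli serve --task sentiment-analysis --host localhost --port 8888
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'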
|
AdaMix/src/transformers/commands/serving.py/0
|
{
"file_path": "AdaMix/src/transformers/commands/serving.py",
"repo_id": "AdaMix",
"token_count": 3457
}
| 46 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pickle
import random
import time
import warnings
from typing import Dict, List, Optional
import torch
from torch.utils.data.dataset import Dataset
from filelock import FileLock
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
DEPRECATION_WARNING = (
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: {0}"
)
class TextDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach soon.
"""
def __init__(
self,
tokenizer: PreTrainedTokenizer,
file_path: str,
block_size: int,
overwrite_cache=False,
cache_dir: Optional[str] = None,
):
warnings.warn(
DEPRECATION_WARNING.format(
"https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_mlm.py"
),
FutureWarning,
)
assert os.path.isfile(file_path), f"Input file path {file_path} not found"
block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else directory,
"cached_lm_{}_{}_{}".format(
tokenizer.__class__.__name__,
str(block_size),
filename,
),
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
start = time.time()
with open(cached_features_file, "rb") as handle:
self.examples = pickle.load(handle)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
else:
logger.info(f"Creating features from dataset file at {directory}")
self.examples = []
with open(file_path, encoding="utf-8") as f:
text = f.read()
tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size
self.examples.append(
tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
)
# Note that we are losing the last truncated example here for the sake of simplicity (no padding)
# If your dataset is small, first you should look for a bigger one :-) and second you
# can change this behavior by adding (model specific) padding.
start = time.time()
with open(cached_features_file, "wb") as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
logger.info(
"Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> torch.Tensor:
return torch.tensor(self.examples[i], dtype=torch.long)
class LineByLineTextDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach soon.
"""
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
warnings.warn(
DEPRECATION_WARNING.format(
"https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_mlm.py"
),
FutureWarning,
)
assert os.path.isfile(file_path), f"Input file path {file_path} not found"
# Here, we do not cache the features, operating under the assumption
# that we will soon use fast multithreaded tokenizers from the
# `tokenizers` repo everywhere =)
logger.info("Creating features from dataset file at %s", file_path)
with open(file_path, encoding="utf-8") as f:
lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
self.examples = batch_encoding["input_ids"]
self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> Dict[str, torch.tensor]:
return self.examples[i]
class LineByLineWithRefDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach soon.
"""
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str):
warnings.warn(
DEPRECATION_WARNING.format(
"https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_mlm_wwm.py"
),
FutureWarning,
)
assert os.path.isfile(file_path), f"Input file path {file_path} not found"
        assert os.path.isfile(ref_path), f"Ref file path {ref_path} not found"
# Here, we do not cache the features, operating under the assumption
# that we will soon use fast multithreaded tokenizers from the
# `tokenizers` repo everywhere =)
logger.info("Creating features from dataset file at %s", file_path)
logger.info("Use ref segment results at %s", ref_path)
with open(file_path, encoding="utf-8") as f:
data = f.readlines() # use this method to avoid delimiter '\u2029' to split a line
data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
# Get ref inf from file
with open(ref_path, encoding="utf-8") as f:
ref = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
assert len(data) == len(ref)
batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size)
self.examples = batch_encoding["input_ids"]
self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]
n = len(self.examples)
for i in range(n):
self.examples[i]["chinese_ref"] = torch.tensor(ref[i], dtype=torch.long)
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> Dict[str, torch.tensor]:
return self.examples[i]
class LineByLineWithSOPTextDataset(Dataset):
"""
Dataset for sentence order prediction task, prepare sentence pairs for SOP task
"""
def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int):
warnings.warn(
DEPRECATION_WARNING.format(
"https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_mlm.py"
),
FutureWarning,
)
assert os.path.isdir(file_dir)
logger.info(f"Creating features from dataset file folder at {file_dir}")
self.examples = []
# TODO: randomness could apply a random seed, ex. rng = random.Random(random_seed)
# file path looks like ./dataset/wiki_1, ./dataset/wiki_2
for file_name in os.listdir(file_dir):
file_path = os.path.join(file_dir, file_name)
assert os.path.isfile(file_path)
article_open = False
with open(file_path, encoding="utf-8") as f:
original_lines = f.readlines()
article_lines = []
for line in original_lines:
if "<doc id=" in line:
article_open = True
elif "</doc>" in line:
article_open = False
document = [
tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line))
for line in article_lines[1:]
if (len(line) > 0 and not line.isspace())
]
examples = self.create_examples_from_document(document, block_size, tokenizer)
self.examples.extend(examples)
article_lines = []
else:
if article_open:
article_lines.append(line)
logger.info("Dataset parse finished.")
def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1):
"""Creates examples for a single document."""
# Account for special tokens
max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True)
# We *usually* want to fill up the entire sequence since we are padding
# to `block_size` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pretraining and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `block_size` is a hard limit.
target_seq_length = max_num_tokens
if random.random() < short_seq_prob:
target_seq_length = random.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
examples = []
current_chunk = [] # a buffer stored current working segments
current_length = 0
i = 0
while i < len(document):
segment = document[i] # get a segment
if not segment:
i += 1
continue
current_chunk.append(segment) # add a segment to current chunk
current_length += len(segment) # overall token length
# if current length goes to the target length or reaches the end of file, start building token a and b
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A` (first) sentence.
a_end = 1
# if current chunk has more than 2 sentences, pick part of it `A` (first) sentence
if len(current_chunk) >= 2:
a_end = random.randint(1, len(current_chunk) - 1)
# token a
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
# token b
tokens_b = []
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
if len(tokens_a) == 0 or len(tokens_b) == 0:
continue
# switch tokens_a and tokens_b randomly
if random.random() < 0.5:
is_next = False
tokens_a, tokens_b = tokens_b, tokens_a
else:
is_next = True
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if random.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
# add special tokens
input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
# add token type ids, 0 for sentence a, 1 for sentence b
token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)
example = {
"input_ids": torch.tensor(input_ids, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"sentence_order_label": torch.tensor(0 if is_next else 1, dtype=torch.long),
}
examples.append(example)
current_chunk = [] # clear current chunk
current_length = 0 # reset current text length
i += 1 # go to next line
return examples
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> Dict[str, torch.tensor]:
return self.examples[i]
class TextDatasetForNextSentencePrediction(Dataset):
"""
This will be superseded by a framework-agnostic approach soon.
"""
def __init__(
self,
tokenizer: PreTrainedTokenizer,
file_path: str,
block_size: int,
overwrite_cache=False,
short_seq_probability=0.1,
nsp_probability=0.5,
):
warnings.warn(
DEPRECATION_WARNING.format(
"https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_mlm.py"
),
FutureWarning,
)
assert os.path.isfile(file_path), f"Input file path {file_path} not found"
self.block_size = block_size - tokenizer.num_special_tokens_to_add(pair=True)
self.short_seq_probability = short_seq_probability
self.nsp_probability = nsp_probability
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(
directory,
"cached_nsp_{}_{}_{}".format(
tokenizer.__class__.__name__,
str(block_size),
filename,
),
)
self.tokenizer = tokenizer
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
#
# Example:
# I am very happy.
# Here is the second sentence.
#
# A new document.
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
start = time.time()
with open(cached_features_file, "rb") as handle:
self.examples = pickle.load(handle)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
else:
logger.info(f"Creating features from dataset file at {directory}")
self.documents = [[]]
with open(file_path, encoding="utf-8") as f:
while True:
line = f.readline()
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line and len(self.documents[-1]) != 0:
self.documents.append([])
tokens = tokenizer.tokenize(line)
tokens = tokenizer.convert_tokens_to_ids(tokens)
if tokens:
self.documents[-1].append(tokens)
logger.info(f"Creating examples from {len(self.documents)} documents.")
self.examples = []
for doc_index, document in enumerate(self.documents):
self.create_examples_from_document(document, doc_index)
start = time.time()
with open(cached_features_file, "wb") as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
logger.info(
"Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
def create_examples_from_document(self, document: List[List[int]], doc_index: int):
"""Creates examples for a single document."""
max_num_tokens = self.block_size - self.tokenizer.num_special_tokens_to_add(pair=True)
# We *usually* want to fill up the entire sequence since we are padding
# to `block_size` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pretraining and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `block_size` is a hard limit.
target_seq_length = max_num_tokens
if random.random() < self.short_seq_probability:
target_seq_length = random.randint(2, max_num_tokens)
current_chunk = [] # a buffer stored current working segments
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = random.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
if len(current_chunk) == 1 or random.random() < self.nsp_probability:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = random.randint(0, len(self.documents) - 1)
if random_document_index != doc_index:
break
random_document = self.documents[random_document_index]
random_start = random.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
# add special tokens
input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
# add token type ids, 0 for sentence a, 1 for sentence b
token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)
example = {
"input_ids": torch.tensor(input_ids, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"next_sentence_label": torch.tensor(1 if is_random_next else 0, dtype=torch.long),
}
self.examples.append(example)
current_chunk = []
current_length = 0
i += 1
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return self.examples[i]
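# --- Illustrative usage (not part of the original module; model name and file path are examples) ---
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = LineByLineTextDataset(tokenizer=tokenizer, file_path="train.txt", block_size=128)
# print(len(dataset), dataset[0]["input_ids"].shape)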
|
AdaMix/src/transformers/data/datasets/language_modeling.py/0
|
{
"file_path": "AdaMix/src/transformers/data/datasets/language_modeling.py",
"repo_id": "AdaMix",
"token_count": 11051
}
| 47 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex sacremoses requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .file_utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
|
AdaMix/src/transformers/dependency_versions_check.py/0
|
{
"file_path": "AdaMix/src/transformers/dependency_versions_check.py",
"repo_id": "AdaMix",
"token_count": 546
}
| 48 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List, Optional, Tuple
import tensorflow as tf
from .file_utils import ModelOutput
@dataclass
class TFBaseModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(tf.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFBaseModelOutputWithPooling(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
This output is usually *not* a good summary of the semantic content of the input, you're often better with
averaging or pooling the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: tf.Tensor = None
pooler_output: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFBaseModelOutputWithPast(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSeq2SeqModelOutput(ModelOutput):
"""
    Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
last_hidden_state: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_last_hidden_state: Optional[tf.Tensor] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFCausalLMOutput(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFCausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFMaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when :obj:`labels` is provided):
Masked language modeling (MLM) loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSeq2SeqLMOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_last_hidden_state: Optional[tf.Tensor] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFNextSentencePredictorOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of non-masked labels, returned when :obj:`next_sentence_label` is provided):
Next sentence prediction loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(batch_size, )`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSeq2SeqSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence sentence classification models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_last_hidden_state: Optional[tf.Tensor] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFMultipleChoiceModelOutput(ModelOutput):
"""
Base class for outputs of multiple choice models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(batch_size, )`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`):
Classification scores (before SoftMax). `num_choices` is the second dimension of the input tensors
(see `input_ids` above).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFTokenClassifierOutput(ModelOutput):
"""
Base class for outputs of token classification models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(n,)`, `optional`, where n is the number of unmasked labels, returned when ``labels`` is provided):
Classification loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of question answering models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(batch_size, )`, `optional`, returned when :obj:`start_positions` and :obj:`end_positions` are provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
start_logits: tf.Tensor = None
end_logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSeq2SeqQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence question answering models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[tf.Tensor] = None
start_logits: tf.Tensor = None
end_logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_last_hidden_state: Optional[tf.Tensor] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFSequenceClassifierOutputWithPast(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`tf.Tensor` of shape :obj:`(batch_size, )`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (:obj:`List[tf.Tensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`tf.Tensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size,
num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
``past_key_values`` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
logits: tf.Tensor = None
past_key_values: Optional[List[tf.Tensor]] = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
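# ---------------------------------------------------------------------------------------------
# Illustrative note (an added sketch, not part of the original module): every dataclass above
# inherits from ``ModelOutput``, so fields can be read by attribute when ``return_dict=True``,
# or as a plain tuple (with unset fields dropped) when ``return_dict=False`` is passed.
# Commented usage sketch; ``model``, ``input_ids`` and ``labels`` are placeholders:
#
#     outputs = model(input_ids, labels=labels, return_dict=True)
#     loss, logits = outputs.loss, outputs.logits          # attribute access
#     hidden = outputs.hidden_states                       # None unless output_hidden_states=True
#     loss, logits = model(input_ids, labels=labels, return_dict=False)[:2]   # tuple access
# ---------------------------------------------------------------------------------------------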
|
AdaMix/src/transformers/modeling_tf_outputs.py/0
|
{
"file_path": "AdaMix/src/transformers/modeling_tf_outputs.py",
"repo_id": "AdaMix",
"token_count": 13508
}
| 49 |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ALBERT checkpoint."""
import argparse
import torch
from transformers import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
# Initialise PyTorch model
config = AlbertConfig.from_json_file(albert_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = AlbertForPreTraining(config)
# Load weights from tf checkpoint
load_tf_weights_in_albert(model, config, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
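# Illustrative invocation (an added sketch, not part of the original script); all paths below are
# placeholders for an actual TF checkpoint, its config file and the desired output file:
#
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/albert/model.ckpt-best \
#         --albert_config_file /path/to/albert/albert_config.json \
#         --pytorch_dump_path /path/to/output/pytorch_model.bin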
|
AdaMix/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py/0
|
{
"file_path": "AdaMix/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py",
"repo_id": "AdaMix",
"token_count": 734
}
| 50 |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TF 2.0 DistilBERT model
"""
import warnings
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_distilbert import DistilBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
_CONFIG_FOR_DOC = "DistilBertConfig"
_TOKENIZER_FOR_DOC = "DistilBertTokenizer"
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"distilbert-base-uncased",
"distilbert-base-uncased-distilled-squad",
"distilbert-base-cased",
"distilbert-base-cased-distilled-squad",
"distilbert-base-multilingual-cased",
"distilbert-base-uncased-finetuned-sst-2-english",
# See all DistilBERT models at https://huggingface.co/models?filter=distilbert
]
class TFEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.dim = config.dim
self.initializer_range = config.initializer_range
self.max_position_embeddings = config.max_position_embeddings
self.embeddings_sum = tf.keras.layers.Add()
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.dropout)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.dim],
initializer=get_initializer(initializer_range=self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.dim],
initializer=get_initializer(initializer_range=self.initializer_range),
)
super().build(input_shape)
def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
"""
Applies the word and position embeddings to the input tensors.
Returns:
final_embeddings (:obj:`tf.Tensor`): output embedding tensor.
"""
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if position_ids is None:
position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
final_embeddings = self.embeddings_sum(inputs=[inputs_embeds, position_embeds])
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
class TFMultiHeadSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.n_heads = config.n_heads
self.dim = config.dim
self.dropout = tf.keras.layers.Dropout(config.attention_dropout)
self.output_attentions = config.output_attentions
assert self.dim % self.n_heads == 0, f"Hidden size {self.dim} not divisible by number of heads {self.n_heads}"
self.q_lin = tf.keras.layers.Dense(
config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin"
)
self.k_lin = tf.keras.layers.Dense(
config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin"
)
self.v_lin = tf.keras.layers.Dense(
config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin"
)
self.out_lin = tf.keras.layers.Dense(
config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin"
)
self.pruned_heads = set()
def prune_heads(self, heads):
raise NotImplementedError
def call(self, query, key, value, mask, head_mask, output_attentions, training=False):
"""
Parameters:
query: tf.Tensor(bs, seq_length, dim)
key: tf.Tensor(bs, seq_length, dim)
value: tf.Tensor(bs, seq_length, dim)
mask: tf.Tensor(bs, seq_length)
Returns:
weights: tf.Tensor(bs, n_heads, seq_length, seq_length) Attention weights
context: tf.Tensor(bs, seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
"""
bs, q_length, dim = shape_list(query)
k_length = shape_list(key)[1]
# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
# assert key.size() == value.size()
dim_per_head = tf.math.divide(self.dim, self.n_heads)
dim_per_head = tf.cast(dim_per_head, dtype=tf.int32)
mask_reshape = [bs, 1, 1, k_length]
def shape(x):
""" separate heads """
return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
def unshape(x):
""" group heads """
return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
q = tf.cast(q, dtype=tf.float32)
q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32)))
k = tf.cast(k, dtype=q.dtype)
scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, q_length, k_length)
mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)
# scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length)
mask = tf.cast(mask, dtype=scores.dtype)
scores = scores - 1e30 * (1.0 - mask)
weights = tf.nn.softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, q_length, dim)
context = self.out_lin(context) # (bs, q_length, dim)
if output_attentions:
return (context, weights)
else:
return (context,)
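# Illustrative note (an added sketch, not part of the original file): the masking above is additive --
# ``scores - 1e30 * (1.0 - mask)`` pushes padded positions to a huge negative value so they receive
# (numerically) zero weight after the softmax. A tiny, self-contained check of the idea:
#
#     import tensorflow as tf
#     scores = tf.constant([[2.0, 1.0, 0.5]])
#     mask = tf.constant([[1.0, 1.0, 0.0]])       # last position is padding
#     masked = tf.nn.softmax(scores - 1e30 * (1.0 - mask), axis=-1)
#     # masked ~= [[0.73, 0.27, 0.00]] -- the padded position gets no attention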
class TFFFN(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.lin1 = tf.keras.layers.Dense(
config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1"
)
self.lin2 = tf.keras.layers.Dense(
config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2"
)
assert config.activation in ["relu", "gelu"], "activation ({}) must be in ['relu', 'gelu']".format(
config.activation
)
self.activation = get_tf_activation(config.activation)
def call(self, input, training=False):
x = self.lin1(input)
x = self.activation(x)
x = self.lin2(x)
x = self.dropout(x, training=training)
return x
class TFTransformerBlock(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.n_heads = config.n_heads
self.dim = config.dim
self.hidden_dim = config.hidden_dim
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation = config.activation
self.output_attentions = config.output_attentions
assert (
config.dim % config.n_heads == 0
), f"Hidden size {config.dim} not dividable by number of heads {config.n_heads}"
self.attention = TFMultiHeadSelfAttention(config, name="attention")
self.sa_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm")
self.ffn = TFFFN(config, name="ffn")
self.output_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm")
def call(self, x, attn_mask, head_mask, output_attentions, training=False): # removed: src_enc=None, src_len=None
"""
Parameters:
x: tf.Tensor(bs, seq_length, dim)
attn_mask: tf.Tensor(bs, seq_length)
Outputs:
sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length) The attention weights
ffn_output: tf.Tensor(bs, seq_length, dim) The output of the transformer block contextualization.
"""
# Self-Attention
sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training)
if output_attentions:
sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
else:  # handle the case where the attention layer returned a 1-tuple (no attention weights requested)
# assert type(sa_output) == tuple
sa_output = sa_output[0]
sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
# Feed Forward Network
ffn_output = self.ffn(sa_output, training=training) # (bs, seq_length, dim)
ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
output = (ffn_output,)
if output_attentions:
output = (sa_weights,) + output
return output
class TFTransformer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.n_layers = config.n_layers
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.layer = [TFTransformerBlock(config, name="layer_._{}".format(i)) for i in range(config.n_layers)]
def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False):
# docstyle-ignore
"""
Parameters:
x: tf.Tensor(bs, seq_length, dim) Input sequence embedded.
attn_mask: tf.Tensor(bs, seq_length) Attention mask on the sequence.
Returns:
hidden_state: tf.Tensor(bs, seq_length, dim)
Sequence of hidden states in the last (top) layer
all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)]
Tuple of length n_layers with the hidden states from each layer.
Optional: only if output_hidden_states=True
all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)]
Tuple of length n_layers with the attention weights from each layer
Optional: only if output_attentions=True
"""
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_state = x
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training)
hidden_state = layer_outputs[-1]
if output_attentions:
assert len(layer_outputs) == 2
attentions = layer_outputs[0]
all_attentions = all_attentions + (attentions,)
else:
assert len(layer_outputs) == 1, f"Incorrect number of outputs {len(layer_outputs)} instead of 1"
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
)
@keras_serializable
class TFDistilBertMainLayer(tf.keras.layers.Layer):
config_class = DistilBertConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.return_dict = config.use_return_dict
self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings
self.transformer = TFTransformer(config, name="transformer") # Encoder
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings.weight = value
self.embeddings.vocab_size = value.shape[0]
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def call(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.ones(input_shape) # (bs, seq_length)
inputs["attention_mask"] = tf.cast(inputs["attention_mask"], dtype=tf.float32)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if inputs["head_mask"] is not None:
raise NotImplementedError
else:
inputs["head_mask"] = [None] * self.num_hidden_layers
embedding_output = self.embeddings(
inputs["input_ids"], inputs_embeds=inputs["inputs_embeds"]
) # (bs, seq_length, dim)
tfmr_output = self.transformer(
embedding_output,
inputs["attention_mask"],
inputs["head_mask"],
inputs["output_attentions"],
inputs["output_hidden_states"],
inputs["return_dict"],
training=inputs["training"],
)
return tfmr_output # last-layer hidden-state, (all hidden_states), (all attentions)
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class TFDistilBertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DistilBertConfig
base_model_prefix = "distilbert"
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
DISTILBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its model (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
and behavior.
.. note::
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
the first positional argument:
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids})`
Parameters:
config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
DISTILBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.DistilBertTokenizer`. See
:func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attention tensors of all attention layers. See ``attentions`` under returned
tensors for more detail. This argument can only be used in eager mode; in graph mode, the value in the
config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail. This argument can only be used in eager mode; in graph mode, the value in the config will be
used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode; in graph mode, the value will always be set to ``True``.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
DISTILBERT_START_DOCSTRING,
)
class TFDistilBertModel(TFDistilBertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.distilbert = TFDistilBertMainLayer(config, name="distilbert") # Embeddings
@add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.distilbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
class TFDistilBertLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.dim = config.dim
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self):
return self.input_embeddings
def set_output_embeddings(self, value):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self):
return {"bias": self.bias}
def set_bias(self, value):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states):
seq_length = shape_list(tensor=hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.dim])
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
@add_start_docstrings(
"""DistilBert Model with a `masked language modeling` head on top. """,
DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.vocab_size = config.vocab_size
self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
self.vocab_transform = tf.keras.layers.Dense(
config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform"
)
self.act = get_tf_activation("gelu")
self.vocab_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm")
self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector")
def get_lm_head(self):
return self.vocab_projector
def get_prefix_bias_name(self):
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return self.name + "/" + self.vocab_projector.name
@add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see the ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
(masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
distilbert_output = self.distilbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = distilbert_output[0] # (bs, seq_length, dim)
prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
prediction_logits = self.act(prediction_logits) # (bs, seq_length, dim)
prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
prediction_logits = self.vocab_projector(prediction_logits)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_logits)
if not inputs["return_dict"]:
output = (prediction_logits,) + distilbert_output[1:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_logits,
hidden_states=distilbert_output.hidden_states,
attentions=distilbert_output.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
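# Illustrative fill-mask style usage (an added sketch, not part of the original file), assuming the
# ``distilbert-base-uncased`` checkpoint listed above:
#
#     import tensorflow as tf
#     from transformers import DistilBertTokenizer, TFDistilBertForMaskedLM
#     tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
#     model = TFDistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
#     inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="tf")
#     logits = model(**inputs).logits                                   # (1, seq_length, vocab_size)
#     mask_index = tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0]
#     predicted_id = int(tf.argmax(logits[0, mask_index]))
#     tokenizer.decode([predicted_id])                                  # expected to be close to "paris"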
@add_start_docstrings(
"""
DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
self.pre_classifier = tf.keras.layers.Dense(
config.dim,
kernel_initializer=get_initializer(config.initializer_range),
activation="relu",
name="pre_classifier",
)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
self.dropout = tf.keras.layers.Dropout(config.seq_classif_dropout)
@add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ...,
config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
distilbert_output = self.distilbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
pooled_output = self.dropout(pooled_output, training=inputs["training"]) # (bs, dim)
logits = self.classifier(pooled_output)  # (bs, num_labels)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + distilbert_output[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=distilbert_output.hidden_states,
attentions=distilbert_output.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
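# Illustrative usage (an added sketch, not part of the original file): because the class above mixes
# in ``TFSequenceClassificationLoss``, passing ``labels`` at call time also returns a per-example
# loss of shape ``(batch_size,)`` next to the logits. Texts and labels below are placeholders:
#
#     import tensorflow as tf
#     from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification
#     tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
#     model = TFDistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)
#     enc = tokenizer(["great movie", "terrible movie"], padding=True, return_tensors="tf")
#     outputs = model(**enc, labels=tf.constant([1, 0]))
#     outputs.loss, outputs.logits                     # (2,) losses and (2, num_labels) logits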
@add_start_docstrings(
"""
DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
for Named-Entity-Recognition (NER) tasks.
""",
DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.distilbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for RocStories/SWAG tasks.
""",
DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
self.dropout = tf.keras.layers.Dropout(config.seq_classif_dropout)
self.pre_classifier = tf.keras.layers.Dense(
config.dim,
kernel_initializer=get_initializer(config.initializer_range),
activation="relu",
name="pre_classifier",
)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(
DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices - 1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors (see
:obj:`input_ids` above).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
distilbert_output = self.distilbert(
flat_input_ids,
flat_attention_mask,
inputs["head_mask"],
flat_inputs_embeds,
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
pooled_output = self.dropout(pooled_output, training=inputs["training"]) # (bs, dim)
logits = self.classifier(pooled_output)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + distilbert_output[1:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=distilbert_output.hidden_states,
attentions=distilbert_output.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
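# --- illustrative sketch (not part of the original file) ---------------------
# TFDistilBertForMultipleChoice.call() expects tensors of shape
# (batch_size, num_choices, seq_len) and flattens them to
# (batch_size * num_choices, seq_len) before running DistilBERT. The helper below
# shows one way such inputs could be built; the checkpoint name and example texts
# are assumptions for illustration only.
def _example_multiple_choice_inputs():
    import tensorflow as tf
    from transformers import DistilBertTokenizer

    tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    prompt = "The sky is"
    choices = ["blue on a clear day.", "a relational database."]
    encoding = tokenizer([prompt] * len(choices), choices, return_tensors="tf", padding=True)
    # add the leading batch dimension: (num_choices, seq_len) -> (1, num_choices, seq_len)
    return {key: tf.expand_dims(value, 0) for key, value in encoding.items()}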
@add_start_docstrings(
"""
DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
assert config.num_labels == 2, f"Incorrect number of labels {config.num_labels} instead of 2"
self.dropout = tf.keras.layers.Dropout(config.qa_dropout)
@add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
distilbert_output = self.distilbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
hidden_states = self.dropout(hidden_states, training=inputs["training"]) # (bs, max_query_len, dim)
logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + distilbert_output[1:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=distilbert_output.hidden_states,
attentions=distilbert_output.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
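# --- illustrative sketch (not part of the original file) ---------------------
# The QA head above produces per-token start and end logits; a minimal way to turn
# them into an answer string is to take the argmax of each and decode the tokens in
# between. This ignores the (start <= end) constraint handled by the QA pipelines.
def _example_extract_span(start_logits, end_logits, input_ids, tokenizer):
    import tensorflow as tf

    start_index = int(tf.math.argmax(start_logits, axis=-1)[0])
    end_index = int(tf.math.argmax(end_logits, axis=-1)[0])
    return tokenizer.decode(input_ids[0][start_index : end_index + 1])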
|
AdaMix/src/transformers/models/distilbert/modeling_tf_distilbert.py/0
|
{
"file_path": "AdaMix/src/transformers/models/distilbert/modeling_tf_distilbert.py",
"repo_id": "AdaMix",
"token_count": 21668
}
| 51 |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Encoder-Decoder architectures """
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_outputs import Seq2SeqLMOutput
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_encoder_decoder import EncoderDecoderConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "EncoderDecoderConfig"
ENCODER_DECODER_START_DOCSTRING = r"""
This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
:meth:`~transformers.AutoModel.from_pretrained` function and the decoder is loaded via
:meth:`~transformers.AutoModelForCausalLM.from_pretrained` function. Cross-attention layers are automatically added
to the decoder and should be fine-tuned on a downstream generative task, like summarization.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
tasks was shown in `Leveraging Pre-trained Checkpoints for Sequence Generation Tasks
<https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
(see the examples for more information).
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.EncoderDecoderConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ENCODER_DECODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
Provide these for sequence-to-sequence training of the decoder. Indices can be obtained using
:class:`~transformers.PretrainedTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and
:meth:`transformers.PreTrainedTokenizer.__call__` for details.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
encoder_outputs (:obj:`tuple(torch.FloatTensor)`, `optional`):
This tuple must consist of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
:obj:`attentions`) :obj:`last_hidden_state` (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,
sequence_length, hidden_size)`) is a tensor of hidden-states at the output of the last layer of the
encoder. Used in the cross-attention of the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`decoder_input_ids`
indices into associated vectors than the model's internal embedding lookup matrix.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss for the decoder. Indices should be in ``[-100, 0,
..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
If set to ``True``, the model will return a :class:`~transformers.file_utils.Seq2SeqLMOutput` instead of a
plain tuple.
kwargs: (`optional`) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
- Without a prefix which will be input as ``**encoder_kwargs`` for the encoder forward function.
- With a `decoder_` prefix which will be input as ``**decoder_kwargs`` for the decoder forward function.
"""
@add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
class EncoderDecoderModel(PreTrainedModel):
r"""
:class:`~transformers.EncoderDecoderModel` is a generic model class that will be instantiated as a transformer
architecture with one of the base model classes of the library as encoder and another one as decoder when created
with the :meth:`~transformers.AutoModel.from_pretrained` class method for the encoder and
:meth:`~transformers.AutoModelForCausalLM.from_pretrained` class method for the decoder.
"""
config_class = EncoderDecoderConfig
base_model_prefix = "encoder_decoder"
def __init__(
self,
config: Optional[PretrainedConfig] = None,
encoder: Optional[PreTrainedModel] = None,
decoder: Optional[PreTrainedModel] = None,
):
assert config is not None or (
encoder is not None and decoder is not None
), "Either a configuration or an Encoder and a decoder has to be provided"
if config is None:
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
else:
assert isinstance(config, self.config_class), "config: {} has to be of type {}".format(
config, self.config_class
)
# initialize with config
super().__init__(config)
if encoder is None:
from ..auto.modeling_auto import AutoModel
encoder = AutoModel.from_config(config.encoder)
if decoder is None:
from ..auto.modeling_auto import AutoModelForCausalLM
decoder = AutoModelForCausalLM.from_config(config.decoder)
self.encoder = encoder
self.decoder = decoder
assert (
self.encoder.get_output_embeddings() is None
), "The encoder {} should not have a LM Head. Please use a model without LM Head"
# tie encoder, decoder weights if config set accordingly
self.tie_weights()
def tie_weights(self):
# tie encoder & decoder if needed
if self.config.tie_encoder_decoder:
# tie encoder and decoder base model
decoder_base_model_prefix = self.decoder.base_model_prefix
self._tie_encoder_decoder_weights(
self.encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix
)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def get_input_embeddings(self):
return self.encoder.get_input_embeddings()
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.decoder.set_output_embeddings(new_embeddings)
@classmethod
def from_encoder_decoder_pretrained(
cls,
encoder_pretrained_model_name_or_path: str = None,
decoder_pretrained_model_name_or_path: str = None,
*model_args,
**kwargs
) -> PreTrainedModel:
r"""
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
The model is set in evaluation mode by default using :obj:`model.eval()` (Dropout modules are deactivated). To
train the model, you need to first set it back in training mode with :obj:`model.train()`.
Params:
encoder_pretrained_model_name_or_path (:obj: `str`, `optional`):
Information necessary to initiate the encoder. Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
decoder_pretrained_model_name_or_path (:obj: `str`, `optional`, defaults to `None`):
Information necessary to initiate the decoder. Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (remaining positional arguments, `optional`):
All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`).
- To update the encoder configuration, use the prefix `encoder_` for each configuration parameter.
- To update the decoder configuration, use the prefix `decoder_` for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a :obj:`config` is provided or automatically loaded.
Example::
>>> from transformers import EncoderDecoderModel
>>> # initialize a bert2bert from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased')
>>> # saving model after fine-tuning
>>> model.save_pretrained("./bert2bert")
>>> # load fine-tuned model
>>> model = EncoderDecoderModel.from_pretrained("./bert2bert")
"""
kwargs_encoder = {
argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# remove encoder, decoder kwargs from kwargs
for key in kwargs_encoder.keys():
del kwargs["encoder_" + key]
for key in kwargs_decoder.keys():
del kwargs["decoder_" + key]
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
assert (
encoder_pretrained_model_name_or_path is not None
), "If `model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has to be defined"
from ..auto.modeling_auto import AutoModel
if "config" not in kwargs_encoder:
from ..auto.configuration_auto import AutoConfig
encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model from a decoder model. Cross-attention and casual mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_encoder["config"] = encoder_config
encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
assert (
decoder_pretrained_model_name_or_path is not None
), "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined"
from ..auto.modeling_auto import AutoModelForCausalLM
if "config" not in kwargs_decoder:
from ..auto.configuration_auto import AutoConfig
decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder["config"] = decoder_config
if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
logger.warning(
f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`"
)
decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# instantiate config with corresponding kwargs
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
return cls(encoder=encoder, decoder=decoder, config=config)
@add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
Returns:
Examples::
>>> from transformers import EncoderDecoderModel, BertTokenizer
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert from pre-trained checkpoints
>>> # forward
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
>>> # training
>>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids)
>>> loss, logits = outputs.loss, outputs.logits
>>> # save and load from pretrained
>>> model.save_pretrained("bert2bert")
>>> model = EncoderDecoderModel.from_pretrained("bert2bert")
>>> # generation
>>> generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.pad_token_id)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs_encoder,
)
encoder_hidden_states = encoder_outputs[0]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=attention_mask,
inputs_embeds=decoder_inputs_embeds,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
past_key_values=past_key_values,
return_dict=return_dict,
**kwargs_decoder,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqLMOutput(
loss=decoder_outputs.loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(
self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past=past)
decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
input_dict = {
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"decoder_input_ids": decoder_inputs["input_ids"],
"encoder_outputs": encoder_outputs,
"past_key_values": decoder_inputs["past_key_values"],
"use_cache": use_cache,
}
return input_dict
def _reorder_cache(self, past, beam_idx):
# apply decoder cache reordering here
return self.decoder._reorder_cache(past, beam_idx)
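# --- illustrative sketch (not part of the original file) ---------------------
# forward() above routes extra keyword arguments by prefix: anything starting with
# "decoder_" is stripped of the prefix and passed to the decoder, everything else
# goes to the encoder. The standalone helper below mirrors that convention.
def _example_split_encoder_decoder_kwargs(**kwargs):
    kwargs_encoder = {name: value for name, value in kwargs.items() if not name.startswith("decoder_")}
    kwargs_decoder = {
        name[len("decoder_"):]: value for name, value in kwargs.items() if name.startswith("decoder_")
    }
    return kwargs_encoder, kwargs_decoder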
|
AdaMix/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py/0
|
{
"file_path": "AdaMix/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py",
"repo_id": "AdaMix",
"token_count": 9947
}
| 52 |
# coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" FSMT configuration """
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class DecoderConfig(PretrainedConfig):
r"""
Configuration class for FSMT's decoder specific things. note: this is a private helper class
"""
model_type = "fsmt_decoder"
def __init__(self, vocab_size=0, bos_token_id=0):
super().__init__()
self.vocab_size = vocab_size
self.bos_token_id = bos_token_id
class FSMTConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.FSMTModel`. It is used to
instantiate a FSMT model according to the specified arguments, defining the model architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
langs (:obj:`List[str]`):
A list with the source language and the target language (e.g., ['en', 'ru']).
src_vocab_size (:obj:`int`):
Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed to the forward method in the encoder.
tgt_vocab_size (:obj:`int`):
Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed to the forward method in the decoder.
d_model (:obj:`int`, `optional`, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (:obj:`int`, `optional`, defaults to 12):
Number of encoder layers.
decoder_layers (:obj:`int`, `optional`, defaults to 12):
Number of decoder layers.
encoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
dropout (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (:obj:`int`, `optional`, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`True`):
Scale embeddings by dividing by sqrt(d_model).
bos_token_id (:obj:`int`, `optional`, defaults to 0):
Beginning of stream token id.
pad_token_id (:obj:`int`, `optional`, defaults to 1):
Padding token id.
eos_token_id (:obj:`int`, `optional`, defaults to 2):
End of stream token id.
decoder_start_token_id (:obj:`int`, `optional`):
This model starts decoding with :obj:`eos_token_id`
encoder_layerdrop (:obj:`float`, `optional`, defaults to 0.0):
The LayerDrop probability for the encoder. See the `LayerDrop paper <https://arxiv.org/abs/1909.11556>`__ for more details.
decoder_layerdrop (:obj:`float`, `optional`, defaults to 0.0):
The LayerDrop probability for the decoder. See the `LayerDrop paper <https://arxiv.org/abs/1909.11556>`__ for more details.
is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether this is an encoder/decoder model.
tie_word_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to tie input and output embeddings.
num_beams (:obj:`int`, `optional`, defaults to 5):
Number of beams for beam search that will be used by default in the :obj:`generate` method of the model. 1
means no beam search.
length_penalty (:obj:`float`, `optional`, defaults to 1):
Exponential penalty to the length that will be used by default in the :obj:`generate` method of the model.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Flag that will be used by default in the :obj:`generate` method of the model. Whether to stop the beam
search when at least ``num_beams`` sentences are finished per batch or not.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (:obj:`int`, `optional`, defaults to 2):
The id of the token to force as the last generated token when :obj:`max_length` is reached. Usually set to
:obj:`eos_token_id`.
Examples::
>>> from transformers import FSMTConfig, FSMTModel
>>> config = FSMTConfig.from_pretrained('facebook/wmt19-en-ru')
>>> model = FSMTModel(config)
"""
model_type = "fsmt"
# update the defaults from config file
def __init__(
self,
langs=["en", "de"],
src_vocab_size=42024,
tgt_vocab_size=42024,
activation_function="relu",
d_model=1024,
max_length=200,
max_position_embeddings=1024,
encoder_ffn_dim=4096,
encoder_layers=12,
encoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_ffn_dim=4096,
decoder_layers=12,
decoder_attention_heads=16,
decoder_layerdrop=0.0,
attention_dropout=0.0,
dropout=0.1,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=2,
is_encoder_decoder=True,
scale_embedding=True,
tie_word_embeddings=False,
num_beams=5,
length_penalty=1.0,
early_stopping=False,
use_cache=True,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
forced_eos_token_id=2,
**common_kwargs
):
if "hidden_size" in common_kwargs:
raise ValueError("hidden size is called d_model")
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
decoder_start_token_id=decoder_start_token_id,
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
**common_kwargs,
)
self.langs = langs
self.src_vocab_size = src_vocab_size
self.tgt_vocab_size = tgt_vocab_size
self.d_model = d_model # encoder_embed_dim and decoder_embed_dim
self.max_length = max_length
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = self.num_hidden_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.max_position_embeddings = max_position_embeddings
self.init_std = init_std # Normal(0, this parameter)
self.activation_function = activation_function
self.num_beams = num_beams
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.decoder = DecoderConfig(vocab_size=tgt_vocab_size, bos_token_id=eos_token_id)
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
# 3 Types of Dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.dropout = dropout
self.use_cache = use_cache
@property
def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
def hidden_size(self) -> int:
return self.d_model
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig`.
Returns:
:obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = copy.deepcopy(self.__dict__)
output["decoder"] = self.decoder.to_dict()
output["model_type"] = self.__class__.model_type
return output
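# --- illustrative sketch (not part of the original file) ---------------------
# FSMTConfig keeps separate source/target vocabularies and exposes d_model and
# encoder_attention_heads through the hidden_size / num_attention_heads properties
# defined above. The sizes below are arbitrary values chosen for illustration.
def _example_fsmt_config():
    config = FSMTConfig(
        langs=["en", "ru"],
        src_vocab_size=31232,
        tgt_vocab_size=31232,
        d_model=512,
        encoder_attention_heads=8,
        decoder_attention_heads=8,
    )
    # hidden_size aliases d_model and num_attention_heads aliases encoder_attention_heads
    return config.hidden_size, config.num_attention_heads, config.to_dict()["tgt_vocab_size"]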
|
AdaMix/src/transformers/models/fsmt/configuration_fsmt.py/0
|
{
"file_path": "AdaMix/src/transformers/models/fsmt/configuration_fsmt.py",
"repo_id": "AdaMix",
"token_count": 4232
}
| 53 |
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...utils import logging
from ..bert.tokenization_bert import BasicTokenizer
from ..xlm.tokenization_xlm import XLMTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizer(XLMTokenizer):
"""
Construct a BPE tokenizer for HerBERT.
Peculiarities:
- uses BERT's pre-tokenizer: BasicTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a
punctuation character will be treated separately.
- Such pretokenized input is then BPE-subtokenized
This tokenizer inherits from :class:`~transformers.XLMTokenizer` which contains most of the methods. Users should
refer to the superclass for more information regarding methods.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, **kwargs):
kwargs["cls_token"] = "<s>"
kwargs["unk_token"] = "<unk>"
kwargs["pad_token"] = "<pad>"
kwargs["mask_token"] = "<mask>"
kwargs["sep_token"] = "</s>"
kwargs["do_lowercase_and_remove_accent"] = False
kwargs["additional_special_tokens"] = []
super().__init__(**kwargs)
self.bert_pre_tokenizer = BasicTokenizer(
do_lower_case=False, never_split=self.all_special_tokens, tokenize_chinese_chars=False, strip_accents=False
)
def _tokenize(self, text):
pre_tokens = self.bert_pre_tokenizer.tokenize(text)
split_tokens = []
for token in pre_tokens:
if token:
split_tokens.extend([t for t in self.bpe(token).split(" ")])
return split_tokens
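# --- illustrative sketch (not part of the original file) ---------------------
# _tokenize() above first splits the text with BERT's BasicTokenizer and then applies
# XLM-style BPE to every pre-token. The snippet below sketches end-to-end usage; the
# checkpoint name is the one listed in PRETRAINED_VOCAB_FILES_MAP above.
def _example_herbert_tokenize(text="Kto ma lepszą sztukę, ma lepszy rząd."):
    tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-base-cased")
    return tokenizer.tokenize(text)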
|
AdaMix/src/transformers/models/herbert/tokenization_herbert.py/0
|
{
"file_path": "AdaMix/src/transformers/models/herbert/tokenization_herbert.py",
"repo_id": "AdaMix",
"token_count": 1160
}
| 54 |
# coding=utf-8
# Copyright 2010, The Microsoft Research Asia LayoutLM Team authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" LayoutLM model configuration """
from ...utils import logging
from ..bert.configuration_bert import BertConfig
logger = logging.get_logger(__name__)
LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"layoutlm-base-uncased": "https://huggingface.co/microsoft/layoutlm-base-uncased/resolve/main/config.json",
"layoutlm-large-uncased": "https://huggingface.co/microsoft/layoutlm-large-uncased/resolve/main/config.json",
}
class LayoutLMConfig(BertConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.LayoutLMModel`. It is used to
instantiate a LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the LayoutLM `layoutlm-base-uncased
<https://huggingface.co/microsoft/layoutlm-base-uncased>`__ architecture.
Configuration objects inherit from :class:`~transformers.BertConfig` and can be used to control the model outputs.
Read the documentation from :class:`~transformers.BertConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 30522):
Vocabulary size of the LayoutLM model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed to the forward method of :class:`~transformers.LayoutLMModel`.
hidden_size (:obj:`int`, `optional`, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, `optional`, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, `optional`, defaults to 2):
The vocabulary size of the :obj:`token_type_ids` passed into :class:`~transformers.LayoutLMModel`.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
max_2d_position_embeddings (:obj:`int`, `optional`, defaults to 1024):
The maximum value that the 2D position embedding might ever be used with. Typically set this to something large
just in case (e.g., 1024).
Examples::
>>> from transformers import LayoutLMModel, LayoutLMConfig
>>> # Initializing a LayoutLM configuration
>>> configuration = LayoutLMConfig()
>>> # Initializing a model from the configuration
>>> model = LayoutLMModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "layoutlm"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
gradient_checkpointing=False,
max_2d_position_embeddings=1024,
**kwargs
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
gradient_checkpointing=gradient_checkpointing,
**kwargs,
)
self.max_2d_position_embeddings = max_2d_position_embeddings
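# --- illustrative sketch (not part of the original file) ---------------------
# max_2d_position_embeddings bounds the coordinate values LayoutLM can embed, so word
# bounding boxes are conventionally rescaled to a 0-1000 grid before being fed to the
# model. The helper below is an assumption-level sketch of that rescaling.
def _example_normalize_bbox(bbox, page_width, page_height, scale=1000):
    x0, y0, x1, y1 = bbox
    return [
        int(scale * x0 / page_width),
        int(scale * y0 / page_height),
        int(scale * x1 / page_width),
        int(scale * y1 / page_height),
    ]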
|
AdaMix/src/transformers/models/layoutlm/configuration_layoutlm.py/0
|
{
"file_path": "AdaMix/src/transformers/models/layoutlm/configuration_layoutlm.py",
"repo_id": "AdaMix",
"token_count": 2338
}
| 55 |
# coding=utf-8
# Copyright 2020 Google and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
for pegasus_name, hf_name in PATTERNS:
k = k.replace(pegasus_name, hf_name)
return k
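# --- illustrative sketch (not part of the original file) ---------------------
# rename_state_dict_key() applies the PATTERNS rewrites in order, so a TF-style
# variable name such as "encoder/layer_0/ffn/dense/kernel" should come out roughly as
# "encoder.layers.0.fc1.weight" in the Bart-like state dict naming.
def _example_rename_state_dict_key():
    return rename_state_dict_key("encoder/layer_0/ffn/dense/kernel")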
# See appendix C of paper for all hyperparams
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
cfg_kwargs = DEFAULTS.copy()
cfg_kwargs.update(cfg_updates)
cfg = PegasusConfig(**cfg_kwargs)
torch_model = PegasusForConditionalGeneration(cfg)
sd = torch_model.model.state_dict()
mapping = {}
for k, v in tf_weights.items():
new_k = rename_state_dict_key(k)
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
if "dense" in k or "proj" in new_k:
v = v.T
mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
mapping.update(**empty_biases)
missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
unexpected_missing = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
init_vars = tf.train.list_variables(path)
tf_weights = {}
ignore_name = ["Adafactor", "global_step"]
for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
skip_key = any([pat in name for pat in ignore_name])
if skip_key:
continue
array = tf.train.load_variable(path, name)
tf_weights[name] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
# save tokenizer first
dataset = Path(ckpt_path).parent.name
desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(save_dir)
# convert model
tf_weights = get_tf_weights_as_numpy(ckpt_path)
cfg_updates = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
cfg_updates["task_specific_params"] = task_specific_params
torch_model = convert_pegasus(tf_weights, cfg_updates)
torch_model.save_pretrained(save_dir)
sd = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight")
sd.pop("model.encoder.embed_positions.weight")
torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
args = parser.parse_args()
if args.save_dir is None:
dataset = Path(args.tf_ckpt_path).parent.name
args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
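# --- illustrative note (not part of the original file) -----------------------
# Example invocation; the checkpoint and output paths are placeholders:
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
# When save_dir is omitted, it defaults to pegasus/<dataset>, where <dataset> is the
# parent directory name of the checkpoint path (here "aeslc").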
|
AdaMix/src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py/0
|
{
"file_path": "AdaMix/src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py",
"repo_id": "AdaMix",
"token_count": 2026
}
| 56 |
# coding=utf-8
# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch ProphetNet model, ported from ProphetNet repo(fairsequery_states version). """
import copy
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import LayerNorm
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_prophetnet import ProphetNetConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "ProphetNetConfig"
_TOKENIZER_FOR_DOC = "ProphetNetTokenizer"
PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/prophetnet-large-uncased",
# See all ProphetNet models at https://huggingface.co/models?filter=prophetnet
]
PROPHETNET_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
Original ProphetNet code can be found at <https://github.com/microsoft/ProphetNet>. Checkpoints were converted
from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the
file ``convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py``.
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
behavior.
Parameters:
config (:class:`~transformers.ProphetNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
PROPHETNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.ProphetNetTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
ProphetNet uses the :obj:`eos_token_id` as the starting token for :obj:`decoder_input_ids` generation. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_inputs` and
modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more
information on the default strategy.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
:obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
`optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
PROPHETNET_STANDALONE_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.ProphetNetTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
def softmax(hidden_state, dim, onnx_trace=False):
if onnx_trace:
return F.softmax(hidden_state.float(), dim=dim)
else:
return F.softmax(hidden_state, dim=dim, dtype=torch.float32)
def ngram_attention_bias(sequence_length, ngram, device, dtype):
"""
This function computes the bias for the predict stream
"""
left_block = torch.ones((ngram, sequence_length, sequence_length), device=device, dtype=dtype) * float("-inf")
right_block = left_block.detach().clone()
# create bias
for stream_idx in range(ngram):
right_block[stream_idx].fill_diagonal_(0, wrap=False)
left_block[stream_idx].triu_(-stream_idx + 1)
left_block[:, :, 0] = 0
return torch.cat([left_block, right_block], dim=2)
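# Rough sketch of the returned bias, assuming sequence_length=2 and ngram=1 (0 = attend, -inf = blocked);
# the shape is (ngram, sequence_length, 2 * sequence_length):
#   [[[0., -inf,   0., -inf],
#     [0.,   0., -inf,   0.]]]
# The left half lets the first predict stream attend to main-stream tokens up to the current position,
# while the right half only exposes the predict token at the same position.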
def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False):
"""
This function computes individual parts of the relative position buckets. For more detail, see paper.
"""
inv_relative_positions = -relative_positions
rel_positions_bucket = 0
if is_bidirectional:
num_buckets = num_buckets // 2
rel_positions_bucket = (
rel_positions_bucket
+ torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets
)
inv_relative_positions = torch.abs(inv_relative_positions)
else:
inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions))
max_exact = num_buckets // 2
is_small = torch.lt(inv_relative_positions, max_exact)
val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log(
max_distance / max_exact
) * (num_buckets - max_exact)
val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int()
rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large)
return rel_positions_bucket
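# This follows a T5-style bucketing scheme (sketch of the behaviour): distances smaller than
# num_buckets // 2 get their own bucket, larger distances are binned logarithmically up to
# max_distance, and anything beyond that shares the last bucket. For example (roughly),
# compute_relative_buckets(4, 8, torch.tensor([[0, -1, -2, -3]])) yields tensor([[0, 1, 2, 2]]).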
def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids):
"""
This function computes both main and predict relative position buckets. For more detail, see paper.
"""
# main stream
main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1)
main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1)
# predicting stream
predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1)
predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1)
predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1)
# get both position buckets
main_relative_position_buckets = compute_relative_buckets(
num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False
)
predict_relative_position_buckets = compute_relative_buckets(
num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False
)
return main_relative_position_buckets, predict_relative_position_buckets
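# Shapes, as derived from the code above: main_relative_position_buckets is
# (batch_size, sequence_length, sequence_length) and predict_relative_position_buckets is
# (batch_size, sequence_length, 2 * sequence_length), since the predict stream attends to the
# concatenation of main-stream and predict-stream positions.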
@dataclass
class ProphetNetSeq2SeqLMOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, config.vocab_size)`):
Prediction scores of the main stream language modeling head (scores for each vocabulary token before
SoftMax).
logits_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
SoftMax).
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`.
Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
decoder_ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
decoder_ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
compute the weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, encoder_sequence_length)`. Attentions weights of the encoder, after the attention
softmax, used to compute the weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
logits_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@property
def decoder_cross_attentions(self):
warnings.warn(
"`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions` instead.",
FutureWarning,
)
return self.cross_attentions
@dataclass
class ProphetNetSeq2SeqModelOutput(ModelOutput):
"""
Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`):
Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used, only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
last_hidden_state_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`):
Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`.
Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
decoder_ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
decoder_ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
compute the weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, encoder_sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
last_hidden_state: torch.FloatTensor
last_hidden_state_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@property
def decoder_cross_attentions(self):
warnings.warn(
"`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions` instead.",
FutureWarning,
)
return self.cross_attentions
@dataclass
class ProphetNetDecoderModelOutput(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`):
Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used, only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
last_hidden_state_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`):
Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`.
Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
compute the weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor
last_hidden_state_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class ProphetNetDecoderLMOutput(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, config.vocab_size)`):
Prediction scores of the main stream language modeling head (scores for each vocabulary token before
SoftMax).
logits_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
SoftMax).
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
used (see :obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`.
Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`.
Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
decoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
weighted average in the self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads,
encoder_sequence_length, decoder_sequence_length)`.
Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
compute the weighted average in the cross-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
logits_ngram: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
class ProphetNetPreTrainedModel(PreTrainedModel):
config_class = ProphetNetConfig
base_model_prefix = "prophetnet"
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert (
decoder_start_token_id is not None
), "self.model.config.decoder_start_token_id has to be defined. In ProphetNet it is usually set to the pad_token_id. See ProphetNet docs for more information"
# shift inputs to the right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only non-negative values"
return shifted_input_ids
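# Rough example (assuming decoder_start_token_id=0): labels [[5, 7, 9]] are shifted to
# decoder_input_ids [[0, 5, 7]], and any -100 values that remain after the shift are replaced
# by pad_token_id.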
class ProphetNetPositionalEmbeddings(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting
based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to
the forward function.
"""
def __init__(self, config: ProphetNetConfig):
self.max_length = config.max_position_embeddings
super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id)
def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None):
assert (position_ids is None) or (
self.padding_idx is None
), "If position_ids is pre-computed then padding_idx should not be set."
if position_ids is None:
if past_key_values is not None:
# position_ids is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
prev_num_input_ids = past_key_values[0][0].shape[2]
num_input_ids = inputs_shape[1] + prev_num_input_ids
position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * (
int(self.padding_idx + num_input_ids)
)
else:
if attention_mask is None:
attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device)
# retrieve position_ids from input_ids / attention_mask
position_ids = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() + self.padding_idx
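# Sketch: attention_mask [[1, 1, 1, 0]] with padding_idx=1 would give position_ids [[2, 3, 4, 1]],
# i.e. real tokens count up from padding_idx + 1 while padding positions fall back to padding_idx.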
# make sure position_ids are not bigger than max_length
position_ids = position_ids.clamp(0, self.max_length - 1)
return super().forward(position_ids), position_ids
def _forward(self, position_ids):
return super().forward(position_ids)
class ProphetNetAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
config: ProphetNetConfig,
num_attn_heads: int,
):
super().__init__()
hidden_size = config.hidden_size
self.attention_dropout = config.attention_dropout
self.dropout = config.dropout
self.num_attn_heads = num_attn_heads
self.head_dim = hidden_size // num_attn_heads
assert (
self.head_dim * num_attn_heads == hidden_size
), "`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and `config.num_decoder_attention_heads`"
self.key_proj = nn.Linear(hidden_size, hidden_size)
self.value_proj = nn.Linear(hidden_size, hidden_size)
self.query_proj = nn.Linear(hidden_size, hidden_size)
self.out_proj = nn.Linear(hidden_size, hidden_size)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states,
key_value_states: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
past_key_value: Optional[Tuple[Tensor]] = None,
output_attentions: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
batch_size, tgt_len, hidden_size = hidden_states.size()
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
assert list(hidden_states.size()) == [
batch_size,
tgt_len,
hidden_size,
], f"Size of hidden states should be {batch_size, tgt_len, hidden_size}, but is {hidden_states.size()}"
# previous time steps are cached - no need to recompute key and value if they are static
query_states = self.query_proj(hidden_states) / (self.head_dim ** 0.5)
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.key_proj(key_value_states), -1, batch_size)
value_states = self._shape(self.value_proj(key_value_states), -1, batch_size)
else:
# self_attention
key_states = self._shape(self.key_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.value_proj(hidden_states), -1, batch_size)
if is_cross_attention:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
# project states into the correct shape
proj_shape = (batch_size * self.num_attn_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
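# Sketch of the shapes at this point: query/key/value states are
# (batch_size * num_attn_heads, seq_len, head_dim), so the bmm below yields attention weights
# of shape (batch_size * num_attn_heads, tgt_len, src_len).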
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
assert attn_weights.size() == (
batch_size * self.num_attn_heads,
tgt_len,
src_len,
), f"`attn_weights` should be of size {batch_size * self.num_attn_heads, tgt_len, src_len}, but is of size {attn_weights.shape}"
# This is part of a workaround to get around fork/join parallelism not supporting Optional types.
if attention_mask is not None and attention_mask.dim() == 0:
attention_mask = None
assert attention_mask is None or attention_mask.size() == (
self.num_attn_heads * batch_size,
1,
src_len,
), f"`attention_mask` should be `None` or of shape {batch_size * self.num_attn_heads, 1, src_len}, but is {attention_mask.shape}"
if attention_mask is not None: # don't attend to padding symbols
attn_weights = attn_weights + attention_mask
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(batch_size, self.num_attn_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_attn_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_weights = F.softmax(attn_weights, dim=-1)
attn_probs = F.dropout(
attn_weights,
p=self.attention_dropout,
training=self.training,
)
attn_output = torch.bmm(attn_probs, value_states)
assert attn_output.size() == (
batch_size * self.num_attn_heads,
tgt_len,
self.head_dim,
), f"`attn_output` should be of shape {batch_size * self.num_attn_heads, tgt_len, self.head_dim}, but is of shape {attn_output.size()}"
attn_output = (
attn_output.view(batch_size, self.num_attn_heads, tgt_len, self.head_dim)
.transpose(1, 2)
.reshape(batch_size, tgt_len, hidden_size)
)
attn_output = self.out_proj(attn_output)
attn_output = F.dropout(attn_output, p=self.dropout, training=self.training)
return attn_output, attn_weights_reshaped, past_key_value
class ProphetNetFeedForward(nn.Module):
"""
This is the residual two-layer feed-forward block based on the original Transformer implementation.
"""
def __init__(self, config: ProphetNetConfig, ffn_dim: int):
super().__init__()
self.activation_fn = ACT2FN[config.activation_function]
self.intermediate = nn.Linear(config.hidden_size, ffn_dim)
self.output = nn.Linear(ffn_dim, config.hidden_size)
self.activation_dropout = config.activation_dropout
self.dropout = config.dropout
def forward(self, hidden_states):
hidden_states = self.intermediate(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.output(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
class ProphetNetNgramSelfAttention(nn.Module):
def __init__(self, config: ProphetNetConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.num_buckets = config.num_buckets
self.relative_max_distance = config.relative_max_distance
self.num_attn_heads = config.num_decoder_attention_heads
self.dropout = config.dropout
self.attention_dropout = config.attention_dropout
self.head_dim = config.hidden_size // self.num_attn_heads
self.ngram = config.ngram
assert (
self.head_dim * self.num_attn_heads == config.hidden_size
), "config.hidden_size must be divisible by num_attn_heads"
# key, value, query projection
self.key_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.value_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.query_proj = nn.Linear(config.hidden_size, config.hidden_size)
# out projection
self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
# rel position embeddings
self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads)
# for onnx runtime
self.onnx_trace = False
def _shape(self, tensor, seq_len, batch_size):
return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(
self,
hidden_states,
past_key_value: Optional[Tuple[Tensor]] = None,
attention_mask=None,
extended_predict_attention_mask=None,
main_relative_position_buckets=None,
predict_relative_position_buckets=None,
position_ids=None,
):
batch_size, ngram_sequence_length, hidden_size = hidden_states.size()
assert list(hidden_states.size()) == [
batch_size,
ngram_sequence_length,
hidden_size,
], f"`hidden_states` should be of shape {batch_size, ngram_sequence_length, hidden_size}, but is of shape {hidden_states.shape}"
# project
query_states = self.query_proj(hidden_states)
key_states = self.key_proj(hidden_states)
value_states = self.value_proj(hidden_states)
# normalize
query_states = query_states / (self.head_dim ** 0.5)
# reshape
query_states = self._shape(query_states, ngram_sequence_length, batch_size)
key_states = self._shape(key_states, -1, batch_size)
value_states = self._shape(value_states, -1, batch_size)
proj_shape = (batch_size * self.num_attn_heads, -1, self.head_dim)
query_states = query_states.view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
# chunk into main stream and predict stream
hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1)
query_states_list = query_states.chunk(1 + self.ngram, dim=1)
key_states_list = key_states.chunk(1 + self.ngram, dim=1)
value_states_list = value_states.chunk(1 + self.ngram, dim=1)
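# hidden_states is laid out as [main stream | predict stream 1 | ... | predict stream ngram], each of
# length sequence_length, so e.g. with ngram=2 the chunks above split a tensor of length
# 3 * sequence_length into three equally sized streams (sketch).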
main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:]
main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:]
main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:]
main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:]
# saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim)
if past_key_value is not None:
prev_main_key_states = past_key_value[0].view(batch_size * self.num_attn_heads, -1, self.head_dim)
main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=1)
prev_main_value_states = past_key_value[1].view(batch_size * self.num_attn_heads, -1, self.head_dim)
main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=1)
# Update cache
past_key_value = (
main_key_states.view(batch_size, self.num_attn_heads, -1, self.head_dim),
main_value_states.view(batch_size, self.num_attn_heads, -1, self.head_dim),
)
# get seq_length of main stream only
sequence_length = ngram_sequence_length // (1 + self.ngram)
# MAIN-STREAM
# main attn weights
main_attn_weights = torch.bmm(main_query_states, main_key_states.transpose(1, 2))
# retrieve relative position embeddings for each layer -> see paper for more details
main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(
main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets
)
main_attn_weights = main_attn_weights + main_relative_pos_embeddings
if attention_mask is not None:
main_attn_weights = main_attn_weights + attention_mask
main_attn_probs = softmax(
main_attn_weights,
dim=-1,
onnx_trace=self.onnx_trace,
).type_as(main_attn_weights)
main_attn_probs = F.dropout(main_attn_probs, p=self.attention_dropout, training=self.training)
# project to attn_output
main_attn_output = torch.bmm(main_attn_probs, main_value_states)
# reshape so that num_heads dim is merged into last `head_dim` axis
main_attn_output = (
main_attn_output.view(batch_size, self.num_attn_heads, sequence_length, self.head_dim)
.transpose(1, 2)
.reshape(batch_size, 1, sequence_length, hidden_size)
)
main_attn_output = self.out_proj(main_attn_output)
# PREDICT-STREAM
# [ngram, B*head, T, c]
predict_query_states = torch.cat(predict_query_states_list, 0).view(
self.ngram, -1, sequence_length, self.head_dim
)
# [ngram, B*head, 2*T, c]
predict_key_states = torch.cat(
[torch.cat([main_key_states, key], 1).unsqueeze(0) for key in predict_key_states_list], 0
)
# [ngram, T, B, C]
predict_hidden_states = torch.cat(hidden_states_predict_list, 0).view(
self.ngram, sequence_length, batch_size, hidden_size
)
# [ngram, B*head, 2*T, c]
predict_value_states = torch.cat(
[torch.cat([main_value_states, v_p], 1).unsqueeze(0) for v_p in predict_value_states_list], 0
)
# [ngram, B*head, T, 2*T]
predict_attn_weights = torch.einsum("nbtc,nbsc->nbts", (predict_query_states, predict_key_states))
# [ngram, B*head, T, S]
# retrieve relative position embeddings for each layer -> see paper for more details
predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(
predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets
)
# [ngram, B*head, T, 2*T]
predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings
if extended_predict_attention_mask is not None:
predict_attn_weights = predict_attn_weights + extended_predict_attention_mask.to(
predict_attn_weights.dtype
)
predict_attn_probs = softmax(
predict_attn_weights,
dim=-1,
onnx_trace=self.onnx_trace,
).type_as(predict_attn_weights)
predict_attn_probs = F.dropout(predict_attn_probs, p=self.attention_dropout, training=self.training)
# project to attention output
# [ngram, B*head, T, c]
predict_attn_output = torch.einsum("nbts,nbsc->nbtc", (predict_attn_probs, predict_value_states))
# reshape so that num_heads dim is merged into last `head_dim` axis
# [ngram, B, T, C]
predict_attn_output = (
predict_attn_output.view(self.ngram, batch_size, self.num_attn_heads, sequence_length, self.head_dim)
.permute(1, 0, 3, 2, 4)
.reshape(batch_size, self.ngram, sequence_length, hidden_size)
)
predict_attn_output = self.out_proj(predict_attn_output)
# concat to single attn output
# [B, (1 + ngram) * T, C]
attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size)
# reshape into better form for `config.output_attentions`
main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1)
predict_attn_probs = predict_attn_probs.view(
self.ngram, batch_size, self.num_attn_heads, sequence_length, -1
).transpose(0, 1)
attn_output = F.dropout(attn_output, p=self.dropout, training=self.training)
return attn_output, main_attn_probs, predict_attn_probs, past_key_value
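# Returned shapes (sketch): attn_output is (batch_size, (1 + ngram) * sequence_length, hidden_size),
# main_attn_probs is (batch_size, num_attn_heads, sequence_length, key_len) and predict_attn_probs is
# (batch_size, ngram, num_attn_heads, sequence_length, key_len).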
def get_main_relative_pos_embeddings(
self, hidden_states, attn_weights, position_ids, main_relative_position_buckets
):
# input hidden_states [B,T,C], input attn_weights [B*head,T,S], input position_ids [B,T] or [1,1]
if main_relative_position_buckets is None:
batch_size, sequence_length = hidden_states.shape[:2]
relative_positions = (
torch.arange(1, attn_weights.shape[-1] + 1)
.unsqueeze(0)
.unsqueeze(0)
.repeat(batch_size, sequence_length, 1)
.to(position_ids.device)
)
relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(
batch_size, sequence_length, 1
) # [B, T, s]
main_relative_position_buckets = compute_relative_buckets(
self.num_buckets, self.relative_max_distance, relative_positions, False
)
rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) # [B,T,Buckets*head]
rel_pos_embeddings = rel_pos_embeddings.view(
rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)
).permute(
0, 3, 1, 2
) # [B,T,Buckets,head]
rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:2] + (-1,)) # [B*head,T,Buckets]
main_relative_position_buckets = (
main_relative_position_buckets.repeat(1, self.num_attn_heads, 1)
.view(-1, main_relative_position_buckets.shape[-1])
.long()
) # [B*head*T, T]
rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1)) # [B*head*T,Buckets]
main_relative_pos_embeddings = torch.gather(
rel_pos_embeddings, dim=1, index=main_relative_position_buckets
).view(attn_weights.shape[:2] + (-1,))
return main_relative_pos_embeddings
def get_predict_relative_pos_embeddings(
self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets
):
# input hidden_states [ngram, T,B,C], input attn_weights [ngram, B*head,T,S], input position_ids [B,T] or [1,1], input predict_relative_position_buckets [B,T, 2*T] or None
sequence_length, batch_size = hidden_states.shape[1:3]
if predict_relative_position_buckets is None:
key_sequence_length = attn_weights.shape[-1]
assert (
position_ids[0][0] == key_sequence_length - 1
), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... (key_sequence_length - 1)"
relative_positions = (
torch.arange(0, key_sequence_length)
.unsqueeze(0)
.unsqueeze(0)
.repeat(batch_size, sequence_length, 1)
.to(position_ids.device)
)
relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
predict_relative_position_buckets = compute_relative_buckets(
self.num_buckets, self.relative_max_distance, relative_positions, False
)
hidden_states = hidden_states.transpose(1, 2) # [ngram, B, T, C]
rel_pos_embeddings = self.relative_pos_embeddings(hidden_states).view(
hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)
) # [ngram, B, T, bucket, head]
rel_pos_embeddings = rel_pos_embeddings.permute(0, 1, 4, 2, 3).reshape(
self.ngram * batch_size * self.num_attn_heads, sequence_length, -1
) # [ngram*B*head, T, bucket]
predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0).repeat(
self.ngram, 1, self.num_attn_heads, 1
) # [ngram, B, head*T, S]
rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1))
predict_relative_position_buckets = predict_relative_position_buckets.view(
-1, predict_relative_position_buckets.size(-1)
).long() # [ngram*B*head*T, S]
predict_relative_pos_embeddings = torch.gather(
rel_pos_embeddings, dim=1, index=predict_relative_position_buckets
).view(
self.ngram, batch_size * self.num_attn_heads, sequence_length, -1
) # [ngram, B*head, T, S]
return predict_relative_pos_embeddings
class ProphetNetEncoderLayer(nn.Module):
"""
Encoder block for ProphetNet
"""
def __init__(self, config: ProphetNetConfig):
super().__init__()
# 1st residual block
self.self_attn = ProphetNetAttention(config, config.num_encoder_attention_heads)
self.self_attn_layer_norm = LayerNorm(config.hidden_size)
# 2nd residual block
self.feed_forward = ProphetNetFeedForward(config, config.encoder_ffn_dim)
self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
def forward(self, hidden_states, attention_mask, output_attentions: bool = False):
# 1st residual block
attention_output, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = self.self_attn_layer_norm(attention_output + hidden_states)
# 2nd residual block
feed_forward_output = self.feed_forward(hidden_states)
hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class ProphetNetDecoderLayer(nn.Module):
"""
Decoder block for ProphetNet
"""
def __init__(self, config: ProphetNetConfig):
super().__init__()
# 1st residual block
self.self_attn = ProphetNetNgramSelfAttention(config)
self.self_attn_layer_norm = LayerNorm(config.hidden_size)
# 2nd residual block
if config.add_cross_attention:
self.cross_attn = ProphetNetAttention(config, config.num_decoder_attention_heads)
self.cross_attn_layer_norm = LayerNorm(config.hidden_size)
# 3rd residual block
self.feed_forward = ProphetNetFeedForward(config, config.decoder_ffn_dim)
self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attn_mask=None,
extended_predict_attention_mask=None,
main_relative_position_buckets=None,
predict_relative_position_buckets=None,
position_ids=None,
past_key_value=None,
use_cache: bool = True,
output_attentions: bool = False,
):
# 1st residual block
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
ngram_attention_output, self_attn_weights, self_attn_weights_ngram, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
extended_predict_attention_mask=extended_predict_attention_mask,
main_relative_position_buckets=main_relative_position_buckets,
predict_relative_position_buckets=predict_relative_position_buckets,
position_ids=position_ids,
)
hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output)
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attn_weights = None
if encoder_hidden_states is not None:
# 2nd residual block
attention_output, cross_attn_weights, cross_attn_present_key_value = self.cross_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attn_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# 3rd residual block
feed_forward_output = self.feed_forward(hidden_states)
hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
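# The returned tuple is (hidden_states,), optionally followed by
# (self_attn_weights, self_attn_weights_ngram, cross_attn_weights) when output_attentions is set,
# and by (present_key_value,) when use_cache is set; the decoder below indexes into it accordingly.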
@add_start_docstrings(
"The standalone encoder part of the ProphetNetModel.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetEncoder(ProphetNetPreTrainedModel):
r"""
word_embeddings (:obj:`torch.nn.Embeddings` of shape :obj:`(config.vocab_size, config.hidden_size)`, `optional`):
The word embedding parameters. This can be used to initialize :class:`~transformers.ProphetNetEncoder` with
pre-defined word embeddings instead of randomly initialized word embeddings.
"""
def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = None):
super().__init__(config)
self.word_embeddings = (
word_embeddings
if word_embeddings is not None
else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
)
self.position_embeddings = ProphetNetPositionalEmbeddings(config)
self.embeddings_layer_norm = LayerNorm(config.hidden_size)
self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)])
self.init_weights()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
@add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetEncoder
>>> import torch
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetEncoder.from_pretrained('patrickvonplaten/prophetnet-large-uncased-standalone')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None and inputs_embeds is None:
raise ValueError("Either input_ids or inputs_embeds has to be passed.")
elif input_ids is not None and inputs_embeds is not None:
raise ValueError("Make sure to only pass input_ids or inputs_embeds.")
elif input_ids is not None and inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
# prepare attention mask
if attention_mask is not None:
extended_attention_mask = (
1.0 - attention_mask[:, None, :].repeat(self.config.num_encoder_attention_heads, 1, 1)
) * -10000.0
extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype)
else:
extended_attention_mask = None
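# Sketch: attention_mask [[1, 1, 0]] becomes an additive mask with 0.0 for real tokens and
# -10000.0 for padding, broadcast over the query dimension and repeated once per encoder
# attention head.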
position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device)
hidden_states = inputs_embeds + position_embeddings
hidden_states = self.embeddings_layer_norm(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.config.dropout, training=self.training)
encoder_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_hidden_states = encoder_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
extended_attention_mask,
)
else:
layer_outputs = encoder_layer(
hidden_states, attention_mask=extended_attention_mask, output_attentions=output_attentions
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_hidden_states = encoder_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions
)
@add_start_docstrings(
"The standalone decoder part of the ProphetNetModel.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetDecoder(ProphetNetPreTrainedModel):
r"""
word_embeddings (:obj:`torch.nn.Embeddings` of shape :obj:`(config.vocab_size, config.hidden_size)`, `optional`):
The word embedding parameters. This can be used to initialize :class:`~transformers.ProphetNetDecoder` with
pre-defined word embeddings instead of randomly initialized word embeddings.
"""
def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = None):
super().__init__(config)
self.ngram = config.ngram
self.num_buckets = config.num_buckets
self.relative_max_distance = config.relative_max_distance
self.dropout = config.dropout
self.max_target_positions = config.max_position_embeddings
self.word_embeddings = (
word_embeddings
if word_embeddings is not None
else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
)
self.position_embeddings = ProphetNetPositionalEmbeddings(config)
self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None)
self.layers = nn.ModuleList([ProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)])
self.embeddings_layer_norm = LayerNorm(config.hidden_size)
self.init_weights()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
@add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ProphetNetDecoderModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetDecoder
>>> import torch
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetDecoder.from_pretrained('microsoft/prophetnet-large-uncased', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None and inputs_embeds is None:
raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.")
elif input_ids is not None and inputs_embeds is not None:
raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.")
elif input_ids is not None and inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
batch_size, sequence_length = inputs_embeds.shape[:2]
main_stream_pos_embed, position_ids = self.position_embeddings(
(batch_size, sequence_length),
device=inputs_embeds.device,
past_key_values=past_key_values,
)
if past_key_values is not None:
main_relative_position_buckets, predict_relative_position_buckets = None, None
else:
(
main_relative_position_buckets,
predict_relative_position_buckets,
) = self.compute_buffered_relative_buckets(position_ids)
predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1)
# add position embeddings
hidden_states = inputs_embeds + main_stream_pos_embed
ngram_embeddings = self.ngram_embeddings.weight
# prepare attention mask
if past_key_values is not None:
assert (
hidden_states.size(1) == 1
), "At the moment `use_cache` is only supported for `decoder_input_ids` of length 1"
ngram_hidden_states = [
(ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1)
for ngram in range(self.ngram)
]
extended_attention_mask = None
extended_predict_attention_mask = None
else:
ngram_hidden_states = [
(ngram_embeddings[ngram - 1] + predicting_stream_pos_embed) for ngram in range(self.ngram)
]
extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask)
extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask)
# prepare encoder attention mask
if encoder_attention_mask is not None:
extended_encoder_attention_mask = (
1.0 - encoder_attention_mask[:, None, :].repeat(self.config.num_decoder_attention_heads, 1, 1)
) * -10000.0
extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype)
else:
extended_encoder_attention_mask = None
hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1)
if self.embeddings_layer_norm:
hidden_states = self.embeddings_layer_norm(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# init attentions, hidden_states and cache with empty tuples
all_main_stream_hidden_states = () if output_hidden_states else None
all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None
all_main_stream_attns = () if output_attentions else None
all_ngram_stream_attns = () if output_attentions else None
all_cross_attns = () if output_attentions and self.config.add_cross_attention else None
present_key_values = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
# grad cannot be kept because tensor is sliced
all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
if self.config.ngram > 0:
all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
past_key_value = past_key_values[idx] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
extended_attention_mask,
encoder_hidden_states,
extended_encoder_attention_mask,
extended_predict_attention_mask,
main_relative_position_buckets,
predict_relative_position_buckets,
position_ids,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=extended_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attn_mask=extended_encoder_attention_mask,
extended_predict_attention_mask=extended_predict_attention_mask,
main_relative_position_buckets=main_relative_position_buckets,
predict_relative_position_buckets=predict_relative_position_buckets,
position_ids=position_ids,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
present_key_values += (layer_outputs[4 if output_attentions else 1],)
if output_attentions:
all_main_stream_attns += (layer_outputs[1],)
all_ngram_stream_attns += (layer_outputs[2],)
if self.config.add_cross_attention:
all_cross_attns += (layer_outputs[3],)
if output_hidden_states:
all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
if self.config.ngram > 0:
all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
# split last_hidden_state for return
last_hidden_state = hidden_states[:, :sequence_length]
last_hidden_state_ngram = hidden_states[:, sequence_length:] if self.config.ngram > 0 else None
if not return_dict:
return tuple(
v
for v in [
last_hidden_state,
last_hidden_state_ngram,
present_key_values,
all_main_stream_hidden_states,
all_ngram_stream_hidden_states,
all_main_stream_attns,
all_ngram_stream_attns,
all_cross_attns,
]
if v is not None
)
return ProphetNetDecoderModelOutput(
last_hidden_state=last_hidden_state,
last_hidden_state_ngram=last_hidden_state_ngram,
past_key_values=present_key_values,
hidden_states=all_main_stream_hidden_states,
hidden_states_ngram=all_ngram_stream_hidden_states,
attentions=all_main_stream_attns,
ngram_attentions=all_ngram_stream_attns,
cross_attentions=all_cross_attns,
)
def compute_buffered_relative_buckets(self, position_ids):
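        # Relative position buckets are computed once over the full `max_target_positions` range and then
        # sliced to the current sequence length; the predict-stream buckets additionally pick up the block
        # shifted by `max_target_positions`, mirroring the concatenated main/predict stream layout.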
batch_size, sequence_length = position_ids.shape
position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1)
main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets(
self.num_buckets, self.relative_max_distance, position_ids
)
# buffer relative buckets
main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1)
predict_relative_buckets = torch.cat(
[
predict_relative_buckets[:, :sequence_length, :sequence_length],
predict_relative_buckets[
:, :sequence_length, self.max_target_positions : self.max_target_positions + sequence_length
],
],
2,
).repeat(batch_size, 1, 1)
return main_relative_buckets, predict_relative_buckets
def prepare_attention_mask(self, hidden_states, attention_mask):
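        # The returned mask is additive: allowed positions contribute 0.0 while disallowed positions get a large
        # negative value (-inf for the causal part, -10000.0 for padding) so that they vanish after the softmax.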
batch_size, seq_length = hidden_states.shape[:2]
# get causal mask
causal_mask = hidden_states.new(seq_length, seq_length).float().fill_(-float("inf"))
causal_mask = torch.triu(causal_mask, 1)
extended_causal_mask = causal_mask[:seq_length, :seq_length][None, :, :].expand(
(batch_size,) + causal_mask.shape
)
# add usual attention mask
if attention_mask is not None:
extended_attention_mask = (1.0 - attention_mask[:, None, :]) * -10000.0
extended_attention_mask = extended_causal_mask + extended_attention_mask
else:
extended_attention_mask = extended_causal_mask
return extended_attention_mask.repeat(self.config.num_decoder_attention_heads, 1, 1).to(hidden_states.dtype)
def prepare_predict_attention_mask(self, hidden_states, attention_mask):
batch_size, seq_length = hidden_states.shape[:2]
# get causal mask
predict_causal_mask = ngram_attention_bias(
self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype
)
predict_causal_mask = torch.cat(
[
predict_causal_mask[:, :seq_length, :seq_length],
predict_causal_mask[
:, :seq_length, self.max_target_positions : self.max_target_positions + seq_length
],
],
dim=-1,
)
extended_predict_causal_mask = predict_causal_mask[:, None, :, :].expand(
predict_causal_mask.shape[:1] + (batch_size,) + predict_causal_mask.shape[1:]
)
# add usual attention mask
if attention_mask is not None:
extended_attention_mask = (1.0 - attention_mask[None, :, None, :]) * -10000.0
extended_attention_mask = extended_attention_mask.expand((self.ngram, batch_size, seq_length, seq_length))
# predicted stream attention_mask should always be 0
extended_attention_mask = torch.cat(
[extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1
)
extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask
else:
extended_predict_attention_mask = extended_predict_causal_mask
return extended_predict_attention_mask.repeat(1, self.config.num_decoder_attention_heads, 1, 1).to(
hidden_states.dtype
)
@add_start_docstrings(
"The bare ProphetNet Model outputting raw hidden-states without any specific head on top.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetModel(ProphetNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
encoder_config = copy.deepcopy(config)
encoder_config.is_encoder_decoder = False
encoder_config.use_cache = False
self.encoder = ProphetNetEncoder(encoder_config, self.word_embeddings)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings)
self.init_weights()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
self.encoder.word_embeddings = self.word_embeddings
self.decoder.word_embeddings = self.word_embeddings
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(PROPHETNET_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ProphetNetSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs: Optional[Tuple] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetModel
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetModel.from_pretrained('microsoft/prophetnet-large-uncased')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state # main stream hidden states
>>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states
"""
        use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return ProphetNetSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram,
decoder_attentions=decoder_outputs.attentions,
decoder_ngram_attentions=decoder_outputs.ngram_attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetForConditionalGeneration(ProphetNetPreTrainedModel):
def __init__(self, config: ProphetNetConfig):
super().__init__(config)
self.prophetnet = ProphetNetModel(config)
self.padding_idx = config.pad_token_id
self.disable_ngram_loss = config.disable_ngram_loss
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_input_embeddings(self):
return self.prophetnet.word_embeddings
@add_start_docstrings_to_model_forward(PROPHETNET_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ProphetNetSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the sequence-to-sequence language modeling loss. Indices should be in :obj:`[-100, 0, ...,
            config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for
            labels in ``[0, ..., config.vocab_size - 1]``
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetForConditionalGeneration
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetForConditionalGeneration.from_pretrained('microsoft/prophetnet-large-uncased')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> logits_next_token = outputs.logits # logits to predict next token as usual
>>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
outputs = self.prophetnet(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
batch_size, sequence_length = (
decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2]
)
predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
predict_logits = self.lm_head(predicting_streams)
logits = predict_logits[:, 0]
logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
# To use .view in loss computation, make sure that logits is contiguous.
if not logits.is_contiguous():
logits = logits.contiguous()
loss = None
if labels is not None:
loss = self._compute_loss(predict_logits, labels)
if not return_dict:
all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
else:
return ProphetNetSeq2SeqLMOutput(
loss=loss,
logits=logits,
logits_ngram=logits_ngram,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states,
decoder_attentions=outputs.decoder_attentions,
decoder_ngram_attentions=outputs.decoder_ngram_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def _compute_loss(self, logits, labels, ignore_index=-100):
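        # The same labels are tiled across the `ngram` predict streams (only the first stream is kept when
        # `disable_ngram_loss` is set); the NLL loss is then optionally mixed with a uniform label-smoothing
        # term controlled by `config.eps`.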
expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
for i in range(self.config.ngram):
if i > 0 and self.disable_ngram_loss:
break
expend_targets[i, :, :] = labels
lprobs = F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
)
loss = F.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
if self.config.eps > 0.0:
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
smooth_loss = smooth_loss[non_masked_tokens]
smooth_loss = smooth_loss.mean()
eps_i = self.config.eps / lprobs.size(-1)
loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
return loss
def prepare_inputs_for_generation(
self, decoder_input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
assert encoder_outputs is not None, "`encoder_outputs` have to be passed for generation."
if past:
decoder_input_ids = decoder_input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"use_cache": use_cache,
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
@staticmethod
# Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration._reorder_cache
def _reorder_cache(past, beam_idx):
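        # During beam search the cached key/value states must follow the reordering of the beams:
        # `beam_idx` selects, for every new beam, which old beam's cache entries to reuse.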
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
def get_encoder(self):
return self.prophetnet.encoder
def get_decoder(self):
return self.prophetnet.decoder
@add_start_docstrings(
"The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal language modeling.",
PROPHETNET_START_DOCSTRING,
)
class ProphetNetForCausalLM(ProphetNetPreTrainedModel):
def __init__(self, config):
# set config for CLM
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.prophetnet = ProphetNetDecoderWrapper(config)
self.padding_idx = config.pad_token_id
self.disable_ngram_loss = config.disable_ngram_loss
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.init_weights()
def get_input_embeddings(self):
return self.prophetnet.decoder.word_embeddings
def set_input_embeddings(self, value):
self.prophetnet.decoder.word_embeddings = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.prophetnet.decoder = decoder
def get_decoder(self):
return self.prophetnet.decoder
@add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ProphetNetDecoderLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size - 1]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size - 1]``
Returns:
Example::
>>> from transformers import ProphetNetTokenizer, ProphetNetForCausalLM
>>> import torch
>>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = ProphetNetForCausalLM.from_pretrained('microsoft/prophetnet-large-uncased')
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> # Model can also be used with EncoderDecoder framework
>>> from transformers import BertTokenizer, EncoderDecoderModel, ProphetNetTokenizer
>>> import torch
>>> tokenizer_enc = BertTokenizer.from_pretrained('bert-large-uncased')
>>> tokenizer_dec = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-large-uncased", "microsoft/prophetnet-large-uncased")
>>> ARTICLE = (
... "the us state department said wednesday it had received no "
... "formal word from bolivia that it was expelling the us ambassador there "
... "but said the charges made against him are `` baseless ."
... )
>>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids
>>> labels = tokenizer_dec("us rejects charges against its ambassador in bolivia", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:])
>>> loss = outputs.loss
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
outputs = self.prophetnet.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2]
predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
predict_logits = self.lm_head(predicting_streams)
logits = predict_logits[:, 0]
logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
loss = None
if labels is not None:
loss = self._compute_loss(predict_logits, labels)
if not return_dict:
all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
else:
return ProphetNetDecoderLMOutput(
loss=loss,
logits=logits,
logits_ngram=logits_ngram,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
hidden_states_ngram=outputs.hidden_states_ngram,
attentions=outputs.attentions,
ngram_attentions=outputs.ngram_attentions,
cross_attentions=outputs.cross_attentions,
)
def _compute_loss(self, logits, labels, ignore_index=-100):
expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
for i in range(self.config.ngram):
if i > 0 and self.disable_ngram_loss:
break
expend_targets[i, :, :] = labels
lprobs = F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
)
loss = F.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
if self.config.eps > 0.0:
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
smooth_loss = smooth_loss[non_masked_tokens]
smooth_loss = smooth_loss.mean()
eps_i = self.config.eps / lprobs.size(-1)
loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
return loss
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM._reorder_cache
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
class ProphetNetDecoderWrapper(ProphetNetPreTrainedModel):
"""
This is a wrapper class, so that :class:`~transformers.ProphetNetForCausalLM` can correctly be loaded from
pretrained prophetnet classes.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = ProphetNetDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
|
AdaMix/src/transformers/models/prophetnet/modeling_prophetnet.py/0
|
{
"file_path": "AdaMix/src/transformers/models/prophetnet/modeling_prophetnet.py",
"repo_id": "AdaMix",
"token_count": 44565
}
| 57 |
# coding=utf-8
# Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SqueezeBERT model configuration """
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"squeezebert/squeezebert-uncased": "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/config.json",
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/config.json",
"squeezebert/squeezebert-mnli-headless": "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/config.json",
}
class SqueezeBertConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.SqueezeBertModel`. It is used
to instantiate a SqueezeBERT model according to the specified arguments, defining the model architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 30522):
Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by
            the :obj:`input_ids` passed when calling :class:`~transformers.SqueezeBertModel`.
hidden_size (:obj:`int`, `optional`, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, `optional`, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, `optional`, defaults to 2):
The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.BertModel` or
:class:`~transformers.TFBertModel`.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
pad_token_id (:obj:`int`, `optional`, defaults to 0):
The ID of the token in the word embedding to use as padding.
embedding_size (:obj:`int`, `optional`, defaults to 768):
The dimension of the word embedding vectors.
q_groups (:obj:`int`, `optional`, defaults to 4):
The number of groups in Q layer.
k_groups (:obj:`int`, `optional`, defaults to 4):
The number of groups in K layer.
v_groups (:obj:`int`, `optional`, defaults to 4):
The number of groups in V layer.
post_attention_groups (:obj:`int`, `optional`, defaults to 1):
The number of groups in the first feed forward network layer.
intermediate_groups (:obj:`int`, `optional`, defaults to 4):
The number of groups in the second feed forward network layer.
output_groups (:obj:`int`, `optional`, defaults to 4):
The number of groups in the third feed forward network layer.
Examples::
>>> from transformers import SqueezeBertModel, SqueezeBertConfig
>>> # Initializing a SqueezeBERT configuration
>>> configuration = SqueezeBertConfig()
>>> # Initializing a model from the configuration above
>>> model = SqueezeBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained
checkpoints.
"""
pretrained_config_archive_map = SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "squeezebert"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
embedding_size=768,
q_groups=4,
k_groups=4,
v_groups=4,
post_attention_groups=1,
intermediate_groups=4,
output_groups=4,
**kwargs
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.embedding_size = embedding_size
self.q_groups = q_groups
self.k_groups = k_groups
self.v_groups = v_groups
self.post_attention_groups = post_attention_groups
self.intermediate_groups = intermediate_groups
self.output_groups = output_groups
|
AdaMix/src/transformers/models/squeezebert/configuration_squeezebert.py/0
|
{
"file_path": "AdaMix/src/transformers/models/squeezebert/configuration_squeezebert.py",
"repo_id": "AdaMix",
"token_count": 2772
}
| 58 |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...file_utils import _BaseLazyModule, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
if is_torch_available():
_import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
if is_torch_available():
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
)
else:
import importlib
import os
import sys
class _LazyModule(_BaseLazyModule):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
__file__ = globals()["__file__"]
__path__ = [os.path.dirname(__file__)]
def _get_module(self, module_name: str):
return importlib.import_module("." + module_name, self.__name__)
sys.modules[__name__] = _LazyModule(__name__, _import_structure)
|
AdaMix/src/transformers/models/tapas/__init__.py/0
|
{
"file_path": "AdaMix/src/transformers/models/tapas/__init__.py",
"repo_id": "AdaMix",
"token_count": 828
}
| 59 |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(transfo_xl_dataset_file, "rb") as fp:
corpus = pickle.load(fp, encoding="latin1")
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
corpus_vocab_dict = corpus.vocab.__dict__
torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
corpus_dict_no_vocab = corpus.__dict__
corpus_dict_no_vocab.pop("vocab", None)
pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
print("Save dataset to {}".format(pytorch_dataset_dump_path))
torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
config_path = os.path.abspath(transfo_xl_config_file)
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
# Initialise PyTorch model
if transfo_xl_config_file == "":
config = TransfoXLConfig()
else:
config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = TransfoXLLMHeadModel(config)
model = load_tf_weights_in_transfo_xl(model, config, tf_path)
# Save pytorch-model
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help="An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
|
AdaMix/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py/0
|
{
"file_path": "AdaMix/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py",
"repo_id": "AdaMix",
"token_count": 1969
}
| 60 |
# coding=utf-8
# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for Wav2Vec2."""
import json
import os
import sys
import warnings
from itertools import groupby
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/vocab.json",
},
"tokenizer_config_file": {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/tokenizer_config.json",
},
}
# Wav2Vec2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/wav2vec2-base-960h": sys.maxsize}
WAV2VEC2_KWARGS_DOCSTRING = r"""
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`False`):
Activates and controls padding. Accepts the following values:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
              single sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum
length is required by one of the truncation/padding parameters. If the model has no specific maximum
input length (like XLNet) truncation/padding to a maximum length will be deactivated.
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`):
If set, will return tensors instead of list of python integers. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to print more information and warnings.
"""
class Wav2Vec2CTCTokenizer(PreTrainedTokenizer):
"""
Constructs a Wav2Vec2CTC tokenizer.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains some of the main methods.
Users should refer to the superclass for more information regarding such methods.
Args:
vocab_file (:obj:`str`):
File containing the vocabulary.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The beginning of sentence token.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sentence token.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`"|"`):
The token used for defining the end of a word.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to accept lowercase input and lowercase the output when decoding.
**kwargs
Additional keyword arguments passed along to :class:`~transformers.PreTrainedTokenizer`
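    Example (a minimal usage sketch; the sample sentence is illustrative)::
        >>> from transformers import Wav2Vec2CTCTokenizer
        >>> tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
        >>> ids = tokenizer("HI THERE").input_ids  # characters map to ids one-by-one, " " becomes "|"
        >>> text = tokenizer.decode(ids)  # CTC-style decoding groups repeated tokens and drops "<pad>"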
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
word_delimiter_token="|",
do_lower_case=False,
**kwargs
):
super().__init__(
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
do_lower_case=do_lower_case,
word_delimiter_token=word_delimiter_token,
**kwargs,
)
self._word_delimiter_token = word_delimiter_token
self.do_lower_case = do_lower_case
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
@property
def word_delimiter_token(self) -> str:
"""
        :obj:`str`: Word delimiter token. Log an error if used while not having been set.
"""
if self._word_delimiter_token is None and self.verbose:
logger.error("Using word_delimiter_token, but it is not set yet.")
return None
return str(self._word_delimiter_token)
@property
def word_delimiter_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the word_delimiter_token in the vocabulary. Returns :obj:`None` if the token has
not been set.
"""
if self._word_delimiter_token is None:
return None
return self.convert_tokens_to_ids(self.word_delimiter_token)
@word_delimiter_token.setter
def word_delimiter_token(self, value):
self._word_delimiter_token = value
@word_delimiter_token_id.setter
def word_delimiter_token_id(self, value):
self._word_delimiter_token = self.convert_tokens_to_ids(value)
@property
def vocab_size(self) -> int:
return len(self.decoder)
def get_vocab(self) -> Dict:
return dict(self.encoder, **self.added_tokens_encoder)
def _tokenize(self, text, **kwargs):
"""
        Converts a string into a sequence of tokens (str), using the tokenizer.
"""
if self.do_lower_case:
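            # assumption: the pretrained Wav2Vec2 vocabularies use upper-case characters, so the input text is
            # upper-cased here; `do_lower_case` only lower-cases the decoded output (see `convert_tokens_to_string`)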
text = text.upper()
return list(text.replace(" ", self.word_delimiter_token))
def _convert_token_to_id(self, token: str) -> int:
"""Converts a token (str) in an index (integer) using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the vocab."""
result = self.decoder.get(index, self.unk_token)
return result
def convert_tokens_to_string(
self, tokens: List[str], group_tokens: bool = True, spaces_between_special_tokens: bool = False
) -> str:
"""
        Converts connectionist-temporal-classification (CTC) output tokens into a single string.
"""
# group same tokens into non-repeating tokens in CTC style decoding
if group_tokens:
tokens = [token_group[0] for token_group in groupby(tokens)]
# filter self.pad_token which is used as CTC-blank token
filtered_tokens = list(filter(lambda token: token != self.pad_token, tokens))
if spaces_between_special_tokens:
join_token = " "
else:
join_token = ""
# replace delimiter token
string = join_token.join(
[" " if token == self.word_delimiter_token else token for token in filtered_tokens]
).strip()
if self.do_lower_case:
string = string.lower()
return string
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
if is_split_into_words:
text = " " + text
return (text, kwargs)
def _decode(
self,
token_ids: List[int],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
group_tokens: bool = True,
spaces_between_special_tokens: bool = False,
) -> str:
"""
        special _decode function is needed for Wav2Vec2CTCTokenizer because added tokens should be treated exactly the
same as tokens of the base vocabulary and therefore the function `convert_tokens_to_string` has to be called on
the whole token list and not individually on added tokens
"""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
result = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
result.append(token)
text = self.convert_tokens_to_string(
result, group_tokens=group_tokens, spaces_between_special_tokens=spaces_between_special_tokens
)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
return (vocab_file,)
class Wav2Vec2Tokenizer(PreTrainedTokenizer):
"""
Constructs a Wav2Vec2 tokenizer.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains some of the main methods.
Users should refer to the superclass for more information regarding such methods.
Args:
vocab_file (:obj:`str`):
File containing the vocabulary.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The beginning of sentence token.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sentence token.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`"|"`):
The token used for defining the end of a word.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to lowercase the output when decoding.
do_normalize (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly
improve the performance for some models, *e.g.*, `wav2vec2-lv60
<https://huggingface.co/models?search=lv60>`__.
return_attention_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not :meth:`~transformers.Wav2Vec2Tokenizer.__call__` should return :obj:`attention_mask`.
.. note::
Wav2Vec2 models that have set ``config.feat_extract_norm == "group"``, such as `wav2vec2-base
<https://huggingface.co/facebook/wav2vec2-base-960h>`__, have **not** been trained using
:obj:`attention_mask`. For such models, :obj:`input_values` should simply be padded with 0 and no
:obj:`attention_mask` should be passed.
For Wav2Vec2 models that have set ``config.feat_extract_norm == "layer"``, such as `wav2vec2-lv60
<https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self>`__, :obj:`attention_mask` should be
passed for batched inference.
**kwargs
Additional keyword arguments passed along to :class:`~transformers.PreTrainedTokenizer`
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = {
"vocab_file": {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/vocab.json"
},
"tokenizer_config_file": {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/tokenizer.json",
},
}
model_input_names = ["input_values", "attention_mask"]
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
word_delimiter_token="|",
do_lower_case=False,
do_normalize=False,
return_attention_mask=False,
**kwargs
):
super().__init__(
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
do_lower_case=do_lower_case,
do_normalize=do_normalize,
return_attention_mask=return_attention_mask,
word_delimiter_token=word_delimiter_token,
**kwargs,
)
warnings.warn(
"The class `Wav2Vec2Tokenizer` is deprecated and will be removed in version 5 of Transformers. Please use `Wav2Vec2Processor` or `Wav2Vec2CTCTokenizer` instead.",
FutureWarning,
)
self._word_delimiter_token = word_delimiter_token
self.do_lower_case = do_lower_case
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
@property
def word_delimiter_token(self) -> str:
"""
        :obj:`str`: Word delimiter token. Log an error if used while not having been set.
"""
if self._word_delimiter_token is None and self.verbose:
logger.error("Using word_delimiter_token, but it is not set yet.")
return None
return str(self._word_delimiter_token)
@property
def word_delimiter_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the word_delimiter_token in the vocabulary. Returns :obj:`None` if the token has
not been set.
"""
if self._word_delimiter_token is None:
return None
return self.convert_tokens_to_ids(self.word_delimiter_token)
@word_delimiter_token.setter
def word_delimiter_token(self, value):
self._word_delimiter_token = value
@word_delimiter_token_id.setter
def word_delimiter_token_id(self, value):
self._word_delimiter_token = self.convert_tokens_to_ids(value)
@add_end_docstrings(WAV2VEC2_KWARGS_DOCSTRING)
def __call__(
self,
raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
padding: Union[bool, str, PaddingStrategy] = False,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences.
Args:
raw_speech (:obj:`np.ndarray`, :obj:`List[float]`, :obj:`List[np.ndarray]`, :obj:`List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of lists of float values.
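        Example (a minimal sketch; no real audio is loaded, a zero array stands in for a waveform)::
            >>> import numpy as np
            >>> from transformers import Wav2Vec2Tokenizer
            >>> tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            >>> speech = np.zeros(16000, dtype=np.float32)  # stand-in for one second of 16 kHz audio
            >>> inputs = tokenizer(speech, return_tensors="pt", padding=True)  # -> BatchEncoding with "input_values"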
"""
is_batched = bool(
isinstance(raw_speech, (list, tuple))
and (isinstance(raw_speech[0], np.ndarray) or isinstance(raw_speech[0], (tuple, list)))
)
# make sure input is in list format
if is_batched and not isinstance(raw_speech[0], np.ndarray):
raw_speech = [np.asarray(speech) for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech, np.ndarray):
raw_speech = np.asarray(raw_speech)
# always return batch
if not is_batched:
raw_speech = [raw_speech]
# zero-mean and unit-variance normalization
if self.do_normalize:
raw_speech = [(x - np.mean(x)) / np.sqrt(np.var(x) + 1e-5) for x in raw_speech]
# convert into correct format for padding
encoded_inputs = BatchEncoding({"input_values": raw_speech})
padded_inputs = self.pad(
encoded_inputs,
padding=padding,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=self.return_attention_mask,
return_tensors=return_tensors,
verbose=verbose,
)
return padded_inputs
@property
def vocab_size(self) -> int:
return len(self.decoder)
def get_vocab(self) -> Dict:
return dict(self.encoder, **self.added_tokens_encoder)
def _convert_token_to_id(self, token: str) -> int:
"""Converts a token (str) in an index (integer) using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the vocab."""
result = self.decoder.get(index, self.unk_token)
return result
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""
        Converts connectionist-temporal-classification (CTC) output tokens into a single string.
"""
# group same tokens into non-repeating tokens in CTC style decoding
grouped_tokens = [token_group[0] for token_group in groupby(tokens)]
# filter self.pad_token which is used as CTC-blank token
filtered_tokens = list(filter(lambda token: token != self.pad_token, grouped_tokens))
# replace delimiter token
string = "".join([" " if token == self.word_delimiter_token else token for token in filtered_tokens]).strip()
if self.do_lower_case:
string = string.lower()
return string
def _decode(
self,
token_ids: List[int],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
**kwargs
) -> str:
"""
special _decode function is needed for Wav2Vec2Tokenizer because added tokens should be treated exactly the
same as tokens of the base vocabulary and therefore the function `convert_tokens_to_string` has to be called on
the whole token list and not individually on added tokens
"""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
result = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
result.append(token)
text = self.convert_tokens_to_string(result)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
return (vocab_file,)
|
AdaMix/src/transformers/models/wav2vec2/tokenization_wav2vec2.py/0
|
{
"file_path": "AdaMix/src/transformers/models/wav2vec2/tokenization_wav2vec2.py",
"repo_id": "AdaMix",
"token_count": 9327
}
| 61 |
# coding=utf-8
# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XLM-ProphetNet model configuration """
from ...utils import logging
from ..prophetnet.configuration_prophetnet import ProphetNetConfig
logger = logging.get_logger(__name__)
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/xprophetnet-large-wiki100-cased": "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json",
}
class XLMProphetNetConfig(ProphetNetConfig):
"""
This class overrides :class:`~transformers.ProphetNetConfig`. Please check the superclass for the appropriate
documentation alongside usage examples.
"""
model_type = "xlm-prophetnet"
|
AdaMix/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py/0
|
{
"file_path": "AdaMix/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py",
"repo_id": "AdaMix",
"token_count": 378
}
| 62 |
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
from ..file_utils import add_end_docstrings, is_tf_available, is_torch_available
from ..modelcard import ModelCard
from ..tokenization_utils import PreTrainedTokenizer
from ..utils import logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, Pipeline, PipelineException
if TYPE_CHECKING:
from ..modeling_tf_utils import TFPreTrainedModel
from ..modeling_utils import PreTrainedModel
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_WITH_LM_HEAD_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASKED_LM_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
top_k (:obj:`int`, defaults to 5): The number of predictions to return.
""",
)
class FillMaskPipeline(Pipeline):
"""
Masked language modeling prediction pipeline using any :obj:`ModelWithLMHead`. See the `masked language modeling
examples <../task_summary.html#masked-language-modeling>`__ for more information.
This mask filling pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task
identifier: :obj:`"fill-mask"`.
The models that this pipeline can use are models that have been trained with a masked language modeling objective,
which includes the bi-directional models in the library. See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=masked-lm>`__.
.. note::
This pipeline only works for inputs with exactly one token masked.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
top_k=5,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
self.check_model_type(TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_MASKED_LM_MAPPING)
self.top_k = top_k
def ensure_exactly_one_mask_token(self, masked_index: np.ndarray):
numel = np.prod(masked_index.shape)
if numel > 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"More than one mask_token ({self.tokenizer.mask_token}) is not supported",
)
elif numel < 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"No mask_token ({self.tokenizer.mask_token}) found on the input",
)
def __call__(self, *args, targets=None, top_k: Optional[int] = None, **kwargs):
"""
Fill the masked token in the text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
One or several texts (or one list of prompts) with masked tokens.
targets (:obj:`str` or :obj:`List[str]`, `optional`):
When passed, the model will return the scores for the passed token or tokens rather than the top k
predictions in the entire vocabulary. If the provided targets are not in the model vocab, they will be
tokenized and the first resulting token will be used (with a warning).
top_k (:obj:`int`, `optional`):
When passed, overrides the number of predictions to return.
Return:
A list or a list of list of :obj:`dict`: Each result comes as list of dictionaries with the following keys:
- **sequence** (:obj:`str`) -- The corresponding input with the mask token prediction.
- **score** (:obj:`float`) -- The corresponding probability.
- **token** (:obj:`int`) -- The predicted token id (to replace the masked one).
- **token_str** (:obj:`str`) -- The predicted token (to replace the masked one).
"""
inputs = self._parse_and_tokenize(*args, **kwargs)
outputs = self._forward(inputs, return_tensors=True)
results = []
batch_size = outputs.shape[0] if self.framework == "tf" else outputs.size(0)
if targets is not None:
if len(targets) == 0 or len(targets[0]) == 0:
raise ValueError("At least one target must be provided when passed.")
if isinstance(targets, str):
targets = [targets]
targets_proc = []
for target in targets:
target_enc = self.tokenizer.tokenize(target)
if len(target_enc) > 1 or target_enc[0] == self.tokenizer.unk_token:
logger.warning(
"The specified target token `{}` does not exist in the model vocabulary. Replacing with `{}`.".format(
target, target_enc[0]
)
)
targets_proc.append(target_enc[0])
target_inds = np.array(self.tokenizer.convert_tokens_to_ids(targets_proc))
for i in range(batch_size):
input_ids = inputs["input_ids"][i]
result = []
if self.framework == "tf":
masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
# Fill mask pipeline supports only one ${mask_token} per sample
self.ensure_exactly_one_mask_token(masked_index)
logits = outputs[i, masked_index.item(), :]
probs = tf.nn.softmax(logits)
if targets is None:
topk = tf.math.top_k(probs, k=top_k if top_k is not None else self.top_k)
values, predictions = topk.values.numpy(), topk.indices.numpy()
else:
values = tf.gather_nd(probs, tf.reshape(target_inds, (-1, 1)))
sort_inds = tf.reverse(tf.argsort(values), [0])
values = tf.gather_nd(values, tf.reshape(sort_inds, (-1, 1))).numpy()
predictions = target_inds[sort_inds.numpy()]
else:
masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
# Fill mask pipeline supports only one ${mask_token} per sample
self.ensure_exactly_one_mask_token(masked_index.numpy())
logits = outputs[i, masked_index.item(), :]
probs = logits.softmax(dim=0)
if targets is None:
values, predictions = probs.topk(top_k if top_k is not None else self.top_k)
else:
values = probs[..., target_inds]
sort_inds = list(reversed(values.argsort(dim=-1)))
values = values[..., sort_inds]
predictions = target_inds[sort_inds]
for v, p in zip(values.tolist(), predictions.tolist()):
tokens = input_ids.numpy()
tokens[masked_index] = p
# Filter padding out:
tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
result.append(
{
"sequence": self.tokenizer.decode(tokens, skip_special_tokens=True),
"score": v,
"token": p,
"token_str": self.tokenizer.decode(p),
}
)
# Append
results += [result]
if len(results) == 1:
return results[0]
return results
|
AdaMix/src/transformers/pipelines/fill_mask.py/0
|
{
"file_path": "AdaMix/src/transformers/pipelines/fill_mask.py",
"repo_id": "AdaMix",
"token_count": 3730
}
| 63 |
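For reference, a minimal usage sketch of the pipeline above. It assumes a standard masked-LM checkpoint such as `bert-base-uncased` is available; any checkpoint with a mask token works the same way.

```python
from transformers import pipeline

# bert-base-uncased is assumed here purely for illustration.
fill_mask = pipeline("fill-mask", model="bert-base-uncased")

# Top-k predictions for the single masked position.
print(fill_mask("Paris is the capital of [MASK].", top_k=3))

# Score only specific candidate tokens via `targets`.
print(fill_mask("Paris is the capital of [MASK].", targets=["france", "italy"]))
```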
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Callbacks to use with the Trainer class and customize the training loop.
"""
import dataclasses
import json
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
import numpy as np
from tqdm.auto import tqdm
from .trainer_utils import IntervalStrategy
from .training_args import TrainingArguments
from .utils import logging
logger = logging.get_logger(__name__)
@dataclass
class TrainerState:
"""
A class containing the :class:`~transformers.Trainer` inner state that will be saved along the model and optimizer
when checkpointing and passed to the :class:`~transformers.TrainerCallback`.
.. note::
Throughout this class, one step is to be understood as one update step. When using gradient accumulation, one
update step may require several forward and backward passes: if you use :obj:`gradient_accumulation_steps=n`,
then one update step requires going through `n` batches.
Args:
epoch (:obj:`float`, `optional`):
Only set during training, will represent the epoch the training is at (the decimal part being the
percentage of the current epoch completed).
global_step (:obj:`int`, `optional`, defaults to 0):
During training, represents the number of update steps completed.
max_steps (:obj:`int`, `optional`, defaults to 0):
The number of update steps to do during the current training.
total_flos (:obj:`float`, `optional`, defaults to 0):
The total number of floating-point operations done by the model since the beginning of training (stored as floats
to avoid overflow).
log_history (:obj:`List[Dict[str, float]]`, `optional`):
The list of logs done since the beginning of training.
best_metric (:obj:`float`, `optional`):
When tracking the best model, the value of the best metric encountered so far.
best_model_checkpoint (:obj:`str`, `optional`):
When tracking the best model, the value of the name of the checkpoint for the best model encountered so
far.
is_local_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on
several machines) main process.
is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
is_hyper_param_search (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether we are in the process of a hyperparameter search using Trainer.hyperparameter_search. This will
impact the way data will be logged in TensorBoard.
"""
epoch: Optional[float] = None
global_step: int = 0
max_steps: int = 0
num_train_epochs: int = 0
total_flos: float = 0
log_history: List[Dict[str, float]] = None
best_metric: Optional[float] = None
best_model_checkpoint: Optional[str] = None
is_local_process_zero: bool = True
is_world_process_zero: bool = True
is_hyper_param_search: bool = False
trial_name: str = None
trial_params: Dict[str, Union[str, float, int, bool]] = None
def __post_init__(self):
if self.log_history is None:
self.log_history = []
def save_to_json(self, json_path: str):
""" Save the content of this instance in JSON format inside :obj:`json_path`."""
json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n"
with open(json_path, "w", encoding="utf-8") as f:
f.write(json_string)
@classmethod
def load_from_json(cls, json_path: str):
""" Create an instance from the content of :obj:`json_path`."""
with open(json_path, "r", encoding="utf-8") as f:
text = f.read()
return cls(**json.loads(text))
@dataclass
class TrainerControl:
"""
A class that handles the :class:`~transformers.Trainer` control flow. This class is used by the
:class:`~transformers.TrainerCallback` to activate some switches in the training loop.
Args:
should_training_stop (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the training should be interrupted.
If :obj:`True`, this variable will not be set back to :obj:`False`. The training will just stop.
should_epoch_stop (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the current epoch should be interrupted.
If :obj:`True`, this variable will be set back to :obj:`False` at the beginning of the next epoch.
should_save (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should be saved at this step.
If :obj:`True`, this variable will be set back to :obj:`False` at the beginning of the next step.
should_evaluate (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should be evaluated at this step.
If :obj:`True`, this variable will be set back to :obj:`False` at the beginning of the next step.
should_log (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the logs should be reported at this step.
If :obj:`True`, this variable will be set back to :obj:`False` at the beginning of the next step.
"""
should_training_stop: bool = False
should_epoch_stop: bool = False
should_save: bool = False
should_evaluate: bool = False
should_log: bool = False
def _new_training(self):
""" Internal method that resets the variable for a new training. """
self.should_training_stop = False
def _new_epoch(self):
""" Internal method that resets the variable for a new epoch. """
self.should_epoch_stop = False
def _new_step(self):
""" Internal method that resets the variable for a new step. """
self.should_save = False
self.should_evaluate = False
self.should_log = False
class TrainerCallback:
"""
A class for objects that will inspect the state of the training loop at some events and take some decisions. At
each of those events the following arguments are available:
Args:
args (:class:`~transformers.TrainingArguments`):
The training arguments used to instantiate the :class:`~transformers.Trainer`.
state (:class:`~transformers.TrainerState`):
The current state of the :class:`~transformers.Trainer`.
control (:class:`~transformers.TrainerControl`):
The object that is returned to the :class:`~transformers.Trainer` and can be used to make some decisions.
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`):
The model being trained.
tokenizer (:class:`~transformers.PreTrainedTokenizer`):
The tokenizer used for encoding the data.
optimizer (:obj:`torch.optim.Optimizer`):
The optimizer used for the training steps.
lr_scheduler (:obj:`torch.optim.lr_scheduler.LambdaLR`):
The scheduler used for setting the learning rate.
train_dataloader (:obj:`torch.utils.data.dataloader.DataLoader`, `optional`):
The current dataloader used for training.
eval_dataloader (:obj:`torch.utils.data.dataloader.DataLoader`, `optional`):
The current dataloader used for evaluation.
metrics (:obj:`Dict[str, float]`):
The metrics computed by the last evaluation phase.
Those are only accessible in the event :obj:`on_evaluate`.
logs (:obj:`Dict[str, float]`):
The values to log.
Those are only accessible in the event :obj:`on_log`.
The :obj:`control` object is the only one that can be changed by the callback, in which case the event that changes
it should return the modified version.
The arguments :obj:`args`, :obj:`state` and :obj:`control` are positional for all events; all the others are
grouped in :obj:`kwargs`. You can unpack the ones you need in the signature of the event using them. As an example,
see the code of the simple :class:`~transformers.PrinterCallback`.
Example::
class PrinterCallback(TrainerCallback):
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop("total_flos", None)
if state.is_local_process_zero:
print(logs)
"""
def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of the initialization of the :class:`~transformers.Trainer`.
"""
pass
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of training.
"""
pass
def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of training.
"""
pass
def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of an epoch.
"""
pass
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of an epoch.
"""
pass
def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the beginning of a training step. If using gradient accumulation, one training step might take
several inputs.
"""
pass
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called at the end of a training step. If using gradient accumulation, one training step might take
several inputs.
"""
pass
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after an evaluation phase.
"""
pass
def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after a checkpoint save.
"""
pass
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after logging the last logs.
"""
pass
def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
"""
Event called after a prediction step.
"""
pass
class CallbackHandler(TrainerCallback):
""" Internal class that just calls the list of callbacks in order. """
def __init__(self, callbacks, model, tokenizer, optimizer, lr_scheduler):
self.callbacks = []
for cb in callbacks:
self.add_callback(cb)
self.model = model
self.tokenizer = tokenizer
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.train_dataloader = None
self.eval_dataloader = None
if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks):
logger.warn(
"The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n"
+ "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of"
+ "callbacks is\n:"
+ self.callback_list
)
def add_callback(self, callback):
cb = callback() if isinstance(callback, type) else callback
cb_class = callback if isinstance(callback, type) else callback.__class__
if cb_class in [c.__class__ for c in self.callbacks]:
logger.warn(
f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current"
+ "list of callbacks is\n:"
+ self.callback_list
)
self.callbacks.append(cb)
def pop_callback(self, callback):
if isinstance(callback, type):
for cb in self.callbacks:
if isinstance(cb, callback):
self.callbacks.remove(cb)
return cb
else:
for cb in self.callbacks:
if cb == callback:
self.callbacks.remove(cb)
return cb
def remove_callback(self, callback):
if isinstance(callback, type):
for cb in self.callbacks:
if isinstance(cb, callback):
self.callbacks.remove(cb)
return
else:
self.callbacks.remove(callback)
@property
def callback_list(self):
return "\n".join(cb.__class__.__name__ for cb in self.callbacks)
def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_init_end", args, state, control)
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_training_stop = False
return self.call_event("on_train_begin", args, state, control)
def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_train_end", args, state, control)
def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_epoch_stop = False
return self.call_event("on_epoch_begin", args, state, control)
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_epoch_end", args, state, control)
def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_log = False
control.should_evaluate = False
control.should_save = False
return self.call_event("on_step_begin", args, state, control)
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_step_end", args, state, control)
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
control.should_evaluate = False
return self.call_event("on_evaluate", args, state, control, metrics=metrics)
def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
control.should_save = False
return self.call_event("on_save", args, state, control)
def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs):
control.should_log = False
return self.call_event("on_log", args, state, control, logs=logs)
def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
return self.call_event("on_prediction_step", args, state, control)
def call_event(self, event, args, state, control, **kwargs):
for callback in self.callbacks:
result = getattr(callback, event)(
args,
state,
control,
model=self.model,
tokenizer=self.tokenizer,
optimizer=self.optimizer,
lr_scheduler=self.lr_scheduler,
train_dataloader=self.train_dataloader,
eval_dataloader=self.eval_dataloader,
**kwargs,
)
# A Callback can skip the return of `control` if it doesn't change it.
if result is not None:
control = result
return control
class DefaultFlowCallback(TrainerCallback):
"""
A :class:`~transformers.TrainerCallback` that handles the default flow of the training loop for logs, evaluation
and checkpoints.
"""
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
# Log
if state.global_step == 1 and args.logging_first_step:
control.should_log = True
if (
args.logging_strategy == IntervalStrategy.STEPS
and args.logging_steps > 0
and state.global_step % args.logging_steps == 0
):
control.should_log = True
# Evaluate
if args.evaluation_strategy == IntervalStrategy.STEPS and state.global_step % args.eval_steps == 0:
control.should_evaluate = True
if args.load_best_model_at_end:
control.should_save = True
# Save
if (
not args.load_best_model_at_end
and args.save_strategy == IntervalStrategy.STEPS
and args.save_steps > 0
and state.global_step % args.save_steps == 0
):
control.should_save = True
# End training
if state.global_step >= state.max_steps:
control.should_training_stop = True
return control
def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
# Log
if args.logging_strategy == IntervalStrategy.EPOCH:
control.should_log = True
# Evaluate
if args.evaluation_strategy == IntervalStrategy.EPOCH:
control.should_evaluate = True
if args.load_best_model_at_end:
control.should_save = True
# Save
if args.save_strategy == IntervalStrategy.EPOCH:
control.should_save = True
return control
class ProgressCallback(TrainerCallback):
"""
A :class:`~transformers.TrainerCallback` that displays the progress of training or evaluation.
"""
def __init__(self):
self.training_bar = None
self.prediction_bar = None
def on_train_begin(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar = tqdm(total=state.max_steps)
self.current_step = 0
def on_step_end(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar.update(state.global_step - self.current_step)
self.current_step = state.global_step
def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
if state.is_local_process_zero:
if self.prediction_bar is None:
self.prediction_bar = tqdm(total=len(eval_dataloader), leave=self.training_bar is None)
self.prediction_bar.update(1)
def on_evaluate(self, args, state, control, **kwargs):
if state.is_local_process_zero:
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_log(self, args, state, control, logs=None, **kwargs):
if state.is_local_process_zero and self.training_bar is not None:
_ = logs.pop("total_flos", None)
self.training_bar.write(str(logs))
def on_train_end(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar.close()
self.training_bar = None
class PrinterCallback(TrainerCallback):
"""
A bare :class:`~transformers.TrainerCallback` that just prints the logs.
"""
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop("total_flos", None)
if state.is_local_process_zero:
print(logs)
class EarlyStoppingCallback(TrainerCallback):
"""
A :class:`~transformers.TrainerCallback` that handles early stopping.
Args:
early_stopping_patience (:obj:`int`):
Use with :obj:`metric_for_best_model` to stop training when the specified metric worsens for
:obj:`early_stopping_patience` evaluation calls.
early_stopping_threshold (:obj:`float`, `optional`):
Use with TrainingArguments :obj:`metric_for_best_model` and :obj:`early_stopping_patience` to denote how
much the specified metric must improve to satisfy early stopping conditions.
This callback depends on :class:`~transformers.TrainingArguments` argument `load_best_model_at_end` functionality
to set best_metric in :class:`~transformers.TrainerState`.
"""
def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
self.early_stopping_patience = early_stopping_patience
self.early_stopping_threshold = early_stopping_threshold
# early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0
def check_metric_value(self, args, state, control, metric_value):
# best_metric is set by code for load_best_model
operator = np.greater if args.greater_is_better else np.less
if state.best_metric is None or (
operator(metric_value, state.best_metric)
and abs(metric_value - state.best_metric) > self.early_stopping_threshold
):
self.early_stopping_patience_counter = 0
else:
self.early_stopping_patience_counter += 1
def on_train_begin(self, args, state, control, **kwargs):
assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
assert (
args.metric_for_best_model is not None
), "EarlyStoppingCallback requires metric_for_best_model is defined"
assert (
args.evaluation_strategy != IntervalStrategy.NO
), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"
def on_evaluate(self, args, state, control, metrics, **kwargs):
metric_to_check = args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics.get(metric_to_check)
if metric_value is None:
logger.warning(
f"early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping is disabled"
)
return
self.check_metric_value(args, state, control, metric_value)
if self.early_stopping_patience_counter >= self.early_stopping_patience:
control.should_training_stop = True
|
AdaMix/src/transformers/trainer_callback.py/0
|
{
"file_path": "AdaMix/src/transformers/trainer_callback.py",
"repo_id": "AdaMix",
"token_count": 9304
}
| 64 |
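A minimal sketch of wiring the callbacks above into a `Trainer`, assuming `model`, `train_dataset` and `eval_dataset` are defined elsewhere; the argument values are illustrative only:

```python
from transformers import Trainer, TrainingArguments, EarlyStoppingCallback

args = TrainingArguments(
    output_dir="out",
    evaluation_strategy="steps",   # EarlyStoppingCallback needs periodic evaluation
    eval_steps=500,
    save_steps=500,
    load_best_model_at_end=True,   # required by EarlyStoppingCallback
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)
trainer = Trainer(
    model=model,                   # assumed to be defined elsewhere
    args=args,
    train_dataset=train_dataset,   # assumed to be defined elsewhere
    eval_dataset=eval_dataset,     # assumed to be defined elsewhere
    callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
)
trainer.train()
```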
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..file_utils import requires_flax
class FlaxPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
FLAX_MODEL_MAPPING = None
class FlaxAutoModel:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxBertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxBertModel:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
class FlaxRobertaModel:
def __init__(self, *args, **kwargs):
requires_flax(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_flax(self)
|
AdaMix/src/transformers/utils/dummy_flax_objects.py/0
|
{
"file_path": "AdaMix/src/transformers/utils/dummy_flax_objects.py",
"repo_id": "AdaMix",
"token_count": 465
}
| 65 |
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Using `cookiecutter` to generate models
This folder contains templates to generate new models that fit the current API and pass all tests. It generates
models in both PyTorch and TensorFlow, completes the `__init__.py` and auto-modeling files, and creates the
documentation.
## Usage
Using the `cookiecutter` utility requires having all the `dev` dependencies installed. Let's first clone the
repository and install it in our environment:
```shell script
git clone https://github.com/huggingface/transformers
cd transformers
pip install -e ".[dev]"
```
Once the installation is done, you can use the CLI command `add-new-model` to generate your models:
```shell script
transformers-cli add-new-model
```
This should launch the `cookiecutter` package which should prompt you to fill in the configuration.
The `modelname` should be cased according to the plain text casing, i.e., BERT, RoBERTa, DeBERTa.
```
modelname [<ModelNAME>]:
uppercase_modelname [<MODEL_NAME>]:
lowercase_modelname [<model_name>]:
camelcase_modelname [<ModelName>]:
```
Fill in the `authors` with your team members:
```
authors [The HuggingFace Team]:
```
The checkpoint identifier is the checkpoint that will be used in the examples across the files. Put the name you wish,
as it will appear on the modelhub. Do not forget to include the organisation.
```
checkpoint_identifier [organisation/<model_name>-base-cased]:
```
The tokenizer should either be based on BERT if it behaves exactly like the BERT tokenizer, or a standalone otherwise.
```
Select tokenizer_type:
1 - Based on BERT
2 - Standalone
Choose from 1, 2 [1]:
```
<!---
Choose if your model is an encoder-decoder, or an encoder-only architecture.
If your model is an encoder-only architecture, the generated architecture will be based on the BERT model.
If your model is an encoder-decoder architecture, the generated architecture will be based on the BART model. You can,
of course, edit the files once the generation is complete.
```
Select is_encoder_decoder_model:
1 - True
2 - False
Choose from 1, 2 [1]:
```
-->
Once the command has finished, you should have a total of 7 new files spread across the repository:
```
docs/source/model_doc/<model_name>.rst
src/transformers/models/<model_name>/configuration_<model_name>.py
src/transformers/models/<model_name>/modeling_<model_name>.py
src/transformers/models/<model_name>/modeling_tf_<model_name>.py
src/transformers/models/<model_name>/tokenization_<model_name>.py
tests/test_modeling_<model_name>.py
tests/test_modeling_tf_<model_name>.py
```
You can run the tests to ensure that they all pass:
```
python -m pytest ./tests/test_*<model_name>*.py
```
Feel free to modify each file to mimic the behavior of your model.
⚠️ You should be careful about the classes preceded by the following line:
```python
# Copied from transformers.[...]
```
This line ensures that the copy does not diverge from the source. If it *should* diverge, because the implementation
is different, this line needs to be deleted. If you don't delete this line and run `make fix-copies`,
your changes will be overwritten.
Once you have edited the files to fit your architecture, simply re-run the tests (and edit them if a change
is needed!) afterwards to make sure everything works as expected.
Once the files are generated and you are happy with your changes, here's a checklist to ensure that your contribution
will be merged quickly:
- You should run the `make fixup` utility to fix the style of the files and to ensure the code quality meets the
library's standards.
- You should complete the documentation file (`docs/source/model_doc/<model_name>.rst`) so that your model may be
usable.
|
AdaMix/templates/adding_a_new_model/README.md/0
|
{
"file_path": "AdaMix/templates/adding_a_new_model/README.md",
"repo_id": "AdaMix",
"token_count": 1218
}
| 66 |